def solution(n: int = 1000) -> int:
    """Project Euler 57: count how many of the first ``n`` expansions of the
    continued fraction for sqrt(2) have a numerator with more digits than the
    denominator."""
    prev_numerator, prev_denominator = 1, 1
    result = []
    for i in range(1, n + 1):
        numerator = prev_numerator + 2 * prev_denominator
        denominator = prev_numerator + prev_denominator
        if len(str(numerator)) > len(str(denominator)):
            result.append(i)
        prev_numerator = numerator
        prev_denominator = denominator
    return len(result)


if __name__ == "__main__":
    print(f"{solution() = }")
"""Project Euler 37: find the sum of the only eleven primes that are
truncatable from both left to right and right to left."""

from __future__ import annotations

import math


def is_prime(number: int) -> bool:
    """Primality test using the 6k +/- 1 optimization."""
    if 1 < number < 4:
        # 2 and 3 are primes
        return True
    elif number < 2 or number % 2 == 0 or number % 3 == 0:
        # Negatives, 0, 1, all even numbers, and all multiples of 3 are not primes
        return False
    # All remaining prime candidates have the form 6k +/- 1
    for i in range(5, int(math.sqrt(number) + 1), 6):
        if number % i == 0 or number % (i + 2) == 0:
            return False
    return True


def list_truncated_nums(n: int) -> list[int]:
    """Return ``n`` together with every left and right truncation of it."""
    str_num = str(n)
    list_nums = [n]
    for i in range(1, len(str_num)):
        list_nums.append(int(str_num[i:]))
        list_nums.append(int(str_num[:-i]))
    return list_nums


def validate(n: int) -> bool:
    """Cheap pre-filter: for numbers longer than three digits, the leading and
    trailing three-digit chunks must themselves be prime."""
    if len(str(n)) > 3:
        if not is_prime(int(str(n)[-3:])) or not is_prime(int(str(n)[:3])):
            return False
    return True


def compute_truncated_primes(count: int = 11) -> list[int]:
    """Collect the first ``count`` truncatable primes, scanning odd numbers from 13."""
    list_truncated_primes: list[int] = []
    num = 13
    while len(list_truncated_primes) != count:
        if validate(num):
            list_nums = list_truncated_nums(num)
            if all(is_prime(i) for i in list_nums):
                list_truncated_primes.append(num)
        num += 2
    return list_truncated_primes


def solution() -> int:
    """Sum of the eleven truncatable primes."""
    return sum(compute_truncated_primes(11))


if __name__ == "__main__":
    print(f"{sum(compute_truncated_primes(11)) = }")
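A concrete illustration of the helpers above (added here; 3797 is the truncatable prime named in the Project Euler problem statement):

# 3797 -> 797, 97, 7 truncating from the left, and 379, 37, 3 from the right
assert sorted(list_truncated_nums(3797)) == [3, 7, 37, 97, 379, 797, 3797]
assert all(is_prime(i) for i in list_truncated_nums(3797))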
import logging
import os
from dataclasses import dataclass
from typing import List, Optional, Union

import tqdm
from filelock import FileLock
from transformers import (
    BartTokenizer,
    BartTokenizerFast,
    DataProcessor,
    PreTrainedTokenizer,
    RobertaTokenizer,
    RobertaTokenizerFast,
    XLMRobertaTokenizer,
    is_tf_available,
    is_torch_available,
)


logger = logging.getLogger(__name__)


@dataclass(frozen=True)
class InputExample:
    """A single training/test example for the HANS dataset."""

    guid: str
    text_a: str
    text_b: Optional[str] = None
    label: Optional[str] = None
    pairID: Optional[str] = None


@dataclass(frozen=True)
class InputFeatures:
    """A single set of features of data."""

    input_ids: List[int]
    attention_mask: Optional[List[int]] = None
    token_type_ids: Optional[List[int]] = None
    label: Optional[Union[int, float]] = None
    pairID: Optional[int] = None


if is_torch_available():
    import torch
    from torch.utils.data import Dataset

    class HansDataset(Dataset):
        features: List[InputFeatures]

        def __init__(
            self,
            data_dir: str,
            tokenizer: PreTrainedTokenizer,
            task: str,
            max_seq_length: Optional[int] = None,
            overwrite_cache=False,
            evaluate: bool = False,
        ):
            processor = hans_processors[task]()
            cached_features_file = os.path.join(
                data_dir,
                "cached_{}_{}_{}_{}".format(
                    "dev" if evaluate else "train",
                    tokenizer.__class__.__name__,
                    str(max_seq_length),
                    task,
                ),
            )
            label_list = processor.get_labels()
            if tokenizer.__class__ in (
                RobertaTokenizer,
                RobertaTokenizerFast,
                XLMRobertaTokenizer,
                BartTokenizer,
                BartTokenizerFast,
            ):
                # HACK(label indices are swapped in RoBERTa pretrained model)
                label_list[1], label_list[2] = label_list[2], label_list[1]
            self.label_list = label_list

            # Make sure only the first process in distributed training processes the dataset,
            # and the others will use the cache.
            lock_path = cached_features_file + ".lock"
            with FileLock(lock_path):
                if os.path.exists(cached_features_file) and not overwrite_cache:
                    logger.info(f"Loading features from cached file {cached_features_file}")
                    self.features = torch.load(cached_features_file)
                else:
                    logger.info(f"Creating features from dataset file at {data_dir}")
                    examples = (
                        processor.get_dev_examples(data_dir) if evaluate else processor.get_train_examples(data_dir)
                    )
                    logger.info("Training examples: %s", len(examples))
                    self.features = hans_convert_examples_to_features(examples, label_list, max_seq_length, tokenizer)
                    logger.info("Saving features into cached file %s", cached_features_file)
                    torch.save(self.features, cached_features_file)

        def __len__(self):
            return len(self.features)

        def __getitem__(self, i) -> InputFeatures:
            return self.features[i]

        def get_labels(self):
            return self.label_list


if is_tf_available():
    import tensorflow as tf

    class TFHansDataset:
        features: List[InputFeatures]

        def __init__(
            self,
            data_dir: str,
            tokenizer: PreTrainedTokenizer,
            task: str,
            max_seq_length: Optional[int] = 128,
            overwrite_cache=False,
            evaluate: bool = False,
        ):
            processor = hans_processors[task]()
            label_list = processor.get_labels()
            if tokenizer.__class__ in (
                RobertaTokenizer,
                RobertaTokenizerFast,
                XLMRobertaTokenizer,
                BartTokenizer,
                BartTokenizerFast,
            ):
                # HACK(label indices are swapped in RoBERTa pretrained model)
                label_list[1], label_list[2] = label_list[2], label_list[1]
            self.label_list = label_list
            examples = processor.get_dev_examples(data_dir) if evaluate else processor.get_train_examples(data_dir)
            self.features = hans_convert_examples_to_features(examples, label_list, max_seq_length, tokenizer)

            def gen():
                for ex_index, ex in tqdm.tqdm(enumerate(self.features), desc="convert examples to features"):
                    if ex_index % 10000 == 0:
                        logger.info("Writing example %d of %d" % (ex_index, len(examples)))
                    yield (
                        {
                            "example_id": 0,
                            "input_ids": ex.input_ids,
                            "attention_mask": ex.attention_mask,
                            "token_type_ids": ex.token_type_ids,
                        },
                        ex.label,
                    )

            self.dataset = tf.data.Dataset.from_generator(
                gen,
                (
                    {
                        "example_id": tf.int32,
                        "input_ids": tf.int32,
                        "attention_mask": tf.int32,
                        "token_type_ids": tf.int32,
                    },
                    tf.int64,
                ),
                (
                    {
                        "example_id": tf.TensorShape([]),
                        "input_ids": tf.TensorShape([None, None]),
                        "attention_mask": tf.TensorShape([None, None]),
                        "token_type_ids": tf.TensorShape([None, None]),
                    },
                    tf.TensorShape([]),
                ),
            )

        def get_dataset(self):
            return self.dataset

        def __len__(self):
            return len(self.features)

        def __getitem__(self, i) -> InputFeatures:
            return self.features[i]

        def get_labels(self):
            return self.label_list


class HansProcessor(DataProcessor):
    """Processor for the HANS data set."""

    def get_train_examples(self, data_dir):
        return self._create_examples(self._read_tsv(os.path.join(data_dir, "heuristics_train_set.txt")), "train")

    def get_dev_examples(self, data_dir):
        return self._create_examples(self._read_tsv(os.path.join(data_dir, "heuristics_evaluation_set.txt")), "dev")

    def get_labels(self):
        return ["contradiction", "entailment", "neutral"]

    def _create_examples(self, lines, set_type):
        examples = []
        for i, line in enumerate(lines):
            if i == 0:
                continue
            guid = "%s-%s" % (set_type, line[0])
            text_a = line[5]
            text_b = line[6]
            pairID = line[7][2:] if line[7].startswith("ex") else line[7]
            label = line[0]
            examples.append(InputExample(guid=guid, text_a=text_a, text_b=text_b, label=label, pairID=pairID))
        return examples


def hans_convert_examples_to_features(
    examples: List[InputExample],
    label_list: List[str],
    max_length: int,
    tokenizer: PreTrainedTokenizer,
):
    label_map = {label: i for i, label in enumerate(label_list)}

    features = []
    for ex_index, example in tqdm.tqdm(enumerate(examples), desc="convert examples to features"):
        if ex_index % 10000 == 0:
            logger.info("Writing example %d" % (ex_index))

        inputs = tokenizer(
            example.text_a,
            example.text_b,
            add_special_tokens=True,
            max_length=max_length,
            padding="max_length",
            truncation=True,
            return_overflowing_tokens=True,
        )

        label = label_map[example.label] if example.label in label_map else 0
        pairID = int(example.pairID)

        features.append(InputFeatures(**inputs, label=label, pairID=pairID))

    for i, example in enumerate(examples[:5]):
        logger.info("*** Example ***")
        logger.info(f"guid: {example}")
        logger.info(f"features: {features[i]}")

    return features


hans_tasks_num_labels = {
    "hans": 3,
}

hans_processors = {
    "hans": HansProcessor,
}
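A minimal usage sketch for the HANS utilities above (illustrative only: the data directory is a placeholder, and this assumes the HANS TSV files have already been downloaded there):

from transformers import AutoTokenizer

tokenizer = AutoTokenizer.from_pretrained("bert-base-uncased")
train_dataset = HansDataset(
    "/path/to/hans",  # hypothetical directory containing heuristics_train_set.txt
    tokenizer=tokenizer,
    task="hans",
    max_seq_length=128,
)
print(len(train_dataset), train_dataset.get_labels())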
import importlib
import json
import os
import sys
import tempfile
import unittest
from pathlib import Path

import transformers
import transformers.models.auto
from transformers.models.auto.configuration_auto import CONFIG_MAPPING, AutoConfig
from transformers.models.bert.configuration_bert import BertConfig
from transformers.models.roberta.configuration_roberta import RobertaConfig
from transformers.testing_utils import DUMMY_UNKNOWN_IDENTIFIER, get_tests_dir


sys.path.append(str(Path(__file__).parent.parent.parent.parent / "utils"))

from test_module.custom_configuration import CustomConfig  # noqa E402


SAMPLE_ROBERTA_CONFIG = get_tests_dir("fixtures/dummy-config.json")


class AutoConfigTest(unittest.TestCase):
    def setUp(self):
        # Make the remote-code confirmation prompt time out immediately in tests.
        transformers.dynamic_module_utils.TIME_OUT_REMOTE_CODE = 0

    def test_module_spec(self):
        self.assertIsNotNone(transformers.models.auto.__spec__)
        self.assertIsNotNone(importlib.util.find_spec("transformers.models.auto"))

    def test_config_from_model_shortcut(self):
        config = AutoConfig.from_pretrained("bert-base-uncased")
        self.assertIsInstance(config, BertConfig)

    def test_config_model_type_from_local_file(self):
        config = AutoConfig.from_pretrained(SAMPLE_ROBERTA_CONFIG)
        self.assertIsInstance(config, RobertaConfig)

    def test_config_model_type_from_model_identifier(self):
        config = AutoConfig.from_pretrained(DUMMY_UNKNOWN_IDENTIFIER)
        self.assertIsInstance(config, RobertaConfig)

    def test_config_for_model_str(self):
        config = AutoConfig.for_model("roberta")
        self.assertIsInstance(config, RobertaConfig)

    def test_pattern_matching_fallback(self):
        with tempfile.TemporaryDirectory() as tmp_dir:
            # This model name contains bert and roberta, but roberta ends up being picked.
            folder = os.path.join(tmp_dir, "fake-roberta")
            os.makedirs(folder, exist_ok=True)
            with open(os.path.join(folder, "config.json"), "w") as f:
                f.write(json.dumps({}))
            config = AutoConfig.from_pretrained(folder)
            self.assertEqual(type(config), RobertaConfig)

    def test_new_config_registration(self):
        try:
            AutoConfig.register("custom", CustomConfig)
            # Wrong model type will raise an error
            with self.assertRaises(ValueError):
                AutoConfig.register("model", CustomConfig)
            # Trying to register something existing in the Transformers library will raise an error
            with self.assertRaises(ValueError):
                AutoConfig.register("bert", BertConfig)

            # Now that the config is registered, it can be used as any other config with the auto-API
            config = CustomConfig()
            with tempfile.TemporaryDirectory() as tmp_dir:
                config.save_pretrained(tmp_dir)
                new_config = AutoConfig.from_pretrained(tmp_dir)
                self.assertIsInstance(new_config, CustomConfig)
        finally:
            if "custom" in CONFIG_MAPPING._extra_content:
                del CONFIG_MAPPING._extra_content["custom"]

    def test_repo_not_found(self):
        with self.assertRaisesRegex(
            EnvironmentError, "bert-base is not a local folder and is not a valid model identifier"
        ):
            _ = AutoConfig.from_pretrained("bert-base")

    def test_revision_not_found(self):
        with self.assertRaisesRegex(
            EnvironmentError, r"aaaaaa is not a valid git identifier \(branch name, tag name or commit id\)"
        ):
            _ = AutoConfig.from_pretrained(DUMMY_UNKNOWN_IDENTIFIER, revision="aaaaaa")

    def test_configuration_not_found(self):
        with self.assertRaisesRegex(
            EnvironmentError,
            "hf-internal-testing/no-config-test-repo does not appear to have a file named config.json.",
        ):
            _ = AutoConfig.from_pretrained("hf-internal-testing/no-config-test-repo")

    def test_from_pretrained_dynamic_config(self):
        # If remote code is not set, we will time out when asking whether to load the model.
        with self.assertRaises(ValueError):
            config = AutoConfig.from_pretrained("hf-internal-testing/test_dynamic_model")
        # If remote code is disabled, we can't load this config.
        with self.assertRaises(ValueError):
            config = AutoConfig.from_pretrained("hf-internal-testing/test_dynamic_model", trust_remote_code=False)

        config = AutoConfig.from_pretrained("hf-internal-testing/test_dynamic_model", trust_remote_code=True)
        self.assertEqual(config.__class__.__name__, "NewModelConfig")

        # Test config can be reloaded.
        with tempfile.TemporaryDirectory() as tmp_dir:
            config.save_pretrained(tmp_dir)
            reloaded_config = AutoConfig.from_pretrained(tmp_dir, trust_remote_code=True)
        self.assertEqual(reloaded_config.__class__.__name__, "NewModelConfig")

    def test_from_pretrained_dynamic_config_conflict(self):
        class NewModelConfigLocal(BertConfig):
            model_type = "new-model"

        try:
            AutoConfig.register("new-model", NewModelConfigLocal)
            # If remote code is not set, the default is to use local
            config = AutoConfig.from_pretrained("hf-internal-testing/test_dynamic_model")
            self.assertEqual(config.__class__.__name__, "NewModelConfigLocal")
            # If remote code is disabled, we load the local one.
            config = AutoConfig.from_pretrained("hf-internal-testing/test_dynamic_model", trust_remote_code=False)
            self.assertEqual(config.__class__.__name__, "NewModelConfigLocal")
            # If remote is enabled, we load from the Hub
            config = AutoConfig.from_pretrained("hf-internal-testing/test_dynamic_model", trust_remote_code=True)
            self.assertEqual(config.__class__.__name__, "NewModelConfig")
        finally:
            if "new-model" in CONFIG_MAPPING._extra_content:
                del CONFIG_MAPPING._extra_content["new-model"]
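For reference, the registration round trip these tests exercise looks like this in user code (a minimal sketch; MyConfig and the "my-model" type are invented placeholders):

from transformers import AutoConfig, PretrainedConfig

class MyConfig(PretrainedConfig):
    model_type = "my-model"  # hypothetical model type

AutoConfig.register("my-model", MyConfig)
config = AutoConfig.for_model("my-model")
assert isinstance(config, MyConfig)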
import re

from flax.core.frozen_dict import freeze
from flax.traverse_util import flatten_dict, unflatten_dict
from jax.experimental import PartitionSpec as P


# Sentinels
_unmatched = object()

# For specifying empty leaf dict `{}`
empty_dict = object()


def _match(qs, ks):
    """Return True if regexes in qs match any window of strings in tuple ks."""
    # compile regexes and force complete match
    qts = tuple(re.compile(x + "$") for x in qs)
    for i in range(len(ks) - len(qs) + 1):
        matches = [x.match(y) for x, y in zip(qts, ks[i:])]
        if matches and all(matches):
            return True
    return False


def _replacement_rules(rules):
    def replace(key, val):
        for rule, replacement in rules:
            if _match(rule, key):
                return replacement
        return val

    return replace


def _get_partition_rules():
    return [
        # embeddings
        (("transformer", "wpe", "embedding"), P("mp", None)),
        (("transformer", "wte", "embedding"), P("mp", None)),
        # attention
        (("attention", "(q_proj|k_proj|v_proj)", "kernel"), P(None, "mp")),
        (("attention", "out_proj", "kernel"), P("mp", None)),
        (("attention", "out_proj", "bias"), None),
        # mlp
        (("mlp", "c_fc", "kernel"), P(None, "mp")),
        (("mlp", "c_fc", "bias"), P("mp")),
        (("mlp", "c_proj", "kernel"), P("mp", None)),
        (("mlp", "c_proj", "bias"), None),
        # layer norms
        ((r"ln_\d+", "bias"), None),
        ((r"\d+", r"ln_\d+", "scale"), None),
        (("ln_f", "bias"), None),
        (("ln_f", "scale"), None),
    ]


def set_partitions(in_dict):
    rules = _get_partition_rules()
    replace = _replacement_rules(rules)

    initd = {k: _unmatched for k in flatten_dict(in_dict)}
    result = {k: replace(k, v) for k, v in initd.items()}
    assert _unmatched not in result.values(), "Incomplete partition spec."
    return freeze(unflatten_dict(result))
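A small demonstration of set_partitions (added for illustration; the parameter tree below is a made-up two-leaf stand-in for a real Flax GPT-2 parameter dict):

import jax.numpy as jnp

params = {
    "transformer": {
        "wte": {"embedding": jnp.ones((8, 4))},
        "ln_f": {"bias": jnp.zeros(4), "scale": jnp.ones(4)},
    }
}
spec = set_partitions(params)
# The embedding table is sharded along the "mp" axis; layer-norm leaves stay replicated.
assert spec["transformer"]["wte"]["embedding"] == P("mp", None)
assert spec["transformer"]["ln_f"]["bias"] is None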
import argparse
import os

# New Code #
import evaluate
import torch
from datasets import load_dataset
from torch.optim import AdamW
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed

from accelerate import Accelerator, DistributedType
from accelerate.utils import find_executable_batch_size


########################################################################
# This is a fully working simple example to use Accelerate,
# specifically showcasing how to ensure out-of-memory errors never
# interrupt training, and builds off the `nlp_example.py` script.
#
# This example trains a Bert base model on GLUE MRPC
# in any of the following settings (with the same script):
#   - single CPU or single GPU
#   - multi GPUS (using PyTorch distributed mode)
#   - (multi) TPUs
#   - fp16 (mixed-precision) or fp32 (normal precision)
#
# New additions from the base script can be found quickly by
# looking for the # New Code # tags
#
# To run it in each of these various modes, follow the instructions
# in the readme for examples:
# https://github.com/huggingface/accelerate/tree/main/examples
#
########################################################################


MAX_GPU_BATCH_SIZE = 16
EVAL_BATCH_SIZE = 32


def get_dataloaders(accelerator: Accelerator, batch_size: int = 16):
    """Create train/eval DataLoaders for GLUE MRPC with a bert-base-cased tokenizer."""
    tokenizer = AutoTokenizer.from_pretrained("bert-base-cased")
    datasets = load_dataset("glue", "mrpc")

    def tokenize_function(examples):
        # max_length=None => use the model max length (it's actually the default)
        outputs = tokenizer(examples["sentence1"], examples["sentence2"], truncation=True, max_length=None)
        return outputs

    # Apply the method we just defined to all the examples in all the splits of the dataset
    # starting with the main process first:
    with accelerator.main_process_first():
        tokenized_datasets = datasets.map(
            tokenize_function,
            batched=True,
            remove_columns=["idx", "sentence1", "sentence2"],
        )

    # We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the
    # transformers library
    tokenized_datasets = tokenized_datasets.rename_column("label", "labels")

    def collate_fn(examples):
        # On TPU it's best to pad everything to the same length or training will be very slow.
        max_length = 128 if accelerator.distributed_type == DistributedType.TPU else None
        # When using mixed precision we want round multiples of 8/16
        if accelerator.mixed_precision == "fp8":
            pad_to_multiple_of = 16
        elif accelerator.mixed_precision != "no":
            pad_to_multiple_of = 8
        else:
            pad_to_multiple_of = None

        return tokenizer.pad(
            examples,
            padding="longest",
            max_length=max_length,
            pad_to_multiple_of=pad_to_multiple_of,
            return_tensors="pt",
        )

    # Instantiate dataloaders.
    train_dataloader = DataLoader(
        tokenized_datasets["train"], shuffle=True, collate_fn=collate_fn, batch_size=batch_size
    )
    eval_dataloader = DataLoader(
        tokenized_datasets["validation"], shuffle=False, collate_fn=collate_fn, batch_size=EVAL_BATCH_SIZE
    )

    return train_dataloader, eval_dataloader


# For testing only
if os.environ.get("TESTING_MOCKED_DATALOADERS", None) == "1":
    from accelerate.test_utils.training import mocked_dataloaders

    get_dataloaders = mocked_dataloaders  # noqa: F811


def training_function(config, args):
    # For testing only
    if os.environ.get("TESTING_MOCKED_DATALOADERS", None) == "1":
        config["num_epochs"] = 2
    # Initialize accelerator
    accelerator = Accelerator(cpu=args.cpu, mixed_precision=args.mixed_precision)
    # Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
    lr = config["lr"]
    num_epochs = int(config["num_epochs"])
    seed = int(config["seed"])
    batch_size = int(config["batch_size"])
    metric = evaluate.load("glue", "mrpc")

    # New Code #
    # We now can define an inner training loop function. It should take a batch size as the only parameter,
    # and build the dataloaders in there.
    # It also gets our decorator
    @find_executable_batch_size(starting_batch_size=batch_size)
    def inner_training_loop(batch_size):
        # And now just move everything below under this function
        # We need to bring in the Accelerator object from earlier
        nonlocal accelerator
        # And reset all of its attributes that could hold onto any memory:
        accelerator.free_memory()

        # Then we can declare the model, optimizer, and everything else:
        set_seed(seed)

        # Instantiate the model (we build the model here so that the seed also control new weights initialization)
        model = AutoModelForSequenceClassification.from_pretrained("bert-base-cased", return_dict=True)

        # We could avoid this line since the accelerator is set with `device_placement=True` (default value).
        # Note that if you are placing tensors on devices manually, this line absolutely needs to be before the optimizer
        # creation otherwise training will not work on TPU (`accelerate` will kindly throw an error to make us aware of that).
        model = model.to(accelerator.device)

        # Instantiate optimizer
        optimizer = AdamW(params=model.parameters(), lr=lr)
        train_dataloader, eval_dataloader = get_dataloaders(accelerator, batch_size)

        # Instantiate scheduler
        lr_scheduler = get_linear_schedule_with_warmup(
            optimizer=optimizer,
            num_warmup_steps=100,
            num_training_steps=(len(train_dataloader) * num_epochs),
        )

        # Prepare everything
        # There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
        # prepare method.
        model, optimizer, train_dataloader, eval_dataloader, lr_scheduler = accelerator.prepare(
            model, optimizer, train_dataloader, eval_dataloader, lr_scheduler
        )

        # Now we train the model
        for epoch in range(num_epochs):
            model.train()
            for step, batch in enumerate(train_dataloader):
                # We could avoid this line since we set the accelerator with `device_placement=True`.
                batch.to(accelerator.device)
                outputs = model(**batch)
                loss = outputs.loss
                accelerator.backward(loss)
                optimizer.step()
                lr_scheduler.step()
                optimizer.zero_grad()

            model.eval()
            for step, batch in enumerate(eval_dataloader):
                # We could avoid this line since we set the accelerator with `device_placement=True`.
                batch.to(accelerator.device)
                with torch.no_grad():
                    outputs = model(**batch)
                predictions = outputs.logits.argmax(dim=-1)
                predictions, references = accelerator.gather_for_metrics((predictions, batch["labels"]))
                metric.add_batch(
                    predictions=predictions,
                    references=references,
                )

            eval_metric = metric.compute()
            # Use accelerator.print to print only on the main process.
            accelerator.print(f"epoch {epoch}:", eval_metric)

    # New Code #
    # And call it at the end with no arguments
    # Note: You could also refactor this outside of your training loop function
    inner_training_loop()


def main():
    parser = argparse.ArgumentParser(description="Simple example of training script.")
    parser.add_argument(
        "--mixed_precision",
        type=str,
        default=None,
        choices=["no", "fp16", "bf16", "fp8"],
        help="Whether to use mixed precision. Choose"
        "between fp16 and bf16 (bfloat16). Bf16 requires PyTorch >= 1.10."
        "and an Nvidia Ampere GPU.",
    )
    parser.add_argument("--cpu", action="store_true", help="If passed, will train on the CPU.")
    args = parser.parse_args()
    config = {"lr": 2e-5, "num_epochs": 3, "seed": 42, "batch_size": 16}
    training_function(config, args)


if __name__ == "__main__":
    main()
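The decorator at the core of this example retries its function with a halved batch size whenever the body raises an out-of-memory error. A standalone sketch of that behavior (illustrative only; the threshold of 32 stands in for real memory pressure):

from accelerate.utils import find_executable_batch_size

@find_executable_batch_size(starting_batch_size=128)
def probe(batch_size):
    if batch_size > 32:
        raise RuntimeError("CUDA out of memory.")  # simulated OOM
    return batch_size

assert probe() == 32  # 128 and 64 fail, 32 succeeds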
def price_plus_tax(price: float, tax_rate: float) -> float:
    """Return the price with tax applied: price * (1 + tax_rate)."""
    return price * (1 + tax_rate)


if __name__ == "__main__":
    print(f"{price_plus_tax(100, 0.25) = }")
    print(f"{price_plus_tax(125.50, 0.05) = }")
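Worked values for the formula above, added for illustration:

assert price_plus_tax(100, 0.25) == 125.0  # 100 * 1.25
assert abs(price_plus_tax(125.50, 0.05) - 131.775) < 1e-9  # 125.50 * 1.05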
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available


_import_structure = {
    "configuration_graphormer": ["GRAPHORMER_PRETRAINED_CONFIG_ARCHIVE_MAP", "GraphormerConfig"],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_graphormer"] = [
        "GRAPHORMER_PRETRAINED_MODEL_ARCHIVE_LIST",
        "GraphormerForGraphClassification",
        "GraphormerModel",
        "GraphormerPreTrainedModel",
    ]


if TYPE_CHECKING:
    from .configuration_graphormer import GRAPHORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, GraphormerConfig

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_graphormer import (
            GRAPHORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
            GraphormerForGraphClassification,
            GraphormerModel,
            GraphormerPreTrainedModel,
        )

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
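The _LazyModule indirection above means the torch-backed classes are only imported when first accessed; from the outside the module behaves normally (a sketch, assuming a transformers build that ships Graphormer):

import transformers

config = transformers.GraphormerConfig()  # resolved lazily at attribute access time
print(config.model_type)  # "graphormer"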
import shutil
import tempfile
import unittest
from unittest.mock import patch

from transformers import (
    DefaultFlowCallback,
    IntervalStrategy,
    PrinterCallback,
    ProgressCallback,
    Trainer,
    TrainerCallback,
    TrainingArguments,
    is_torch_available,
)
from transformers.testing_utils import require_torch


if is_torch_available():
    from transformers.trainer import DEFAULT_CALLBACKS

    from .test_trainer import RegressionDataset, RegressionModelConfig, RegressionPreTrainedModel


class MyTestTrainerCallback(TrainerCallback):
    "A callback that registers the events that goes through."

    def __init__(self):
        self.events = []

    def on_init_end(self, args, state, control, **kwargs):
        self.events.append("on_init_end")

    def on_train_begin(self, args, state, control, **kwargs):
        self.events.append("on_train_begin")

    def on_train_end(self, args, state, control, **kwargs):
        self.events.append("on_train_end")

    def on_epoch_begin(self, args, state, control, **kwargs):
        self.events.append("on_epoch_begin")

    def on_epoch_end(self, args, state, control, **kwargs):
        self.events.append("on_epoch_end")

    def on_step_begin(self, args, state, control, **kwargs):
        self.events.append("on_step_begin")

    def on_step_end(self, args, state, control, **kwargs):
        self.events.append("on_step_end")

    def on_evaluate(self, args, state, control, **kwargs):
        self.events.append("on_evaluate")

    def on_predict(self, args, state, control, **kwargs):
        self.events.append("on_predict")

    def on_save(self, args, state, control, **kwargs):
        self.events.append("on_save")

    def on_log(self, args, state, control, **kwargs):
        self.events.append("on_log")

    def on_prediction_step(self, args, state, control, **kwargs):
        self.events.append("on_prediction_step")


@require_torch
class TrainerCallbackTest(unittest.TestCase):
    def setUp(self):
        self.output_dir = tempfile.mkdtemp()

    def tearDown(self):
        shutil.rmtree(self.output_dir)

    def get_trainer(self, a=0, b=0, train_len=64, eval_len=64, callbacks=None, disable_tqdm=False, **kwargs):
        # disable_tqdm in TrainingArguments has a flaky default since it depends on the level of logging. We make sure
        # its set to False since the tests later on depend on its value.
        train_dataset = RegressionDataset(length=train_len)
        eval_dataset = RegressionDataset(length=eval_len)
        config = RegressionModelConfig(a=a, b=b)
        model = RegressionPreTrainedModel(config)

        args = TrainingArguments(self.output_dir, disable_tqdm=disable_tqdm, report_to=[], **kwargs)
        return Trainer(
            model,
            args,
            train_dataset=train_dataset,
            eval_dataset=eval_dataset,
            callbacks=callbacks,
        )

    def check_callbacks_equality(self, cbs1, cbs2):
        self.assertEqual(len(cbs1), len(cbs2))

        # Order doesn't matter
        cbs1 = sorted(cbs1, key=lambda cb: cb.__name__ if isinstance(cb, type) else cb.__class__.__name__)
        cbs2 = sorted(cbs2, key=lambda cb: cb.__name__ if isinstance(cb, type) else cb.__class__.__name__)

        for cb1, cb2 in zip(cbs1, cbs2):
            if isinstance(cb1, type) and isinstance(cb2, type):
                self.assertEqual(cb1, cb2)
            elif isinstance(cb1, type) and not isinstance(cb2, type):
                self.assertEqual(cb1, cb2.__class__)
            elif not isinstance(cb1, type) and isinstance(cb2, type):
                self.assertEqual(cb1.__class__, cb2)
            else:
                self.assertEqual(cb1, cb2)

    def get_expected_events(self, trainer):
        expected_events = ["on_init_end", "on_train_begin"]
        step = 0
        train_dl_len = len(trainer.get_eval_dataloader())
        evaluation_events = ["on_prediction_step"] * len(trainer.get_eval_dataloader()) + ["on_log", "on_evaluate"]
        for _ in range(trainer.state.num_train_epochs):
            expected_events.append("on_epoch_begin")
            for _ in range(train_dl_len):
                step += 1
                expected_events += ["on_step_begin", "on_step_end"]
                if step % trainer.args.logging_steps == 0:
                    expected_events.append("on_log")
                if trainer.args.evaluation_strategy == IntervalStrategy.STEPS and step % trainer.args.eval_steps == 0:
                    expected_events += evaluation_events.copy()
                if step % trainer.args.save_steps == 0:
                    expected_events.append("on_save")
            expected_events.append("on_epoch_end")
            if trainer.args.evaluation_strategy == IntervalStrategy.EPOCH:
                expected_events += evaluation_events.copy()
        expected_events += ["on_log", "on_train_end"]
        return expected_events

    def test_init_callback(self):
        trainer = self.get_trainer()
        expected_callbacks = DEFAULT_CALLBACKS.copy() + [ProgressCallback]
        self.check_callbacks_equality(trainer.callback_handler.callbacks, expected_callbacks)

        # Callbacks passed at init are added to the default callbacks
        trainer = self.get_trainer(callbacks=[MyTestTrainerCallback])
        expected_callbacks.append(MyTestTrainerCallback)
        self.check_callbacks_equality(trainer.callback_handler.callbacks, expected_callbacks)

        # TrainingArguments.disable_tqdm controls if use ProgressCallback or PrinterCallback
        trainer = self.get_trainer(disable_tqdm=True)
        expected_callbacks = DEFAULT_CALLBACKS.copy() + [PrinterCallback]
        self.check_callbacks_equality(trainer.callback_handler.callbacks, expected_callbacks)

    def test_add_remove_callback(self):
        expected_callbacks = DEFAULT_CALLBACKS.copy() + [ProgressCallback]
        trainer = self.get_trainer()

        # We can add, pop, or remove by class name
        trainer.remove_callback(DefaultFlowCallback)
        expected_callbacks.remove(DefaultFlowCallback)
        self.check_callbacks_equality(trainer.callback_handler.callbacks, expected_callbacks)

        trainer = self.get_trainer()
        cb = trainer.pop_callback(DefaultFlowCallback)
        self.assertEqual(cb.__class__, DefaultFlowCallback)
        self.check_callbacks_equality(trainer.callback_handler.callbacks, expected_callbacks)

        trainer.add_callback(DefaultFlowCallback)
        expected_callbacks.insert(0, DefaultFlowCallback)
        self.check_callbacks_equality(trainer.callback_handler.callbacks, expected_callbacks)

        # We can also add, pop, or remove by instance
        trainer = self.get_trainer()
        cb = trainer.callback_handler.callbacks[0]
        trainer.remove_callback(cb)
        expected_callbacks.remove(DefaultFlowCallback)
        self.check_callbacks_equality(trainer.callback_handler.callbacks, expected_callbacks)

        trainer = self.get_trainer()
        cb1 = trainer.callback_handler.callbacks[0]
        cb2 = trainer.pop_callback(cb1)
        self.assertEqual(cb1, cb2)
        self.check_callbacks_equality(trainer.callback_handler.callbacks, expected_callbacks)

        trainer.add_callback(cb1)
        expected_callbacks.insert(0, DefaultFlowCallback)
        self.check_callbacks_equality(trainer.callback_handler.callbacks, expected_callbacks)

    def test_event_flow(self):
        import warnings

        # XXX: for now ignore scatter_gather warnings in this test since it's not relevant to what's being tested
        warnings.simplefilter(action="ignore", category=UserWarning)

        trainer = self.get_trainer(callbacks=[MyTestTrainerCallback])
        trainer.train()
        events = trainer.callback_handler.callbacks[-2].events
        self.assertEqual(events, self.get_expected_events(trainer))

        # Independent log/save/eval
        trainer = self.get_trainer(callbacks=[MyTestTrainerCallback], logging_steps=5)
        trainer.train()
        events = trainer.callback_handler.callbacks[-2].events
        self.assertEqual(events, self.get_expected_events(trainer))

        trainer = self.get_trainer(callbacks=[MyTestTrainerCallback], save_steps=5)
        trainer.train()
        events = trainer.callback_handler.callbacks[-2].events
        self.assertEqual(events, self.get_expected_events(trainer))

        trainer = self.get_trainer(callbacks=[MyTestTrainerCallback], eval_steps=5, evaluation_strategy="steps")
        trainer.train()
        events = trainer.callback_handler.callbacks[-2].events
        self.assertEqual(events, self.get_expected_events(trainer))

        trainer = self.get_trainer(callbacks=[MyTestTrainerCallback], evaluation_strategy="epoch")
        trainer.train()
        events = trainer.callback_handler.callbacks[-2].events
        self.assertEqual(events, self.get_expected_events(trainer))

        # A bit of everything
        trainer = self.get_trainer(
            callbacks=[MyTestTrainerCallback],
            logging_steps=3,
            save_steps=10,
            eval_steps=5,
            evaluation_strategy="steps",
        )
        trainer.train()
        events = trainer.callback_handler.callbacks[-2].events
        self.assertEqual(events, self.get_expected_events(trainer))

        # warning should be emitted for duplicated callbacks
        with patch("transformers.trainer_callback.logger.warning") as warn_mock:
            trainer = self.get_trainer(
                callbacks=[MyTestTrainerCallback, MyTestTrainerCallback],
            )
            assert str(MyTestTrainerCallback) in warn_mock.call_args[0][0]
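Outside the test harness, the same hook API is how users attach behavior to Trainer (a minimal sketch; the logging behavior is invented for illustration):

from transformers import TrainerCallback

class LossLoggerCallback(TrainerCallback):
    def on_log(self, args, state, control, logs=None, **kwargs):
        # Print the running loss whenever the Trainer emits a log entry.
        if logs and "loss" in logs:
            print(f"step {state.global_step}: loss {logs['loss']:.4f}")

# then: Trainer(model, args, ..., callbacks=[LossLoggerCallback()])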
"""simple docstring"""
from typing import Dict, Iterable, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
IMAGENET_DEFAULT_MEAN,
IMAGENET_DEFAULT_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, logging
UpperCAmelCase =logging.get_logger(__name__)
class lowerCamelCase__ ( SCREAMING_SNAKE_CASE ):
'''simple docstring'''
_lowerCamelCase = ['''pixel_values''']
def __init__( self ,lowerCamelCase_ = True ,lowerCamelCase_ = None ,lowerCamelCase_ = PILImageResampling.BICUBIC ,lowerCamelCase_ = True ,lowerCamelCase_ = None ,lowerCamelCase_ = True ,lowerCamelCase_ = 1 / 2_5_5 ,lowerCamelCase_ = True ,lowerCamelCase_ = IMAGENET_DEFAULT_MEAN ,lowerCamelCase_ = IMAGENET_DEFAULT_STD ,**lowerCamelCase_ ,) -> None:
super().__init__(**lowerCamelCase_ )
A = size if size is not None else {"""shortest_edge""": 2_2_4}
A = get_size_dict(lowerCamelCase_ ,default_to_square=lowerCamelCase_ )
A = crop_size if crop_size is not None else {"""height""": 2_2_4, """width""": 2_2_4}
A = get_size_dict(lowerCamelCase_ ,param_name="""crop_size""" )
A = do_resize
A = size
A = resample
A = do_center_crop
A = crop_size
A = do_rescale
A = rescale_factor
A = do_normalize
A = image_mean if image_mean is not None else IMAGENET_DEFAULT_MEAN
A = image_std if image_std is not None else IMAGENET_DEFAULT_STD
def UpperCamelCase__ ( self ,lowerCamelCase_ ,lowerCamelCase_ ,lowerCamelCase_ = PILImageResampling.BICUBIC ,lowerCamelCase_ = None ,**lowerCamelCase_ ,) -> np.ndarray:
A = get_size_dict(lowerCamelCase_ ,default_to_square=lowerCamelCase_ )
# size_dict is a dict with either keys "height" and "width" or "shortest_edge"
if "shortest_edge" in size:
A = int((2_5_6 / 2_2_4) * size["""shortest_edge"""] )
A = get_resize_output_image_size(lowerCamelCase_ ,size=lowerCamelCase_ ,default_to_square=lowerCamelCase_ )
A = {"""height""": output_size[0], """width""": output_size[1]}
if "height" not in size_dict or "width" not in size_dict:
raise ValueError(
f'Size dict must have keys \'height\' and \'width\' or \'shortest_edge\'. Got {size_dict.keys()}' )
return resize(
lowerCamelCase_ ,size=(size_dict["""height"""], size_dict["""width"""]) ,resample=lowerCamelCase_ ,data_format=lowerCamelCase_ ,**lowerCamelCase_ )
def UpperCamelCase__ ( self ,lowerCamelCase_ ,lowerCamelCase_ ,lowerCamelCase_ = None ,**lowerCamelCase_ ,) -> np.ndarray:
A = get_size_dict(lowerCamelCase_ )
if "height" not in size or "width" not in size:
raise ValueError(f'Size dict must have keys \'height\' and \'width\'. Got {size.keys()}' )
return center_crop(lowerCamelCase_ ,size=(size["""height"""], size["""width"""]) ,data_format=lowerCamelCase_ ,**lowerCamelCase_ )
def UpperCamelCase__ ( self ,lowerCamelCase_ ,lowerCamelCase_ ,lowerCamelCase_ = None ,**lowerCamelCase_ ,) -> np.ndarray:
return rescale(lowerCamelCase_ ,scale=lowerCamelCase_ ,data_format=lowerCamelCase_ ,**lowerCamelCase_ )
def UpperCamelCase__ ( self ,lowerCamelCase_ ,lowerCamelCase_ ,lowerCamelCase_ ,lowerCamelCase_ = None ,**lowerCamelCase_ ,) -> np.ndarray:
return normalize(lowerCamelCase_ ,mean=lowerCamelCase_ ,std=lowerCamelCase_ ,data_format=lowerCamelCase_ ,**lowerCamelCase_ )
def UpperCamelCase__ ( self ,lowerCamelCase_ ,lowerCamelCase_ = None ,lowerCamelCase_ = None ,lowerCamelCase_ = None ,lowerCamelCase_ = None ,lowerCamelCase_ = None ,lowerCamelCase_ = None ,lowerCamelCase_ = None ,lowerCamelCase_ = None ,lowerCamelCase_ = None ,lowerCamelCase_ = None ,lowerCamelCase_ = None ,lowerCamelCase_ = ChannelDimension.FIRST ,**lowerCamelCase_ ,) -> BatchFeature:
A = do_resize if do_resize is not None else self.do_resize
A = resample if resample is not None else self.resample
A = do_center_crop if do_center_crop is not None else self.do_center_crop
A = do_rescale if do_rescale is not None else self.do_rescale
A = rescale_factor if rescale_factor is not None else self.rescale_factor
A = do_normalize if do_normalize is not None else self.do_normalize
A = image_mean if image_mean is not None else self.image_mean
A = image_std if image_std is not None else self.image_std
A = size if size is not None else self.size
A = get_size_dict(lowerCamelCase_ ,default_to_square=lowerCamelCase_ )
A = crop_size if crop_size is not None else self.crop_size
A = get_size_dict(lowerCamelCase_ ,param_name="""crop_size""" )
A = make_list_of_images(lowerCamelCase_ )
if not valid_images(lowerCamelCase_ ):
raise ValueError(
"""Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, """
"""torch.Tensor, tf.Tensor or jax.ndarray.""" )
if do_resize and size is None:
raise ValueError("""Size must be specified if do_resize is True.""" )
if do_center_crop and crop_size is None:
raise ValueError("""Crop size must be specified if do_center_crop is True.""" )
if do_rescale and rescale_factor is None:
raise ValueError("""Rescale factor must be specified if do_rescale is True.""" )
if do_normalize and (image_mean is None or image_std is None):
raise ValueError("""Image mean and std must be specified if do_normalize is True.""" )
# All transformations expect numpy arrays.
A = [to_numpy_array(lowerCamelCase_ ) for image in images]
if do_resize:
A = [self.resize(lowerCamelCase_ ,lowerCamelCase_ ,lowerCamelCase_ ) for image in images]
if do_center_crop:
A = [self.center_crop(lowerCamelCase_ ,lowerCamelCase_ ) for image in images]
if do_rescale:
A = [self.rescale(lowerCamelCase_ ,lowerCamelCase_ ) for image in images]
if do_normalize:
A = [self.normalize(lowerCamelCase_ ,lowerCamelCase_ ,lowerCamelCase_ ) for image in images]
A = [to_channel_dimension_format(lowerCamelCase_ ,lowerCamelCase_ ) for image in images]
A = {"""pixel_values""": images}
return BatchFeature(data=lowerCamelCase_ ,tensor_type=lowerCamelCase_ )
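Typical use of the processor above (a sketch; the image path is a placeholder):

from PIL import Image

processor = LevitImageProcessor()
image = Image.open("example.jpg")  # hypothetical input image
inputs = processor(image, return_tensors="np")
print(inputs["pixel_values"].shape)  # (1, 3, 224, 224) after resize and center crop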
"""simple docstring"""
from __future__ import annotations
import unittest
import numpy as np
from transformers import BlipTextConfig
from transformers.testing_utils import require_tf, slow
from transformers.utils import is_tf_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
if is_tf_available():
import tensorflow as tf
from transformers import TFBlipTextModel
from transformers.models.blip.modeling_tf_blip import TF_BLIP_PRETRAINED_MODEL_ARCHIVE_LIST
class lowerCamelCase__ :
'''simple docstring'''
def __init__( self ,lowerCamelCase_ ,lowerCamelCase_=1_2 ,lowerCamelCase_=7 ,lowerCamelCase_=True ,lowerCamelCase_=True ,lowerCamelCase_=True ,lowerCamelCase_=9_9 ,lowerCamelCase_=3_2 ,lowerCamelCase_=3_2 ,lowerCamelCase_=2 ,lowerCamelCase_=4 ,lowerCamelCase_=3_7 ,lowerCamelCase_=0.1 ,lowerCamelCase_=0.1 ,lowerCamelCase_=5_1_2 ,lowerCamelCase_=0.02 ,lowerCamelCase_=0 ,lowerCamelCase_=None ,) -> List[str]:
A = parent
A = batch_size
A = seq_length
A = is_training
A = use_input_mask
A = use_labels
A = vocab_size
A = hidden_size
A = projection_dim
A = num_hidden_layers
A = num_attention_heads
A = intermediate_size
A = dropout
A = attention_dropout
A = max_position_embeddings
A = initializer_range
A = scope
A = bos_token_id
def UpperCamelCase__ ( self ) -> Tuple:
A = ids_tensor([self.batch_size, self.seq_length] ,self.vocab_size )
A = None
if self.use_input_mask:
A = random_attention_mask([self.batch_size, self.seq_length] )
if input_mask is not None:
A = input_mask.numpy()
A , A = input_mask.shape
A = np.random.randint(1 ,seq_length - 1 ,size=(batch_size,) )
for batch_idx, start_index in enumerate(lowerCamelCase_ ):
A = 1
A = 0
A = self.get_config()
return config, input_ids, tf.convert_to_tensor(lowerCamelCase_ )
def UpperCamelCase__ ( self ) -> int:
return BlipTextConfig(
vocab_size=self.vocab_size ,hidden_size=self.hidden_size ,projection_dim=self.projection_dim ,num_hidden_layers=self.num_hidden_layers ,num_attention_heads=self.num_attention_heads ,intermediate_size=self.intermediate_size ,dropout=self.dropout ,attention_dropout=self.attention_dropout ,max_position_embeddings=self.max_position_embeddings ,initializer_range=self.initializer_range ,bos_token_id=self.bos_token_id ,)
def UpperCamelCase__ ( self ,lowerCamelCase_ ,lowerCamelCase_ ,lowerCamelCase_ ) -> Tuple:
A = TFBlipTextModel(config=lowerCamelCase_ )
A = model(lowerCamelCase_ ,attention_mask=lowerCamelCase_ ,training=lowerCamelCase_ )
A = model(lowerCamelCase_ ,training=lowerCamelCase_ )
self.parent.assertEqual(result.last_hidden_state.shape ,(self.batch_size, self.seq_length, self.hidden_size) )
self.parent.assertEqual(result.pooler_output.shape ,(self.batch_size, self.hidden_size) )
def UpperCamelCase__ ( self ) -> Optional[Any]:
A = self.prepare_config_and_inputs()
A , A , A = config_and_inputs
A = {"""input_ids""": input_ids, """attention_mask""": input_mask}
return config, inputs_dict
@require_tf
class lowerCamelCase__ ( SCREAMING_SNAKE_CASE , unittest.TestCase ):
'''simple docstring'''
_lowerCamelCase = (TFBlipTextModel,) if is_tf_available() else ()
_lowerCamelCase = False
_lowerCamelCase = False
_lowerCamelCase = False
def UpperCamelCase__ ( self ) -> List[str]:
A = BlipTextModelTester(self )
A = ConfigTester(self ,config_class=lowerCamelCase_ ,hidden_size=3_7 )
def UpperCamelCase__ ( self ) -> Union[str, Any]:
self.config_tester.run_common_tests()
def UpperCamelCase__ ( self ) -> Union[str, Any]:
A = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*lowerCamelCase_ )
def UpperCamelCase__ ( self ) -> Optional[int]:
pass
def UpperCamelCase__ ( self ) -> Optional[Any]:
pass
@unittest.skip(reason="""Blip does not use inputs_embeds""" )
def UpperCamelCase__ ( self ) -> Optional[int]:
pass
@unittest.skip(reason="""BlipTextModel has no base class and is not available in MODEL_MAPPING""" )
def UpperCamelCase__ ( self ) -> Dict:
pass
@unittest.skip(reason="""BlipTextModel has no base class and is not available in MODEL_MAPPING""" )
def UpperCamelCase__ ( self ) -> str:
pass
@slow
def UpperCamelCase__ ( self ) -> str:
for model_name in TF_BLIP_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
A = TFBlipTextModel.from_pretrained(lowerCamelCase_ )
self.assertIsNotNone(lowerCamelCase_ )
def UpperCamelCase__ ( self ,lowerCamelCase_=True ) -> str:
super().test_pt_tf_model_equivalence(allow_missing_keys=lowerCamelCase_ )
"""Project Euler 8: find the thirteen adjacent digits in the 1000-digit number
below that have the greatest product."""

from functools import reduce

N = (
    "73167176531330624919225119674426574742355349194934"
    "96983520312774506326239578318016984801869478851843"
    "85861560789112949495459501737958331952853208805511"
    "12540698747158523863050715693290963295227443043557"
    "66896648950445244523161731856403098711121722383113"
    "62229893423380308135336276614282806444486645238749"
    "30358907296290491560440772390713810515859307960866"
    "70172427121883998797908792274921901699720888093776"
    "65727333001053367881220235421809751254540594752243"
    "52584907711670556013604839586446706324415722155397"
    "53697817977846174064955149290862569321978468622482"
    "83972241375657056057490261407972968652414535100474"
    "82166370484403199890008895243450658541227588666881"
    "16427171479924442928230863465674813919123162824586"
    "17866458359124566529476545682848912883142607690042"
    "24219022671055626321111109370544217506941658960408"
    "07198403850962455444362981230987879927244284909188"
    "84580156166097919133875499200524063689912560717606"
    "05886116467109405077541002256983155200055935729725"
    "71636269561882670428252483600823257530420752963450"
)


def solution(n: str = N) -> int:
    """Return the greatest product of thirteen adjacent digits in ``n``."""
    return max(
        # mypy cannot properly interpret reduce
        int(reduce(lambda x, y: str(int(x) * int(y)), n[i : i + 13]))
        for i in range(len(n) - 12)
    )


if __name__ == "__main__":
    print(f"{solution() = }")
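A quick check of the digit-product trick above (added for illustration; reduce multiplies a window of digits by round-tripping the running product through strings):

from functools import reduce

window = "9989"
product = int(reduce(lambda x, y: str(int(x) * int(y)), window))
assert product == 9 * 9 * 8 * 9 == 5832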
import json
import unittest
import numpy as np
from huggingface_hub import hf_hub_download
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from transformers import OneFormerImageProcessor
from transformers.models.oneformer.image_processing_oneformer import binary_mask_to_rle
from transformers.models.oneformer.modeling_oneformer import OneFormerForUniversalSegmentationOutput
if is_vision_available():
from PIL import Image
def __snake_case ( _lowerCAmelCase : List[str] , _lowerCAmelCase : List[Any]="shi-labs/oneformer_demo" ) -> int:
with open(hf_hub_download(_lowerCAmelCase , _lowerCAmelCase , repo_type="dataset" ) , "r" ) as f:
A_ : Optional[int] = json.load(_lowerCAmelCase )
A_ : Union[str, Any] = {}
A_ : Tuple = []
A_ : Optional[Any] = []
for key, info in class_info.items():
A_ : Tuple = info["name"]
class_names.append(info["name"] )
if info["isthing"]:
thing_ids.append(int(_lowerCAmelCase ) )
A_ : Optional[Any] = thing_ids
A_ : int = class_names
return metadata
class __magic_name__ ( unittest.TestCase ):
"""simple docstring"""
def __init__( self :List[Any] , snake_case :List[str] , snake_case :int=7 , snake_case :Optional[int]=3 , snake_case :Union[str, Any]=30 , snake_case :Tuple=400 , snake_case :List[Any]=None , snake_case :Optional[Any]=True , snake_case :Tuple=True , snake_case :Dict=[0.5, 0.5, 0.5] , snake_case :Any=[0.5, 0.5, 0.5] , snake_case :Optional[int]=10 , snake_case :Tuple=False , snake_case :Optional[int]=255 , snake_case :Optional[Any]="shi-labs/oneformer_demo" , snake_case :Optional[Any]="ade20k_panoptic.json" , snake_case :Optional[int]=10 , ):
'''simple docstring'''
A_ : Tuple = parent
A_ : List[str] = batch_size
A_ : Optional[int] = num_channels
A_ : Tuple = min_resolution
A_ : List[Any] = max_resolution
A_ : Union[str, Any] = do_resize
A_ : Any = {"shortest_edge": 32, "longest_edge": 1_333} if size is None else size
A_ : Tuple = do_normalize
A_ : List[str] = image_mean
A_ : List[Any] = image_std
A_ : Union[str, Any] = class_info_file
A_ : List[Any] = prepare_metadata(snake_case , snake_case )
A_ : Tuple = num_text
A_ : str = repo_path
# for the post_process_functions
A_ : Any = 2
A_ : int = 10
A_ : Optional[int] = 10
A_ : Tuple = 3
A_ : Tuple = 4
A_ : str = num_labels
A_ : int = do_reduce_labels
A_ : List[Any] = ignore_index
def SCREAMING_SNAKE_CASE ( self :Optional[Any] ):
'''simple docstring'''
return {
"do_resize": self.do_resize,
"size": self.size,
"do_normalize": self.do_normalize,
"image_mean": self.image_mean,
"image_std": self.image_std,
"num_labels": self.num_labels,
"do_reduce_labels": self.do_reduce_labels,
"ignore_index": self.ignore_index,
"class_info_file": self.class_info_file,
"metadata": self.metadata,
"num_text": self.num_text,
}
def SCREAMING_SNAKE_CASE ( self :List[Any] , snake_case :Any , snake_case :Any=False ):
'''simple docstring'''
if not batched:
A_ : List[str] = image_inputs[0]
if isinstance(snake_case , Image.Image ):
A_ , A_ : Dict = image.size
else:
A_ , A_ : Tuple = image.shape[1], image.shape[2]
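            # Mirror the processor's shortest-edge resize: scale so the shorter side equals size["shortest_edge"].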
if w < h:
A_ : str = int(self.size["shortest_edge"] * h / w )
A_ : Any = self.size["shortest_edge"]
elif w > h:
A_ : Optional[int] = self.size["shortest_edge"]
A_ : List[str] = int(self.size["shortest_edge"] * w / h )
else:
A_ : List[str] = self.size["shortest_edge"]
A_ : Optional[Any] = self.size["shortest_edge"]
else:
A_ : Tuple = []
for image in image_inputs:
A_ , A_ : Optional[Any] = self.get_expected_values([image] )
expected_values.append((expected_height, expected_width) )
A_ : Tuple = max(snake_case , key=lambda snake_case : item[0] )[0]
A_ : Union[str, Any] = max(snake_case , key=lambda snake_case : item[1] )[1]
return expected_height, expected_width
def SCREAMING_SNAKE_CASE ( self :Tuple ):
'''simple docstring'''
return OneFormerForUniversalSegmentationOutput(
# +1 for null class
class_queries_logits=torch.randn((self.batch_size, self.num_queries, self.num_classes + 1) ) , masks_queries_logits=torch.randn((self.batch_size, self.num_queries, self.height, self.width) ) , )
@require_torch
@require_vision
class __magic_name__ ( lowerCamelCase__ , unittest.TestCase ):
"""simple docstring"""
__UpperCamelCase = OneFormerImageProcessor if (is_vision_available() and is_torch_available()) else None
# only for test_image_processing_common.test_image_proc_to_json_string
__UpperCamelCase = image_processing_class
def SCREAMING_SNAKE_CASE ( self :int ):
'''simple docstring'''
A_ : Union[str, Any] = OneFormerImageProcessorTester(self )
@property
def SCREAMING_SNAKE_CASE ( self :List[str] ):
'''simple docstring'''
return self.image_processing_tester.prepare_image_processor_dict()
def SCREAMING_SNAKE_CASE ( self :List[Any] ):
'''simple docstring'''
A_ : Optional[Any] = self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(snake_case , "image_mean" ) )
self.assertTrue(hasattr(snake_case , "image_std" ) )
self.assertTrue(hasattr(snake_case , "do_normalize" ) )
self.assertTrue(hasattr(snake_case , "do_resize" ) )
self.assertTrue(hasattr(snake_case , "size" ) )
self.assertTrue(hasattr(snake_case , "ignore_index" ) )
self.assertTrue(hasattr(snake_case , "class_info_file" ) )
self.assertTrue(hasattr(snake_case , "num_text" ) )
self.assertTrue(hasattr(snake_case , "repo_path" ) )
self.assertTrue(hasattr(snake_case , "metadata" ) )
self.assertTrue(hasattr(snake_case , "do_reduce_labels" ) )
def SCREAMING_SNAKE_CASE ( self :str ):
'''simple docstring'''
pass
def SCREAMING_SNAKE_CASE ( self :int ):
'''simple docstring'''
A_ : Dict = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
A_ : Optional[Any] = prepare_image_inputs(self.image_processing_tester , equal_resolution=snake_case )
for image in image_inputs:
self.assertIsInstance(snake_case , Image.Image )
# Test not batched input
A_ : str = image_processor(image_inputs[0] , ["semantic"] , return_tensors="pt" ).pixel_values
A_ , A_ : str = self.image_processing_tester.get_expected_values(snake_case )
self.assertEqual(
encoded_images.shape , (1, self.image_processing_tester.num_channels, expected_height, expected_width) , )
# Test batched
A_ , A_ : Optional[Any] = self.image_processing_tester.get_expected_values(snake_case , batched=snake_case )
A_ : List[str] = image_processor(
snake_case , ["semantic"] * len(snake_case ) , return_tensors="pt" ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processing_tester.batch_size,
self.image_processing_tester.num_channels,
expected_height,
expected_width,
) , )
def SCREAMING_SNAKE_CASE ( self :List[str] ):
'''simple docstring'''
A_ : Optional[Any] = self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
A_ : List[str] = prepare_image_inputs(self.image_processing_tester , equal_resolution=snake_case , numpify=snake_case )
for image in image_inputs:
self.assertIsInstance(snake_case , np.ndarray )
# Test not batched input
A_ : List[str] = image_processor(image_inputs[0] , ["semantic"] , return_tensors="pt" ).pixel_values
A_ , A_ : List[str] = self.image_processing_tester.get_expected_values(snake_case )
self.assertEqual(
encoded_images.shape , (1, self.image_processing_tester.num_channels, expected_height, expected_width) , )
# Test batched
A_ , A_ : int = self.image_processing_tester.get_expected_values(snake_case , batched=snake_case )
A_ : Optional[Any] = image_processor(
snake_case , ["semantic"] * len(snake_case ) , return_tensors="pt" ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processing_tester.batch_size,
self.image_processing_tester.num_channels,
expected_height,
expected_width,
) , )
def SCREAMING_SNAKE_CASE ( self :Optional[int] ):
'''simple docstring'''
A_ : List[str] = self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
A_ : List[str] = prepare_image_inputs(self.image_processing_tester , equal_resolution=snake_case , torchify=snake_case )
for image in image_inputs:
self.assertIsInstance(snake_case , torch.Tensor )
# Test not batched input
A_ : Any = image_processor(image_inputs[0] , ["semantic"] , return_tensors="pt" ).pixel_values
A_ , A_ : Tuple = self.image_processing_tester.get_expected_values(snake_case )
self.assertEqual(
encoded_images.shape , (1, self.image_processing_tester.num_channels, expected_height, expected_width) , )
# Test batched
A_ , A_ : Tuple = self.image_processing_tester.get_expected_values(snake_case , batched=snake_case )
A_ : Any = image_processor(
snake_case , ["semantic"] * len(snake_case ) , return_tensors="pt" ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processing_tester.batch_size,
self.image_processing_tester.num_channels,
expected_height,
expected_width,
) , )
def SCREAMING_SNAKE_CASE ( self :Optional[Any] , snake_case :Dict=False , snake_case :str=False , snake_case :Dict="np" ):
'''simple docstring'''
A_ : Tuple = self.image_processing_class(**self.image_processor_dict )
# prepare image and target
A_ : Tuple = self.image_processing_tester.num_labels
A_ : str = None
A_ : Tuple = None
A_ : Tuple = prepare_image_inputs(self.image_processing_tester , equal_resolution=snake_case )
if with_segmentation_maps:
A_ : List[str] = num_labels
if is_instance_map:
A_ : List[str] = list(range(snake_case ) ) * 2
A_ : int = dict(enumerate(snake_case ) )
A_ : List[str] = [
                np.random.randint(0 , high * 2 , (img.size[1], img.size[0]) ).astype(np.uint8 ) for img in image_inputs
]
if segmentation_type == "pil":
A_ : int = [Image.fromarray(snake_case ) for annotation in annotations]
A_ : List[str] = image_processor(
snake_case , ["semantic"] * len(snake_case ) , snake_case , return_tensors="pt" , instance_id_to_semantic_id=snake_case , pad_and_return_pixel_mask=snake_case , )
return inputs
def SCREAMING_SNAKE_CASE ( self :Any ):
'''simple docstring'''
pass
def SCREAMING_SNAKE_CASE ( self :Optional[int] ):
'''simple docstring'''
def common(snake_case :Dict=False , snake_case :Optional[int]=None ):
A_ : Tuple = self.comm_get_image_processor_inputs(
with_segmentation_maps=snake_case , is_instance_map=snake_case , segmentation_type=snake_case )
A_ : Optional[Any] = inputs["mask_labels"]
A_ : List[Any] = inputs["class_labels"]
A_ : Optional[Any] = inputs["pixel_values"]
A_ : int = inputs["text_inputs"]
# check the batch_size
for mask_label, class_label, text_input in zip(snake_case , snake_case , snake_case ):
self.assertEqual(mask_label.shape[0] , class_label.shape[0] )
# this ensure padding has happened
self.assertEqual(mask_label.shape[1:] , pixel_values.shape[2:] )
self.assertEqual(len(snake_case ) , self.image_processing_tester.num_text )
common()
        common(is_instance_map=True )
        common(is_instance_map=False , segmentation_type="pil" )
        common(is_instance_map=True , segmentation_type="pil" )
def SCREAMING_SNAKE_CASE ( self :Optional[Any] ):
'''simple docstring'''
A_ : Any = np.zeros((20, 50) )
A_ : List[str] = 1
A_ : int = 1
A_ : Optional[Any] = 1
A_ : Any = binary_mask_to_rle(snake_case )
self.assertEqual(len(snake_case ) , 4 )
self.assertEqual(rle[0] , 21 )
self.assertEqual(rle[1] , 45 )
def SCREAMING_SNAKE_CASE ( self :Optional[int] ):
'''simple docstring'''
A_ : Union[str, Any] = self.image_processing_class(
num_labels=self.image_processing_tester.num_classes , max_seq_length=77 , task_seq_length=77 , class_info_file="ade20k_panoptic.json" , num_text=self.image_processing_tester.num_text , repo_path="shi-labs/oneformer_demo" , )
A_ : Any = self.image_processing_tester.get_fake_oneformer_outputs()
        A_ : int = image_processor.post_process_semantic_segmentation(snake_case )
self.assertEqual(len(snake_case ) , self.image_processing_tester.batch_size )
self.assertEqual(
segmentation[0].shape , (
self.image_processing_tester.height,
self.image_processing_tester.width,
) , )
A_ : Optional[int] = [(1, 4) for i in range(self.image_processing_tester.batch_size )]
        A_ : List[Any] = image_processor.post_process_semantic_segmentation(snake_case , target_sizes=snake_case )
self.assertEqual(segmentation[0].shape , target_sizes[0] )
def SCREAMING_SNAKE_CASE ( self :str ):
'''simple docstring'''
A_ : List[str] = self.image_processing_class(
num_labels=self.image_processing_tester.num_classes , max_seq_length=77 , task_seq_length=77 , class_info_file="ade20k_panoptic.json" , num_text=self.image_processing_tester.num_text , repo_path="shi-labs/oneformer_demo" , )
A_ : str = self.image_processing_tester.get_fake_oneformer_outputs()
A_ : Optional[Any] = image_processor.post_process_instance_segmentation(snake_case , threshold=0 )
self.assertTrue(len(snake_case ) == self.image_processing_tester.batch_size )
for el in segmentation:
self.assertTrue("segmentation" in el )
self.assertTrue("segments_info" in el )
self.assertEqual(type(el["segments_info"] ) , snake_case )
self.assertEqual(
el["segmentation"].shape , (self.image_processing_tester.height, self.image_processing_tester.width) )
def SCREAMING_SNAKE_CASE ( self :List[str] ):
'''simple docstring'''
A_ : Tuple = self.image_processing_class(
num_labels=self.image_processing_tester.num_classes , max_seq_length=77 , task_seq_length=77 , class_info_file="ade20k_panoptic.json" , num_text=self.image_processing_tester.num_text , repo_path="shi-labs/oneformer_demo" , )
A_ : List[Any] = self.image_processing_tester.get_fake_oneformer_outputs()
A_ : Optional[Any] = image_processor.post_process_panoptic_segmentation(snake_case , threshold=0 )
self.assertTrue(len(snake_case ) == self.image_processing_tester.batch_size )
for el in segmentation:
self.assertTrue("segmentation" in el )
self.assertTrue("segments_info" in el )
self.assertEqual(type(el["segments_info"] ) , snake_case )
self.assertEqual(
el["segmentation"].shape , (self.image_processing_tester.height, self.image_processing_tester.width) )
| 300 | 0 |
'''simple docstring'''
def _lowerCAmelCase ( _UpperCamelCase : Optional[int] ):
"""simple docstring"""
return [
{
0: [1, 2],
1: [0, 2],
2: [0, 1, 3, 5],
3: [2, 4],
4: [3],
5: [2, 6, 8],
6: [5, 7],
7: [6, 8],
8: [5, 7],
},
{
0: [6],
1: [9],
2: [4, 5],
3: [4],
4: [2, 3],
5: [2],
6: [0, 7],
7: [6],
8: [],
9: [1],
},
{
0: [4],
1: [6],
2: [],
3: [5, 6, 7],
4: [0, 6],
5: [3, 8, 9],
6: [1, 3, 4, 7],
7: [3, 6, 8, 9],
8: [5, 7],
9: [5, 7],
},
{
0: [1, 3],
1: [0, 2, 4],
2: [1, 3, 4],
3: [0, 2, 4],
4: [1, 2, 3],
},
][index]
def _lowerCAmelCase ( _UpperCamelCase : dict[int, list[int]] ):
"""simple docstring"""
_SCREAMING_SNAKE_CASE =0
_SCREAMING_SNAKE_CASE =len(_UpperCamelCase ) # No of vertices in graph
_SCREAMING_SNAKE_CASE =[0] * n
_SCREAMING_SNAKE_CASE =[False] * n
def dfs(_UpperCamelCase : str , _UpperCamelCase : Dict , _UpperCamelCase : List[Any] , _UpperCamelCase : Union[str, Any] ):
_SCREAMING_SNAKE_CASE =True
_SCREAMING_SNAKE_CASE =id_
id_ += 1
for to in graph[at]:
if to == parent:
pass
elif not visited[to]:
dfs(_UpperCamelCase , _UpperCamelCase , _UpperCamelCase , id_ )
_SCREAMING_SNAKE_CASE =min(low[at] , low[to] )
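                # (at, to) is a bridge when the subtree under `to` cannot reach back to `at` or above.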
if id_ <= low[to]:
bridges.append((at, to) if at < to else (to, at) )
else:
# This edge is a back edge and cannot be a bridge
_SCREAMING_SNAKE_CASE =min(low[at] , low[to] )
_SCREAMING_SNAKE_CASE =[]
for i in range(_UpperCamelCase ):
if not visited[i]:
dfs(_UpperCamelCase , -1 , _UpperCamelCase , id_ )
return bridges
if __name__ == "__main__":
import doctest
doctest.testmod()
| 368 |
'''simple docstring'''
def _lowerCAmelCase ( _UpperCamelCase : str ) -> bool:
"""simple docstring"""
_SCREAMING_SNAKE_CASE =0
for ch in input_str:
_SCREAMING_SNAKE_CASE =ord(_UpperCamelCase )
_SCREAMING_SNAKE_CASE =pow(2 , _UpperCamelCase )
# If we already turned on bit for current character's unicode
if bitmap >> ch_unicode & 1 == 1:
return False
bitmap |= ch_bit_index_on
return True
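# Example: "abc" contains only unique characters, while "aba" repeats "a" and fails the check.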
if __name__ == "__main__":
import doctest
doctest.testmod()
| 114 | 0 |
from typing import TYPE_CHECKING
from ....utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_SCREAMING_SNAKE_CASE : Optional[Any] = {
"configuration_trajectory_transformer": [
"TRAJECTORY_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP",
"TrajectoryTransformerConfig",
],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_SCREAMING_SNAKE_CASE : int = [
"TRAJECTORY_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST",
"TrajectoryTransformerModel",
"TrajectoryTransformerPreTrainedModel",
"load_tf_weights_in_trajectory_transformer",
]
if TYPE_CHECKING:
from .configuration_trajectory_transformer import (
TRAJECTORY_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP,
TrajectoryTransformerConfig,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_trajectory_transformer import (
TRAJECTORY_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
TrajectoryTransformerModel,
TrajectoryTransformerPreTrainedModel,
load_tf_weights_in_trajectory_transformer,
)
else:
import sys
_SCREAMING_SNAKE_CASE : Union[str, Any] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 127 |
import logging
import os
import quant_trainer
import torch
from torch.utils.data import DataLoader
from transformers import Trainer, is_torch_tpu_available
from transformers.trainer_utils import PredictionOutput
_SCREAMING_SNAKE_CASE : List[str] = logging.getLogger(__name__)
if is_torch_tpu_available(check_device=False):
import torch_xla.core.xla_model as xm
import torch_xla.debug.metrics as met
class A__ ( snake_case__ ):
"""simple docstring"""
def __init__( self , *__snake_case , __snake_case=None , __snake_case=None , __snake_case=None , **__snake_case ):
super().__init__(*__snake_case , **__snake_case )
snake_case = eval_examples
snake_case = post_process_function
snake_case = quant_trainer_args
snake_case = 1_2_8 # default number of calibration samples
def a_ ( self , __snake_case=None ):
if calib_dataset is None and self.calib_dataset is None:
raise ValueError('''Trainer: calibration requires an calib_dataset.''' )
snake_case = calib_dataset if calib_dataset is not None else self.calib_dataset
snake_case = self._remove_unused_columns(__snake_case , description='''Calibration''' )
return DataLoader(
__snake_case , batch_size=self.args.eval_batch_size , collate_fn=self.data_collator , drop_last=self.args.dataloader_drop_last , num_workers=self.args.dataloader_num_workers , pin_memory=self.args.dataloader_pin_memory , shuffle=__snake_case , )
def a_ ( self , __snake_case=None ):
snake_case = self.train_dataset if calib_dataset is None else calib_dataset
snake_case = self.get_calib_dataloader(__snake_case )
snake_case = self.model
quant_trainer.configure_model(__snake_case , self.quant_trainer_args , calib=__snake_case )
model.eval()
quant_trainer.enable_calibration(__snake_case )
logger.info('''***** Running calibration *****''' )
logger.info(F''' Num examples = {self.calib_num}''' )
logger.info(F''' Batch size = {calib_dataloader.batch_size}''' )
for step, inputs in enumerate(__snake_case ):
# Prediction step
snake_case , snake_case , snake_case = self.prediction_step(__snake_case , __snake_case , prediction_loss_only=__snake_case )
if (step + 1) * calib_dataloader.batch_size >= self.calib_num:
break
quant_trainer.finish_calibration(__snake_case , self.quant_trainer_args )
snake_case = model
def a_ ( self , __snake_case=None , __snake_case=None , __snake_case=None , __snake_case = "eval" ):
snake_case = self.eval_dataset if eval_dataset is None else eval_dataset
snake_case = self.get_eval_dataloader(__snake_case )
snake_case = self.eval_examples if eval_examples is None else eval_examples
# Temporarily disable metric computation, we will do it in the loop here.
snake_case = self.compute_metrics
snake_case = None
snake_case = self.prediction_loop if self.args.use_legacy_prediction_loop else self.evaluation_loop
try:
snake_case = eval_loop(
__snake_case , description='''Evaluation''' , prediction_loss_only=True if compute_metrics is None else None , ignore_keys=__snake_case , )
finally:
snake_case = compute_metrics
if self.post_process_function is not None and self.compute_metrics is not None:
snake_case = self.post_process_function(__snake_case , __snake_case , output.predictions )
snake_case = self.compute_metrics(__snake_case )
# Prefix all keys with metric_key_prefix + '_'
for key in list(metrics.keys() ):
if not key.startswith(F'''{metric_key_prefix}_''' ):
snake_case = metrics.pop(__snake_case )
self.log(__snake_case )
else:
snake_case = {}
if self.args.tpu_metrics_debug or self.args.debug:
# tpu-comment: Logging debug metrics for PyTorch/XLA (compile, execute times, ops, etc.)
xm.master_print(met.metrics_report() )
snake_case = self.callback_handler.on_evaluate(self.args , self.state , self.control , __snake_case )
return metrics
def a_ ( self , __snake_case , __snake_case , __snake_case=None , __snake_case = "test" ):
snake_case = self.get_test_dataloader(__snake_case )
# Temporarily disable metric computation, we will do it in the loop here.
snake_case = self.compute_metrics
snake_case = None
snake_case = self.prediction_loop if self.args.use_legacy_prediction_loop else self.evaluation_loop
try:
snake_case = eval_loop(
__snake_case , description='''Prediction''' , prediction_loss_only=True if compute_metrics is None else None , ignore_keys=__snake_case , )
finally:
snake_case = compute_metrics
if self.post_process_function is None or self.compute_metrics is None:
return output
snake_case = self.post_process_function(__snake_case , __snake_case , output.predictions , '''predict''' )
snake_case = self.compute_metrics(__snake_case )
# Prefix all keys with metric_key_prefix + '_'
for key in list(metrics.keys() ):
if not key.startswith(F'''{metric_key_prefix}_''' ):
snake_case = metrics.pop(__snake_case )
return PredictionOutput(predictions=predictions.predictions , label_ids=predictions.label_ids , metrics=__snake_case )
def a_ ( self , __snake_case="./" ):
snake_case = self.eval_dataset
snake_case = self.get_eval_dataloader(__snake_case )
snake_case = next(iter(__snake_case ) )
# saving device - to make it consistent
snake_case = torch.device('''cuda''' if torch.cuda.is_available() else '''cpu''' )
# convert to tuple
snake_case = tuple(v.to(__snake_case ) for k, v in batch.items() )
logger.info('''Converting model to be onnx compatible''' )
from pytorch_quantization.nn import TensorQuantizer
snake_case = True
snake_case = self.model.to(__snake_case )
model.eval()
model.float()
snake_case = model.module if hasattr(__snake_case , '''module''' ) else model
quant_trainer.configure_model(__snake_case , self.quant_trainer_args )
snake_case = os.path.join(__snake_case , '''model.onnx''' )
logger.info(F'''exporting model to {output_model_file}''' )
snake_case = {0: '''batch_size''', 1: '''seq_len'''}
torch.onnx.export(
__snake_case , __snake_case , __snake_case , export_params=__snake_case , opset_version=1_3 , do_constant_folding=__snake_case , input_names=['''input_ids''', '''attention_mask''', '''token_type_ids'''] , output_names=['''output_start_logits''', '''output_end_logits'''] , dynamic_axes={
'''input_ids''': axes,
'''attention_mask''': axes,
'''token_type_ids''': axes,
'''output_start_logits''': axes,
'''output_end_logits''': axes,
} , verbose=__snake_case , )
logger.info('''onnx export finished''' )
| 127 | 1 |
class _UpperCAmelCase :
"""simple docstring"""
def __init__( self : Dict , lowerCAmelCase_ : list[int] ) -> Dict:
__lowerCAmelCase = len(__A )
__lowerCAmelCase = [0] * len_array
if len_array > 0:
__lowerCAmelCase = array[0]
for i in range(1 , __A ):
__lowerCAmelCase = self.prefix_sum[i - 1] + array[i]
def lowercase ( self : int , lowerCAmelCase_ : int , lowerCAmelCase_ : int ) -> List[str]:
if start == 0:
return self.prefix_sum[end]
return self.prefix_sum[end] - self.prefix_sum[start - 1]
def lowercase ( self : Tuple , lowerCAmelCase_ : int ) -> Union[str, Any]:
__lowerCAmelCase = {0}
for sum_item in self.prefix_sum:
if sum_item - target_sum in sums:
return True
sums.add(__A )
return False
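# Usage sketch (a minimal illustration; the obfuscated class above implements a prefix-sum array):
# for the array [1, 2, 3] the prefix sums are [1, 3, 6], so the range sum over indices 0..2 is 6,
# and a contiguous-subarray sum of 5 exists because 6 - 5 = 1 is itself a seen prefix sum.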
if __name__ == "__main__":
import doctest
doctest.testmod()
| 361 |
import argparse
import json
from pathlib import Path
import requests
import timm
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import DeiTImageProcessor, ViTConfig, ViTForImageClassification, ViTImageProcessor, ViTModel
from transformers.utils import logging
logging.set_verbosity_info()
_snake_case : List[str] = logging.get_logger(__name__)
def a_ ( lowerCAmelCase_ : Optional[Any], lowerCAmelCase_ : Union[str, Any]=False ):
__lowerCAmelCase = []
for i in range(config.num_hidden_layers ):
# encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
rename_keys.append((F"""blocks.{i}.norm1.weight""", F"""vit.encoder.layer.{i}.layernorm_before.weight""") )
rename_keys.append((F"""blocks.{i}.norm1.bias""", F"""vit.encoder.layer.{i}.layernorm_before.bias""") )
rename_keys.append((F"""blocks.{i}.attn.proj.weight""", F"""vit.encoder.layer.{i}.attention.output.dense.weight""") )
rename_keys.append((F"""blocks.{i}.attn.proj.bias""", F"""vit.encoder.layer.{i}.attention.output.dense.bias""") )
rename_keys.append((F"""blocks.{i}.norm2.weight""", F"""vit.encoder.layer.{i}.layernorm_after.weight""") )
rename_keys.append((F"""blocks.{i}.norm2.bias""", F"""vit.encoder.layer.{i}.layernorm_after.bias""") )
rename_keys.append((F"""blocks.{i}.mlp.fc1.weight""", F"""vit.encoder.layer.{i}.intermediate.dense.weight""") )
rename_keys.append((F"""blocks.{i}.mlp.fc1.bias""", F"""vit.encoder.layer.{i}.intermediate.dense.bias""") )
rename_keys.append((F"""blocks.{i}.mlp.fc2.weight""", F"""vit.encoder.layer.{i}.output.dense.weight""") )
rename_keys.append((F"""blocks.{i}.mlp.fc2.bias""", F"""vit.encoder.layer.{i}.output.dense.bias""") )
# projection layer + position embeddings
rename_keys.extend(
[
('cls_token', 'vit.embeddings.cls_token'),
('patch_embed.proj.weight', 'vit.embeddings.patch_embeddings.projection.weight'),
('patch_embed.proj.bias', 'vit.embeddings.patch_embeddings.projection.bias'),
('pos_embed', 'vit.embeddings.position_embeddings'),
] )
if base_model:
# layernorm + pooler
rename_keys.extend(
[
('norm.weight', 'layernorm.weight'),
('norm.bias', 'layernorm.bias'),
('pre_logits.fc.weight', 'pooler.dense.weight'),
('pre_logits.fc.bias', 'pooler.dense.bias'),
] )
# if just the base model, we should remove "vit" from all keys that start with "vit"
__lowerCAmelCase = [(pair[0], pair[1][4:]) if pair[1].startswith('vit' ) else pair for pair in rename_keys]
else:
# layernorm + classification head
rename_keys.extend(
[
('norm.weight', 'vit.layernorm.weight'),
('norm.bias', 'vit.layernorm.bias'),
('head.weight', 'classifier.weight'),
('head.bias', 'classifier.bias'),
] )
return rename_keys
def a_ ( lowerCAmelCase_ : List[Any], lowerCAmelCase_ : Dict, lowerCAmelCase_ : int=False ):
for i in range(config.num_hidden_layers ):
if base_model:
__lowerCAmelCase = ''
else:
__lowerCAmelCase = 'vit.'
# read in weights + bias of input projection layer (in timm, this is a single matrix + bias)
__lowerCAmelCase = state_dict.pop(F"""blocks.{i}.attn.qkv.weight""" )
__lowerCAmelCase = state_dict.pop(F"""blocks.{i}.attn.qkv.bias""" )
# next, add query, keys and values (in that order) to the state dict
__lowerCAmelCase = in_proj_weight[
: config.hidden_size, :
]
__lowerCAmelCase = in_proj_bias[: config.hidden_size]
__lowerCAmelCase = in_proj_weight[
config.hidden_size : config.hidden_size * 2, :
]
__lowerCAmelCase = in_proj_bias[
config.hidden_size : config.hidden_size * 2
]
__lowerCAmelCase = in_proj_weight[
-config.hidden_size :, :
]
__lowerCAmelCase = in_proj_bias[-config.hidden_size :]
def a_ ( lowerCAmelCase_ : Optional[int] ):
__lowerCAmelCase = ['head.weight', 'head.bias']
for k in ignore_keys:
state_dict.pop(lowerCAmelCase_, lowerCAmelCase_ )
def a_ ( lowerCAmelCase_ : Optional[Any], lowerCAmelCase_ : Tuple, lowerCAmelCase_ : Optional[int] ):
__lowerCAmelCase = dct.pop(lowerCAmelCase_ )
__lowerCAmelCase = val
def a_ ( ):
__lowerCAmelCase = 'http://images.cocodataset.org/val2017/000000039769.jpg'
__lowerCAmelCase = Image.open(requests.get(lowerCAmelCase_, stream=lowerCAmelCase_ ).raw )
return im
@torch.no_grad()
def a_ ( lowerCAmelCase_ : List[str], lowerCAmelCase_ : Union[str, Any] ):
__lowerCAmelCase = ViTConfig()
__lowerCAmelCase = False
# dataset (ImageNet-21k only or also fine-tuned on ImageNet 2012), patch_size and image_size
if vit_name[-5:] == "in21k":
__lowerCAmelCase = True
__lowerCAmelCase = int(vit_name[-12:-10] )
__lowerCAmelCase = int(vit_name[-9:-6] )
else:
__lowerCAmelCase = 1000
__lowerCAmelCase = 'huggingface/label-files'
__lowerCAmelCase = 'imagenet-1k-id2label.json'
__lowerCAmelCase = json.load(open(hf_hub_download(lowerCAmelCase_, lowerCAmelCase_, repo_type='dataset' ), 'r' ) )
__lowerCAmelCase = {int(lowerCAmelCase_ ): v for k, v in idalabel.items()}
__lowerCAmelCase = idalabel
__lowerCAmelCase = {v: k for k, v in idalabel.items()}
__lowerCAmelCase = int(vit_name[-6:-4] )
__lowerCAmelCase = int(vit_name[-3:] )
# size of the architecture
if "deit" in vit_name:
if vit_name[9:].startswith('tiny' ):
__lowerCAmelCase = 192
__lowerCAmelCase = 768
__lowerCAmelCase = 12
__lowerCAmelCase = 3
elif vit_name[9:].startswith('small' ):
__lowerCAmelCase = 384
__lowerCAmelCase = 1536
__lowerCAmelCase = 12
__lowerCAmelCase = 6
else:
pass
else:
if vit_name[4:].startswith('small' ):
__lowerCAmelCase = 768
__lowerCAmelCase = 2304
__lowerCAmelCase = 8
__lowerCAmelCase = 8
elif vit_name[4:].startswith('base' ):
pass
elif vit_name[4:].startswith('large' ):
__lowerCAmelCase = 1024
__lowerCAmelCase = 4096
__lowerCAmelCase = 24
__lowerCAmelCase = 16
elif vit_name[4:].startswith('huge' ):
__lowerCAmelCase = 1280
__lowerCAmelCase = 5120
__lowerCAmelCase = 32
__lowerCAmelCase = 16
# load original model from timm
__lowerCAmelCase = timm.create_model(lowerCAmelCase_, pretrained=lowerCAmelCase_ )
timm_model.eval()
# load state_dict of original model, remove and rename some keys
__lowerCAmelCase = timm_model.state_dict()
if base_model:
remove_classification_head_(lowerCAmelCase_ )
__lowerCAmelCase = create_rename_keys(lowerCAmelCase_, lowerCAmelCase_ )
for src, dest in rename_keys:
rename_key(lowerCAmelCase_, lowerCAmelCase_, lowerCAmelCase_ )
read_in_q_k_v(lowerCAmelCase_, lowerCAmelCase_, lowerCAmelCase_ )
# load HuggingFace model
if vit_name[-5:] == "in21k":
__lowerCAmelCase = ViTModel(lowerCAmelCase_ ).eval()
else:
__lowerCAmelCase = ViTForImageClassification(lowerCAmelCase_ ).eval()
model.load_state_dict(lowerCAmelCase_ )
# Check outputs on an image, prepared by ViTImageProcessor/DeiTImageProcessor
if "deit" in vit_name:
__lowerCAmelCase = DeiTImageProcessor(size=config.image_size )
else:
__lowerCAmelCase = ViTImageProcessor(size=config.image_size )
__lowerCAmelCase = image_processor(images=prepare_img(), return_tensors='pt' )
__lowerCAmelCase = encoding['pixel_values']
__lowerCAmelCase = model(lowerCAmelCase_ )
if base_model:
__lowerCAmelCase = timm_model.forward_features(lowerCAmelCase_ )
assert timm_pooled_output.shape == outputs.pooler_output.shape
assert torch.allclose(lowerCAmelCase_, outputs.pooler_output, atol=1E-3 )
else:
__lowerCAmelCase = timm_model(lowerCAmelCase_ )
assert timm_logits.shape == outputs.logits.shape
assert torch.allclose(lowerCAmelCase_, outputs.logits, atol=1E-3 )
Path(lowerCAmelCase_ ).mkdir(exist_ok=lowerCAmelCase_ )
print(F"""Saving model {vit_name} to {pytorch_dump_folder_path}""" )
model.save_pretrained(lowerCAmelCase_ )
print(F"""Saving image processor to {pytorch_dump_folder_path}""" )
image_processor.save_pretrained(lowerCAmelCase_ )
if __name__ == "__main__":
_snake_case : List[Any] = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--vit_name',
default='vit_base_patch16_224',
type=str,
help='Name of the ViT timm model you\'d like to convert.',
)
parser.add_argument(
'--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model directory.'
)
_snake_case : Optional[int] = parser.parse_args()
convert_vit_checkpoint(args.vit_name, args.pytorch_dump_folder_path)
| 207 | 0 |
'''simple docstring'''
def __lowerCamelCase ( A__ ) -> list:
"""simple docstring"""
UpperCamelCase = len(A__ )
for i in range(1 , A__ ):
UpperCamelCase = collection[i]
UpperCamelCase = 0
UpperCamelCase = i - 1
while low <= high:
UpperCamelCase = (low + high) // 2
if val < collection[mid]:
UpperCamelCase = mid - 1
else:
UpperCamelCase = mid + 1
for j in range(A__ , A__ , -1 ):
UpperCamelCase = collection[j - 1]
UpperCamelCase = val
return collection
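# Example: binary_insertion_sort([5, 2, 4, 1]) returns [1, 2, 4, 5], sorting in place.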
if __name__ == "__main__":
_lowerCamelCase : int = input("Enter numbers separated by a comma:\n").strip()
_lowerCamelCase : Union[str, Any] = [int(item) for item in user_input.split(",")]
print(binary_insertion_sort(unsorted))
| 28 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
__a : str = logging.get_logger(__name__)
__a : Optional[int] = {
"""google/vivit-b-16x2-kinetics400""": (
"""https://huggingface.co/google/vivit-b-16x2-kinetics400/resolve/main/config.json"""
),
# See all Vivit models at https://huggingface.co/models?filter=vivit
}
class _UpperCamelCase ( _UpperCAmelCase ):
"""simple docstring"""
__a : Tuple = '''vivit'''
def __init__( self , lowerCAmelCase__=2_24 , lowerCAmelCase__=32 , lowerCAmelCase__=[2, 16, 16] , lowerCAmelCase__=3 , lowerCAmelCase__=7_68 , lowerCAmelCase__=12 , lowerCAmelCase__=12 , lowerCAmelCase__=30_72 , lowerCAmelCase__="gelu_fast" , lowerCAmelCase__=0.0 , lowerCAmelCase__=0.0 , lowerCAmelCase__=0.02 , lowerCAmelCase__=1E-06 , lowerCAmelCase__=True , **lowerCAmelCase__ , ) -> int:
'''simple docstring'''
__lowercase = hidden_size
__lowercase = num_hidden_layers
__lowercase = num_attention_heads
__lowercase = intermediate_size
__lowercase = hidden_act
__lowercase = hidden_dropout_prob
__lowercase = attention_probs_dropout_prob
__lowercase = initializer_range
__lowercase = layer_norm_eps
__lowercase = image_size
__lowercase = num_frames
__lowercase = tubelet_size
__lowercase = num_channels
__lowercase = qkv_bias
        super().__init__(**lowerCAmelCase__ )
| 210 | 0 |
"""simple docstring"""
import gc
import tempfile
import unittest
import numpy as np
import torch
from diffusers import VersatileDiffusionPipeline
from diffusers.utils.testing_utils import load_image, nightly, require_torch_gpu, torch_device
lowerCAmelCase__ = False
class _lowerCamelCase ( unittest.TestCase ):
pass
@nightly
@require_torch_gpu
class _lowerCamelCase ( unittest.TestCase ):
def snake_case_ (self ) -> List[Any]:
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def snake_case_ (self ) -> int:
UpperCamelCase = VersatileDiffusionPipeline.from_pretrained("shi-labs/versatile-diffusion" , torch_dtype=torch.floataa )
pipe.to(__a )
pipe.set_progress_bar_config(disable=__a )
UpperCamelCase = load_image(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/versatile_diffusion/benz.jpg" )
UpperCamelCase = torch.manual_seed(0 )
UpperCamelCase = pipe.dual_guided(
prompt="first prompt" , image=__a , text_to_image_strength=0.75 , generator=__a , guidance_scale=7.5 , num_inference_steps=2 , output_type="numpy" , ).images
with tempfile.TemporaryDirectory() as tmpdirname:
pipe.save_pretrained(__a )
            UpperCamelCase = VersatileDiffusionPipeline.from_pretrained(__a , torch_dtype=torch.float16 )
pipe.to(__a )
pipe.set_progress_bar_config(disable=__a )
UpperCamelCase = generator.manual_seed(0 )
UpperCamelCase = pipe.dual_guided(
prompt="first prompt" , image=__a , text_to_image_strength=0.75 , generator=__a , guidance_scale=7.5 , num_inference_steps=2 , output_type="numpy" , ).images
assert np.abs(image - new_image ).sum() < 1e-5, "Models don't have the same forward pass"
def snake_case_ (self ) -> Optional[int]:
UpperCamelCase = VersatileDiffusionPipeline.from_pretrained("shi-labs/versatile-diffusion" , torch_dtype=torch.floataa )
pipe.to(__a )
pipe.set_progress_bar_config(disable=__a )
UpperCamelCase = "cyberpunk 2077"
UpperCamelCase = load_image(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/versatile_diffusion/benz.jpg" )
UpperCamelCase = torch.manual_seed(0 )
UpperCamelCase = pipe.dual_guided(
prompt=__a , image=__a , text_to_image_strength=0.75 , generator=__a , guidance_scale=7.5 , num_inference_steps=50 , output_type="numpy" , ).images
UpperCamelCase = image[0, 2_53:2_56, 2_53:2_56, -1]
assert image.shape == (1, 5_12, 5_12, 3)
UpperCamelCase = np.array([0.1448, 0.1619, 0.1741, 0.1086, 0.1147, 0.1128, 0.1199, 0.1165, 0.1001] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-1
UpperCamelCase = "A painting of a squirrel eating a burger "
UpperCamelCase = torch.manual_seed(0 )
UpperCamelCase = pipe.text_to_image(
prompt=__a , generator=__a , guidance_scale=7.5 , num_inference_steps=50 , output_type="numpy" ).images
UpperCamelCase = image[0, 2_53:2_56, 2_53:2_56, -1]
assert image.shape == (1, 5_12, 5_12, 3)
UpperCamelCase = np.array([0.3367, 0.3169, 0.2656, 0.3870, 0.4790, 0.3796, 0.4009, 0.4878, 0.4778] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-1
UpperCamelCase = pipe.image_variation(__a , generator=__a , output_type="numpy" ).images
UpperCamelCase = image[0, 2_53:2_56, 2_53:2_56, -1]
assert image.shape == (1, 5_12, 5_12, 3)
UpperCamelCase = np.array([0.3076, 0.3123, 0.3284, 0.3782, 0.3770, 0.3894, 0.4297, 0.4331, 0.4456] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-1
| 244 |
"""simple docstring"""
import dataclasses
import json
import sys
import types
from argparse import ArgumentDefaultsHelpFormatter, ArgumentParser, ArgumentTypeError
from copy import copy
from enum import Enum
from inspect import isclass
from pathlib import Path
from typing import Any, Callable, Dict, Iterable, List, Literal, NewType, Optional, Tuple, Union, get_type_hints
import yaml
lowerCAmelCase__ = NewType('''DataClass''', Any)
lowerCAmelCase__ = NewType('''DataClassType''', Any)
def a__ ( _SCREAMING_SNAKE_CASE ):
"""simple docstring"""
if isinstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ):
return v
if v.lower() in ("yes", "true", "t", "y", "1"):
return True
elif v.lower() in ("no", "false", "f", "n", "0"):
return False
else:
raise ArgumentTypeError(
F"Truthy value expected: got {v} but expected one of yes/no, true/false, t/f, y/n, 1/0 (case insensitive)." )
def a__ ( _SCREAMING_SNAKE_CASE ):
"""simple docstring"""
UpperCamelCase = {str(_SCREAMING_SNAKE_CASE ): choice for choice in choices}
return lambda _SCREAMING_SNAKE_CASE : str_to_choice.get(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
def a__ ( *,
_SCREAMING_SNAKE_CASE = None , _SCREAMING_SNAKE_CASE = None , _SCREAMING_SNAKE_CASE = dataclasses.MISSING , _SCREAMING_SNAKE_CASE = dataclasses.MISSING , _SCREAMING_SNAKE_CASE = None , **_SCREAMING_SNAKE_CASE , ):
"""simple docstring"""
if metadata is None:
# Important, don't use as default param in function signature because dict is mutable and shared across function calls
UpperCamelCase = {}
if aliases is not None:
UpperCamelCase = aliases
if help is not None:
UpperCamelCase = help
return dataclasses.field(metadata=_SCREAMING_SNAKE_CASE , default=_SCREAMING_SNAKE_CASE , default_factory=_SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE )
class _lowerCamelCase ( _lowercase ):
UpperCAmelCase_ = 42
def __init__(self , __a , **__a ) -> Any:
# To make the default appear when using --help
if "formatter_class" not in kwargs:
UpperCamelCase = ArgumentDefaultsHelpFormatter
super().__init__(**__a )
if dataclasses.is_dataclass(__a ):
UpperCamelCase = [dataclass_types]
UpperCamelCase = list(__a )
for dtype in self.dataclass_types:
self._add_dataclass_arguments(__a )
@staticmethod
def snake_case_ (__a , __a ) -> Optional[Any]:
UpperCamelCase = F"--{field.name}"
UpperCamelCase = field.metadata.copy()
# field.metadata is not used at all by Data Classes,
# it is provided as a third-party extension mechanism.
if isinstance(field.type , __a ):
raise RuntimeError(
"Unresolved type detected, which should have been done with the help of "
"`typing.get_type_hints` method by default" )
UpperCamelCase = kwargs.pop("aliases" , [] )
if isinstance(__a , __a ):
UpperCamelCase = [aliases]
UpperCamelCase = getattr(field.type , "__origin__" , field.type )
if origin_type is Union or (hasattr(__a , "UnionType" ) and isinstance(__a , types.UnionType )):
if str not in field.type.__args__ and (
len(field.type.__args__ ) != 2 or type(__a ) not in field.type.__args__
):
raise ValueError(
"Only `Union[X, NoneType]` (i.e., `Optional[X]`) is allowed for `Union` because"
" the argument parser only supports one type per argument."
F" Problem encountered in field '{field.name}'." )
if type(__a ) not in field.type.__args__:
# filter `str` in Union
UpperCamelCase = field.type.__args__[0] if field.type.__args__[1] == str else field.type.__args__[1]
UpperCamelCase = getattr(field.type , "__origin__" , field.type )
elif bool not in field.type.__args__:
# filter `NoneType` in Union (except for `Union[bool, NoneType]`)
UpperCamelCase = (
field.type.__args__[0] if isinstance(__a , field.type.__args__[1] ) else field.type.__args__[1]
)
UpperCamelCase = getattr(field.type , "__origin__" , field.type )
# A variable to store kwargs for a boolean field, if needed
# so that we can init a `no_*` complement argument (see below)
UpperCamelCase = {}
if origin_type is Literal or (isinstance(field.type , __a ) and issubclass(field.type , __a )):
if origin_type is Literal:
UpperCamelCase = field.type.__args__
else:
UpperCamelCase = [x.value for x in field.type]
UpperCamelCase = make_choice_type_function(kwargs["choices"] )
if field.default is not dataclasses.MISSING:
UpperCamelCase = field.default
else:
UpperCamelCase = True
elif field.type is bool or field.type == Optional[bool]:
# Copy the currect kwargs to use to instantiate a `no_*` complement argument below.
# We do not initialize it here because the `no_*` alternative must be instantiated after the real argument
UpperCamelCase = copy(__a )
# Hack because type=bool in argparse does not behave as we want.
UpperCamelCase = string_to_bool
if field.type is bool or (field.default is not None and field.default is not dataclasses.MISSING):
# Default value is False if we have no default when of type bool.
UpperCamelCase = False if field.default is dataclasses.MISSING else field.default
# This is the value that will get picked if we don't include --field_name in any way
UpperCamelCase = default
# This tells argparse we accept 0 or 1 value after --field_name
UpperCamelCase = "?"
# This is the value that will get picked if we do --field_name (without value)
UpperCamelCase = True
elif isclass(__a ) and issubclass(__a , __a ):
UpperCamelCase = field.type.__args__[0]
UpperCamelCase = "+"
if field.default_factory is not dataclasses.MISSING:
UpperCamelCase = field.default_factory()
elif field.default is dataclasses.MISSING:
UpperCamelCase = True
else:
UpperCamelCase = field.type
if field.default is not dataclasses.MISSING:
UpperCamelCase = field.default
elif field.default_factory is not dataclasses.MISSING:
UpperCamelCase = field.default_factory()
else:
UpperCamelCase = True
parser.add_argument(__a , *__a , **__a )
# Add a complement `no_*` argument for a boolean field AFTER the initial field has already been added.
# Order is important for arguments with the same destination!
# We use a copy of earlier kwargs because the original kwargs have changed a lot before reaching down
# here and we do not need those changes/additional keys.
if field.default is True and (field.type is bool or field.type == Optional[bool]):
UpperCamelCase = False
parser.add_argument(F"--no_{field.name}" , action="store_false" , dest=field.name , **__a )
def snake_case_ (self , __a ) -> List[Any]:
if hasattr(__a , "_argument_group_name" ):
UpperCamelCase = self.add_argument_group(dtype._argument_group_name )
else:
UpperCamelCase = self
try:
UpperCamelCase = get_type_hints(__a )
except NameError:
raise RuntimeError(
F"Type resolution failed for {dtype}. Try declaring the class in global scope or "
"removing line of `from __future__ import annotations` which opts in Postponed "
"Evaluation of Annotations (PEP 563)" )
except TypeError as ex:
# Remove this block when we drop Python 3.9 support
if sys.version_info[:2] < (3, 10) and "unsupported operand type(s) for |" in str(__a ):
UpperCamelCase = ".".join(map(__a , sys.version_info[:3] ) )
raise RuntimeError(
F"Type resolution failed for {dtype} on Python {python_version}. Try removing "
"line of `from __future__ import annotations` which opts in union types as "
"`X | Y` (PEP 604) via Postponed Evaluation of Annotations (PEP 563). To "
"support Python versions that lower than 3.10, you need to use "
"`typing.Union[X, Y]` instead of `X | Y` and `typing.Optional[X]` instead of "
"`X | None`." ) from ex
raise
for field in dataclasses.fields(__a ):
if not field.init:
continue
UpperCamelCase = type_hints[field.name]
self._parse_dataclass_field(__a , __a )
def snake_case_ (self , __a=None , __a=False , __a=True , __a=None , __a=None , ) -> Tuple[DataClass, ...]:
if args_file_flag or args_filename or (look_for_args_file and len(sys.argv )):
UpperCamelCase = []
if args_filename:
args_files.append(Path(__a ) )
elif look_for_args_file and len(sys.argv ):
args_files.append(Path(sys.argv[0] ).with_suffix(".args" ) )
# args files specified via command line flag should overwrite default args files so we add them last
if args_file_flag:
# Create special parser just to extract the args_file_flag values
UpperCamelCase = ArgumentParser()
args_file_parser.add_argument(__a , type=__a , action="append" )
# Use only remaining args for further parsing (remove the args_file_flag)
UpperCamelCase , UpperCamelCase = args_file_parser.parse_known_args(args=__a )
UpperCamelCase = vars(__a ).get(args_file_flag.lstrip("-" ) , __a )
if cmd_args_file_paths:
args_files.extend([Path(__a ) for p in cmd_args_file_paths] )
UpperCamelCase = []
for args_file in args_files:
if args_file.exists():
file_args += args_file.read_text().split()
# in case of duplicate arguments the last one has precedence
# args specified via the command line should overwrite args from files, so we add them last
UpperCamelCase = file_args + args if args is not None else file_args + sys.argv[1:]
UpperCamelCase , UpperCamelCase = self.parse_known_args(args=__a )
UpperCamelCase = []
for dtype in self.dataclass_types:
UpperCamelCase = {f.name for f in dataclasses.fields(__a ) if f.init}
UpperCamelCase = {k: v for k, v in vars(__a ).items() if k in keys}
for k in keys:
delattr(__a , __a )
UpperCamelCase = dtype(**__a )
outputs.append(__a )
if len(namespace.__dict__ ) > 0:
# additional namespace.
outputs.append(__a )
if return_remaining_strings:
return (*outputs, remaining_args)
else:
if remaining_args:
raise ValueError(F"Some specified arguments are not used by the HfArgumentParser: {remaining_args}" )
return (*outputs,)
def snake_case_ (self , __a , __a = False ) -> Tuple[DataClass, ...]:
UpperCamelCase = set(args.keys() )
UpperCamelCase = []
for dtype in self.dataclass_types:
UpperCamelCase = {f.name for f in dataclasses.fields(__a ) if f.init}
UpperCamelCase = {k: v for k, v in args.items() if k in keys}
unused_keys.difference_update(inputs.keys() )
UpperCamelCase = dtype(**__a )
outputs.append(__a )
if not allow_extra_keys and unused_keys:
raise ValueError(F"Some keys are not used by the HfArgumentParser: {sorted(__a )}" )
return tuple(__a )
def snake_case_ (self , __a , __a = False ) -> Tuple[DataClass, ...]:
with open(Path(__a ) , encoding="utf-8" ) as open_json_file:
UpperCamelCase = json.loads(open_json_file.read() )
UpperCamelCase = self.parse_dict(__a , allow_extra_keys=__a )
return tuple(__a )
def snake_case_ (self , __a , __a = False ) -> Tuple[DataClass, ...]:
UpperCamelCase = self.parse_dict(yaml.safe_load(Path(__a ).read_text() ) , allow_extra_keys=__a )
return tuple(__a )
| 244 | 1 |
"""simple docstring"""
import logging
import os
from .state import PartialState
class UpperCAmelCase_ ( logging.LoggerAdapter):
@staticmethod
def _UpperCAmelCase ( a ) -> Dict:
lowercase__ : Any = PartialState()
return not main_process_only or (main_process_only and state.is_main_process)
def _UpperCAmelCase ( self , a , a , *a , **a ) -> Union[str, Any]:
if PartialState._shared_state == {}:
raise RuntimeError(
'You must initialize the accelerate state by calling either `PartialState()` or `Accelerator()` before using the logging utility.' )
lowercase__ : str = kwargs.pop('main_process_only' , a )
lowercase__ : Optional[int] = kwargs.pop('in_order' , a )
if self.isEnabledFor(a ):
if self._should_log(a ):
lowercase__ , lowercase__ : int = self.process(a , a )
self.logger.log(a , a , *a , **a )
elif in_order:
lowercase__ : Dict = PartialState()
for i in range(state.num_processes ):
if i == state.process_index:
lowercase__ , lowercase__ : Optional[Any] = self.process(a , a )
self.logger.log(a , a , *a , **a )
state.wait_for_everyone()
def a_ ( _lowerCAmelCase : str , _lowerCAmelCase : str = None ):
'''simple docstring'''
if log_level is None:
lowercase__ : Optional[Any] = os.environ.get('ACCELERATE_LOG_LEVEL' , _lowerCAmelCase )
lowercase__ : List[Any] = logging.getLogger(_lowerCAmelCase )
if log_level is not None:
logger.setLevel(log_level.upper() )
logger.root.setLevel(log_level.upper() )
return MultiProcessAdapter(_lowerCAmelCase , {} )
| 77 |
"""simple docstring"""
import json
import os
from functools import lru_cache
from typing import Dict, List, Optional, Tuple, Union
import regex as re
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...tokenization_utils_base import BatchEncoding, EncodedInput
from ...utils import PaddingStrategy, logging
_UpperCamelCase : List[Any] = logging.get_logger(__name__)
_UpperCamelCase : str = {"vocab_file": "vocab.json", "merges_file": "merges.txt"}
# See all LED models at https://huggingface.co/models?filter=LED
_UpperCamelCase : Optional[Any] = {
"vocab_file": {
"allenai/led-base-16384": "https://huggingface.co/allenai/led-base-16384/resolve/main/vocab.json",
},
"merges_file": {
"allenai/led-base-16384": "https://huggingface.co/allenai/led-base-16384/resolve/main/merges.txt",
},
"tokenizer_file": {
"allenai/led-base-16384": "https://huggingface.co/allenai/led-base-16384/resolve/main/tokenizer.json",
},
}
_UpperCamelCase : Optional[int] = {
"allenai/led-base-16384": 1_63_84,
}
@lru_cache()
# Copied from transformers.models.bart.tokenization_bart.bytes_to_unicode
def a_ ( ):
    '''Return the GPT-2 byte-to-unicode table mapping every byte value to a printable unicode character.'''
lowercase__ : int = (
list(range(ord('!' ) , ord('~' ) + 1 ) ) + list(range(ord('¡' ) , ord('¬' ) + 1 ) ) + list(range(ord('®' ) , ord('ÿ' ) + 1 ) )
)
lowercase__ : Union[str, Any] = bs[:]
lowercase__ : str = 0
for b in range(2**8 ):
if b not in bs:
bs.append(_lowerCAmelCase )
cs.append(2**8 + n )
n += 1
lowercase__ : str = [chr(_lowerCAmelCase ) for n in cs]
return dict(zip(_lowerCAmelCase , _lowerCAmelCase ) )
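# Non-printable bytes are remapped into the 256+ range; e.g. the space byte (0x20) becomes "Ġ".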
def a_ ( _lowerCAmelCase : int ):
    '''Return the set of adjacent symbol pairs in a word given as a tuple of symbols.'''
lowercase__ : Dict = set()
lowercase__ : Union[str, Any] = word[0]
for char in word[1:]:
pairs.add((prev_char, char) )
lowercase__ : Optional[Any] = char
return pairs
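# e.g. the word ("h", "e", "l", "l", "o") yields {("h", "e"), ("e", "l"), ("l", "l"), ("l", "o")}.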
class UpperCAmelCase_ ( _a):
lowerCamelCase__ : str = VOCAB_FILES_NAMES
lowerCamelCase__ : List[str] = PRETRAINED_VOCAB_FILES_MAP
lowerCamelCase__ : int = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
lowerCamelCase__ : Union[str, Any] = ["input_ids", "attention_mask"]
def __init__( self , a , a , a="replace" , a="<s>" , a="</s>" , a="</s>" , a="<s>" , a="<unk>" , a="<pad>" , a="<mask>" , a=False , **a , ) -> Any:
lowercase__ : Any = AddedToken(a , lstrip=a , rstrip=a ) if isinstance(a , a ) else bos_token
lowercase__ : List[str] = AddedToken(a , lstrip=a , rstrip=a ) if isinstance(a , a ) else eos_token
lowercase__ : List[str] = AddedToken(a , lstrip=a , rstrip=a ) if isinstance(a , a ) else sep_token
lowercase__ : Dict = AddedToken(a , lstrip=a , rstrip=a ) if isinstance(a , a ) else cls_token
lowercase__ : Any = AddedToken(a , lstrip=a , rstrip=a ) if isinstance(a , a ) else unk_token
lowercase__ : Tuple = AddedToken(a , lstrip=a , rstrip=a ) if isinstance(a , a ) else pad_token
# Mask token behave like a normal word, i.e. include the space before it
lowercase__ : Optional[int] = AddedToken(a , lstrip=a , rstrip=a ) if isinstance(a , a ) else mask_token
super().__init__(
errors=a , bos_token=a , eos_token=a , unk_token=a , sep_token=a , cls_token=a , pad_token=a , mask_token=a , add_prefix_space=a , **a , )
with open(a , encoding='utf-8' ) as vocab_handle:
lowercase__ : Tuple = json.load(a )
lowercase__ : Dict = {v: k for k, v in self.encoder.items()}
lowercase__ : str = errors # how to handle errors in decoding
lowercase__ : Optional[Any] = bytes_to_unicode()
lowercase__ : Optional[Any] = {v: k for k, v in self.byte_encoder.items()}
with open(a , encoding='utf-8' ) as merges_handle:
lowercase__ : Optional[Any] = merges_handle.read().split('\n' )[1:-1]
lowercase__ : Optional[int] = [tuple(merge.split() ) for merge in bpe_merges]
lowercase__ : Union[str, Any] = dict(zip(a , range(len(a ) ) ) )
lowercase__ : Tuple = {}
lowercase__ : List[str] = add_prefix_space
# Should have added re.IGNORECASE so BPE merges can happen for capitalized versions of contractions
lowercase__ : List[Any] = re.compile(R'\'s|\'t|\'re|\'ve|\'m|\'ll|\'d| ?\p{L}+| ?\p{N}+| ?[^\s\p{L}\p{N}]+|\s+(?!\S)|\s+' )
@property
# Copied from transformers.models.bart.tokenization_bart.BartTokenizer.vocab_size
def _UpperCAmelCase ( self ) -> List[Any]:
return len(self.encoder )
def _UpperCAmelCase ( self ) -> str:
return dict(self.encoder , **self.added_tokens_encoder )
def _UpperCAmelCase ( self , a ) -> List[str]:
if token in self.cache:
return self.cache[token]
lowercase__ : Optional[Any] = tuple(a )
lowercase__ : int = get_pairs(a )
if not pairs:
return token
while True:
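            # Greedily apply the lowest-ranked known merge; stop once no adjacent pair is in the merge table.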
lowercase__ : List[str] = min(a , key=lambda a : self.bpe_ranks.get(a , float('inf' ) ) )
if bigram not in self.bpe_ranks:
break
lowercase__ , lowercase__ : List[str] = bigram
lowercase__ : Union[str, Any] = []
lowercase__ : List[Any] = 0
while i < len(a ):
try:
lowercase__ : str = word.index(a , a )
except ValueError:
new_word.extend(word[i:] )
break
else:
new_word.extend(word[i:j] )
lowercase__ : Optional[int] = j
if word[i] == first and i < len(a ) - 1 and word[i + 1] == second:
new_word.append(first + second )
i += 2
else:
new_word.append(word[i] )
i += 1
lowercase__ : int = tuple(a )
lowercase__ : Dict = new_word
if len(a ) == 1:
break
else:
lowercase__ : Any = get_pairs(a )
lowercase__ : List[str] = ' '.join(a )
lowercase__ : Optional[Any] = word
return word
def _UpperCAmelCase ( self , a ) -> Union[str, Any]:
lowercase__ : Tuple = []
for token in re.findall(self.pat , a ):
lowercase__ : Union[str, Any] = ''.join(
self.byte_encoder[b] for b in token.encode('utf-8' ) ) # Maps all our bytes to unicode strings, avoiding control tokens of the BPE (spaces in our case)
bpe_tokens.extend(bpe_token for bpe_token in self.bpe(a ).split(' ' ) )
return bpe_tokens
def _UpperCAmelCase ( self , a ) -> Optional[Any]:
return self.encoder.get(a , self.encoder.get(self.unk_token ) )
def _UpperCAmelCase ( self , a ) -> Optional[int]:
return self.decoder.get(a )
def _UpperCAmelCase ( self , a ) -> str:
lowercase__ : Any = ''.join(a )
lowercase__ : Dict = bytearray([self.byte_decoder[c] for c in text] ).decode('utf-8' , errors=self.errors )
return text
def _UpperCAmelCase ( self , a , a = None ) -> Tuple[str]:
if not os.path.isdir(a ):
logger.error(f"""Vocabulary path ({save_directory}) should be a directory""" )
return
lowercase__ : Any = os.path.join(
a , (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file'] )
lowercase__ : str = os.path.join(
a , (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['merges_file'] )
with open(a , 'w' , encoding='utf-8' ) as f:
f.write(json.dumps(self.encoder , indent=2 , sort_keys=a , ensure_ascii=a ) + '\n' )
lowercase__ : List[Any] = 0
with open(a , 'w' , encoding='utf-8' ) as writer:
writer.write('#version: 0.2\n' )
for bpe_tokens, token_index in sorted(self.bpe_ranks.items() , key=lambda a : kv[1] ):
if index != token_index:
logger.warning(
f"""Saving vocabulary to {merge_file}: BPE merge indices are not consecutive."""
' Please check that the tokenizer is not corrupted!' )
lowercase__ : Union[str, Any] = token_index
writer.write(' '.join(a ) + '\n' )
index += 1
return vocab_file, merge_file
def _UpperCAmelCase ( self , a , a = None ) -> List[int]:
if token_ids_a is None:
return [self.cls_token_id] + token_ids_a + [self.sep_token_id]
lowercase__ : Union[str, Any] = [self.cls_token_id]
lowercase__ : Tuple = [self.sep_token_id]
return cls + token_ids_a + sep + sep + token_ids_a + sep
def _UpperCAmelCase ( self , a , a = None , a = False ) -> List[int]:
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=a , token_ids_a=a , already_has_special_tokens=a )
if token_ids_a is None:
return [1] + ([0] * len(a )) + [1]
return [1] + ([0] * len(a )) + [1, 1] + ([0] * len(a )) + [1]
def _UpperCAmelCase ( self , a , a = None ) -> List[int]:
lowercase__ : Dict = [self.sep_token_id]
lowercase__ : Optional[Any] = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
def _UpperCAmelCase ( self , a , a=False , **a ) -> Optional[int]:
lowercase__ : Tuple = kwargs.pop('add_prefix_space' , self.add_prefix_space )
if (is_split_into_words or add_prefix_space) and (len(a ) > 0 and not text[0].isspace()):
lowercase__ : List[str] = ' ' + text
return (text, kwargs)
def _UpperCAmelCase ( self , a , a = None , a = PaddingStrategy.DO_NOT_PAD , a = None , a = None , ) -> dict:
lowercase__ : Dict = super()._pad(
encoded_inputs=a , max_length=a , padding_strategy=a , pad_to_multiple_of=a , return_attention_mask=a , )
# Load from model defaults
if return_attention_mask is None:
lowercase__ : Union[str, Any] = 'attention_mask' in self.model_input_names
if return_attention_mask and "global_attention_mask" in encoded_inputs:
lowercase__ : Any = encoded_inputs[self.model_input_names[0]]
# `global_attention_mask` need to have the same length as other (sequential) inputs.
lowercase__ : Tuple = len(encoded_inputs['global_attention_mask'] ) != len(a )
if needs_to_be_padded:
lowercase__ : str = len(a ) - len(encoded_inputs['global_attention_mask'] )
if self.padding_side == "right":
# Use `-1` since `0` in `global_attention_mask` means `local attention` instead of `not to attend`
lowercase__ : Union[str, Any] = (
encoded_inputs['global_attention_mask'] + [-1] * difference
)
elif self.padding_side == "left":
lowercase__ : List[str] = [-1] * difference + encoded_inputs[
'global_attention_mask'
]
else:
raise ValueError('Invalid padding strategy:' + str(self.padding_side ) )
return encoded_inputs
| 77 | 1 |
from typing import List, Optional, Union
import numpy as np
import PIL.Image
from ...image_processing_utils import BaseImageProcessor, BatchFeature
from ...image_transforms import rescale, resize, to_channel_dimension_format
from ...image_utils import (
ChannelDimension,
PILImageResampling,
get_image_size,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, logging
UpperCAmelCase = logging.get_logger(__name__)
class lowerCAmelCase_ ( lowerCamelCase__ ):
'''simple docstring'''
__snake_case = ["pixel_values"]
def __init__( self , do_resize = True , size_divisor = 32 , resample=PILImageResampling.BILINEAR , do_rescale = True , **kwargs , ):
self.do_resize = do_resize
self.do_rescale = do_rescale
self.size_divisor = size_divisor
self.resample = resample
super().__init__(**kwargs )
def UpperCamelCase__ ( self , image , size_divisor , resample , data_format = None , **kwargs ):
height , width = get_image_size(image )
# Rounds the height and width down to the closest multiple of size_divisor
new_h = height // size_divisor * size_divisor
new_w = width // size_divisor * size_divisor
image = resize(image , (new_h, new_w) , resample=resample , data_format=data_format , **kwargs )
return image
def UpperCamelCase__ ( self , image , scale , data_format = None , **kwargs ):
return rescale(image=image , scale=scale , data_format=data_format , **kwargs )
def UpperCamelCase__ ( self , images , do_resize = None , size_divisor = None , resample=None , do_rescale = None , return_tensors = None , data_format = ChannelDimension.FIRST , **kwargs , ):
do_resize = do_resize if do_resize is not None else self.do_resize
do_rescale = do_rescale if do_rescale is not None else self.do_rescale
size_divisor = size_divisor if size_divisor is not None else self.size_divisor
resample = resample if resample is not None else self.resample
if do_resize and size_divisor is None:
raise ValueError('''size_divisor is required for resizing''' )
images = make_list_of_images(images )
if not valid_images(images ):
raise ValueError('''Invalid image(s)''' )
# All transformations expect numpy arrays.
images = [to_numpy_array(img ) for img in images]
if do_resize:
images = [self.resize(image , size_divisor=size_divisor , resample=resample ) for image in images]
if do_rescale:
images = [self.rescale(image , scale=1 / 2_55 ) for image in images]
images = [to_channel_dimension_format(image , data_format ) for image in images]
data = {'''pixel_values''': images}
return BatchFeature(data=data , tensor_type=return_tensors )
| 267 |
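# Quick check of the resizing rule in the processor above: height and width are floored
# to the nearest multiple of `size_divisor` (default 32), so a 300x500 image is resized
# to 288x480. A standalone arithmetic sketch, not a call into the class itself.
def rounded_size(height: int, width: int, size_divisor: int = 32) -> tuple:
    return height // size_divisor * size_divisor, width // size_divisor * size_divisor

assert rounded_size(300, 500) == (288, 480)
assert rounded_size(224, 224) == (224, 224)  # already a multiple of 32: unchanged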
import unittest
from transformers import PegasusTokenizer, PegasusTokenizerFast
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, require_torch, slow
from transformers.utils import cached_property
from ...test_tokenization_common import TokenizerTesterMixin
UpperCAmelCase = get_tests_dir("""fixtures/test_sentencepiece_no_bos.model""")
@require_sentencepiece
@require_tokenizers
class lowerCAmelCase_ ( lowerCamelCase__ , unittest.TestCase ):
'''simple docstring'''
__snake_case = PegasusTokenizer
__snake_case = PegasusTokenizerFast
__snake_case = True
__snake_case = True
def UpperCamelCase__ ( self ):
super().setUp()
# We have a SentencePiece fixture for testing
tokenizer = PegasusTokenizer(UpperCAmelCase )
tokenizer.save_pretrained(self.tmpdirname )
@cached_property
def UpperCamelCase__ ( self ):
return PegasusTokenizer.from_pretrained('''google/pegasus-large''' )
def UpperCamelCase__ ( self , **_UpperCAmelCase ):
return PegasusTokenizer.from_pretrained(self.tmpdirname , **_UpperCAmelCase )
def UpperCamelCase__ ( self , _UpperCAmelCase ):
return ("This is a test", "This is a test")
def UpperCamelCase__ ( self ):
token = '''</s>'''
token_id = 1
self.assertEqual(self.get_tokenizer()._convert_token_to_id(token ) , token_id )
self.assertEqual(self.get_tokenizer()._convert_id_to_token(token_id ) , token )
def UpperCamelCase__ ( self ):
vocab_keys = list(self.get_tokenizer().get_vocab().keys() )
self.assertEqual(vocab_keys[0] , '''<pad>''' )
self.assertEqual(vocab_keys[1] , '''</s>''' )
self.assertEqual(vocab_keys[-1] , '''v''' )
self.assertEqual(len(vocab_keys ) , 11_03 )
def UpperCamelCase__ ( self ):
self.assertEqual(self.get_tokenizer().vocab_size , 11_03 )
def UpperCamelCase__ ( self ):
rust_tokenizer = self.rust_tokenizer_class.from_pretrained(self.tmpdirname )
py_tokenizer = self.tokenizer_class.from_pretrained(self.tmpdirname )
raw_input_str = (
'''Let\'s see which <unk> is the better <unk_token_11> one <mask_1> It seems like this <mask_2> was important'''
''' </s> <pad> <pad> <pad>'''
)
rust_ids = rust_tokenizer([raw_input_str] , return_tensors=None , add_special_tokens=False ).input_ids[0]
py_ids = py_tokenizer([raw_input_str] , return_tensors=None , add_special_tokens=False ).input_ids[0]
self.assertListEqual(py_ids , rust_ids )
def UpperCamelCase__ ( self ):
tokenizer = self._large_tokenizer
# <mask_1> masks whole sentence while <mask_2> masks single word
raw_input_str = '''<mask_1> To ensure a <mask_2> flow of bank resolutions.'''
desired_result = [2, 4_13, 6_15, 1_14, 3, 19_71, 1_13, 16_79, 1_07_10, 1_07, 1]
ids = tokenizer([raw_input_str] , return_tensors=None ).input_ids[0]
self.assertListEqual(desired_result , ids )
def UpperCamelCase__ ( self ):
tokenizer = self._large_tokenizer
# The tracebacks for the following asserts are **better** without messages or self.assertEqual
assert tokenizer.vocab_size == 9_61_03
assert tokenizer.pad_token_id == 0
assert tokenizer.eos_token_id == 1
assert tokenizer.offset == 1_03
assert tokenizer.unk_token_id == tokenizer.offset + 2 == 1_05
assert tokenizer.unk_token == "<unk>"
assert tokenizer.model_max_length == 10_24
raw_input_str = '''To ensure a smooth flow of bank resolutions.'''
desired_result = [4_13, 6_15, 1_14, 22_91, 19_71, 1_13, 16_79, 1_07_10, 1_07, 1]
ids = tokenizer([raw_input_str] , return_tensors=None ).input_ids[0]
self.assertListEqual(desired_result , ids )
assert tokenizer.convert_ids_to_tokens([0, 1, 2, 3] ) == ["<pad>", "</s>", "<mask_1>", "<mask_2>"]
@require_torch
def UpperCamelCase__ ( self ):
src_texts = ['''This is going to be way too long.''' * 1_50, '''short example''']
tgt_texts = ['''not super long but more than 5 tokens''', '''tiny''']
batch = self._large_tokenizer(src_texts , padding=True , truncation=True , return_tensors='''pt''' )
targets = self._large_tokenizer(
text_target=tgt_texts , max_length=5 , padding=True , truncation=True , return_tensors='''pt''' )
assert batch.input_ids.shape == (2, 10_24)
assert batch.attention_mask.shape == (2, 10_24)
assert targets["input_ids"].shape == (2, 5)
assert len(batch ) == 2  # input_ids, attention_mask.
@slow
def UpperCamelCase__ ( self ):
# fmt: off
snake_case_ = {'''input_ids''': [[3_89_79, 1_43, 1_84_85, 6_06, 1_30, 2_66_69, 8_76_86, 1_21, 5_41_89, 11_29, 1_11, 2_66_69, 8_76_86, 1_21, 91_14, 1_47_87, 1_21, 1_32_49, 1_58, 5_92, 9_56, 1_21, 1_46_21, 3_15_76, 1_43, 6_26_13, 1_08, 96_88, 9_30, 4_34_30, 1_15_62, 6_26_13, 3_04, 1_08, 1_14_43, 8_97, 1_08, 93_14, 1_74_15, 6_33_99, 1_08, 1_14_43, 76_14, 1_83_16, 1_18, 42_84, 71_48, 1_24_30, 1_43, 14_00, 2_57_03, 1_58, 1_11, 42_84, 71_48, 1_17_72, 1_43, 2_12_97, 10_64, 1_58, 1_22, 2_04, 35_06, 17_54, 11_33, 1_47_87, 15_81, 1_15, 3_32_24, 44_82, 1_11, 13_55, 1_10, 2_91_73, 3_17, 5_08_33, 1_08, 2_01_47, 9_46_65, 1_11, 7_71_98, 1_07, 1], [1_10, 6_26_13, 1_17, 6_38, 1_12, 11_33, 1_21, 2_00_98, 13_55, 7_90_50, 1_38_72, 1_35, 15_96, 5_35_41, 13_52, 1_41, 1_30_39, 55_42, 1_24, 3_02, 5_18, 1_11, 2_68, 29_56, 1_15, 1_49, 44_27, 1_07, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1_39, 12_35, 27_99, 1_82_89, 1_77_80, 2_04, 1_09, 94_74, 12_96, 1_07, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], '''attention_mask''': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501
# fmt: on
self.tokenizer_integration_test_util(
expected_encoding=snake_case_ , model_name='''google/bigbird-pegasus-large-arxiv''' , revision='''ba85d0851d708441f91440d509690f1ab6353415''' , )
@require_sentencepiece
@require_tokenizers
class lowerCAmelCase_ ( lowerCamelCase__ , unittest.TestCase ):
'''simple docstring'''
__snake_case = PegasusTokenizer
__snake_case = PegasusTokenizerFast
__snake_case = True
__snake_case = True
def UpperCamelCase__ ( self ):
super().setUp()
# We have a SentencePiece fixture for testing
tokenizer = PegasusTokenizer(UpperCAmelCase , offset=0 , mask_token_sent=None , mask_token='''[MASK]''' )
tokenizer.save_pretrained(self.tmpdirname )
@cached_property
def UpperCamelCase__ ( self ):
return PegasusTokenizer.from_pretrained('''google/bigbird-pegasus-large-arxiv''' )
def UpperCamelCase__ ( self , **_UpperCAmelCase ):
return PegasusTokenizer.from_pretrained(self.tmpdirname , **_UpperCAmelCase )
def UpperCamelCase__ ( self , _UpperCAmelCase ):
return ("This is a test", "This is a test")
def UpperCamelCase__ ( self ):
rust_tokenizer = self.rust_tokenizer_class.from_pretrained(self.tmpdirname )
py_tokenizer = self.tokenizer_class.from_pretrained(self.tmpdirname )
raw_input_str = (
'''Let\'s see which <unk> is the better <unk_token> one [MASK] It seems like this [MASK] was important </s>'''
''' <pad> <pad> <pad>'''
)
rust_ids = rust_tokenizer([raw_input_str] , return_tensors=None , add_special_tokens=False ).input_ids[0]
py_ids = py_tokenizer([raw_input_str] , return_tensors=None , add_special_tokens=False ).input_ids[0]
self.assertListEqual(py_ids , rust_ids )
@require_torch
def UpperCamelCase__ ( self ):
src_texts = ['''This is going to be way too long.''' * 10_00, '''short example''']
tgt_texts = ['''not super long but more than 5 tokens''', '''tiny''']
batch = self._large_tokenizer(src_texts , padding=True , truncation=True , return_tensors='''pt''' )
targets = self._large_tokenizer(
text_target=tgt_texts , max_length=5 , padding=True , truncation=True , return_tensors='''pt''' )
assert batch.input_ids.shape == (2, 40_96)
assert batch.attention_mask.shape == (2, 40_96)
assert targets["input_ids"].shape == (2, 5)
assert len(batch ) == 2  # input_ids, attention_mask.
def UpperCamelCase__ ( self ):
text = (
'''This is an example string that is used to test the original TF implementation against the HF'''
''' implementation'''
)
ids = self._large_tokenizer(text ).input_ids
self.assertListEqual(
ids , [1_82, 1_17, 1_42, 5_87, 42_11, 1_20, 1_17, 2_63, 1_12, 8_04, 1_09, 8_56, 2_50_16, 31_37, 4_64, 1_09, 2_69_55, 31_37, 1] , )
| 267 | 1 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
__lowerCamelCase = {
"""configuration_blenderbot_small""": [
"""BLENDERBOT_SMALL_PRETRAINED_CONFIG_ARCHIVE_MAP""",
"""BlenderbotSmallConfig""",
"""BlenderbotSmallOnnxConfig""",
],
"""tokenization_blenderbot_small""": ["""BlenderbotSmallTokenizer"""],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__lowerCamelCase = ["""BlenderbotSmallTokenizerFast"""]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__lowerCamelCase = [
"""BLENDERBOT_SMALL_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""BlenderbotSmallForCausalLM""",
"""BlenderbotSmallForConditionalGeneration""",
"""BlenderbotSmallModel""",
"""BlenderbotSmallPreTrainedModel""",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__lowerCamelCase = [
"""TFBlenderbotSmallForConditionalGeneration""",
"""TFBlenderbotSmallModel""",
"""TFBlenderbotSmallPreTrainedModel""",
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__lowerCamelCase = [
"""FlaxBlenderbotSmallForConditionalGeneration""",
"""FlaxBlenderbotSmallModel""",
"""FlaxBlenderbotSmallPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_blenderbot_small import (
BLENDERBOT_SMALL_PRETRAINED_CONFIG_ARCHIVE_MAP,
BlenderbotSmallConfig,
BlenderbotSmallOnnxConfig,
)
from .tokenization_blenderbot_small import BlenderbotSmallTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_blenderbot_small_fast import BlenderbotSmallTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_blenderbot_small import (
BLENDERBOT_SMALL_PRETRAINED_MODEL_ARCHIVE_LIST,
BlenderbotSmallForCausalLM,
BlenderbotSmallForConditionalGeneration,
BlenderbotSmallModel,
BlenderbotSmallPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_blenderbot_small import (
TFBlenderbotSmallForConditionalGeneration,
TFBlenderbotSmallModel,
TFBlenderbotSmallPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_blenderbot_small import (
FlaxBlenderbotSmallForConditionalGeneration,
FlaxBlenderbotSmallModel,
FlaxBlenderbotSmallPreTrainedModel,
)
else:
import sys
__lowerCamelCase = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 59 |
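# Minimal sketch of the lazy-import pattern used in the module init above. This is a
# simplified stand-in, not the real `transformers` `_LazyModule`: attribute access
# triggers the import instead of paying the cost at module load time.
import importlib
import types

class TinyLazyModule(types.ModuleType):
    def __init__(self, name, import_structure):
        super().__init__(name)
        self._import_structure = import_structure

    def __getattr__(self, attr):
        for submodule, names in self._import_structure.items():
            if attr in names:
                module = importlib.import_module(submodule)  # imported only now
                return getattr(module, attr)
        raise AttributeError(attr)

demo = TinyLazyModule("demo", {"math": ["sqrt"]})
assert demo.sqrt(9) == 3.0  # `math` is only imported when `sqrt` is first accessed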
from functools import lru_cache
@lru_cache
def factorial(num: int) -> int:
if num < 0:
raise ValueError("""Number should not be negative.""" )
return 1 if num in (0, 1) else num * factorial(num - 1 )
if __name__ == "__main__":
import doctest
doctest.testmod()
| 114 | 0 |
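# Usage sketch for the memoized factorial above (redefined locally so this runs
# standalone): with `lru_cache`, each value is computed once and later calls reuse
# the cached results, so repeated queries are O(1).
from functools import lru_cache

@lru_cache
def factorial(num: int) -> int:
    if num < 0:
        raise ValueError("Number should not be negative.")
    return 1 if num in (0, 1) else num * factorial(num - 1)

assert factorial(5) == 120
factorial(10)
assert factorial.cache_info().hits > 0  # factorial(10) reused factorial(5)'s cache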
"""simple docstring"""
from typing import Optional, Tuple, Union
import tensorflow as tf
from ...activations_tf import ACTaFN
from ...file_utils import add_code_sample_docstrings, add_start_docstrings, add_start_docstrings_to_model_forward
from ...modeling_tf_outputs import (
TFBaseModelOutputWithNoAttention,
TFBaseModelOutputWithPoolingAndNoAttention,
TFSequenceClassifierOutput,
)
from ...modeling_tf_utils import TFPreTrainedModel, TFSequenceClassificationLoss, keras_serializable, unpack_inputs
from ...tf_utils import shape_list
from ...utils import logging
from .configuration_regnet import RegNetConfig
UpperCAmelCase_ : Dict = logging.get_logger(__name__)
# General docstring
UpperCAmelCase_ : Optional[Any] = "RegNetConfig"
# Base docstring
UpperCAmelCase_ : List[str] = "facebook/regnet-y-040"
UpperCAmelCase_ : Optional[int] = [1, 1_088, 7, 7]
# Image classification docstring
UpperCAmelCase_ : Dict = "facebook/regnet-y-040"
UpperCAmelCase_ : Any = "tabby, tabby cat"
UpperCAmelCase_ : int = [
"facebook/regnet-y-040",
# See all regnet models at https://huggingface.co/models?filter=regnet
]
class UpperCamelCase ( tf.keras.layers.Layer ):
def __init__( self , out_channels , kernel_size = 3 , stride = 1 , groups = 1 , activation = "relu" , **kwargs , ):
super().__init__(**kwargs )
# The padding and conv has been verified in
# https://colab.research.google.com/gist/sayakpaul/854bc10eeaf21c9ee2119e0b9f3841a7/scratchpad.ipynb
self.padding = tf.keras.layers.ZeroPaddingaD(padding=kernel_size // 2 )
self.convolution = tf.keras.layers.ConvaD(
filters=out_channels , kernel_size=kernel_size , strides=stride , padding="VALID" , groups=groups , use_bias=False , name="convolution" , )
self.normalization = tf.keras.layers.BatchNormalization(epsilon=1e-5 , momentum=0.9 , name="normalization" )
self.activation = ACTaFN[activation] if activation is not None else tf.identity
def __A ( self , hidden_state ):
hidden_state = self.convolution(self.padding(hidden_state ) )
hidden_state = self.normalization(hidden_state )
hidden_state = self.activation(hidden_state )
return hidden_state
class UpperCamelCase ( tf.keras.layers.Layer ):
def __init__( self , UpperCAmelCase__ , **UpperCAmelCase__ ):
super().__init__(**UpperCAmelCase__ )
A__ = config.num_channels
A__ = TFRegNetConvLayer(
out_channels=config.embedding_size , kernel_size=3 , stride=2 , activation=config.hidden_act , name="embedder" , )
def __A ( self , UpperCAmelCase__ ):
A__ = shape_list(UpperCAmelCase__ )[1]
if tf.executing_eagerly() and num_channels != self.num_channels:
raise ValueError(
"Make sure that the channel dimension of the pixel values match with the one set in the configuration." )
# When running on CPU, `tf.keras.layers.Conv2D` doesn't support `NCHW` format.
# So change the input format from `NCHW` to `NHWC`.
# shape = (batch_size, in_height, in_width, in_channels=num_channels)
A__ = tf.transpose(UpperCAmelCase__ , perm=(0, 2, 3, 1) )
A__ = self.embedder(UpperCAmelCase__ )
return hidden_state
class UpperCamelCase ( tf.keras.layers.Layer ):
def __init__( self , UpperCAmelCase__ , UpperCAmelCase__ = 2 , **UpperCAmelCase__ ):
super().__init__(**UpperCAmelCase__ )
A__ = tf.keras.layers.ConvaD(
filters=UpperCAmelCase__ , kernel_size=1 , strides=UpperCAmelCase__ , use_bias=UpperCAmelCase__ , name="convolution" )
A__ = tf.keras.layers.BatchNormalization(epsilon=1e-5 , momentum=0.9 , name="normalization" )
def __A ( self , UpperCAmelCase__ , UpperCAmelCase__ = False ):
return self.normalization(self.convolution(UpperCAmelCase__ ) , training=UpperCAmelCase__ )
class UpperCamelCase ( tf.keras.layers.Layer ):
def __init__( self , UpperCAmelCase__ , UpperCAmelCase__ , **UpperCAmelCase__ ):
super().__init__(**UpperCAmelCase__ )
A__ = tf.keras.layers.GlobalAveragePoolingaD(keepdims=UpperCAmelCase__ , name="pooler" )
A__ = [
tf.keras.layers.ConvaD(filters=UpperCAmelCase__ , kernel_size=1 , activation="relu" , name="attention.0" ),
tf.keras.layers.ConvaD(filters=UpperCAmelCase__ , kernel_size=1 , activation="sigmoid" , name="attention.2" ),
]
def __A ( self , UpperCAmelCase__ ):
# [batch_size, h, w, num_channels] -> [batch_size, 1, 1, num_channels]
A__ = self.pooler(UpperCAmelCase__ )
for layer_module in self.attention:
A__ = layer_module(UpperCAmelCase__ )
A__ = hidden_state * pooled
return hidden_state
class UpperCamelCase ( tf.keras.layers.Layer ):
def __init__( self , UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ = 1 , **UpperCAmelCase__ ):
super().__init__(**UpperCAmelCase__ )
A__ = in_channels != out_channels or stride != 1
A__ = max(1 , out_channels // config.groups_width )
A__ = (
TFRegNetShortCut(UpperCAmelCase__ , stride=UpperCAmelCase__ , name="shortcut" )
if should_apply_shortcut
else tf.keras.layers.Activation("linear" , name="shortcut" )
)
# `self.layers` instead of `self.layer` because that is a reserved argument.
A__ = [
TFRegNetConvLayer(UpperCAmelCase__ , kernel_size=1 , activation=config.hidden_act , name="layer.0" ),
TFRegNetConvLayer(
UpperCAmelCase__ , stride=UpperCAmelCase__ , groups=UpperCAmelCase__ , activation=config.hidden_act , name="layer.1" ),
TFRegNetConvLayer(UpperCAmelCase__ , kernel_size=1 , activation=UpperCAmelCase__ , name="layer.2" ),
]
A__ = ACTaFN[config.hidden_act]
def __A ( self , UpperCAmelCase__ ):
A__ = hidden_state
for layer_module in self.layers:
A__ = layer_module(UpperCAmelCase__ )
A__ = self.shortcut(UpperCAmelCase__ )
hidden_state += residual
A__ = self.activation(UpperCAmelCase__ )
return hidden_state
class UpperCamelCase ( tf.keras.layers.Layer ):
def __init__( self , UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ = 1 , **UpperCAmelCase__ ):
super().__init__(**UpperCAmelCase__ )
A__ = in_channels != out_channels or stride != 1
A__ = max(1 , out_channels // config.groups_width )
A__ = (
TFRegNetShortCut(UpperCAmelCase__ , stride=UpperCAmelCase__ , name="shortcut" )
if should_apply_shortcut
else tf.keras.layers.Activation("linear" , name="shortcut" )
)
A__ = [
TFRegNetConvLayer(UpperCAmelCase__ , kernel_size=1 , activation=config.hidden_act , name="layer.0" ),
TFRegNetConvLayer(
UpperCAmelCase__ , stride=UpperCAmelCase__ , groups=UpperCAmelCase__ , activation=config.hidden_act , name="layer.1" ),
TFRegNetSELayer(UpperCAmelCase__ , reduced_channels=int(round(in_channels / 4 ) ) , name="layer.2" ),
TFRegNetConvLayer(UpperCAmelCase__ , kernel_size=1 , activation=UpperCAmelCase__ , name="layer.3" ),
]
A__ = ACTaFN[config.hidden_act]
def __A ( self , UpperCAmelCase__ ):
A__ = hidden_state
for layer_module in self.layers:
A__ = layer_module(UpperCAmelCase__ )
A__ = self.shortcut(UpperCAmelCase__ )
hidden_state += residual
A__ = self.activation(UpperCAmelCase__ )
return hidden_state
class UpperCamelCase ( tf.keras.layers.Layer ):
def __init__( self , UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ = 2 , UpperCAmelCase__ = 2 , **UpperCAmelCase__ ):
super().__init__(**UpperCAmelCase__ )
A__ = TFRegNetXLayer if config.layer_type == "x" else TFRegNetYLayer
A__ = [
# downsampling is done in the first layer with stride of 2
layer(UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ , stride=UpperCAmelCase__ , name="layers.0" ),
*[layer(UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ , name=F"""layers.{i+1}""" ) for i in range(depth - 1 )],
]
def __A ( self , UpperCAmelCase__ ):
for layer_module in self.layers:
A__ = layer_module(UpperCAmelCase__ )
return hidden_state
class UpperCamelCase ( tf.keras.layers.Layer ):
def __init__( self , UpperCAmelCase__ , **UpperCAmelCase__ ):
super().__init__(**UpperCAmelCase__ )
A__ = []
# based on `downsample_in_first_stage`, the first layer of the first stage may or may not downsample the input
self.stages.append(
TFRegNetStage(
UpperCAmelCase__ , config.embedding_size , config.hidden_sizes[0] , stride=2 if config.downsample_in_first_stage else 1 , depth=config.depths[0] , name="stages.0" , ) )
A__ = zip(config.hidden_sizes , config.hidden_sizes[1:] )
for i, ((in_channels, out_channels), depth) in enumerate(zip(UpperCAmelCase__ , config.depths[1:] ) ):
self.stages.append(TFRegNetStage(UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ , depth=UpperCAmelCase__ , name=F"""stages.{i+1}""" ) )
def __A ( self , UpperCAmelCase__ , UpperCAmelCase__ = False , UpperCAmelCase__ = True ):
A__ = () if output_hidden_states else None
for stage_module in self.stages:
if output_hidden_states:
A__ = hidden_states + (hidden_state,)
A__ = stage_module(UpperCAmelCase__ )
if output_hidden_states:
A__ = hidden_states + (hidden_state,)
if not return_dict:
return tuple(v for v in [hidden_state, hidden_states] if v is not None )
return TFBaseModelOutputWithNoAttention(last_hidden_state=UpperCAmelCase__ , hidden_states=UpperCAmelCase__ )
@keras_serializable
class UpperCamelCase ( tf.keras.layers.Layer ):
lowerCAmelCase : Dict = RegNetConfig
def __init__( self , UpperCAmelCase__ , **UpperCAmelCase__ ):
super().__init__(**UpperCAmelCase__ )
A__ = config
A__ = TFRegNetEmbeddings(UpperCAmelCase__ , name="embedder" )
A__ = TFRegNetEncoder(UpperCAmelCase__ , name="encoder" )
A__ = tf.keras.layers.GlobalAveragePoolingaD(keepdims=UpperCAmelCase__ , name="pooler" )
@unpack_inputs
def __A ( self , UpperCAmelCase__ , UpperCAmelCase__ = None , UpperCAmelCase__ = None , UpperCAmelCase__ = False , ):
A__ = (
output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
)
A__ = return_dict if return_dict is not None else self.config.use_return_dict
A__ = self.embedder(UpperCAmelCase__ , training=UpperCAmelCase__ )
A__ = self.encoder(
UpperCAmelCase__ , output_hidden_states=UpperCAmelCase__ , return_dict=UpperCAmelCase__ , training=UpperCAmelCase__ )
A__ = encoder_outputs[0]
A__ = self.pooler(UpperCAmelCase__ )
# Change to NCHW output format have uniformity in the modules
A__ = tf.transpose(UpperCAmelCase__ , perm=(0, 3, 1, 2) )
A__ = tf.transpose(UpperCAmelCase__ , perm=(0, 3, 1, 2) )
# Change the other hidden state outputs to NCHW as well
if output_hidden_states:
A__ = tuple([tf.transpose(UpperCAmelCase__ , perm=(0, 3, 1, 2) ) for h in encoder_outputs[1]] )
if not return_dict:
return (last_hidden_state, pooled_output) + encoder_outputs[1:]
return TFBaseModelOutputWithPoolingAndNoAttention(
last_hidden_state=UpperCAmelCase__ , pooler_output=UpperCAmelCase__ , hidden_states=hidden_states if output_hidden_states else encoder_outputs.hidden_states , )
class UpperCamelCase ( _UpperCAmelCase ):
lowerCAmelCase : str = RegNetConfig
lowerCAmelCase : str = """regnet"""
lowerCAmelCase : str = """pixel_values"""
@property
def __A ( self ):
return {"pixel_values": tf.TensorSpec(shape=(None, self.config.num_channels, 224, 224) , dtype=tf.floataa )}
UpperCAmelCase_ : Union[str, Any] = R"\n Parameters:\n This model is a Tensorflow\n [tf.keras.layers.Layer](https://www.tensorflow.org/api_docs/python/tf/keras/layers/Layer) sub-class. Use it as a\n regular Tensorflow Module and refer to the Tensorflow documentation for all matter related to general usage and\n behavior.\n config ([`RegNetConfig`]): Model configuration class with all the parameters of the model.\n Initializing with a config file does not load the weights associated with the model, only the\n configuration. Check out the [`~TFPreTrainedModel.from_pretrained`] method to load the model weights.\n"
UpperCAmelCase_ : str = R"\n Args:\n pixel_values (`tf.Tensor` of shape `(batch_size, num_channels, height, width)`):\n Pixel values. Pixel values can be obtained using [`AutoImageProcessor`]. See\n [`ConveNextImageProcessor.__call__`] for details.\n output_hidden_states (`bool`, *optional*):\n Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for\n more detail.\n return_dict (`bool`, *optional*):\n Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.\n"
@add_start_docstrings(
"""The bare RegNet model outputting raw features without any specific head on top.""" , _UpperCAmelCase , )
class UpperCamelCase ( _UpperCAmelCase ):
def __init__( self , UpperCAmelCase__ , *UpperCAmelCase__ , **UpperCAmelCase__ ):
super().__init__(UpperCAmelCase__ , *UpperCAmelCase__ , **UpperCAmelCase__ )
A__ = TFRegNetMainLayer(UpperCAmelCase__ , name="regnet" )
@unpack_inputs
@add_start_docstrings_to_model_forward(UpperCAmelCase__ )
@add_code_sample_docstrings(
checkpoint=_CHECKPOINT_FOR_DOC , output_type=UpperCAmelCase__ , config_class=_CONFIG_FOR_DOC , modality="vision" , expected_output=_EXPECTED_OUTPUT_SHAPE , )
def __A ( self , UpperCAmelCase__ , UpperCAmelCase__ = None , UpperCAmelCase__ = None , UpperCAmelCase__=False , ):
A__ = (
output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
)
A__ = return_dict if return_dict is not None else self.config.use_return_dict
A__ = self.regnet(
pixel_values=UpperCAmelCase__ , output_hidden_states=UpperCAmelCase__ , return_dict=UpperCAmelCase__ , training=UpperCAmelCase__ , )
if not return_dict:
return (outputs[0],) + outputs[1:]
return TFBaseModelOutputWithPoolingAndNoAttention(
last_hidden_state=outputs.last_hidden_state , pooler_output=outputs.pooler_output , hidden_states=outputs.hidden_states , )
@add_start_docstrings(
"""
RegNet Model with an image classification head on top (a linear layer on top of the pooled features), e.g. for
ImageNet.
""" , _UpperCAmelCase , )
class UpperCamelCase ( _UpperCAmelCase , _UpperCAmelCase ):
def __init__( self , UpperCAmelCase__ , *UpperCAmelCase__ , **UpperCAmelCase__ ):
super().__init__(UpperCAmelCase__ , *UpperCAmelCase__ , **UpperCAmelCase__ )
A__ = config.num_labels
A__ = TFRegNetMainLayer(UpperCAmelCase__ , name="regnet" )
# classification head
A__ = [
tf.keras.layers.Flatten(),
tf.keras.layers.Dense(config.num_labels , name="classifier.1" ) if config.num_labels > 0 else tf.identity,
]
@unpack_inputs
@add_start_docstrings_to_model_forward(UpperCAmelCase__ )
@add_code_sample_docstrings(
checkpoint=_IMAGE_CLASS_CHECKPOINT , output_type=UpperCAmelCase__ , config_class=_CONFIG_FOR_DOC , expected_output=_IMAGE_CLASS_EXPECTED_OUTPUT , )
def __A ( self , UpperCAmelCase__ = None , UpperCAmelCase__ = None , UpperCAmelCase__ = None , UpperCAmelCase__ = None , UpperCAmelCase__=False , ):
A__ = (
output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
)
A__ = return_dict if return_dict is not None else self.config.use_return_dict
A__ = self.regnet(
UpperCAmelCase__ , output_hidden_states=UpperCAmelCase__ , return_dict=UpperCAmelCase__ , training=UpperCAmelCase__ )
A__ = outputs.pooler_output if return_dict else outputs[1]
A__ = self.classifier[0](UpperCAmelCase__ )
A__ = self.classifier[1](UpperCAmelCase__ )
A__ = None if labels is None else self.hf_compute_loss(labels=UpperCAmelCase__ , logits=UpperCAmelCase__ )
if not return_dict:
A__ = (logits,) + outputs[2:]
return ((loss,) + output) if loss is not None else output
return TFSequenceClassifierOutput(loss=UpperCAmelCase__ , logits=UpperCAmelCase__ , hidden_states=outputs.hidden_states )
| 363 |
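# Side note as runnable code (an illustration, not taken from the snippets around it):
# the TF RegNet code above transposes pixel values from NCHW to NHWC before the conv
# stack and back afterwards, because tf.keras Conv2D on CPU only supports channels-last.
import numpy as np

x_nchw = np.zeros((2, 3, 224, 224))          # (batch, channels, height, width)
x_nhwc = np.transpose(x_nchw, (0, 2, 3, 1))  # (batch, height, width, channels)
assert x_nhwc.shape == (2, 224, 224, 3)
assert np.transpose(x_nhwc, (0, 3, 1, 2)).shape == x_nchw.shape  # round-trips cleanly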
import argparse
import json
import os
import torch
from torch import nn
from transformers import NllbMoeConfig, NllbMoeModel
from transformers.modeling_utils import dtype_byte_size
from transformers.utils import WEIGHTS_INDEX_NAME, WEIGHTS_NAME
def remove_ignore_keys_(state_dict: dict )-> None:
"""simple docstring"""
ignore_keys = [
"encoder.version",
"decoder.version",
"model.encoder.version",
"model.decoder.version",
"decoder.output_projection.weight",
"_float_tensor",
"encoder.embed_positions._float_tensor",
"decoder.embed_positions._float_tensor",
]
for k in ignore_keys:
state_dict.pop(k , None )
def make_linear_from_emb(emb )-> nn.Linear:
"""simple docstring"""
vocab_size , emb_size = emb.weight.shape
lin_layer = nn.Linear(vocab_size , emb_size , bias=False )
lin_layer.weight.data = emb.weight.data
return lin_layer
def rename_fairseq_keys(state_dict: dict , expert_idx=None )-> dict:
"""simple docstring"""
new_dict = {}
for old_key in state_dict.keys():
key = old_key
if "moe_layer.experts." in key:
if expert_idx is not None:
key = key.replace("moe_layer.experts.0" , f"""ffn.experts.expert_{expert_idx}""" )
else:
key = key.replace("moe_layer.experts." , "ffn.experts.expert_" )
if "gate" in key:
key = key.replace(".moe_layer.gate.wg" , ".ffn.router.classifier" )
if "fc2" and "experts" not in key:
key = key.replace(".fc2." , ".ffn.fc2." )
if "fc1" and "experts" not in key:
key = key.replace(".fc1." , ".ffn.fc1." )
if ".encoder_attn." in key:
key = key.replace(".encoder_attn." , ".cross_attention." )
if "encoder_attn_layer_norm" in key:
key = key.replace("encoder_attn_layer_norm" , "cross_attention_layer_norm" )
if "final_layer_norm" in key:
key = key.replace("final_layer_norm" , "ff_layer_norm" )
new_dict[key] = state_dict[old_key]
return new_dict
def shard_on_the_fly(switch_checkpoint_path , dump_path , num_experts , dtype , weights_name: str = WEIGHTS_NAME )-> List[str]:
"""simple docstring"""
sharded_state_dicts = []
total_size = 0
os.makedirs(dump_path , exist_ok=True )
for expert in range(num_experts ):
expert_path = switch_checkpoint_path + f"""-rank-{expert}.pt"""
if os.path.isfile(expert_path ):
expert_state = torch.load(expert_path )["model"]
remove_ignore_keys_(expert_state )
expert_state = rename_fairseq_keys(expert_state , expert )
save_path = os.path.join(
dump_path , weights_name.replace(".bin" , f"""-{len(sharded_state_dicts )+1:05d}-of-???.bin""" ) )
torch.save(expert_state , save_path )
sharded_state_dicts.append(expert_state.keys() )
total_size += sum([value.numel() for key, value in expert_state.items()] ) * dtype_byte_size(
expert_state[list(expert_state )[0]].dtype )
# Add the last block
save_path = os.path.join(dump_path , weights_name.replace(".bin" , f"""-{len(sharded_state_dicts )+1:05d}-of-???.bin""" ) )
shared_weights = torch.load(switch_checkpoint_path + "-shared.pt" )["model"]
remove_ignore_keys_(shared_weights )
shared_weights = rename_fairseq_keys(shared_weights , None )
shared_weights["shared.weight"] = shared_weights["decoder.embed_tokens.weight"]
sharded_state_dicts.append(shared_weights.keys() )
# If we only have the shared weights (dummy model/experts saved on the same file)
if len(sharded_state_dicts ) == 1:
save_path = os.path.join(dump_path , weights_name )
torch.save(shared_weights , save_path )
return {weights_name: sharded_state_dicts[0]}, None
else:
torch.save(shared_weights , save_path )
# Otherwise, let's build the index
weight_map = {}
for idx, shard in enumerate(sharded_state_dicts ):
shard_file = weights_name.replace(".bin" , f"""-{idx+1:05d}-of-{len(sharded_state_dicts ):05d}.bin""" )
temp_filename = os.path.join(dump_path , weights_name.replace(".bin" , f"""-{idx+1:05d}-of-???.bin""" ) )
os.rename(temp_filename , os.path.join(dump_path , shard_file ) )
for key in shard:
weight_map[key] = shard_file
# Add the metadata
metadata = {"total_size": total_size}
index = {"metadata": metadata, "weight_map": weight_map}
with open(os.path.join(dump_path , WEIGHTS_INDEX_NAME ) , "w" , encoding="utf-8" ) as f:
content = json.dumps(index , indent=2 , sort_keys=True ) + "\n"
f.write(content )
return metadata, index
if __name__ == "__main__":
UpperCAmelCase_ : str = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--nllb_moe_checkpoint_path",
default="/home/arthur_huggingface_co/fairseq/weights/checkpoints/model_moe_54b/checkpoint_2_300000",
type=str,
required=False,
help="Path to a directory containing a folder per layer. Follows the original Google format.",
)
parser.add_argument("--dtype", default="float32", type=str, required=False, help="dtype of the saved model")
parser.add_argument(
"--pytorch_dump_folder_path",
default="/home/arthur_huggingface_co/fairseq/weights/checkpoints/hf-converted-moe-54b",
type=str,
required=False,
help="Path to the output pytorch model.",
)
UpperCAmelCase_ : Union[str, Any] = parser.parse_args()
metadata, index = shard_on_the_fly(
args.nllb_moe_checkpoint_path,
args.pytorch_dump_folder_path,
128,
args.dtype,
)
UpperCAmelCase_ : Any = NllbMoeConfig.from_pretrained(
"facebook/nllb-200-3.3B", encoder_sparse_step=4, decoder_sparse_step=4, num_experts=128
)
config.save_pretrained(args.pytorch_dump_folder_path)
UpperCAmelCase_ : Tuple = NllbMoeModel.from_pretrained(args.pytorch_dump_folder_path)
print("Done")
model.save_pretrained(args.pytorch_dump_folder_path)
| 198 | 0 |
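# Sketch of the shard bookkeeping used in the conversion script above: the total byte
# size is accumulated as numel * bytes-per-element, which is what `dtype_byte_size`
# provides there. This is a simplified stand-in using tensor.element_size().
import torch

def approx_param_bytes(state_dict: dict) -> int:
    return sum(t.numel() * t.element_size() for t in state_dict.values())

sd = {"w": torch.zeros(4, 8, dtype=torch.float32), "b": torch.zeros(8, dtype=torch.float32)}
assert approx_param_bytes(sd) == (4 * 8 + 8) * 4  # float32 = 4 bytes per element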
def sum_of_divisors(input_num: int ):
"""simple docstring"""
if not isinstance(input_num , int ):
raise ValueError('''Input must be an integer''' )
if input_num <= 0:
raise ValueError('''Input must be positive''' )
return sum(
divisor for divisor in range(1 , input_num // 2 + 1 ) if input_num % divisor == 0 )
if __name__ == "__main__":
import doctest
doctest.testmod()
| 94 |
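# Usage sketch for the divisor-sum helper above (redefined locally so this runs
# standalone): a number is "perfect" when the sum of its proper divisors equals
# the number itself, e.g. 6 = 1 + 2 + 3.
def sum_of_divisors(input_num: int) -> int:
    if not isinstance(input_num, int):
        raise ValueError("Input must be an integer")
    if input_num <= 0:
        raise ValueError("Input must be positive")
    return sum(d for d in range(1, input_num // 2 + 1) if input_num % d == 0)

assert sum_of_divisors(6) == 6    # perfect number
assert sum_of_divisors(28) == 28  # perfect number
assert sum_of_divisors(12) == 16  # 1 + 2 + 3 + 4 + 6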
import argparse
import os
import numpy as np
import tensorflow as tf
import torch
from transformers import BertModel
def convert_pytorch_checkpoint_to_tf(model , ckpt_dir , model_name ):
'''simple docstring'''
tensors_to_transpose = ('''dense.weight''', '''attention.self.query''', '''attention.self.key''', '''attention.self.value''')
var_map = (
('''layer.''', '''layer_'''),
('''word_embeddings.weight''', '''word_embeddings'''),
('''position_embeddings.weight''', '''position_embeddings'''),
('''token_type_embeddings.weight''', '''token_type_embeddings'''),
('''.''', '''/'''),
('''LayerNorm/weight''', '''LayerNorm/gamma'''),
('''LayerNorm/bias''', '''LayerNorm/beta'''),
('''weight''', '''kernel'''),
)
if not os.path.isdir(ckpt_dir ):
os.makedirs(ckpt_dir )
state_dict = model.state_dict()
def to_tf_var_name(name ):
for patt, repl in iter(var_map ):
name = name.replace(patt , repl )
return F"""bert/{name}"""
def create_tf_var(tensor , name , session ):
tf_dtype = tf.dtypes.as_dtype(tensor.dtype )
tf_var = tf.get_variable(dtype=tf_dtype , shape=tensor.shape , name=name , initializer=tf.zeros_initializer() )
session.run(tf.variables_initializer([tf_var] ) )
session.run(tf_var )
return tf_var
tf.reset_default_graph()
with tf.Session() as session:
for var_name in state_dict:
tf_name = to_tf_var_name(var_name )
torch_tensor = state_dict[var_name].numpy()
if any(x in var_name for x in tensors_to_transpose ):
torch_tensor = torch_tensor.T
tf_var = create_tf_var(tensor=torch_tensor , name=tf_name , session=session )
tf.keras.backend.set_value(tf_var , torch_tensor )
tf_weight = session.run(tf_var )
print(F"""Successfully created {tf_name}: {np.allclose(tf_weight , torch_tensor )}""" )
saver = tf.train.Saver(tf.trainable_variables() )
saver.save(session , os.path.join(ckpt_dir , model_name.replace('''-''' , '''_''' ) + '''.ckpt''' ) )
def main(raw_args=None ):
'''simple docstring'''
parser = argparse.ArgumentParser()
parser.add_argument('''--model_name''' , type=str , required=True , help='''model name e.g. bert-base-uncased''' )
parser.add_argument(
'''--cache_dir''' , type=str , default=None , required=False , help='''Directory containing pytorch model''' )
parser.add_argument('''--pytorch_model_path''' , type=str , required=True , help='''/path/to/<pytorch-model-name>.bin''' )
parser.add_argument('''--tf_cache_dir''' , type=str , required=True , help='''Directory in which to save tensorflow model''' )
args = parser.parse_args(raw_args )
model = BertModel.from_pretrained(
pretrained_model_name_or_path=args.model_name , state_dict=torch.load(args.pytorch_model_path ) , cache_dir=args.cache_dir , )
convert_pytorch_checkpoint_to_tf(model=model , ckpt_dir=args.tf_cache_dir , model_name=args.model_name )
if __name__ == "__main__":
main()
| 207 | 0 |
import unittest
from transformers import is_torch_available
from transformers.testing_utils import require_torch
if is_torch_available():
import torch
from transformers.activations import gelu_new, gelu_python, get_activation
@require_torch
class __lowercase ( unittest.TestCase ):
'''simple docstring'''
def A_ ( self : List[Any] ):
x = torch.tensor([-100, -1, -0.1, 0, 0.1, 1.0, 100] )
torch_builtin = get_activation('''gelu''' )
self.assertTrue(torch.allclose(gelu_python(x ) , torch_builtin(x ) ) )
self.assertFalse(torch.allclose(gelu_python(x ) , gelu_new(x ) ) )
def A_ ( self : Tuple ):
x = torch.tensor([-100, -1, -0.1, 0, 0.1, 1.0, 100] )
torch_builtin = get_activation('''gelu''' )
geluaa = get_activation('''gelu_10''' )
y_gelu = torch_builtin(x )
y_gelu_aa = geluaa(x )
clipped_mask = torch.where(y_gelu_aa < 10.0 , 1 , 0 )
self.assertTrue(torch.max(y_gelu_aa ).item() == 10.0 )
self.assertTrue(torch.allclose(y_gelu * clipped_mask , y_gelu_aa * clipped_mask ) )
def A_ ( self : str ):
get_activation('''gelu''' )
get_activation('''gelu_10''' )
get_activation('''gelu_fast''' )
get_activation('''gelu_new''' )
get_activation('''gelu_python''' )
get_activation('''gelu_pytorch_tanh''' )
get_activation('''linear''' )
get_activation('''mish''' )
get_activation('''quick_gelu''' )
get_activation('''relu''' )
get_activation('''sigmoid''' )
get_activation('''silu''' )
get_activation('''swish''' )
get_activation('''tanh''' )
with self.assertRaises(KeyError ):
get_activation('''bogus''' )
with self.assertRaises(KeyError ):
get_activation(None )
def A_ ( self : List[Any] ):
acta = get_activation('''gelu''' )
acta.a = 1
actb = get_activation('''gelu''' )
self.assertEqual(acta.a , 1 )
with self.assertRaises(AttributeError ):
_ = actb.a
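# Small illustration of the clipping behavior tested above (a sketch; `gelu_10` is
# the gelu activation with outputs capped at 10): clipping leaves small values
# untouched and caps only the large ones.
import torch
import torch.nn.functional as F

x = torch.tensor([-100.0, 0.0, 1.0, 100.0])
y = torch.clip(F.gelu(x), max=10.0)
assert y.max().item() == 10.0                  # gelu(100) ~= 100 is capped at 10
assert torch.allclose(y[:3], F.gelu(x)[:3])    # values below 10 pass through unchanged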
| 35 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_sentencepiece_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
lowercase = {
"""configuration_xlm_roberta""": [
"""XLM_ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP""",
"""XLMRobertaConfig""",
"""XLMRobertaOnnxConfig""",
],
}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowercase = ["""XLMRobertaTokenizer"""]
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowercase = ["""XLMRobertaTokenizerFast"""]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowercase = [
"""XLM_ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""XLMRobertaForCausalLM""",
"""XLMRobertaForMaskedLM""",
"""XLMRobertaForMultipleChoice""",
"""XLMRobertaForQuestionAnswering""",
"""XLMRobertaForSequenceClassification""",
"""XLMRobertaForTokenClassification""",
"""XLMRobertaModel""",
"""XLMRobertaPreTrainedModel""",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowercase = [
"""TF_XLM_ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""TFXLMRobertaForCausalLM""",
"""TFXLMRobertaForMaskedLM""",
"""TFXLMRobertaForMultipleChoice""",
"""TFXLMRobertaForQuestionAnswering""",
"""TFXLMRobertaForSequenceClassification""",
"""TFXLMRobertaForTokenClassification""",
"""TFXLMRobertaModel""",
"""TFXLMRobertaPreTrainedModel""",
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowercase = [
"""FLAX_XLM_ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""FlaxXLMRobertaForMaskedLM""",
"""FlaxXLMRobertaForCausalLM""",
"""FlaxXLMRobertaForMultipleChoice""",
"""FlaxXLMRobertaForQuestionAnswering""",
"""FlaxXLMRobertaForSequenceClassification""",
"""FlaxXLMRobertaForTokenClassification""",
"""FlaxXLMRobertaModel""",
"""FlaxXLMRobertaPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_xlm_roberta import (
XLM_ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP,
XLMRobertaConfig,
XLMRobertaOnnxConfig,
)
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_xlm_roberta import XLMRobertaTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_xlm_roberta_fast import XLMRobertaTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_xlm_roberta import (
XLM_ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST,
XLMRobertaForCausalLM,
XLMRobertaForMaskedLM,
XLMRobertaForMultipleChoice,
XLMRobertaForQuestionAnswering,
XLMRobertaForSequenceClassification,
XLMRobertaForTokenClassification,
XLMRobertaModel,
XLMRobertaPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_xlm_roberta import (
TF_XLM_ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST,
TFXLMRobertaForCausalLM,
TFXLMRobertaForMaskedLM,
TFXLMRobertaForMultipleChoice,
TFXLMRobertaForQuestionAnswering,
TFXLMRobertaForSequenceClassification,
TFXLMRobertaForTokenClassification,
TFXLMRobertaModel,
TFXLMRobertaPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_xlm_roberta import (
FLAX_XLM_ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST,
FlaxXLMRobertaForCausalLM,
FlaxXLMRobertaForMaskedLM,
FlaxXLMRobertaForMultipleChoice,
FlaxXLMRobertaForQuestionAnswering,
FlaxXLMRobertaForSequenceClassification,
FlaxXLMRobertaForTokenClassification,
FlaxXLMRobertaModel,
FlaxXLMRobertaPreTrainedModel,
)
else:
import sys
lowercase = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 35 | 1 |
from typing import Dict, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
IMAGENET_DEFAULT_MEAN,
IMAGENET_DEFAULT_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_vision_available, logging
if is_vision_available():
import PIL
lowerCamelCase_ = logging.get_logger(__name__)
class __A( __lowerCamelCase ):
"""simple docstring"""
SCREAMING_SNAKE_CASE__ = ["""pixel_values"""]
def __init__(self , do_resize = True , size = None , crop_pct = 0.9 , resample = PILImageResampling.BICUBIC , do_center_crop = True , crop_size = None , rescale_factor = 1 / 2_55 , do_rescale = True , do_normalize = True , image_mean = None , image_std = None , **kwargs , ):
super().__init__(**kwargs )
size = size if size is not None else {"""shortest_edge""": 2_24}
size = get_size_dict(size , default_to_square=False )
crop_size = crop_size if crop_size is not None else {"""height""": 2_24, """width""": 2_24}
crop_size = get_size_dict(crop_size , param_name="""crop_size""" )
self.do_resize = do_resize
self.size = size
self.crop_pct = crop_pct
self.resample = resample
self.do_center_crop = do_center_crop
self.crop_size = crop_size
self.do_rescale = do_rescale
self.rescale_factor = rescale_factor
self.do_normalize = do_normalize
self.image_mean = image_mean if image_mean is not None else IMAGENET_DEFAULT_MEAN
self.image_std = image_std if image_std is not None else IMAGENET_DEFAULT_STD
def UpperCAmelCase_ (self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = None , SCREAMING_SNAKE_CASE_ = PILImageResampling.BICUBIC , SCREAMING_SNAKE_CASE_ = None , **SCREAMING_SNAKE_CASE_ , ):
UpperCamelCase__ = get_size_dict(SCREAMING_SNAKE_CASE_ , default_to_square=SCREAMING_SNAKE_CASE_ )
if "shortest_edge" not in size and ("height" not in size or "width" not in size):
raise ValueError(F"size must contain 'height' and 'width' or 'shortest_edge' as keys. Got {size.keys()}" )
if crop_pct is not None:
if "shortest_edge" in size:
UpperCamelCase__ = int(size["""shortest_edge"""] / crop_pct )
elif "height" in size and "width" in size:
if size["height"] == size["width"]:
UpperCamelCase__ = int(size["""height"""] / crop_pct )
else:
UpperCamelCase__ = (int(size["""height"""] / crop_pct ), int(size["""width"""] / crop_pct ))
else:
raise ValueError("""Invalid size for resize: {}""".format(SCREAMING_SNAKE_CASE_ ) )
UpperCamelCase__ = get_resize_output_image_size(SCREAMING_SNAKE_CASE_ , size=SCREAMING_SNAKE_CASE_ , default_to_square=SCREAMING_SNAKE_CASE_ )
else:
if "shortest_edge" in size:
UpperCamelCase__ = get_resize_output_image_size(SCREAMING_SNAKE_CASE_ , size=size["""shortest_edge"""] , default_to_square=SCREAMING_SNAKE_CASE_ )
elif "height" in size and "width" in size:
UpperCamelCase__ = (size["""height"""], size["""width"""])
else:
raise ValueError("""Invalid size for resize: {}""".format(SCREAMING_SNAKE_CASE_ ) )
return resize(SCREAMING_SNAKE_CASE_ , size=SCREAMING_SNAKE_CASE_ , resample=SCREAMING_SNAKE_CASE_ , data_format=SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ )
def UpperCAmelCase_ (self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = None , **SCREAMING_SNAKE_CASE_ , ):
UpperCamelCase__ = get_size_dict(SCREAMING_SNAKE_CASE_ )
if "height" not in size or "width" not in size:
raise ValueError(F"size must contain 'height' and 'width' as keys. Got {size.keys()}" )
return center_crop(SCREAMING_SNAKE_CASE_ , size=(size["""height"""], size["""width"""]) , data_format=SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ )
def UpperCAmelCase_ (self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = None , **SCREAMING_SNAKE_CASE_ , ):
return rescale(SCREAMING_SNAKE_CASE_ , scale=SCREAMING_SNAKE_CASE_ , data_format=SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ )
def UpperCAmelCase_ (self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = None , **SCREAMING_SNAKE_CASE_ , ):
return normalize(SCREAMING_SNAKE_CASE_ , mean=SCREAMING_SNAKE_CASE_ , std=SCREAMING_SNAKE_CASE_ , data_format=SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ )
def UpperCAmelCase_ (self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = None , SCREAMING_SNAKE_CASE_ = None , SCREAMING_SNAKE_CASE_ = None , SCREAMING_SNAKE_CASE_ = None , SCREAMING_SNAKE_CASE_ = None , SCREAMING_SNAKE_CASE_ = None , SCREAMING_SNAKE_CASE_ = None , SCREAMING_SNAKE_CASE_ = None , SCREAMING_SNAKE_CASE_ = None , SCREAMING_SNAKE_CASE_ = None , SCREAMING_SNAKE_CASE_ = None , SCREAMING_SNAKE_CASE_ = None , SCREAMING_SNAKE_CASE_ = ChannelDimension.FIRST , **SCREAMING_SNAKE_CASE_ , ):
UpperCamelCase__ = do_resize if do_resize is not None else self.do_resize
UpperCamelCase__ = crop_pct if crop_pct is not None else self.crop_pct
UpperCamelCase__ = resample if resample is not None else self.resample
UpperCamelCase__ = do_center_crop if do_center_crop is not None else self.do_center_crop
UpperCamelCase__ = do_rescale if do_rescale is not None else self.do_rescale
UpperCamelCase__ = rescale_factor if rescale_factor is not None else self.rescale_factor
UpperCamelCase__ = do_normalize if do_normalize is not None else self.do_normalize
UpperCamelCase__ = image_mean if image_mean is not None else self.image_mean
UpperCamelCase__ = image_std if image_std is not None else self.image_std
UpperCamelCase__ = size if size is not None else self.size
UpperCamelCase__ = get_size_dict(SCREAMING_SNAKE_CASE_ , default_to_square=SCREAMING_SNAKE_CASE_ )
UpperCamelCase__ = crop_size if crop_size is not None else self.crop_size
UpperCamelCase__ = get_size_dict(SCREAMING_SNAKE_CASE_ , param_name="""crop_size""" )
UpperCamelCase__ = make_list_of_images(SCREAMING_SNAKE_CASE_ )
if not valid_images(SCREAMING_SNAKE_CASE_ ):
raise ValueError(
"""Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, """
"""torch.Tensor, tf.Tensor or jax.ndarray.""" )
if do_resize and size is None or resample is None:
raise ValueError("""Size and resample must be specified if do_resize is True.""" )
if do_center_crop and crop_pct is None:
raise ValueError("""Crop_pct must be specified if do_center_crop is True.""" )
if do_rescale and rescale_factor is None:
raise ValueError("""Rescale factor must be specified if do_rescale is True.""" )
if do_normalize and (image_mean is None or image_std is None):
raise ValueError("""Image mean and std must be specified if do_normalize is True.""" )
# All transformations expect numpy arrays.
UpperCamelCase__ = [to_numpy_array(SCREAMING_SNAKE_CASE_ ) for image in images]
if do_resize:
UpperCamelCase__ = [self.resize(image=SCREAMING_SNAKE_CASE_ , size=SCREAMING_SNAKE_CASE_ , crop_pct=SCREAMING_SNAKE_CASE_ , resample=SCREAMING_SNAKE_CASE_ ) for image in images]
if do_center_crop:
UpperCamelCase__ = [self.center_crop(image=SCREAMING_SNAKE_CASE_ , size=SCREAMING_SNAKE_CASE_ ) for image in images]
if do_rescale:
UpperCamelCase__ = [self.rescale(image=SCREAMING_SNAKE_CASE_ , scale=SCREAMING_SNAKE_CASE_ ) for image in images]
if do_normalize:
UpperCamelCase__ = [self.normalize(image=SCREAMING_SNAKE_CASE_ , mean=SCREAMING_SNAKE_CASE_ , std=SCREAMING_SNAKE_CASE_ ) for image in images]
UpperCamelCase__ = [to_channel_dimension_format(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) for image in images]
UpperCamelCase__ = {"""pixel_values""": images}
return BatchFeature(data=SCREAMING_SNAKE_CASE_ , tensor_type=SCREAMING_SNAKE_CASE_ )
| 244 |
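# Worked example for the `crop_pct` resizing in the processor above: with
# size = {"shortest_edge": 224} and crop_pct = 0.9, the image is first resized so
# its shortest edge is int(224 / 0.9) = 248, then center-cropped back to 224x224.
crop_pct = 0.9
shortest_edge = 224
resize_target = int(shortest_edge / crop_pct)
assert resize_target == 248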
from collections.abc import Generator
def fibonacci_generator() -> Generator[int, None, None]:
'''simple docstring'''
a , b = 0, 1
while True:
a , b = b, a + b
yield b
def solution(n: int = 1_000 ) -> int:
'''simple docstring'''
answer = 1
gen = fibonacci_generator()
while len(str(next(gen ) ) ) < n:
answer += 1
return answer + 1
if __name__ == "__main__":
print(solution(int(str(input()).strip())))
| 244 | 1 |
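# Usage sketch for the solution above (Project Euler style: index of the first
# Fibonacci term with n digits). With n=3, the terms run 1, 1, 2, ..., 89, 144,
# so the answer is 12 because F(12) = 144 is the first 3-digit term.
def fib_terms():
    a, b = 1, 1
    while True:
        yield a
        a, b = b, a + b

terms = fib_terms()
index = 1
while len(str(next(terms))) < 3:
    index += 1
assert index == 12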
from collections import OrderedDict
from typing import Any, Mapping, Optional
from ... import PreTrainedTokenizer, TensorType, is_torch_available
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfigWithPast
from ...utils import logging
lowercase_ = logging.get_logger(__name__)
lowercase_ = {
"EleutherAI/gpt-neo-1.3B": "https://huggingface.co/EleutherAI/gpt-neo-1.3B/resolve/main/config.json",
# See all GPTNeo models at https://huggingface.co/models?filter=gpt_neo
}
class SCREAMING_SNAKE_CASE__ ( __UpperCamelCase ):
A : int = "gpt_neo"
A : List[Any] = ["past_key_values"]
A : Dict = {"num_attention_heads": "num_heads", "num_hidden_layers": "num_layers"}
def __init__( self : Any , vocab_size=5_02_57 , max_position_embeddings=20_48 , hidden_size=20_48 , num_layers=24 , attention_types=[[["global", "local"], 12]] , num_heads=16 , intermediate_size=None , window_size=2_56 , activation_function="gelu_new" , resid_dropout=0.0 , embed_dropout=0.0 , attention_dropout=0.0 , classifier_dropout=0.1 , layer_norm_epsilon=1e-5 , initializer_range=0.02 , use_cache=True , bos_token_id=5_02_56 , eos_token_id=5_02_56 , **kwargs , ):
self.vocab_size = vocab_size
self.max_position_embeddings = max_position_embeddings
self.hidden_size = hidden_size
self.num_layers = num_layers
self.num_heads = num_heads
self.intermediate_size = intermediate_size
self.window_size = window_size
self.activation_function = activation_function
self.resid_dropout = resid_dropout
self.embed_dropout = embed_dropout
self.attention_dropout = attention_dropout
self.classifier_dropout = classifier_dropout
self.layer_norm_epsilon = layer_norm_epsilon
self.initializer_range = initializer_range
self.use_cache = use_cache
self.bos_token_id = bos_token_id
self.eos_token_id = eos_token_id
self.attention_types = attention_types
self.attention_layers = self.expand_attention_types_params(attention_types )
if len(self.attention_layers ) != self.num_layers:
raise ValueError(
"""Configuration for convolutional module is incorrect. """
"""It is required that `len(config.attention_layers)` == `config.num_layers` """
f'''but is `len(config.attention_layers) = {len(self.attention_layers )}`, '''
f'''`config.num_layers = {self.num_layers}`. '''
"""`config.attention_layers` is prepared using `config.attention_types`. """
"""Please verify the value of `config.attention_types` argument.""" )
super().__init__(bos_token_id=bos_token_id , eos_token_id=eos_token_id , **kwargs )
@staticmethod
def snake_case__ ( attention_types ):
attentions = []
for item in attention_types:
for _ in range(item[1] ):
attentions.extend(item[0] )
return attentions
def __lowerCAmelCase ( input , dimension , size , step ):
'''simple docstring'''
import torch
shape = input.size()
rank = len(shape )
sizedim = shape[dimension]
low_indices = torch.arange(0 , sizedim , step )
min_length = torch.div(sizedim - size , step , rounding_mode="""floor""" ) + 1
indices = torch.arange(size ) + low_indices[:min_length][:, None]
s = [slice(None )] * rank
s[dimension] = indices
sliced = input[s]
perm = list(range(0 , rank + 1 ) )
perm.append(perm.pop(dimension + 1 ) )
return sliced.permute(perm )
def custom_get_block_length_and_num_blocks( seq_length , window_size ):
    """Returns the largest divisor of seq_length below window_size, and the resulting number of blocks."""
    import torch

    candidates = torch.arange(1 , window_size )
    remainders = torch.remainder(seq_length , candidates )
    divisor_indices = remainders == 0
    divisors = candidates[divisor_indices]
    largest_divisor = torch.max(divisors )
    return largest_divisor, torch.div(seq_length , largest_divisor , rounding_mode="""floor""" )
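# Quick sanity check of the two helpers above (illustrative only, not part of
# the original module):
#
#   >>> import torch
#   >>> x = torch.arange(10).reshape(1, 10)
#   >>> custom_unfold(x, 1, 2, 2).shape      # 5 windows of size 2 along dim 1
#   torch.Size([1, 5, 2])
#   >>> custom_get_block_length_and_num_blocks(10, 4)
#   (tensor(2), tensor(5))                   # largest divisor of 10 below 4 is 2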
class GPTNeoOnnxConfig( OnnxConfigWithPast ):
@property
    def inputs( self ):
        common_inputs = OrderedDict({"""input_ids""": {0: """batch""", 1: """sequence"""}} )
        if self.use_past:
            self.fill_with_past_key_values_(common_inputs , direction="""inputs""" )
            common_inputs["""attention_mask"""] = {0: """batch""", 1: """past_sequence + sequence"""}
        else:
            common_inputs["""attention_mask"""] = {0: """batch""", 1: """sequence"""}
        return common_inputs
@property
    def num_attention_heads( self ):
        return self._config.num_heads
    def generate_dummy_inputs( self , tokenizer : PreTrainedTokenizer , batch_size : int = -1 , seq_length : int = -1 , is_pair : bool = False , framework : Optional[TensorType] = None , ):
        common_inputs = super(OnnxConfigWithPast , self ).generate_dummy_inputs(
            tokenizer , batch_size=batch_size , seq_length=seq_length , is_pair=is_pair , framework=framework )
        # We need to order the inputs in the way they appear in the forward()
        ordered_inputs = OrderedDict({"""input_ids""": common_inputs["""input_ids"""]} )
        # Need to add the past_keys
        if self.use_past:
            if not is_torch_available():
                raise ValueError("""Cannot generate dummy past_keys inputs without PyTorch installed.""" )
            else:
                import torch

                batch , seqlen = common_inputs["""input_ids"""].shape
                # Not using the same length for past_key_values
                past_key_values_length = seqlen + 2
                past_shape = (
                    batch,
                    self.num_attention_heads,
                    past_key_values_length,
                    self._config.hidden_size // self.num_attention_heads,
                )
                ordered_inputs["""past_key_values"""] = [
                    (torch.zeros(past_shape ), torch.zeros(past_shape )) for _ in range(self.num_layers )
                ]
        ordered_inputs["""attention_mask"""] = common_inputs["""attention_mask"""]
        if self.use_past:
            mask_dtype = ordered_inputs["""attention_mask"""].dtype
            ordered_inputs["""attention_mask"""] = torch.cat(
                [ordered_inputs["""attention_mask"""], torch.ones(batch , past_key_values_length , dtype=mask_dtype )] , dim=1 )
        return ordered_inputs
@property
    def default_onnx_opset( self ):
return 13
| 20 | import argparse
import os
from . import (
ALBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
BART_PRETRAINED_MODEL_ARCHIVE_LIST,
BERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
CAMEMBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
CTRL_PRETRAINED_CONFIG_ARCHIVE_MAP,
DISTILBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
DPR_CONTEXT_ENCODER_PRETRAINED_MODEL_ARCHIVE_LIST,
DPR_QUESTION_ENCODER_PRETRAINED_MODEL_ARCHIVE_LIST,
DPR_READER_PRETRAINED_MODEL_ARCHIVE_LIST,
ELECTRA_PRETRAINED_CONFIG_ARCHIVE_MAP,
FLAUBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
GPT2_PRETRAINED_CONFIG_ARCHIVE_MAP,
LAYOUTLM_PRETRAINED_MODEL_ARCHIVE_LIST,
LXMERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
OPENAI_GPT_PRETRAINED_CONFIG_ARCHIVE_MAP,
ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP,
T5_PRETRAINED_CONFIG_ARCHIVE_MAP,
TRANSFO_XL_PRETRAINED_CONFIG_ARCHIVE_MAP,
WAV_2_VEC_2_PRETRAINED_CONFIG_ARCHIVE_MAP,
XLM_PRETRAINED_CONFIG_ARCHIVE_MAP,
XLM_ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP,
XLNET_PRETRAINED_CONFIG_ARCHIVE_MAP,
AlbertConfig,
BartConfig,
BertConfig,
CamembertConfig,
CTRLConfig,
DistilBertConfig,
DPRConfig,
ElectraConfig,
FlaubertConfig,
GPTaConfig,
LayoutLMConfig,
LxmertConfig,
OpenAIGPTConfig,
RobertaConfig,
TaConfig,
TFAlbertForPreTraining,
TFBartForConditionalGeneration,
TFBartForSequenceClassification,
TFBertForPreTraining,
TFBertForQuestionAnswering,
TFBertForSequenceClassification,
TFCamembertForMaskedLM,
TFCTRLLMHeadModel,
TFDistilBertForMaskedLM,
TFDistilBertForQuestionAnswering,
TFDPRContextEncoder,
TFDPRQuestionEncoder,
TFDPRReader,
TFElectraForPreTraining,
TFFlaubertWithLMHeadModel,
TFGPTaLMHeadModel,
TFLayoutLMForMaskedLM,
TFLxmertForPreTraining,
TFLxmertVisualFeatureEncoder,
TFOpenAIGPTLMHeadModel,
TFRobertaForCausalLM,
TFRobertaForMaskedLM,
TFRobertaForSequenceClassification,
TFTaForConditionalGeneration,
TFTransfoXLLMHeadModel,
TFWavaVecaModel,
TFXLMRobertaForMaskedLM,
TFXLMWithLMHeadModel,
TFXLNetLMHeadModel,
TransfoXLConfig,
WavaVecaConfig,
WavaVecaModel,
XLMConfig,
XLMRobertaConfig,
XLNetConfig,
is_torch_available,
load_pytorch_checkpoint_in_tfa_model,
)
from .utils import CONFIG_NAME, WEIGHTS_NAME, cached_file, logging
if is_torch_available():
import numpy as np
import torch
from . import (
AlbertForPreTraining,
BartForConditionalGeneration,
BertForPreTraining,
BertForQuestionAnswering,
BertForSequenceClassification,
CamembertForMaskedLM,
CTRLLMHeadModel,
DistilBertForMaskedLM,
DistilBertForQuestionAnswering,
DPRContextEncoder,
DPRQuestionEncoder,
DPRReader,
ElectraForPreTraining,
FlaubertWithLMHeadModel,
GPTaLMHeadModel,
LayoutLMForMaskedLM,
LxmertForPreTraining,
LxmertVisualFeatureEncoder,
OpenAIGPTLMHeadModel,
RobertaForMaskedLM,
RobertaForSequenceClassification,
TaForConditionalGeneration,
TransfoXLLMHeadModel,
XLMRobertaForMaskedLM,
XLMWithLMHeadModel,
XLNetLMHeadModel,
)
logging.set_verbosity_info()
MODEL_CLASSES = {
"bart": (
BartConfig,
TFBartForConditionalGeneration,
TFBartForSequenceClassification,
BartForConditionalGeneration,
BART_PRETRAINED_MODEL_ARCHIVE_LIST,
),
"bert": (
BertConfig,
TFBertForPreTraining,
BertForPreTraining,
BERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
"bert-large-uncased-whole-word-masking-finetuned-squad": (
BertConfig,
TFBertForQuestionAnswering,
BertForQuestionAnswering,
BERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
"bert-large-cased-whole-word-masking-finetuned-squad": (
BertConfig,
TFBertForQuestionAnswering,
BertForQuestionAnswering,
BERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
"bert-base-cased-finetuned-mrpc": (
BertConfig,
TFBertForSequenceClassification,
BertForSequenceClassification,
BERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
"dpr": (
DPRConfig,
TFDPRQuestionEncoder,
TFDPRContextEncoder,
TFDPRReader,
DPRQuestionEncoder,
DPRContextEncoder,
DPRReader,
DPR_CONTEXT_ENCODER_PRETRAINED_MODEL_ARCHIVE_LIST,
DPR_QUESTION_ENCODER_PRETRAINED_MODEL_ARCHIVE_LIST,
DPR_READER_PRETRAINED_MODEL_ARCHIVE_LIST,
),
"gpt2": (
GPTaConfig,
TFGPTaLMHeadModel,
GPTaLMHeadModel,
GPT2_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
"xlnet": (
XLNetConfig,
TFXLNetLMHeadModel,
XLNetLMHeadModel,
XLNET_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
"xlm": (
XLMConfig,
TFXLMWithLMHeadModel,
XLMWithLMHeadModel,
XLM_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
"xlm-roberta": (
XLMRobertaConfig,
TFXLMRobertaForMaskedLM,
XLMRobertaForMaskedLM,
XLM_ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
"transfo-xl": (
TransfoXLConfig,
TFTransfoXLLMHeadModel,
TransfoXLLMHeadModel,
TRANSFO_XL_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
"openai-gpt": (
OpenAIGPTConfig,
TFOpenAIGPTLMHeadModel,
OpenAIGPTLMHeadModel,
OPENAI_GPT_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
"roberta": (
RobertaConfig,
TFRobertaForCausalLM,
TFRobertaForMaskedLM,
RobertaForMaskedLM,
ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
"layoutlm": (
LayoutLMConfig,
TFLayoutLMForMaskedLM,
LayoutLMForMaskedLM,
LAYOUTLM_PRETRAINED_MODEL_ARCHIVE_LIST,
),
"roberta-large-mnli": (
RobertaConfig,
TFRobertaForSequenceClassification,
RobertaForSequenceClassification,
ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
"camembert": (
CamembertConfig,
TFCamembertForMaskedLM,
CamembertForMaskedLM,
CAMEMBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
"flaubert": (
FlaubertConfig,
TFFlaubertWithLMHeadModel,
FlaubertWithLMHeadModel,
FLAUBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
"distilbert": (
DistilBertConfig,
TFDistilBertForMaskedLM,
DistilBertForMaskedLM,
DISTILBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
"distilbert-base-distilled-squad": (
DistilBertConfig,
TFDistilBertForQuestionAnswering,
DistilBertForQuestionAnswering,
DISTILBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
"lxmert": (
LxmertConfig,
TFLxmertForPreTraining,
LxmertForPreTraining,
LXMERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
"lxmert-visual-feature-encoder": (
LxmertConfig,
TFLxmertVisualFeatureEncoder,
LxmertVisualFeatureEncoder,
LXMERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
"ctrl": (
CTRLConfig,
TFCTRLLMHeadModel,
CTRLLMHeadModel,
CTRL_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
"albert": (
AlbertConfig,
TFAlbertForPreTraining,
AlbertForPreTraining,
ALBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
"t5": (
TaConfig,
TFTaForConditionalGeneration,
TaForConditionalGeneration,
T5_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
"electra": (
ElectraConfig,
TFElectraForPreTraining,
ElectraForPreTraining,
ELECTRA_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
"wav2vec2": (
WavaVecaConfig,
TFWavaVecaModel,
WavaVecaModel,
WAV_2_VEC_2_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
}
def convert_pt_checkpoint_to_tf( model_type , pytorch_checkpoint_path , config_file , tf_dump_path , compare_with_pt_model=False , use_cached_models=True ):
    if model_type not in MODEL_CLASSES:
        raise ValueError(F'''Unrecognized model type, should be one of {list(MODEL_CLASSES.keys() )}.''' )
    config_class , model_class , pt_model_class , aws_config_map = MODEL_CLASSES[model_type]
    # Initialise TF model
    if config_file in aws_config_map:
        config_file = cached_file(config_file , CONFIG_NAME , force_download=not use_cached_models )
    config = config_class.from_json_file(config_file )
    config.output_hidden_states = True
    config.output_attentions = True
    print(F'''Building TensorFlow model from configuration: {config}''' )
    tf_model = model_class(config )
# Load weights from tf checkpoint
    if pytorch_checkpoint_path in aws_config_map.keys():
        pytorch_checkpoint_path = cached_file(
            pytorch_checkpoint_path , WEIGHTS_NAME , force_download=not use_cached_models )
    # Load PyTorch checkpoint in tf2 model:
    tf_model = load_pytorch_checkpoint_in_tfa_model(tf_model , pytorch_checkpoint_path )
    if compare_with_pt_model:
        tfo = tf_model(tf_model.dummy_inputs , training=False )  # build the network
        state_dict = torch.load(pytorch_checkpoint_path , map_location="""cpu""" )
        pt_model = pt_model_class.from_pretrained(
            pretrained_model_name_or_path=None , config=config , state_dict=state_dict )
        with torch.no_grad():
            pto = pt_model(**pt_model.dummy_inputs )
        np_pt = pto[0].numpy()
        np_tf = tfo[0].numpy()
        diff = np.amax(np.abs(np_pt - np_tf ) )
        print(F'''Max absolute difference between models outputs {diff}''' )
        assert diff <= 2E-2, F'''Error, model absolute difference is >2e-2: {diff}'''
    # Save pytorch-model
    print(F'''Save TensorFlow model to {tf_dump_path}''' )
    tf_model.save_weights(tf_dump_path , save_format="""h5""" )
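# Example invocation (illustrative; the shortcut name and output path are
# placeholders, not taken from the original script):
#
#   convert_pt_checkpoint_to_tf(
#       model_type="bert",
#       pytorch_checkpoint_path="bert-base-uncased",
#       config_file="bert-base-uncased",
#       tf_dump_path="./bert-base-uncased-tf_model.h5",
#       compare_with_pt_model=True,
#   )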
def convert_all_pt_checkpoints_to_tf( args_model_type , tf_dump_path , model_shortcut_names_or_path=None , config_shortcut_names_or_path=None , compare_with_pt_model=False , use_cached_models=False , remove_cached_files=False , only_convert_finetuned_models=False , ):
    if args_model_type is None:
        model_types = list(MODEL_CLASSES.keys() )
    else:
        model_types = [args_model_type]
    for j, model_type in enumerate(model_types , start=1 ):
print("""=""" * 1_0_0 )
print(F''' Converting model type {j}/{len(__SCREAMING_SNAKE_CASE )}: {model_type}''' )
print("""=""" * 1_0_0 )
if model_type not in MODEL_CLASSES:
raise ValueError(F'''Unrecognized model type {model_type}, should be one of {list(MODEL_CLASSES.keys() )}.''' )
        config_class , model_class , pt_model_class , aws_model_maps , aws_config_map = MODEL_CLASSES[model_type]
        if model_shortcut_names_or_path is None:
            model_shortcut_names_or_path = list(aws_model_maps.keys() )
        if config_shortcut_names_or_path is None:
            config_shortcut_names_or_path = model_shortcut_names_or_path
        for i, (model_shortcut_name, config_shortcut_name) in enumerate(
            zip(model_shortcut_names_or_path , config_shortcut_names_or_path ) , start=1 ):
print("""-""" * 1_0_0 )
if "-squad" in model_shortcut_name or "-mrpc" in model_shortcut_name or "-mnli" in model_shortcut_name:
if not only_convert_finetuned_models:
print(F''' Skipping finetuned checkpoint {model_shortcut_name}''' )
continue
                model_type = model_shortcut_name
elif only_convert_finetuned_models:
print(F''' Skipping not finetuned checkpoint {model_shortcut_name}''' )
continue
print(
F''' Converting checkpoint {i}/{len(__SCREAMING_SNAKE_CASE )}: {model_shortcut_name} - model_type {model_type}''' )
print("""-""" * 1_0_0 )
            if config_shortcut_name in aws_config_map:
                config_file = cached_file(config_shortcut_name , CONFIG_NAME , force_download=not use_cached_models )
            else:
                config_file = config_shortcut_name
            if model_shortcut_name in aws_model_maps:
                model_file = cached_file(model_shortcut_name , WEIGHTS_NAME , force_download=not use_cached_models )
            else:
                model_file = model_shortcut_name
            if os.path.isfile(model_shortcut_name ):
                model_shortcut_name = """converted_model"""
            convert_pt_checkpoint_to_tf(
                model_type=model_type , pytorch_checkpoint_path=model_file , config_file=config_file , tf_dump_path=os.path.join(tf_dump_path , model_shortcut_name + """-tf_model.h5""" ) , compare_with_pt_model=compare_with_pt_model , )
            if remove_cached_files:
                os.remove(config_file )
                os.remove(model_file )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--tf_dump_path", default=None, type=str, required=True, help="Path to the output Tensorflow dump file."
)
parser.add_argument(
"--model_type",
default=None,
type=str,
help=(
F'''Model type selected in the list of {list(MODEL_CLASSES.keys())}. If not given, will download and '''
"convert all the models from AWS."
),
)
parser.add_argument(
"--pytorch_checkpoint_path",
default=None,
type=str,
help=(
"Path to the PyTorch checkpoint path or shortcut name to download from AWS. "
"If not given, will download and convert all the checkpoints from AWS."
),
)
parser.add_argument(
"--config_file",
default=None,
type=str,
help=(
"The config json file corresponding to the pre-trained model. \n"
"This specifies the model architecture. If not given and "
"--pytorch_checkpoint_path is not given or is a shortcut name "
"use the configuration associated to the shortcut name on the AWS"
),
)
parser.add_argument(
"--compare_with_pt_model", action="store_true", help="Compare Tensorflow and PyTorch model predictions."
)
parser.add_argument(
"--use_cached_models",
action="store_true",
help="Use cached models if possible instead of updating to latest checkpoint versions.",
)
parser.add_argument(
"--remove_cached_files",
action="store_true",
help="Remove pytorch models after conversion (save memory when converting in batches).",
)
parser.add_argument("--only_convert_finetuned_models", action="store_true", help="Only convert finetuned models.")
    args = parser.parse_args()
# if args.pytorch_checkpoint_path is not None:
# convert_pt_checkpoint_to_tf(args.model_type.lower(),
# args.pytorch_checkpoint_path,
# args.config_file if args.config_file is not None else args.pytorch_checkpoint_path,
# args.tf_dump_path,
# compare_with_pt_model=args.compare_with_pt_model,
# use_cached_models=args.use_cached_models)
# else:
convert_all_pt_checkpoints_to_tf(
args.model_type.lower() if args.model_type is not None else None,
args.tf_dump_path,
model_shortcut_names_or_path=[args.pytorch_checkpoint_path]
if args.pytorch_checkpoint_path is not None
else None,
config_shortcut_names_or_path=[args.config_file] if args.config_file is not None else None,
compare_with_pt_model=args.compare_with_pt_model,
use_cached_models=args.use_cached_models,
remove_cached_files=args.remove_cached_files,
only_convert_finetuned_models=args.only_convert_finetuned_models,
)
| 20 | 1 |
'''simple docstring'''
from ....configuration_utils import PretrainedConfig
from ....utils import logging
UpperCAmelCase : Optional[Any] = logging.get_logger(__name__)
# TODO: upload to AWS
UpperCAmelCase : Optional[Any] = {
'yjernite/retribert-base-uncased': (
'https://huggingface.co/yjernite/retribert-base-uncased/resolve/main/config.json'
),
}
class RetriBertConfig(PretrainedConfig ):
    """simple docstring"""
    model_type = "retribert"
    def __init__( self , vocab_size=30_522 , hidden_size=768 , num_hidden_layers=8 , num_attention_heads=12 , intermediate_size=3_072 , hidden_act="gelu" , hidden_dropout_prob=0.1 , attention_probs_dropout_prob=0.1 , max_position_embeddings=512 , type_vocab_size=2 , initializer_range=0.02 , layer_norm_eps=1E-12 , share_encoders=True , projection_dim=128 , pad_token_id=0 , **kwargs , ):
        super().__init__(pad_token_id=pad_token_id , **kwargs )
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.share_encoders = share_encoders
        self.projection_dim = projection_dim
| 267 |
'''simple docstring'''
from itertools import count
def solution( min_block_length: int = 50 ) -> int:
    """Returns the least row length n for which the fill-count function first exceeds one million."""
    fill_count_functions = [1] * min_block_length
    for n in count(min_block_length ):
        fill_count_functions.append(1 )
        for block_length in range(min_block_length , n + 1 ):
for block_start in range(n - block_length ):
fill_count_functions[n] += fill_count_functions[
n - block_start - block_length - 1
]
fill_count_functions[n] += 1
if fill_count_functions[n] > 1_00_00_00:
break
return n
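# Small cross-check of the recurrence above (illustrative, not part of the
# original solution): Project Euler 114 states that a row of 7 units with
# blocks of minimum length 3 can be filled in exactly 17 ways, so a naive
# recursive count should agree with that figure.
def _brute_force_fill_count(n: int, m: int) -> int:
    """Count fillings of a length-n row with blocks of length >= m, separated by >= 1 cell."""
    def count_from(pos: int) -> int:
        total = 1  # the option of placing no further block
        for start in range(pos, n):
            for length in range(m, n - start + 1):
                total += count_from(start + length + 1)
        return total
    return count_from(0)

assert _brute_force_fill_count(7, 3) == 17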
if __name__ == "__main__":
print(f"""{solution() = }""")
| 267 | 1 |
__UpperCAmelCase = """
# Transformers installation
! pip install transformers datasets
# To install from source instead of the last release, comment the command above and uncomment the following one.
# ! pip install git+https://github.com/huggingface/transformers.git
"""
__UpperCAmelCase = [{"""type""": """code""", """content""": INSTALL_CONTENT}]
black_avoid_patterns = {
"""{processor_class}""": """FakeProcessorClass""",
"""{model_class}""": """FakeModelClass""",
"""{object_class}""": """FakeObjectClass""",
}
| 139 |
def triangle_number_generator():
    for n in range(1 , 1_0_0_0_0_0_0 ):
        yield n * (n + 1) // 2
def count_divisors(n ):
    divisors_count = 1
    i = 2
    while i * i <= n:
        multiplicity = 0
        while n % i == 0:
            n //= i
            multiplicity += 1
        divisors_count *= multiplicity + 1
        i += 1
    if n > 1:
        divisors_count *= 2
    return divisors_count
def solution():
    return next(i for i in triangle_number_generator() if count_divisors(i ) > 5_0_0 )
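# A quick sanity check for count_divisors (illustrative example, not part of
# the original script): 28 = 2^2 * 7, so it has (2 + 1) * (1 + 1) = 6 divisors.
assert count_divisors(28) == 6  # divisors: 1, 2, 4, 7, 14, 28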
if __name__ == "__main__":
print(solution())
| 139 | 1 |
from typing import List, Union
import numpy as np
from ..utils import add_end_docstrings, is_torch_available, is_vision_available, logging, requires_backends
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_vision_available():
from PIL import Image
from ..image_utils import load_image
if is_torch_available():
import torch
from ..models.auto.modeling_auto import MODEL_FOR_DEPTH_ESTIMATION_MAPPING
UpperCAmelCase__ = logging.get_logger(__name__)
@add_end_docstrings(PIPELINE_INIT_ARGS )
class DepthEstimationPipeline( Pipeline ):
    def __init__( self , *args , **kwargs ):
        super().__init__(*args , **kwargs )
        requires_backends(self , 'vision' )
        self.check_model_type(MODEL_FOR_DEPTH_ESTIMATION_MAPPING )
    def __call__( self , images , **kwargs ):
        return super().__call__(images , **kwargs )
    def _sanitize_parameters( self , **kwargs ):
        return {}, {}, {}
    def preprocess( self , image ):
        image = load_image(image )
        self.image_size = image.size
        model_inputs = self.image_processor(images=image , return_tensors=self.framework )
        return model_inputs
    def _forward( self , model_inputs ):
        model_outputs = self.model(**model_inputs )
        return model_outputs
    def postprocess( self , model_outputs ):
        predicted_depth = model_outputs.predicted_depth
        prediction = torch.nn.functional.interpolate(
            predicted_depth.unsqueeze(1 ) , size=self.image_size[::-1] , mode='bicubic' , align_corners=False )
        output = prediction.squeeze().cpu().numpy()
        formatted = (output * 2_55 / np.max(output )).astype('uint8' )
        depth = Image.fromarray(formatted )
        output_dict = {}
        output_dict['predicted_depth'] = predicted_depth
        output_dict['depth'] = depth
return output_dict
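# Minimal usage sketch (illustrative; the checkpoint name is an assumption --
# any depth-estimation model on the Hub would do):
#
#   from transformers import pipeline
#   depth_estimator = pipeline("depth-estimation", model="Intel/dpt-large")
#   result = depth_estimator("http://images.cocodataset.org/val2017/000000039769.jpg")
#   result["depth"].save("depth.png")       # PIL image of the depth map
#   print(result["predicted_depth"].shape)  # raw tensor from the model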
| 339 | '''simple docstring'''
import json
import os
import unittest
from transformers.models.ctrl.tokenization_ctrl import VOCAB_FILES_NAMES, CTRLTokenizer
from ...test_tokenization_common import TokenizerTesterMixin
class CTRLTokenizationTest( TokenizerTesterMixin , unittest.TestCase ):
    '''simple docstring'''
    tokenizer_class = CTRLTokenizer
    test_rust_tokenizer = False
    test_seq2seq = False
    def setUp( self ):
        super().setUp()
        # Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
        vocab = ['adapt', 're@@', 'a@@', 'apt', 'c@@', 't', '<unk>']
        vocab_tokens = dict(zip(vocab , range(len(vocab ) ) ) )
        merges = ['#version: 0.2', 'a p', 'ap t</w>', 'r e', 'a d', 'ad apt</w>', '']
        self.special_tokens_map = {'unk_token': '<unk>'}
        self.vocab_file = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['vocab_file'] )
        self.merges_file = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['merges_file'] )
        with open(self.vocab_file , 'w' , encoding='utf-8' ) as fp:
            fp.write(json.dumps(vocab_tokens ) + '\n' )
        with open(self.merges_file , 'w' , encoding='utf-8' ) as fp:
            fp.write('\n'.join(merges ) )
    def get_tokenizer( self , **kwargs ):
        kwargs.update(self.special_tokens_map )
        return CTRLTokenizer.from_pretrained(self.tmpdirname , **kwargs )
    def get_input_output_texts( self , tokenizer ):
        input_text = 'adapt react readapt apt'
        output_text = 'adapt react readapt apt'
        return input_text, output_text
    def test_full_tokenizer( self ):
        tokenizer = CTRLTokenizer(self.vocab_file , self.merges_file , **self.special_tokens_map )
        text = 'adapt react readapt apt'
        bpe_tokens = 'adapt re@@ a@@ c@@ t re@@ adapt apt'.split()
        tokens = tokenizer.tokenize(text )
        self.assertListEqual(tokens , bpe_tokens )
        input_tokens = tokens + [tokenizer.unk_token]
        input_bpe_tokens = [0, 1, 2, 4, 5, 1, 0, 3, 6]
        self.assertListEqual(tokenizer.convert_tokens_to_ids(input_tokens ) , input_bpe_tokens )
| 198 | 0 |
import warnings
from pathlib import Path
from typing import List, Tuple, Union
import fire
from torch import nn
from transformers import AutoModelForSeqaSeqLM, AutoTokenizer, PreTrainedModel
from transformers.utils import logging
logger = logging.get_logger(__name__)
def copy_layers( src_layers: nn.ModuleList , dest_layers: nn.ModuleList , layers_to_copy: List[int] ) -> None:
    layers_to_copy = nn.ModuleList([src_layers[i] for i in layers_to_copy] )
    assert len(dest_layers ) == len(layers_to_copy ), F'{len(dest_layers )} != {len(layers_to_copy )}'
    dest_layers.load_state_dict(layers_to_copy.state_dict() )
LAYERS_TO_COPY = {
# maps num layers in teacher -> num_layers in student -> which teacher layers to copy.
# 12: bart, 16: pegasus, 6: marian/Helsinki-NLP
12: {
1: [0], # This says that if the teacher has 12 layers and the student has 1, copy layer 0 of the teacher
2: [0, 6],
3: [0, 6, 11],
4: [0, 4, 8, 11],
6: [0, 2, 4, 7, 9, 11],
9: [0, 1, 2, 4, 5, 7, 9, 10, 11],
12: list(range(12)),
},
16: { # maps num layers in student -> which teacher layers to copy
1: [0],
2: [0, 15],
3: [0, 8, 15],
4: [0, 5, 10, 15],
6: [0, 3, 6, 9, 12, 15],
8: [0, 2, 4, 6, 8, 10, 12, 15],
9: [0, 1, 3, 5, 7, 9, 11, 13, 15],
12: [0, 1, 2, 3, 4, 5, 6, 7, 9, 11, 13, 15],
16: list(range(16)),
},
6: {1: [0], 2: [0, 5], 3: [0, 2, 5], 4: [0, 1, 3, 5], 6: list(range(6))},
}
LAYERS_TO_SUPERVISE = {
# maps num layers in student -> which teacher layers to copy.
6: {1: [5], 2: [3, 5], 3: [1, 4, 5], 4: [1, 2, 4, 5]},
12: {1: [11], 2: [5, 11], 3: [3, 7, 11], 6: [1, 3, 5, 8, 10, 11]},
16: {1: [15], 4: [4, 9, 12, 15], 8: [1, 3, 5, 7, 9, 11, 13, 15]},
}
def pick_layers_to_copy( n_student , n_teacher ):
    try:
        val = LAYERS_TO_COPY[n_teacher][n_student]
        return val
    except KeyError:
        if n_student != n_teacher:
            warnings.warn(
                F'no hardcoded layers to copy for teacher {n_teacher} -> student {n_student}, defaulting to first'
                F' {n_student}' )
        return list(range(n_student ) )
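# For example, using the hardcoded tables above:
#
#   >>> pick_layers_to_copy(n_student=3, n_teacher=12)
#   [0, 6, 11]
#   >>> pick_layers_to_copy(n_student=1, n_teacher=16)
#   [0]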
def get_layers_to_supervise( n_student , n_teacher ) -> List[int]:
    if n_student > n_teacher:
        raise ValueError(F'Cannot perform intermediate supervision for student {n_student} > teacher {n_teacher}' )
    elif n_teacher == n_student:
        return list(range(n_teacher ) )
    elif n_student == 1:
        return [n_teacher - 1]
    else:
        return LAYERS_TO_SUPERVISE[n_teacher][n_student]
def A ( a_ ,a_ = "student" ,a_ = None ,a_ = None ,a_=False ,a_=None ,a_=None ,**a_ ,) -> Tuple[PreTrainedModel, List[int], List[int]]:
__UpperCamelCase : Union[str, Any] ="""encoder_layers and decoder_layers cannot be both None-- you would just have an identical teacher."""
assert (e is not None) or (d is not None), _msg
if isinstance(__lowerCAmelCase ,__lowerCAmelCase ):
AutoTokenizer.from_pretrained(__lowerCAmelCase ).save_pretrained(__lowerCAmelCase ) # purely for convenience
__UpperCamelCase : Union[str, Any] =AutoModelForSeqaSeqLM.from_pretrained(__lowerCAmelCase ).eval()
else:
assert isinstance(__lowerCAmelCase ,__lowerCAmelCase ), F'teacher must be a model or string got type {type(__lowerCAmelCase )}'
__UpperCamelCase : Tuple =teacher.config.to_diff_dict()
try:
__UpperCamelCase : Tuple =teacher.config.encoder_layers, teacher.config.decoder_layers
if e is None:
__UpperCamelCase : str =teacher_e
if d is None:
__UpperCamelCase : Optional[Any] =teacher_d
init_kwargs.update({'encoder_layers': e, 'decoder_layers': d} )
except AttributeError: # T5
if hasattr(teacher.config ,'num_encoder_layers' ):
__UpperCamelCase : Optional[Any] =teacher.config.num_encoder_layers, teacher.config.num_decoder_layers
else:
__UpperCamelCase : int =teacher.config.num_layers, teacher.config.num_decoder_layers
if e is None:
__UpperCamelCase : Tuple =teacher_e
if d is None:
__UpperCamelCase : List[str] =teacher_d
if hasattr(teacher.config ,'num_encoder_layers' ):
init_kwargs.update({'num_encoder_layers': e, 'num_decoder_layers': d} )
else:
init_kwargs.update({'num_layers': e, 'num_decoder_layers': d} )
# Kwargs to instantiate student: teacher kwargs with updated layer numbers + **extra_config_kwargs
init_kwargs.update(__lowerCAmelCase )
# Copy weights
__UpperCamelCase : Dict =teacher.config_class(**__lowerCAmelCase )
    student = AutoModelForSeqaSeqLM.from_config(student_cfg )
    # Start by copying the full teacher state dict; this will copy the first N teacher layers to the student.
    info = student.load_state_dict(teacher.state_dict() , strict=False )
    assert info.missing_keys == [], info.missing_keys  # every student key should have a teacher key.
    if copy_first_teacher_layers:  # Our copying is done. We just log and save
        e_layers_to_copy , d_layers_to_copy = list(range(e ) ), list(range(d ) )
        logger.info(
            F'Copied encoder layers {e_layers_to_copy} and decoder layers {d_layers_to_copy}. Saving them to'
            F' {save_path}' )
        student.save_pretrained(save_path )
        return student, e_layers_to_copy, d_layers_to_copy
    # Decide which layers of the teacher to copy. Not exactly alternating -- we try to keep first and last layer.
    if e_layers_to_copy is None:
        e_layers_to_copy: List[int] = pick_layers_to_copy(e , teacher_e )
    if d_layers_to_copy is None:
        d_layers_to_copy: List[int] = pick_layers_to_copy(d , teacher_d )
    try:
        if hasattr(
            teacher , 'prophetnet' ):  # For ProphetNet, student.model.encoder.layers is called student.prophetnet.encoder.layers
            copy_layers(teacher.prophetnet.encoder.layers , student.prophetnet.encoder.layers , e_layers_to_copy )
            copy_layers(teacher.prophetnet.decoder.layers , student.prophetnet.decoder.layers , d_layers_to_copy )
        else:
            copy_layers(teacher.model.encoder.layers , student.model.encoder.layers , e_layers_to_copy )
            copy_layers(teacher.model.decoder.layers , student.model.decoder.layers , d_layers_to_copy )
    except AttributeError:  # For t5, student.model.encoder.layers is called student.encoder.block
        copy_layers(teacher.encoder.block , student.encoder.block , e_layers_to_copy )
        copy_layers(teacher.decoder.block , student.decoder.block , d_layers_to_copy )
    logger.info(
        F'Copied encoder layers {e_layers_to_copy} and decoder layers {d_layers_to_copy}. Saving them to {save_path}' )
    # Save information about copying for easier reproducibility
    student.config.init_metadata = {
        "teacher_type": teacher.config.model_type,
        "copied_encoder_layers": e_layers_to_copy,
        "copied_decoder_layers": d_layers_to_copy,
    }
    student.save_pretrained(save_path )
    return student, e_layers_to_copy, d_layers_to_copy
if __name__ == "__main__":
fire.Fire(create_student_by_copying_alternating_layers)
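# Example CLI call via python-fire (illustrative; the script name, teacher
# checkpoint and output directory are placeholders):
#
#   python make_student.py facebook/bart-large-cnn ./student_cnn_6_3 --e 6 --d 3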
| 362 |
import argparse
from transformers import (
TapasConfig,
TapasForMaskedLM,
TapasForQuestionAnswering,
TapasForSequenceClassification,
TapasModel,
TapasTokenizer,
load_tf_weights_in_tapas,
)
from transformers.utils import logging
logging.set_verbosity_info()
def convert_tf_checkpoint_to_pytorch( task , reset_position_index_per_cell , tf_checkpoint_path , tapas_config_file , pytorch_dump_path ):
    # Initialise PyTorch model.
    # If you want to convert a checkpoint that uses absolute position embeddings, make sure to set reset_position_index_per_cell of
    # TapasConfig to False.
    # initialize configuration from json file
    config = TapasConfig.from_json_file(tapas_config_file )
    # set absolute/relative position embeddings parameter
    config.reset_position_index_per_cell = reset_position_index_per_cell
    # set remaining parameters of TapasConfig as well as the model based on the task
    if task == "SQA":
        model = TapasForQuestionAnswering(config=config )
elif task == "WTQ":
# run_task_main.py hparams
__UpperCamelCase : Optional[int] =4
__UpperCamelCase : Optional[Any] =True
# hparam_utils.py hparams
__UpperCamelCase : int =0.664_694
__UpperCamelCase : Any =0.207_951
__UpperCamelCase : Tuple =0.121_194
__UpperCamelCase : List[str] =True
__UpperCamelCase : Dict =True
__UpperCamelCase : Optional[Any] =False
__UpperCamelCase : Optional[int] =0.0_352_513
__UpperCamelCase : Optional[Any] =TapasForQuestionAnswering(config=a_ )
elif task == "WIKISQL_SUPERVISED":
# run_task_main.py hparams
__UpperCamelCase : List[Any] =4
__UpperCamelCase : List[str] =False
# hparam_utils.py hparams
__UpperCamelCase : List[str] =36.4_519
__UpperCamelCase : Dict =0.903_421
__UpperCamelCase : List[Any] =222.088
__UpperCamelCase : Optional[Any] =True
__UpperCamelCase : Optional[int] =True
__UpperCamelCase : Dict =True
__UpperCamelCase : Dict =0.763_141
__UpperCamelCase : Union[str, Any] =TapasForQuestionAnswering(config=a_ )
elif task == "TABFACT":
__UpperCamelCase : List[Any] =TapasForSequenceClassification(config=a_ )
elif task == "MLM":
__UpperCamelCase : Optional[Any] =TapasForMaskedLM(config=a_ )
elif task == "INTERMEDIATE_PRETRAINING":
__UpperCamelCase : Optional[Any] =TapasModel(config=a_ )
else:
raise ValueError(F'Task {task} not supported.' )
print(F'Building PyTorch model from configuration: {config}' )
# Load weights from tf checkpoint
    load_tf_weights_in_tapas(model , config , tf_checkpoint_path )
    # Save pytorch-model (weights and configuration)
    print(F'Save PyTorch model to {pytorch_dump_path}' )
    model.save_pretrained(pytorch_dump_path )
    # Save tokenizer files
    print(F'Save tokenizer files to {pytorch_dump_path}' )
    tokenizer = TapasTokenizer(vocab_file=tf_checkpoint_path[:-10] + 'vocab.txt' , model_max_length=512 )
    tokenizer.save_pretrained(pytorch_dump_path )
print('Used relative position embeddings:' ,model.config.reset_position_index_per_cell )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--task''', default='''SQA''', type=str, help='''Model task for which to convert a checkpoint. Defaults to SQA.'''
)
parser.add_argument(
'''--reset_position_index_per_cell''',
default=False,
action='''store_true''',
help='''Whether to use relative position embeddings or not. Defaults to True.''',
)
parser.add_argument(
'''--tf_checkpoint_path''', default=None, type=str, required=True, help='''Path to the TensorFlow checkpoint path.'''
)
parser.add_argument(
'''--tapas_config_file''',
default=None,
type=str,
required=True,
help=(
'''The config json file corresponding to the pre-trained TAPAS model. \n'''
'''This specifies the model architecture.'''
),
)
parser.add_argument(
'''--pytorch_dump_path''', default=None, type=str, required=True, help='''Path to the output PyTorch model.'''
)
    args = parser.parse_args()
convert_tf_checkpoint_to_pytorch(
args.task,
args.reset_position_index_per_cell,
args.tf_checkpoint_path,
args.tapas_config_file,
args.pytorch_dump_path,
)
| 245 | 0 |
'''simple docstring'''
import sacrebleu as scb
from packaging import version
from sacrebleu import TER
import datasets
__a = "\\n@inproceedings{snover-etal-2006-study,\n title = \"A Study of Translation Edit Rate with Targeted Human Annotation\",\n author = \"Snover, Matthew and\n Dorr, Bonnie and\n Schwartz, Rich and\n Micciulla, Linnea and\n Makhoul, John\",\n booktitle = \"Proceedings of the 7th Conference of the Association for Machine Translation in the Americas: Technical Papers\",\n month = aug # \" 8-12\",\n year = \"2006\",\n address = \"Cambridge, Massachusetts, USA\",\n publisher = \"Association for Machine Translation in the Americas\",\n url = \"https://aclanthology.org/2006.amta-papers.25\",\n pages = \"223--231\",\n}\n@inproceedings{post-2018-call,\n title = \"A Call for Clarity in Reporting {BLEU} Scores\",\n author = \"Post, Matt\",\n booktitle = \"Proceedings of the Third Conference on Machine Translation: Research Papers\",\n month = oct,\n year = \"2018\",\n address = \"Belgium, Brussels\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://www.aclweb.org/anthology/W18-6319\",\n pages = \"186--191\",\n}\n"
__a = "\\nTER (Translation Edit Rate, also called Translation Error Rate) is a metric to quantify the edit operations that a\nhypothesis requires to match a reference translation. We use the implementation that is already present in sacrebleu\n(https://github.com/mjpost/sacreBLEU#ter), which in turn is inspired by the TERCOM implementation, which can be found\nhere: https://github.com/jhclark/tercom.\n\nThe implementation here is slightly different from sacrebleu in terms of the required input format. The length of\nthe references and hypotheses lists need to be the same, so you may need to transpose your references compared to\nsacrebleu's required input format. See https://github.com/huggingface/datasets/issues/3154#issuecomment-950746534\n\nSee the README.md file at https://github.com/mjpost/sacreBLEU#ter for more information.\n"
__a = "\nProduces TER scores alongside the number of edits and reference length.\n\nArgs:\n predictions (list of str): The system stream (a sequence of segments).\n references (list of list of str): A list of one or more reference streams (each a sequence of segments).\n normalized (boolean): If `True`, applies basic tokenization and normalization to sentences. Defaults to `False`.\n ignore_punct (boolean): If `True`, applies basic tokenization and normalization to sentences. Defaults to `False`.\n support_zh_ja_chars (boolean): If `True`, tokenization/normalization supports processing of Chinese characters,\n as well as Japanese Kanji, Hiragana, Katakana, and Phonetic Extensions of Katakana.\n Only applies if `normalized = True`. Defaults to `False`.\n case_sensitive (boolean): If `False`, makes all predictions and references lowercase to ignore differences in case. Defaults to `False`.\n\nReturns:\n 'score' (float): TER score (num_edits / sum_ref_lengths * 100)\n 'num_edits' (int): The cumulative number of edits\n 'ref_length' (float): The cumulative average reference length\n\nExamples:\n Example 1:\n >>> predictions = [\"does this sentence match??\",\n ... \"what about this sentence?\",\n ... \"What did the TER metric user say to the developer?\"]\n >>> references = [[\"does this sentence match\", \"does this sentence match!?!\"],\n ... [\"wHaT aBoUt ThIs SeNtEnCe?\", \"wHaT aBoUt ThIs SeNtEnCe?\"],\n ... [\"Your jokes are...\", \"...TERrible\"]]\n >>> ter = datasets.load_metric(\"ter\")\n >>> results = ter.compute(predictions=predictions,\n ... references=references,\n ... case_sensitive=True)\n >>> print(results)\n {'score': 150.0, 'num_edits': 15, 'ref_length': 10.0}\n\n Example 2:\n >>> predictions = [\"does this sentence match??\",\n ... \"what about this sentence?\"]\n >>> references = [[\"does this sentence match\", \"does this sentence match!?!\"],\n ... [\"wHaT aBoUt ThIs SeNtEnCe?\", \"wHaT aBoUt ThIs SeNtEnCe?\"]]\n >>> ter = datasets.load_metric(\"ter\")\n >>> results = ter.compute(predictions=predictions,\n ... references=references,\n ... case_sensitive=True)\n >>> print(results)\n {'score': 62.5, 'num_edits': 5, 'ref_length': 8.0}\n\n Example 3:\n >>> predictions = [\"does this sentence match??\",\n ... \"what about this sentence?\"]\n >>> references = [[\"does this sentence match\", \"does this sentence match!?!\"],\n ... [\"wHaT aBoUt ThIs SeNtEnCe?\", \"wHaT aBoUt ThIs SeNtEnCe?\"]]\n >>> ter = datasets.load_metric(\"ter\")\n >>> results = ter.compute(predictions=predictions,\n ... references=references,\n ... normalized=True,\n ... case_sensitive=True)\n >>> print(results)\n {'score': 57.14285714285714, 'num_edits': 6, 'ref_length': 10.5}\n\n Example 4:\n >>> predictions = [\"does this sentence match??\",\n ... \"what about this sentence?\"]\n >>> references = [[\"does this sentence match\", \"does this sentence match!?!\"],\n ... [\"wHaT aBoUt ThIs SeNtEnCe?\", \"wHaT aBoUt ThIs SeNtEnCe?\"]]\n >>> ter = datasets.load_metric(\"ter\")\n >>> results = ter.compute(predictions=predictions,\n ... references=references,\n ... ignore_punct=True,\n ... case_sensitive=False)\n >>> print(results)\n {'score': 0.0, 'num_edits': 0, 'ref_length': 8.0}\n\n Example 5:\n >>> predictions = [\"does this sentence match??\",\n ... \"what about this sentence?\",\n ... \"What did the TER metric user say to the developer?\"]\n >>> references = [[\"does this sentence match\", \"does this sentence match!?!\"],\n ... [\"wHaT aBoUt ThIs SeNtEnCe?\", \"wHaT aBoUt ThIs SeNtEnCe?\"],\n ... 
[\"Your jokes are...\", \"...TERrible\"]]\n >>> ter = datasets.load_metric(\"ter\")\n >>> results = ter.compute(predictions=predictions,\n ... references=references,\n ... ignore_punct=True,\n ... case_sensitive=False)\n >>> print(results)\n {'score': 100.0, 'num_edits': 10, 'ref_length': 10.0}\n"
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class UpperCAmelCase_ ( datasets.Metric ):
"""simple docstring"""
    def _info( self ):
if version.parse(scb.__version__ ) < version.parse("""1.4.12""" ):
raise ImportWarning(
"""To use `sacrebleu`, the module `sacrebleu>=1.4.12` is required, and the current version of `sacrebleu` doesn't match this condition.\n"""
"""You can install it with `pip install \"sacrebleu>=1.4.12\"`.""" )
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , homepage="""http://www.cs.umd.edu/~snover/tercom/""" , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
"""predictions""": datasets.Value("""string""" , id="""sequence""" ),
"""references""": datasets.Sequence(datasets.Value("""string""" , id="""sequence""" ) , id="""references""" ),
} ) , codebase_urls=["""https://github.com/mjpost/sacreBLEU#ter"""] , reference_urls=[
"""https://github.com/jhclark/tercom""",
] , )
    def _compute( self , predictions , references , normalized : bool = False , ignore_punct : bool = False , support_zh_ja_chars : bool = False , case_sensitive : bool = False , ):
        references_per_prediction = len(references[0] )
        if any(len(refs ) != references_per_prediction for refs in references ):
            raise ValueError("""Sacrebleu requires the same number of references for each prediction""" )
        transformed_references = [[refs[i] for refs in references] for i in range(references_per_prediction )]
        sb_ter = TER(
            normalized=normalized , no_punct=ignore_punct , asian_support=support_zh_ja_chars , case_sensitive=case_sensitive , )
        output = sb_ter.corpus_score(predictions , transformed_references )
return {"score": output.score, "num_edits": output.num_edits, "ref_length": output.ref_length}
| 35 |
'''simple docstring'''
import argparse
import json
from pathlib import Path
import requests
import timm
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import DeiTConfig, DeiTForImageClassificationWithTeacher, DeiTImageProcessor
from transformers.utils import logging
logging.set_verbosity_info()
__a = logging.get_logger(__name__)
def create_rename_keys( config , base_model=False ):
    rename_keys = []
for i in range(config.num_hidden_layers ):
# encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
rename_keys.append((f"blocks.{i}.norm1.weight", f"deit.encoder.layer.{i}.layernorm_before.weight") )
rename_keys.append((f"blocks.{i}.norm1.bias", f"deit.encoder.layer.{i}.layernorm_before.bias") )
rename_keys.append((f"blocks.{i}.attn.proj.weight", f"deit.encoder.layer.{i}.attention.output.dense.weight") )
rename_keys.append((f"blocks.{i}.attn.proj.bias", f"deit.encoder.layer.{i}.attention.output.dense.bias") )
rename_keys.append((f"blocks.{i}.norm2.weight", f"deit.encoder.layer.{i}.layernorm_after.weight") )
rename_keys.append((f"blocks.{i}.norm2.bias", f"deit.encoder.layer.{i}.layernorm_after.bias") )
rename_keys.append((f"blocks.{i}.mlp.fc1.weight", f"deit.encoder.layer.{i}.intermediate.dense.weight") )
rename_keys.append((f"blocks.{i}.mlp.fc1.bias", f"deit.encoder.layer.{i}.intermediate.dense.bias") )
rename_keys.append((f"blocks.{i}.mlp.fc2.weight", f"deit.encoder.layer.{i}.output.dense.weight") )
rename_keys.append((f"blocks.{i}.mlp.fc2.bias", f"deit.encoder.layer.{i}.output.dense.bias") )
# projection layer + position embeddings
rename_keys.extend(
[
("""cls_token""", """deit.embeddings.cls_token"""),
("""dist_token""", """deit.embeddings.distillation_token"""),
("""patch_embed.proj.weight""", """deit.embeddings.patch_embeddings.projection.weight"""),
("""patch_embed.proj.bias""", """deit.embeddings.patch_embeddings.projection.bias"""),
("""pos_embed""", """deit.embeddings.position_embeddings"""),
] )
if base_model:
# layernorm + pooler
rename_keys.extend(
[
("""norm.weight""", """layernorm.weight"""),
("""norm.bias""", """layernorm.bias"""),
("""pre_logits.fc.weight""", """pooler.dense.weight"""),
("""pre_logits.fc.bias""", """pooler.dense.bias"""),
] )
# if just the base model, we should remove "deit" from all keys that start with "deit"
snake_case__ : List[Any] = [(pair[0], pair[1][4:]) if pair[1].startswith("""deit""" ) else pair for pair in rename_keys]
else:
# layernorm + classification heads
rename_keys.extend(
[
("""norm.weight""", """deit.layernorm.weight"""),
("""norm.bias""", """deit.layernorm.bias"""),
("""head.weight""", """cls_classifier.weight"""),
("""head.bias""", """cls_classifier.bias"""),
("""head_dist.weight""", """distillation_classifier.weight"""),
("""head_dist.bias""", """distillation_classifier.bias"""),
] )
return rename_keys
def read_in_q_k_v( state_dict , config , base_model=False ):
    for i in range(config.num_hidden_layers ):
        if base_model:
            prefix = """"""
        else:
            prefix = """deit."""
        # read in weights + bias of input projection layer (in timm, this is a single matrix + bias)
        in_proj_weight = state_dict.pop(f"blocks.{i}.attn.qkv.weight" )
        in_proj_bias = state_dict.pop(f"blocks.{i}.attn.qkv.bias" )
        # next, add query, keys and values (in that order) to the state dict
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.query.weight"] = in_proj_weight[
            : config.hidden_size, :
        ]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.query.bias"] = in_proj_bias[: config.hidden_size]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.key.weight"] = in_proj_weight[
            config.hidden_size : config.hidden_size * 2, :
        ]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.key.bias"] = in_proj_bias[
            config.hidden_size : config.hidden_size * 2
        ]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.value.weight"] = in_proj_weight[
            -config.hidden_size :, :
        ]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.value.bias"] = in_proj_bias[-config.hidden_size :]
def rename_key( dct , old , new ):
    val = dct.pop(old )
    dct[new] = val
def prepare_img():
    url = """http://images.cocodataset.org/val2017/000000039769.jpg"""
    im = Image.open(requests.get(url , stream=True ).raw )
    return im
@torch.no_grad()
def convert_deit_checkpoint( deit_name , pytorch_dump_folder_path ):
    config = DeiTConfig()
    # all deit models have fine-tuned heads
    base_model = False
    # dataset (fine-tuned on ImageNet 2012), patch_size and image_size
    config.num_labels = 1_000
    repo_id = """huggingface/label-files"""
    filename = """imagenet-1k-id2label.json"""
    id2label = json.load(open(hf_hub_download(repo_id , filename , repo_type="""dataset""" ) , """r""" ) )
    id2label = {int(k ): v for k, v in id2label.items()}
    config.id2label = id2label
    config.label2id = {v: k for k, v in id2label.items()}
    config.patch_size = int(deit_name[-6:-4] )
    config.image_size = int(deit_name[-3:] )
    # size of the architecture
    if deit_name[9:].startswith("""tiny""" ):
        config.hidden_size = 192
        config.intermediate_size = 768
        config.num_hidden_layers = 12
        config.num_attention_heads = 3
    elif deit_name[9:].startswith("""small""" ):
        config.hidden_size = 384
        config.intermediate_size = 1_536
        config.num_hidden_layers = 12
        config.num_attention_heads = 6
    if deit_name[9:].startswith("""base""" ):
        pass
    elif deit_name[4:].startswith("""large""" ):
        config.hidden_size = 1_024
        config.intermediate_size = 4_096
        config.num_hidden_layers = 24
        config.num_attention_heads = 16
    # load original model from timm
    timm_model = timm.create_model(deit_name , pretrained=True )
    timm_model.eval()
    # load state_dict of original model, remove and rename some keys
    state_dict = timm_model.state_dict()
    rename_keys = create_rename_keys(config , base_model )
    for src, dest in rename_keys:
        rename_key(state_dict , src , dest )
    read_in_q_k_v(state_dict , config , base_model )
    # load HuggingFace model
    model = DeiTForImageClassificationWithTeacher(config ).eval()
    model.load_state_dict(state_dict )
    # Check outputs on an image, prepared by DeiTImageProcessor
    size = int(
        (256 / 224) * config.image_size )  # to maintain same ratio w.r.t. 224 images, see https://github.com/facebookresearch/deit/blob/ab5715372db8c6cad5740714b2216d55aeae052e/datasets.py#L103
    image_processor = DeiTImageProcessor(size=size , crop_size=config.image_size )
    encoding = image_processor(images=prepare_img() , return_tensors="""pt""" )
    pixel_values = encoding["""pixel_values"""]
    outputs = model(pixel_values )
    timm_logits = timm_model(pixel_values )
    assert timm_logits.shape == outputs.logits.shape
    assert torch.allclose(timm_logits , outputs.logits , atol=1e-3 )
    Path(pytorch_dump_folder_path ).mkdir(exist_ok=True )
    print(f"Saving model {deit_name} to {pytorch_dump_folder_path}" )
    model.save_pretrained(pytorch_dump_folder_path )
    print(f"Saving image processor to {pytorch_dump_folder_path}" )
    image_processor.save_pretrained(pytorch_dump_folder_path )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--deit_name",
default="vit_deit_base_distilled_patch16_224",
type=str,
help="Name of the DeiT timm model you'd like to convert.",
)
parser.add_argument(
"--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model directory."
)
    args = parser.parse_args()
convert_deit_checkpoint(args.deit_name, args.pytorch_dump_folder_path)
| 35 | 1 |
import os
import tempfile
import unittest
from transformers.models.marian.convert_marian_tatoeba_to_pytorch import DEFAULT_REPO, TatoebaConverter
from transformers.testing_utils import slow
from transformers.utils import cached_property
@unittest.skipUnless(os.path.exists(DEFAULT_REPO ) , """Tatoeba directory does not exist.""" )
class TatoebaConversionTester(unittest.TestCase ):
"""simple docstring"""
    @cached_property
    def resolver( self ):
        """simple docstring"""
        tmp_dir = tempfile.mkdtemp()
        return TatoebaConverter(save_dir=tmp_dir )
@slow
    def test_resolver( self ):
"""simple docstring"""
self.resolver.convert_models(['heb-eng'] )
@slow
    def test_model_card( self ):
"""simple docstring"""
        mmeta = self.resolver.write_model_card('opus-mt-he-en' , dry_run=True )
assert mmeta["long_pair"] == "heb-eng"
| 351 |
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
lowerCAmelCase__ : Optional[Any] =logging.get_logger(__name__)
lowerCAmelCase__ : Optional[int] ={
'sail/poolformer_s12': 'https://huggingface.co/sail/poolformer_s12/resolve/main/config.json',
# See all PoolFormer models at https://huggingface.co/models?filter=poolformer
}
class PoolFormerConfig(PretrainedConfig ):
    """simple docstring"""
    model_type = """poolformer"""
    def __init__( self , num_channels=3 , patch_size=1_6 , stride=1_6 , pool_size=3 , mlp_ratio=4.0 , depths=[2, 2, 6, 2] , hidden_sizes=[6_4, 1_2_8, 3_2_0, 5_1_2] , patch_sizes=[7, 3, 3, 3] , strides=[4, 2, 2, 2] , padding=[2, 1, 1, 1] , num_encoder_blocks=4 , drop_path_rate=0.0 , hidden_act="gelu" , use_layer_scale=True , layer_scale_init_value=1E-5 , initializer_range=0.02 , **kwargs , ):
        """simple docstring"""
        self.num_channels = num_channels
        self.patch_size = patch_size
        self.stride = stride
        self.padding = padding
        self.pool_size = pool_size
        self.hidden_sizes = hidden_sizes
        self.mlp_ratio = mlp_ratio
        self.depths = depths
        self.patch_sizes = patch_sizes
        self.strides = strides
        self.num_encoder_blocks = num_encoder_blocks
        self.drop_path_rate = drop_path_rate
        self.hidden_act = hidden_act
        self.use_layer_scale = use_layer_scale
        self.layer_scale_init_value = layer_scale_init_value
        self.initializer_range = initializer_range
        super().__init__(**kwargs )
class PoolFormerOnnxConfig(OnnxConfig ):
    """simple docstring"""
    torch_onnx_minimum_version = version.parse("""1.11""" )
@property
    def inputs( self ):
"""simple docstring"""
return OrderedDict(
[
('pixel_values', {0: 'batch', 1: 'num_channels', 2: 'height', 3: 'width'}),
] )
@property
    def atol_for_validation( self ):
"""simple docstring"""
return 2E-3
| 162 | 0 |
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
lowercase : Optional[Any] = logging.get_logger(__name__)
lowercase : Optional[int] = {
"""camembert-base""": """https://huggingface.co/camembert-base/resolve/main/config.json""",
"""umberto-commoncrawl-cased-v1""": (
"""https://huggingface.co/Musixmatch/umberto-commoncrawl-cased-v1/resolve/main/config.json"""
),
"""umberto-wikipedia-uncased-v1""": (
"""https://huggingface.co/Musixmatch/umberto-wikipedia-uncased-v1/resolve/main/config.json"""
),
}
class CamembertConfig(PretrainedConfig ):
    model_type = "camembert"
    def __init__( self , vocab_size=30522 , hidden_size=768 , num_hidden_layers=12 , num_attention_heads=12 , intermediate_size=3072 , hidden_act="gelu" , hidden_dropout_prob=0.1 , attention_probs_dropout_prob=0.1 , max_position_embeddings=512 , type_vocab_size=2 , initializer_range=0.02 , layer_norm_eps=1e-12 , pad_token_id=1 , bos_token_id=0 , eos_token_id=2 , position_embedding_type="absolute" , use_cache=True , classifier_dropout=None , **kwargs , ):
        '''simple docstring'''
        super().__init__(pad_token_id=pad_token_id , bos_token_id=bos_token_id , eos_token_id=eos_token_id , **kwargs )
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
        self.classifier_dropout = classifier_dropout
class CamembertOnnxConfig(OnnxConfig ):
    @property
    def inputs( self ):
        '''simple docstring'''
        if self.task == "multiple-choice":
            dynamic_axis = {0: """batch""", 1: """choice""", 2: """sequence"""}
        else:
            dynamic_axis = {0: """batch""", 1: """sequence"""}
return OrderedDict(
[
("""input_ids""", dynamic_axis),
("""attention_mask""", dynamic_axis),
] )
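# Minimal usage sketch (illustrative, not part of the original module):
#
#   config = CamembertConfig()          # defaults mirror camembert-base
#   print(config.hidden_size)           # 768
#   print(config.num_hidden_layers)     # 12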
| 20 |
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import MobileNetVaImageProcessor
class MobileNetVaImageProcessingTester(unittest.TestCase ):
    def __init__( self , parent , batch_size=7 , num_channels=3 , image_size=18 , min_resolution=30 , max_resolution=400 , do_resize=True , size=None , do_center_crop=True , crop_size=None , ):
        '''simple docstring'''
        size = size if size is not None else {"""shortest_edge""": 20}
        crop_size = crop_size if crop_size is not None else {"""height""": 18, """width""": 18}
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.image_size = image_size
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.size = size
        self.do_center_crop = do_center_crop
        self.crop_size = crop_size
    def prepare_image_processor_dict( self ):
'''simple docstring'''
return {
"do_resize": self.do_resize,
"size": self.size,
"do_center_crop": self.do_center_crop,
"crop_size": self.crop_size,
}
@require_torch
@require_vision
class MobileNetVaImageProcessingTest(ImageProcessingSavingTestMixin , unittest.TestCase ):
    image_processing_class = MobileNetVaImageProcessor if is_vision_available() else None
    def setUp( self ):
        '''simple docstring'''
        self.image_processor_tester = MobileNetVaImageProcessingTester(self )
@property
    def image_processor_dict( self ):
'''simple docstring'''
return self.image_processor_tester.prepare_image_processor_dict()
    def test_image_processor_properties( self ):
'''simple docstring'''
        image_processing = self.image_processing_class(**self.image_processor_dict )
        self.assertTrue(hasattr(image_processing , """do_resize""" ) )
        self.assertTrue(hasattr(image_processing , """size""" ) )
        self.assertTrue(hasattr(image_processing , """do_center_crop""" ) )
        self.assertTrue(hasattr(image_processing , """crop_size""" ) )
def _SCREAMING_SNAKE_CASE ( self ):
'''simple docstring'''
lowercase : int = self.image_processing_class.from_dict(self.image_processor_dict )
self.assertEqual(image_processor.size ,{"""shortest_edge""": 20} )
self.assertEqual(image_processor.crop_size ,{"""height""": 18, """width""": 18} )
lowercase : int = self.image_processing_class.from_dict(self.image_processor_dict ,size=42 ,crop_size=84 )
self.assertEqual(image_processor.size ,{"""shortest_edge""": 42} )
self.assertEqual(image_processor.crop_size ,{"""height""": 84, """width""": 84} )
def _SCREAMING_SNAKE_CASE ( self ):
'''simple docstring'''
pass
def _SCREAMING_SNAKE_CASE ( self ):
'''simple docstring'''
lowercase : Optional[Any] = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
lowercase : str = prepare_image_inputs(self.image_processor_tester ,equal_resolution=snake_case )
for image in image_inputs:
self.assertIsInstance(snake_case ,Image.Image )
# Test not batched input
lowercase : Dict = image_processing(image_inputs[0] ,return_tensors="""pt""" ).pixel_values
self.assertEqual(
encoded_images.shape ,(
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["""height"""],
self.image_processor_tester.crop_size["""width"""],
) ,)
# Test batched
lowercase : Tuple = image_processing(snake_case ,return_tensors="""pt""" ).pixel_values
self.assertEqual(
encoded_images.shape ,(
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["""height"""],
self.image_processor_tester.crop_size["""width"""],
) ,)
def _SCREAMING_SNAKE_CASE ( self ):
'''simple docstring'''
lowercase : List[Any] = self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
lowercase : Union[str, Any] = prepare_image_inputs(self.image_processor_tester ,equal_resolution=snake_case ,numpify=snake_case )
for image in image_inputs:
self.assertIsInstance(snake_case ,np.ndarray )
# Test not batched input
lowercase : Optional[Any] = image_processing(image_inputs[0] ,return_tensors="""pt""" ).pixel_values
self.assertEqual(
encoded_images.shape ,(
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["""height"""],
self.image_processor_tester.crop_size["""width"""],
) ,)
# Test batched
lowercase : List[str] = image_processing(snake_case ,return_tensors="""pt""" ).pixel_values
self.assertEqual(
encoded_images.shape ,(
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["""height"""],
self.image_processor_tester.crop_size["""width"""],
) ,)
def _SCREAMING_SNAKE_CASE ( self ):
'''simple docstring'''
lowercase : Optional[Any] = self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
lowercase : Tuple = prepare_image_inputs(self.image_processor_tester ,equal_resolution=snake_case ,torchify=snake_case )
for image in image_inputs:
self.assertIsInstance(snake_case ,torch.Tensor )
# Test not batched input
lowercase : Optional[int] = image_processing(image_inputs[0] ,return_tensors="""pt""" ).pixel_values
self.assertEqual(
encoded_images.shape ,(
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["""height"""],
self.image_processor_tester.crop_size["""width"""],
) ,)
# Test batched
lowercase : List[str] = image_processing(snake_case ,return_tensors="""pt""" ).pixel_values
self.assertEqual(
encoded_images.shape ,(
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["""height"""],
self.image_processor_tester.crop_size["""width"""],
) ,)
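

# What these shape assertions boil down to, in one call (added, hedged sketch;
# requires Pillow and torch to actually run):
def _demo_mobilenet_preprocessing():
    image = Image.fromarray(np.zeros((30, 40, 3), dtype=np.uint8))
    processor = MobileNetVaImageProcessor(size={"shortest_edge": 20}, crop_size={"height": 18, "width": 18})
    # resize shortest edge to 20, then center-crop to 18x18
    return processor(image, return_tensors="pt").pixel_values.shape  # torch.Size([1, 3, 18, 18])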
| 20 | 1 |
"""simple docstring"""
from math import pow
def backtrack(
    needed_sum: int,
    power: int,
    current_number: int,
    current_sum: int,
    solutions_count: int,
) -> tuple[int, int]:
    """Recursively count ways to write needed_sum as a sum of distinct `power`-th powers."""
    if current_sum == needed_sum:
        # If the sum of the powers is equal to needed_sum, then we have a solution.
        solutions_count += 1
        return current_sum, solutions_count

    i_to_n = int(pow(current_number, power))
    if current_sum + i_to_n <= needed_sum:
        # If the sum of the powers is less than needed_sum, then continue adding powers.
        current_sum += i_to_n
        current_sum, solutions_count = backtrack(
            needed_sum, power, current_number + 1, current_sum, solutions_count
        )
        current_sum -= i_to_n
    if i_to_n < needed_sum:
        # If the power of i is less than needed_sum, then try with the next power.
        current_sum, solutions_count = backtrack(
            needed_sum, power, current_number + 1, current_sum, solutions_count
        )
    return current_sum, solutions_count


def solve(needed_sum: int, power: int) -> int:
    """Return the number of ways needed_sum can be written as a sum of distinct `power`-th powers."""
    if not (1 <= needed_sum <= 1000 and 2 <= power <= 10):
        raise ValueError(
            "Invalid input\n"
            "needed_sum must be between 1 and 1000, power between 2 and 10."
        )

    return backtrack(needed_sum, power, 1, 0, 0)[1]  # Return the solutions_count
if __name__ == "__main__":
import doctest
doctest.testmod()
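

# Quick sanity checks (added), using the functions defined above:
assert solve(13, 2) == 1  # 13 = 2**2 + 3**2 is the only sum of distinct squares
assert solve(10, 2) == 1  # 10 = 1**2 + 3**2
assert solve(10, 3) == 0  # no sum of distinct cubes hits exactly 10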
| 100 |
"""simple docstring"""
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
logger = logging.get_logger(__name__)
XLM_ROBERTA_XL_PRETRAINED_CONFIG_ARCHIVE_MAP = {
"""facebook/xlm-roberta-xl""": """https://huggingface.co/facebook/xlm-roberta-xl/resolve/main/config.json""",
"""facebook/xlm-roberta-xxl""": """https://huggingface.co/facebook/xlm-roberta-xxl/resolve/main/config.json""",
# See all XLM-RoBERTa-XL models at https://huggingface.co/models?filter=xlm-roberta-xl
}
class XLMRobertaXLConfig(PretrainedConfig):
    model_type = "xlm-roberta-xl"

    def __init__(
        self,
        vocab_size=250880,
        hidden_size=2560,
        num_hidden_layers=36,
        num_attention_heads=32,
        intermediate_size=10240,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=514,
        type_vocab_size=1,
        initializer_range=0.02,
        layer_norm_eps=1e-05,
        pad_token_id=1,
        bos_token_id=0,
        eos_token_id=2,
        position_embedding_type="absolute",
        use_cache=True,
        classifier_dropout=None,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
        self.classifier_dropout = classifier_dropout


class XLMRobertaXLOnnxConfig(OnnxConfig):
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task == "multiple-choice":
            dynamic_axis = {0: "batch", 1: "choice", 2: "sequence"}
        else:
            dynamic_axis = {0: "batch", 1: "sequence"}
        return OrderedDict(
            [
                ("input_ids", dynamic_axis),
                ("attention_mask", dynamic_axis),
            ]
        )
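

# Quick usage sketch (added): a deliberately tiny configuration for smoke tests.
tiny_config = XLMRobertaXLConfig(hidden_size=256, num_hidden_layers=2, num_attention_heads=4, intermediate_size=512)
print(tiny_config.model_type, tiny_config.hidden_size)  # xlm-roberta-xl 256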
| 100 | 1 |
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)
SWINV2_PRETRAINED_CONFIG_ARCHIVE_MAP = {
"microsoft/swinv2-tiny-patch4-window8-256": (
"https://huggingface.co/microsoft/swinv2-tiny-patch4-window8-256/resolve/main/config.json"
),
}
class Swinv2Config(PretrainedConfig):
    model_type = "swinv2"

    attribute_map = {
        "num_attention_heads": "num_heads",
        "num_hidden_layers": "num_layers",
    }

    def __init__(
        self,
        image_size=224,
        patch_size=4,
        num_channels=3,
        embed_dim=96,
        depths=[2, 2, 6, 2],
        num_heads=[3, 6, 12, 24],
        window_size=7,
        mlp_ratio=4.0,
        qkv_bias=True,
        hidden_dropout_prob=0.0,
        attention_probs_dropout_prob=0.0,
        drop_path_rate=0.1,
        hidden_act="gelu",
        use_absolute_embeddings=False,
        initializer_range=0.02,
        layer_norm_eps=1e-5,
        encoder_stride=32,
        **kwargs,
    ):
        super().__init__(**kwargs)
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.embed_dim = embed_dim
        self.depths = depths
        self.num_layers = len(depths)
        self.num_heads = num_heads
        self.window_size = window_size
        self.mlp_ratio = mlp_ratio
        self.qkv_bias = qkv_bias
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.drop_path_rate = drop_path_rate
        self.hidden_act = hidden_act
        self.use_absolute_embeddings = use_absolute_embeddings
        self.layer_norm_eps = layer_norm_eps
        self.initializer_range = initializer_range
        self.encoder_stride = encoder_stride
        # we set the hidden_size attribute in order to make Swinv2 work with VisionEncoderDecoderModel
        # this indicates the channel dimension after the last stage of the model
        self.hidden_size = int(embed_dim * 2 ** (len(depths) - 1))
        self.pretrained_window_sizes = (0, 0, 0, 0)
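

# Quick check (added): the derived hidden_size is embed_dim * 2 ** (num_stages - 1),
# the channel width after the last stage, since channels double at each downsampling step.
demo_config = Swinv2Config()
print(demo_config.hidden_size)  # 96 * 2 ** 3 == 768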
| 139 |
'''simple docstring'''
def is_isogram(string: str) -> bool:
    """An isogram is a word with no repeating letters (checked case-insensitively)."""
    if not all(x.isalpha() for x in string):
        raise ValueError("String must only contain alphabetic characters.")
    letters = sorted(string.lower())
    return len(letters) == len(set(letters))


if __name__ == "__main__":
    input_str = input("Enter a string ").strip()
    isogram = is_isogram(input_str)
    print(f"{input_str} is {'an' if isogram else 'not an'} isogram.")
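

# Quick checks for the function above (added):
assert is_isogram("Uncopyrightable")
assert not is_isogram("allowed")   # "l" repeats
assert not is_isogram("Alphabet")  # "a" repeats once lowercased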
| 139 | 1 |
import contextlib
import faulthandler
import io
import multiprocessing
import os
import platform
import signal
import tempfile
def check_correctness(check_program, timeout, task_id, completion_id):
    """Evaluate one candidate program in a subprocess, bounded by `timeout`."""
    manager = multiprocessing.Manager()
    result = manager.list()
    p = multiprocessing.Process(target=unsafe_execute, args=(check_program, result, timeout))
    p.start()
    p.join(timeout=timeout + 1)
    if p.is_alive():
        p.kill()
    if not result:
        result.append("timed out")
    return {
        "task_id": task_id,
        "passed": result[0] == "passed",
        "result": result[0],
        "completion_id": completion_id,
    }


def unsafe_execute(check_program, result, timeout):
    with create_tempdir():
        # These system calls are needed when cleaning up tempdir.
        import os
        import shutil

        rmtree = shutil.rmtree
        rmdir = os.rmdir
        chdir = os.chdir
        # Disable functionalities that can make destructive changes to the test.
        reliability_guard()
        # Run program.
        try:
            exec_globals = {}
            with swallow_io():
                with time_limit(timeout):
                    exec(check_program, exec_globals)
            result.append("passed")
        except TimeoutException:
            result.append("timed out")
        except BaseException as e:
            result.append(f"failed: {e}")
        # Needed for cleaning up.
        shutil.rmtree = rmtree
        os.rmdir = rmdir
        os.chdir = chdir
@contextlib.contextmanager
def time_limit(seconds):
    def signal_handler(signum, frame):
        raise TimeoutException("Timed out!")

    signal.setitimer(signal.ITIMER_REAL, seconds)
    signal.signal(signal.SIGALRM, signal_handler)
    try:
        yield
    finally:
        signal.setitimer(signal.ITIMER_REAL, 0)


@contextlib.contextmanager
def swallow_io():
    stream = WriteOnlyStringIO()
    with contextlib.redirect_stdout(stream):
        with contextlib.redirect_stderr(stream):
            with redirect_stdin(stream):
                yield


@contextlib.contextmanager
def create_tempdir():
    with tempfile.TemporaryDirectory() as dirname:
        with chdir(dirname):
            yield dirname
class TimeoutException(Exception):
    """Raised by the SIGALRM handler when a program exceeds its time limit."""


class WriteOnlyStringIO(io.StringIO):
    """StringIO that throws an exception when it's read from."""

    def read(self, *args, **kwargs):
        raise OSError

    def readline(self, *args, **kwargs):
        raise OSError

    def readlines(self, *args, **kwargs):
        raise OSError

    def readable(self, *args, **kwargs):
        """Returns True if the IO object can be read."""
        return False


class redirect_stdin(contextlib._RedirectStream):  # type: ignore
    _stream = "stdin"
@contextlib.contextmanager
def chdir(root):
    if root == ".":
        yield
        return
    cwd = os.getcwd()
    os.chdir(root)
    try:
        yield
    except BaseException as exc:
        raise exc
    finally:
        os.chdir(cwd)
def reliability_guard(maximum_memory_bytes=None):
    """Disable destructive functions and limit resources before running untrusted code."""
    if maximum_memory_bytes is not None:
        import resource

        resource.setrlimit(resource.RLIMIT_AS, (maximum_memory_bytes, maximum_memory_bytes))
        resource.setrlimit(resource.RLIMIT_DATA, (maximum_memory_bytes, maximum_memory_bytes))
        if not platform.uname().system == "Darwin":
            resource.setrlimit(resource.RLIMIT_STACK, (maximum_memory_bytes, maximum_memory_bytes))

    faulthandler.disable()

    import builtins

    builtins.exit = None
    builtins.quit = None

    import os

    os.environ["OMP_NUM_THREADS"] = "1"

    os.kill = None
    os.system = None
    os.putenv = None
    os.remove = None
    os.removedirs = None
    os.rmdir = None
    os.fchdir = None
    os.setuid = None
    os.fork = None
    os.forkpty = None
    os.killpg = None
    os.rename = None
    os.renames = None
    os.truncate = None
    os.replace = None
    os.unlink = None
    os.fchmod = None
    os.fchown = None
    os.chmod = None
    os.chown = None
    os.chroot = None
    os.fchdir = None
    os.lchflags = None
    os.lchmod = None
    os.lchown = None
    os.getcwd = None
    os.chdir = None

    import shutil

    shutil.rmtree = None
    shutil.move = None
    shutil.chown = None

    import subprocess

    subprocess.Popen = None  # type: ignore

    __builtins__["help"] = None

    import sys

    sys.modules["ipdb"] = None
    sys.modules["joblib"] = None
    sys.modules["resource"] = None
    sys.modules["psutil"] = None
    sys.modules["tkinter"] = None
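

# The core pattern above -- run untrusted code in a subprocess, bounded by a
# timeout -- reduced to a standalone sketch (added for illustration; the
# `demo_*` names are hypothetical):
def _demo_run(program, result):
    try:
        exec(program, {})
        result.append("passed")
    except BaseException as e:
        result.append(f"failed: {e}")


def demo_check(program, timeout):
    manager = multiprocessing.Manager()
    result = manager.list()
    p = multiprocessing.Process(target=_demo_run, args=(program, result))
    p.start()
    p.join(timeout=timeout)
    if p.is_alive():
        p.kill()
    return result[0] if result else "timed out"


if __name__ == "__main__":
    print(demo_check("assert 1 + 1 == 2", timeout=3.0))  # passed
    print(demo_check("while True: pass", timeout=1.0))   # timed out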
| 370 |
"""simple docstring"""
import inspect
import warnings
from typing import Any, Dict, Optional, Union
from packaging import version
def deprecate(*args, take_from=None, standard_warn=True, stacklevel=2):
    from .. import __version__

    deprecated_kwargs = take_from
    values = ()
    if not isinstance(args[0], tuple):
        args = (args,)

    for attribute, version_name, message in args:
        if version.parse(version.parse(__version__).base_version) >= version.parse(version_name):
            raise ValueError(
                f"The deprecation tuple {(attribute, version_name, message)} should be removed since diffusers'"
                f" version {__version__} is >= {version_name}"
            )

        warning = None
        if isinstance(deprecated_kwargs, dict) and attribute in deprecated_kwargs:
            values += (deprecated_kwargs.pop(attribute),)
            warning = f"The `{attribute}` argument is deprecated and will be removed in version {version_name}."
        elif hasattr(deprecated_kwargs, attribute):
            values += (getattr(deprecated_kwargs, attribute),)
            warning = f"The `{attribute}` attribute is deprecated and will be removed in version {version_name}."
        elif deprecated_kwargs is None:
            warning = f"`{attribute}` is deprecated and will be removed in version {version_name}."

        if warning is not None:
            warning = warning + " " if standard_warn else ""
            warnings.warn(warning + message, FutureWarning, stacklevel=stacklevel)

    if isinstance(deprecated_kwargs, dict) and len(deprecated_kwargs) > 0:
        call_frame = inspect.getouterframes(inspect.currentframe())[1]
        filename = call_frame.filename
        line_number = call_frame.lineno
        function = call_frame.function
        key, value = next(iter(deprecated_kwargs.items()))
        raise TypeError(f"{function} in {filename} line {line_number-1} got an unexpected keyword argument `{key}`")

    if len(values) == 0:
        return
    elif len(values) == 1:
        return values[0]
    return values
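

# Typical call pattern for `deprecate` (added, hedged: this helper only runs
# inside diffusers because of the relative `__version__` import above):
def _example(**kwargs):
    # Pops the legacy kwarg, emits a FutureWarning, and returns its value; the
    # far-future version string keeps the example from tripping the version check.
    return deprecate("scale", "999.0.0", "Pass `guidance_scale` instead.", take_from=kwargs)
# _example(scale=0.5) -> 0.5, plus a FutureWarning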
| 255 | 0 |
import itertools
import random
import unittest
import numpy as np
from transformers import is_speech_available
from transformers.testing_utils import require_torch, require_torchaudio
from ...test_sequence_feature_extraction_common import SequenceFeatureExtractionTestMixin
if is_speech_available():
from transformers import SpeechaTextFeatureExtractor
global_rng = random.Random()


def floats_list(shape, scale=1.0, rng=None, name=None):
    """Creates a random float32 tensor (as nested Python lists)."""
    if rng is None:
        rng = global_rng

    values = []
    for batch_idx in range(shape[0]):
        values.append([])
        for _ in range(shape[1]):
            values[-1].append(rng.random() * scale)

    return values
@require_torch
@require_torchaudio
class SpeechaTextFeatureExtractionTester(unittest.TestCase):
    def __init__(
        self,
        parent,
        batch_size=7,
        min_seq_length=400,
        max_seq_length=2000,
        feature_size=24,
        num_mel_bins=24,
        padding_value=0.0,
        sampling_rate=16000,
        return_attention_mask=True,
        do_normalize=True,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.min_seq_length = min_seq_length
        self.max_seq_length = max_seq_length
        self.seq_length_diff = (self.max_seq_length - self.min_seq_length) // (self.batch_size - 1)
        self.feature_size = feature_size
        self.num_mel_bins = num_mel_bins
        self.padding_value = padding_value
        self.sampling_rate = sampling_rate
        self.return_attention_mask = return_attention_mask
        self.do_normalize = do_normalize

    def prepare_feat_extract_dict(self):
return {
"feature_size": self.feature_size,
"num_mel_bins": self.num_mel_bins,
"padding_value": self.padding_value,
"sampling_rate": self.sampling_rate,
"return_attention_mask": self.return_attention_mask,
"do_normalize": self.do_normalize,
}
    def prepare_inputs_for_common(self, equal_length=False, numpify=False):
        def _flatten(list_of_lists):
            return list(itertools.chain(*list_of_lists))

        if equal_length:
            speech_inputs = [floats_list((self.max_seq_length, self.feature_size)) for _ in range(self.batch_size)]
        else:
            # make sure that inputs increase in size
            speech_inputs = [
                floats_list((x, self.feature_size))
                for x in range(self.min_seq_length, self.max_seq_length, self.seq_length_diff)
            ]
        if numpify:
            speech_inputs = [np.asarray(x) for x in speech_inputs]
        return speech_inputs
@require_torch
@require_torchaudio
class SpeechaTextFeatureExtractionTest(SequenceFeatureExtractionTestMixin, unittest.TestCase):
    feature_extraction_class = SpeechaTextFeatureExtractor if is_speech_available() else None

    def setUp(self):
        self.feat_extract_tester = SpeechaTextFeatureExtractionTester(self)
    def _check_zero_mean_unit_variance(self, input_vector):
        self.assertTrue(np.all(np.mean(input_vector, axis=0) < 1e-3))
        self.assertTrue(np.all(np.abs(np.var(input_vector, axis=0) - 1) < 1e-3))
def A_ ( self : List[str] ) -> Dict:
# Tests that all call wrap to encode_plus and batch_encode_plus
lowerCamelCase__ : List[Any] = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
# create three inputs of length 800, 1000, and 1200
lowerCamelCase__ : str = [floats_list((1, x) )[0] for x in range(800 , 1400 , 200 )]
lowerCamelCase__ : Any = [np.asarray(UpperCAmelCase ) for speech_input in speech_inputs]
# Test feature size
lowerCamelCase__ : Optional[int] = feature_extractor(UpperCAmelCase , padding=UpperCAmelCase , return_tensors='np' ).input_features
self.assertTrue(input_features.ndim == 3 )
self.assertTrue(input_features.shape[-1] == feature_extractor.feature_size )
# Test not batched input
lowerCamelCase__ : Optional[Any] = feature_extractor(speech_inputs[0] , return_tensors='np' ).input_features
lowerCamelCase__ : List[Any] = feature_extractor(np_speech_inputs[0] , return_tensors='np' ).input_features
self.assertTrue(np.allclose(UpperCAmelCase , UpperCAmelCase , atol=1e-3 ) )
# Test batched
lowerCamelCase__ : List[Any] = feature_extractor(UpperCAmelCase , return_tensors='np' ).input_features
lowerCamelCase__ : Union[str, Any] = feature_extractor(UpperCAmelCase , return_tensors='np' ).input_features
for enc_seq_a, enc_seq_a in zip(UpperCAmelCase , UpperCAmelCase ):
self.assertTrue(np.allclose(UpperCAmelCase , UpperCAmelCase , atol=1e-3 ) )
# Test 2-D numpy arrays are batched.
lowerCamelCase__ : Union[str, Any] = [floats_list((1, x) )[0] for x in (800, 800, 800)]
lowerCamelCase__ : str = np.asarray(UpperCAmelCase )
lowerCamelCase__ : List[str] = feature_extractor(UpperCAmelCase , return_tensors='np' ).input_features
lowerCamelCase__ : Tuple = feature_extractor(UpperCAmelCase , return_tensors='np' ).input_features
for enc_seq_a, enc_seq_a in zip(UpperCAmelCase , UpperCAmelCase ):
self.assertTrue(np.allclose(UpperCAmelCase , UpperCAmelCase , atol=1e-3 ) )
def A_ ( self : Any ) -> List[Any]:
lowerCamelCase__ : Union[str, Any] = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
lowerCamelCase__ : str = [floats_list((1, x) )[0] for x in range(800 , 1400 , 200 )]
lowerCamelCase__ : str = ['longest', 'max_length', 'do_not_pad']
lowerCamelCase__ : Tuple = [None, 16, None]
for max_length, padding in zip(UpperCAmelCase , UpperCAmelCase ):
lowerCamelCase__ : Any = feature_extractor(
UpperCAmelCase , padding=UpperCAmelCase , max_length=UpperCAmelCase , return_attention_mask=UpperCAmelCase )
lowerCamelCase__ : Optional[Any] = inputs.input_features
lowerCamelCase__ : List[str] = inputs.attention_mask
lowerCamelCase__ : Union[str, Any] = [np.sum(UpperCAmelCase ) for x in attention_mask]
self._check_zero_mean_unit_variance(input_features[0][: fbank_feat_lengths[0]] )
self._check_zero_mean_unit_variance(input_features[1][: fbank_feat_lengths[1]] )
self._check_zero_mean_unit_variance(input_features[2][: fbank_feat_lengths[2]] )
def A_ ( self : int ) -> Any:
lowerCamelCase__ : Optional[Any] = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
lowerCamelCase__ : Optional[Any] = [floats_list((1, x) )[0] for x in range(800 , 1400 , 200 )]
lowerCamelCase__ : Optional[Any] = ['longest', 'max_length', 'do_not_pad']
lowerCamelCase__ : str = [None, 16, None]
for max_length, padding in zip(UpperCAmelCase , UpperCAmelCase ):
lowerCamelCase__ : Any = feature_extractor(
UpperCAmelCase , max_length=UpperCAmelCase , padding=UpperCAmelCase , return_tensors='np' , return_attention_mask=UpperCAmelCase )
lowerCamelCase__ : Optional[int] = inputs.input_features
lowerCamelCase__ : List[Any] = inputs.attention_mask
lowerCamelCase__ : Any = [np.sum(UpperCAmelCase ) for x in attention_mask]
self._check_zero_mean_unit_variance(input_features[0][: fbank_feat_lengths[0]] )
self.assertTrue(input_features[0][fbank_feat_lengths[0] :].sum() < 1e-6 )
self._check_zero_mean_unit_variance(input_features[1][: fbank_feat_lengths[1]] )
self.assertTrue(input_features[0][fbank_feat_lengths[1] :].sum() < 1e-6 )
self._check_zero_mean_unit_variance(input_features[2][: fbank_feat_lengths[2]] )
def A_ ( self : int ) -> Tuple:
lowerCamelCase__ : str = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
lowerCamelCase__ : Union[str, Any] = [floats_list((1, x) )[0] for x in range(800 , 1400 , 200 )]
lowerCamelCase__ : Dict = feature_extractor(
UpperCAmelCase , padding='max_length' , max_length=4 , truncation=UpperCAmelCase , return_tensors='np' , return_attention_mask=UpperCAmelCase , )
lowerCamelCase__ : Optional[Any] = inputs.input_features
lowerCamelCase__ : List[str] = inputs.attention_mask
lowerCamelCase__ : Union[str, Any] = np.sum(attention_mask == 1 , axis=1 )
self._check_zero_mean_unit_variance(input_features[0, : fbank_feat_lengths[0]] )
self._check_zero_mean_unit_variance(input_features[1] )
self._check_zero_mean_unit_variance(input_features[2] )
def A_ ( self : Optional[Any] ) -> Optional[Any]:
lowerCamelCase__ : List[Any] = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
lowerCamelCase__ : Union[str, Any] = [floats_list((1, x) )[0] for x in range(800 , 1400 , 200 )]
lowerCamelCase__ : List[str] = feature_extractor(
UpperCAmelCase , padding='longest' , max_length=4 , truncation=UpperCAmelCase , return_tensors='np' , return_attention_mask=UpperCAmelCase , )
lowerCamelCase__ : int = inputs.input_features
lowerCamelCase__ : List[str] = inputs.attention_mask
lowerCamelCase__ : List[Any] = np.sum(attention_mask == 1 , axis=1 )
self._check_zero_mean_unit_variance(input_features[0, : fbank_feat_lengths[0]] )
self._check_zero_mean_unit_variance(input_features[1, : fbank_feat_lengths[1]] )
self._check_zero_mean_unit_variance(input_features[2] )
# make sure that if max_length < longest -> then pad to max_length
self.assertEqual(input_features.shape , (3, 4, 24) )
lowerCamelCase__ : int = [floats_list((1, x) )[0] for x in range(800 , 1400 , 200 )]
lowerCamelCase__ : str = feature_extractor(
UpperCAmelCase , padding='longest' , max_length=16 , truncation=UpperCAmelCase , return_tensors='np' , return_attention_mask=UpperCAmelCase , )
lowerCamelCase__ : Optional[Any] = inputs.input_features
lowerCamelCase__ : Optional[int] = inputs.attention_mask
lowerCamelCase__ : Any = np.sum(attention_mask == 1 , axis=1 )
self._check_zero_mean_unit_variance(input_features[0, : fbank_feat_lengths[0]] )
self._check_zero_mean_unit_variance(input_features[1, : fbank_feat_lengths[1]] )
self._check_zero_mean_unit_variance(input_features[2] )
# make sure that if max_length < longest -> then pad to max_length
self.assertEqual(input_features.shape , (3, 6, 24) )
def A_ ( self : Optional[Any] ) -> Optional[int]:
import torch
lowerCamelCase__ : Union[str, Any] = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
lowerCamelCase__ : Dict = np.random.rand(100 , 32 ).astype(np.floataa )
lowerCamelCase__ : Optional[int] = np_speech_inputs.tolist()
for inputs in [py_speech_inputs, np_speech_inputs]:
lowerCamelCase__ : Union[str, Any] = feature_extractor.pad([{'input_features': inputs}] , return_tensors='np' )
self.assertTrue(np_processed.input_features.dtype == np.floataa )
lowerCamelCase__ : List[Any] = feature_extractor.pad([{'input_features': inputs}] , return_tensors='pt' )
self.assertTrue(pt_processed.input_features.dtype == torch.floataa )
    def _load_datasamples(self, num_samples):
        from datasets import load_dataset

        ds = load_dataset("hf-internal-testing/librispeech_asr_dummy", "clean", split="validation")
        # automatic decoding with librispeech
        speech_samples = ds.sort("id").select(range(num_samples))[:num_samples]["audio"]
        return [x["array"] for x in speech_samples]
def A_ ( self : str ) -> Tuple:
# fmt: off
lowerCamelCase__ : Optional[int] = np.array([
-1.5_7_4_5, -1.7_7_1_3, -1.7_0_2_0, -1.6_0_6_9, -1.2_2_5_0, -1.1_1_0_5, -0.9_0_7_2, -0.8_2_4_1,
-1.2_3_1_0, -0.8_0_9_8, -0.3_3_2_0, -0.4_1_0_1, -0.7_9_8_5, -0.4_9_9_6, -0.8_2_1_3, -0.9_1_2_8,
-1.0_4_2_0, -1.1_2_8_6, -1.0_4_4_0, -0.7_9_9_9, -0.8_4_0_5, -1.2_2_7_5, -1.5_4_4_3, -1.4_6_2_5,
] )
# fmt: on
lowerCamelCase__ : Any = self._load_datasamples(1 )
lowerCamelCase__ : List[str] = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
lowerCamelCase__ : Dict = feature_extractor(UpperCAmelCase , return_tensors='pt' ).input_features
self.assertEquals(input_features.shape , (1, 584, 24) )
self.assertTrue(np.allclose(input_features[0, 0, :30] , UpperCAmelCase , atol=1e-4 ) )
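

# What `_check_zero_mean_unit_variance` asserts, as a standalone check of
# per-feature normalization (added for illustration):
def _demo_normalize_check():
    x = np.random.randn(400, 24) * 3.0 + 1.5  # un-normalized "features"
    x = (x - x.mean(axis=0)) / np.sqrt(x.var(axis=0) + 1e-7)
    assert np.all(np.abs(x.mean(axis=0)) < 1e-3)     # zero mean per feature
    assert np.all(np.abs(x.var(axis=0) - 1) < 1e-3)  # unit variance per feature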
| 50 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)
NLLB_MOE_PRETRAINED_CONFIG_ARCHIVE_MAP = {
"""facebook/nllb-moe-54B""": """https://huggingface.co/facebook/nllb-moe-54b/resolve/main/config.json""",
}
class NllbMoeConfig(PretrainedConfig):
    model_type = "nllb-moe"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {"num_attention_heads": "encoder_attention_heads", "hidden_size": "d_model"}

    def __init__(self, vocab_size=128112, max_position_embeddings=1024, encoder_layers=12, encoder_ffn_dim=4096,
                 encoder_attention_heads=16, decoder_layers=12, decoder_ffn_dim=4096, decoder_attention_heads=16,
                 encoder_layerdrop=0.05, decoder_layerdrop=0.05, use_cache=True, is_encoder_decoder=True,
                 activation_function="relu", d_model=1024, dropout=0.1, attention_dropout=0.1,
                 activation_dropout=0.0, init_std=0.02, decoder_start_token_id=2, scale_embedding=True,
                 router_bias=False, router_dtype="float32", router_ignore_padding_tokens=False, num_experts=128,
                 expert_capacity=64, encoder_sparse_step=4, decoder_sparse_step=4, router_z_loss_coef=0.001,
                 router_aux_loss_coef=0.001, second_expert_policy="all",
                 normalize_router_prob_before_dropping=False, batch_prioritized_routing=False,
                 moe_eval_capacity_token_fraction=1.0, moe_token_dropout=0.2, pad_token_id=1, bos_token_id=0,
                 eos_token_id=2, output_router_logits=False, **kwargs):
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.d_model = d_model
        self.encoder_ffn_dim = encoder_ffn_dim
        self.encoder_layers = encoder_layers
        self.encoder_attention_heads = encoder_attention_heads
        self.decoder_ffn_dim = decoder_ffn_dim
        self.decoder_layers = decoder_layers
        self.decoder_attention_heads = decoder_attention_heads
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.activation_function = activation_function
        self.init_std = init_std
        self.encoder_layerdrop = encoder_layerdrop
        self.decoder_layerdrop = decoder_layerdrop
        self.use_cache = use_cache
        self.num_hidden_layers = encoder_layers
        self.scale_embedding = scale_embedding  # scale factor will be sqrt(d_model) if True
        self.router_z_loss_coef = router_z_loss_coef
        self.router_aux_loss_coef = router_aux_loss_coef
        self.decoder_sparse_step = decoder_sparse_step
        self.encoder_sparse_step = encoder_sparse_step
        self.num_experts = num_experts
        self.expert_capacity = expert_capacity
        self.router_bias = router_bias
        if router_dtype not in ["float32", "float16", "bfloat16"]:
            raise ValueError(f"`router_dtype` must be one of 'float32', 'float16' or 'bfloat16', got {router_dtype}")
        self.router_dtype = router_dtype
        self.router_ignore_padding_tokens = router_ignore_padding_tokens
        self.batch_prioritized_routing = batch_prioritized_routing
        self.second_expert_policy = second_expert_policy
        self.normalize_router_prob_before_dropping = normalize_router_prob_before_dropping
        self.moe_eval_capacity_token_fraction = moe_eval_capacity_token_fraction
        self.moe_token_dropout = moe_token_dropout
        self.output_router_logits = output_router_logits
        super().__init__(
            pad_token_id=pad_token_id,
            bos_token_id=bos_token_id,
            eos_token_id=eos_token_id,
            is_encoder_decoder=is_encoder_decoder,
            decoder_start_token_id=decoder_start_token_id,
            **kwargs,
        )
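

# Quick usage sketch (added): a deliberately tiny mixture-of-experts configuration.
tiny_moe = NllbMoeConfig(d_model=128, encoder_layers=2, decoder_layers=2, num_experts=4, expert_capacity=16)
print(tiny_moe.router_dtype)  # "float32" -- only float32/float16/bfloat16 pass the check above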
| 245 | 0 |
def permute(nums: list[int]) -> list[list[int]]:
    """Return all permutations of nums via recursive rotation."""
    result = []
    if len(nums) == 1:
        return [nums.copy()]
    for _ in range(len(nums)):
        n = nums.pop(0)
        permutations = permute(nums)
        for perm in permutations:
            perm.append(n)
        result.extend(permutations)
        nums.append(n)
    return result


def permute2(nums):
    """Return all permutations of nums via in-place swaps and backtracking."""

    def backtrack(start):
        if start == len(nums) - 1:
            output.append(nums[:])
        else:
            for i in range(start, len(nums)):
                nums[start], nums[i] = nums[i], nums[start]
                backtrack(start + 1)
                nums[start], nums[i] = nums[i], nums[start]  # backtrack

    output = []
    backtrack(0)
    return output


if __name__ == "__main__":
    import doctest

    # use res to print the data in permute2 function
    res = permute2([1, 2, 3])
    print(res)
    doctest.testmod()
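

# Quick cross-check (added): both implementations agree and return n! orderings.
assert sorted(permute([1, 2, 3])) == sorted(permute2([1, 2, 3]))
assert len(permute2([1, 2, 3, 4])) == 24  # 4!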
| 366 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
"""configuration_time_series_transformer""": [
"""TIME_SERIES_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP""",
"""TimeSeriesTransformerConfig""",
],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_time_series_transformer"] = [
"""TIME_SERIES_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""TimeSeriesTransformerForPrediction""",
"""TimeSeriesTransformerModel""",
"""TimeSeriesTransformerPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_time_series_transformer import (
TIME_SERIES_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP,
TimeSeriesTransformerConfig,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_time_series_transformer import (
TIME_SERIES_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
TimeSeriesTransformerForPrediction,
TimeSeriesTransformerModel,
TimeSeriesTransformerPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
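

# What `_LazyModule` buys you, as a simplified stand-in (added; transformers'
# real implementation also handles `dir()`, pickling, and error messages):
# attribute access triggers the real import, so the top-level import stays cheap.
import importlib
import types


class _DemoLazyModule(types.ModuleType):
    def __init__(self, name, import_structure):
        super().__init__(name)
        self._import_structure = import_structure

    def __getattr__(self, attr):
        for submodule, names in self._import_structure.items():
            if attr in names:
                module = importlib.import_module(f"{self.__name__}.{submodule}")
                return getattr(module, attr)
        raise AttributeError(f"module {self.__name__!r} has no attribute {attr!r}")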
| 91 | 0 |
"""simple docstring"""
from __future__ import annotations
def __lowerCAmelCase (_UpperCamelCase ):
return len(set(_UpperCamelCase ) ) == len(_UpperCamelCase )
if __name__ == "__main__":
import doctest
    doctest.testmod()
| 86 |
'''simple docstring'''
import tempfile
import unittest
import numpy as np
from diffusers import (
DDIMScheduler,
DPMSolverMultistepScheduler,
EulerAncestralDiscreteScheduler,
EulerDiscreteScheduler,
LMSDiscreteScheduler,
OnnxStableDiffusionPipeline,
PNDMScheduler,
)
from diffusers.utils.testing_utils import is_onnx_available, nightly, require_onnxruntime, require_torch_gpu
from ..test_pipelines_onnx_common import OnnxPipelineTesterMixin
if is_onnx_available():
import onnxruntime as ort
class OnnxStableDiffusionPipelineFastTests(OnnxPipelineTesterMixin, unittest.TestCase):
    hub_checkpoint = "hf-internal-testing/tiny-random-OnnxStableDiffusionPipeline"

    def get_dummy_inputs(self, seed=0):
        generator = np.random.RandomState(seed)
        inputs = {
            "prompt": "A painting of a squirrel eating a burger",
            "generator": generator,
            "num_inference_steps": 2,
            "guidance_scale": 7.5,
            "output_type": "numpy",
        }
        return inputs
def snake_case_ ( self ) -> Optional[Any]:
'''simple docstring'''
A_ = OnnxStableDiffusionPipeline.from_pretrained(self.hub_checkpoint , provider="""CPUExecutionProvider""" )
pipe.set_progress_bar_config(disable=UpperCamelCase__ )
A_ = self.get_dummy_inputs()
A_ = pipe(**UpperCamelCase__ ).images
A_ = image[0, -3:, -3:, -1]
assert image.shape == (1, 128, 128, 3)
A_ = np.array([0.65072, 0.58492, 0.48219, 0.55521, 0.53180, 0.55939, 0.50697, 0.39800, 0.46455] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
def snake_case_ ( self ) -> List[str]:
'''simple docstring'''
A_ = OnnxStableDiffusionPipeline.from_pretrained(self.hub_checkpoint , provider="""CPUExecutionProvider""" )
A_ = PNDMScheduler.from_config(pipe.scheduler.config , skip_prk_steps=UpperCamelCase__ )
pipe.set_progress_bar_config(disable=UpperCamelCase__ )
A_ = self.get_dummy_inputs()
A_ = pipe(**UpperCamelCase__ ).images
A_ = image[0, -3:, -3:, -1]
assert image.shape == (1, 128, 128, 3)
A_ = np.array([0.65863, 0.59425, 0.49326, 0.56313, 0.53875, 0.56627, 0.51065, 0.39777, 0.46330] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
def snake_case_ ( self ) -> List[str]:
'''simple docstring'''
A_ = OnnxStableDiffusionPipeline.from_pretrained(self.hub_checkpoint , provider="""CPUExecutionProvider""" )
A_ = LMSDiscreteScheduler.from_config(pipe.scheduler.config )
pipe.set_progress_bar_config(disable=UpperCamelCase__ )
A_ = self.get_dummy_inputs()
A_ = pipe(**UpperCamelCase__ ).images
A_ = image[0, -3:, -3:, -1]
assert image.shape == (1, 128, 128, 3)
A_ = np.array([0.53755, 0.60786, 0.47402, 0.49488, 0.51869, 0.49819, 0.47985, 0.38957, 0.44279] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
def snake_case_ ( self ) -> Optional[int]:
'''simple docstring'''
A_ = OnnxStableDiffusionPipeline.from_pretrained(self.hub_checkpoint , provider="""CPUExecutionProvider""" )
A_ = EulerDiscreteScheduler.from_config(pipe.scheduler.config )
pipe.set_progress_bar_config(disable=UpperCamelCase__ )
A_ = self.get_dummy_inputs()
A_ = pipe(**UpperCamelCase__ ).images
A_ = image[0, -3:, -3:, -1]
assert image.shape == (1, 128, 128, 3)
A_ = np.array([0.53755, 0.60786, 0.47402, 0.49488, 0.51869, 0.49819, 0.47985, 0.38957, 0.44279] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
def snake_case_ ( self ) -> List[Any]:
'''simple docstring'''
A_ = OnnxStableDiffusionPipeline.from_pretrained(self.hub_checkpoint , provider="""CPUExecutionProvider""" )
A_ = EulerAncestralDiscreteScheduler.from_config(pipe.scheduler.config )
pipe.set_progress_bar_config(disable=UpperCamelCase__ )
A_ = self.get_dummy_inputs()
A_ = pipe(**UpperCamelCase__ ).images
A_ = image[0, -3:, -3:, -1]
assert image.shape == (1, 128, 128, 3)
A_ = np.array([0.53817, 0.60812, 0.47384, 0.49530, 0.51894, 0.49814, 0.47984, 0.38958, 0.44271] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
def snake_case_ ( self ) -> List[str]:
'''simple docstring'''
A_ = OnnxStableDiffusionPipeline.from_pretrained(self.hub_checkpoint , provider="""CPUExecutionProvider""" )
A_ = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config )
pipe.set_progress_bar_config(disable=UpperCamelCase__ )
A_ = self.get_dummy_inputs()
A_ = pipe(**UpperCamelCase__ ).images
A_ = image[0, -3:, -3:, -1]
assert image.shape == (1, 128, 128, 3)
A_ = np.array([0.53895, 0.60808, 0.47933, 0.49608, 0.51886, 0.49950, 0.48053, 0.38957, 0.44200] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
def snake_case_ ( self ) -> List[Any]:
'''simple docstring'''
A_ = OnnxStableDiffusionPipeline.from_pretrained(self.hub_checkpoint , provider="""CPUExecutionProvider""" )
pipe.set_progress_bar_config(disable=UpperCamelCase__ )
A_ = self.get_dummy_inputs()
A_ = 3 * [inputs["""prompt"""]]
# forward
A_ = pipe(**UpperCamelCase__ )
A_ = output.images[0, -3:, -3:, -1]
A_ = self.get_dummy_inputs()
A_ = 3 * [inputs.pop("""prompt""" )]
A_ = pipe.tokenizer(
UpperCamelCase__ , padding="""max_length""" , max_length=pipe.tokenizer.model_max_length , truncation=UpperCamelCase__ , return_tensors="""np""" , )
A_ = text_inputs["""input_ids"""]
A_ = pipe.text_encoder(input_ids=text_inputs.astype(np.intaa ) )[0]
A_ = prompt_embeds
# forward
A_ = pipe(**UpperCamelCase__ )
A_ = output.images[0, -3:, -3:, -1]
assert np.abs(image_slice_a.flatten() - image_slice_a.flatten() ).max() < 1e-4
def snake_case_ ( self ) -> List[str]:
'''simple docstring'''
A_ = OnnxStableDiffusionPipeline.from_pretrained(self.hub_checkpoint , provider="""CPUExecutionProvider""" )
pipe.set_progress_bar_config(disable=UpperCamelCase__ )
A_ = self.get_dummy_inputs()
A_ = 3 * ["""this is a negative prompt"""]
A_ = negative_prompt
A_ = 3 * [inputs["""prompt"""]]
# forward
A_ = pipe(**UpperCamelCase__ )
A_ = output.images[0, -3:, -3:, -1]
A_ = self.get_dummy_inputs()
A_ = 3 * [inputs.pop("""prompt""" )]
A_ = []
for p in [prompt, negative_prompt]:
A_ = pipe.tokenizer(
UpperCamelCase__ , padding="""max_length""" , max_length=pipe.tokenizer.model_max_length , truncation=UpperCamelCase__ , return_tensors="""np""" , )
A_ = text_inputs["""input_ids"""]
embeds.append(pipe.text_encoder(input_ids=text_inputs.astype(np.intaa ) )[0] )
A_ , A_ = embeds
# forward
A_ = pipe(**UpperCamelCase__ )
A_ = output.images[0, -3:, -3:, -1]
assert np.abs(image_slice_a.flatten() - image_slice_a.flatten() ).max() < 1e-4
@nightly
@require_onnxruntime
@require_torch_gpu
class A__ ( unittest.TestCase ):
@property
def snake_case_ ( self ) -> List[Any]:
'''simple docstring'''
return (
"CUDAExecutionProvider",
{
"gpu_mem_limit": "15000000000", # 15GB
"arena_extend_strategy": "kSameAsRequested",
},
)
@property
def snake_case_ ( self ) -> Union[str, Any]:
'''simple docstring'''
A_ = ort.SessionOptions()
A_ = False
return options
def snake_case_ ( self ) -> Optional[int]:
'''simple docstring'''
# using the PNDM scheduler by default
A_ = OnnxStableDiffusionPipeline.from_pretrained(
"""CompVis/stable-diffusion-v1-4""" , revision="""onnx""" , safety_checker=UpperCamelCase__ , feature_extractor=UpperCamelCase__ , provider=self.gpu_provider , sess_options=self.gpu_options , )
sd_pipe.set_progress_bar_config(disable=UpperCamelCase__ )
A_ = """A painting of a squirrel eating a burger"""
np.random.seed(0 )
A_ = sd_pipe([prompt] , guidance_scale=6.0 , num_inference_steps=10 , output_type="""np""" )
A_ = output.images
A_ = image[0, -3:, -3:, -1]
assert image.shape == (1, 512, 512, 3)
A_ = np.array([0.0452, 0.0390, 0.0087, 0.0350, 0.0617, 0.0364, 0.0544, 0.0523, 0.0720] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-3
def snake_case_ ( self ) -> Any:
'''simple docstring'''
A_ = DDIMScheduler.from_pretrained(
"""runwayml/stable-diffusion-v1-5""" , subfolder="""scheduler""" , revision="""onnx""" )
A_ = OnnxStableDiffusionPipeline.from_pretrained(
"""runwayml/stable-diffusion-v1-5""" , revision="""onnx""" , scheduler=UpperCamelCase__ , safety_checker=UpperCamelCase__ , feature_extractor=UpperCamelCase__ , provider=self.gpu_provider , sess_options=self.gpu_options , )
sd_pipe.set_progress_bar_config(disable=UpperCamelCase__ )
A_ = """open neural network exchange"""
A_ = np.random.RandomState(0 )
A_ = sd_pipe([prompt] , guidance_scale=7.5 , num_inference_steps=10 , generator=UpperCamelCase__ , output_type="""np""" )
A_ = output.images
A_ = image[0, -3:, -3:, -1]
assert image.shape == (1, 512, 512, 3)
A_ = np.array([0.2867, 0.1974, 0.1481, 0.7294, 0.7251, 0.6667, 0.4194, 0.5642, 0.6486] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-3
def snake_case_ ( self ) -> Dict:
'''simple docstring'''
A_ = LMSDiscreteScheduler.from_pretrained(
"""runwayml/stable-diffusion-v1-5""" , subfolder="""scheduler""" , revision="""onnx""" )
A_ = OnnxStableDiffusionPipeline.from_pretrained(
"""runwayml/stable-diffusion-v1-5""" , revision="""onnx""" , scheduler=UpperCamelCase__ , safety_checker=UpperCamelCase__ , feature_extractor=UpperCamelCase__ , provider=self.gpu_provider , sess_options=self.gpu_options , )
sd_pipe.set_progress_bar_config(disable=UpperCamelCase__ )
A_ = """open neural network exchange"""
A_ = np.random.RandomState(0 )
A_ = sd_pipe([prompt] , guidance_scale=7.5 , num_inference_steps=10 , generator=UpperCamelCase__ , output_type="""np""" )
A_ = output.images
A_ = image[0, -3:, -3:, -1]
assert image.shape == (1, 512, 512, 3)
A_ = np.array([0.2306, 0.1959, 0.1593, 0.6549, 0.6394, 0.5408, 0.5065, 0.6010, 0.6161] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-3
def snake_case_ ( self ) -> Optional[Any]:
'''simple docstring'''
A_ = 0
def test_callback_fn(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ) -> None:
A_ = True
nonlocal number_of_steps
number_of_steps += 1
if step == 0:
assert latents.shape == (1, 4, 64, 64)
A_ = latents[0, -3:, -3:, -1]
A_ = np.array(
[-0.6772, -0.3835, -1.2456, 0.1905, -1.0974, 0.6967, -1.9353, 0.0178, 1.0167] )
assert np.abs(latents_slice.flatten() - expected_slice ).max() < 1e-3
elif step == 5:
assert latents.shape == (1, 4, 64, 64)
A_ = latents[0, -3:, -3:, -1]
A_ = np.array(
[-0.3351, 0.2241, -0.1837, -0.2325, -0.6577, 0.3393, -0.0241, 0.5899, 1.3875] )
assert np.abs(latents_slice.flatten() - expected_slice ).max() < 1e-3
A_ = False
A_ = OnnxStableDiffusionPipeline.from_pretrained(
"""runwayml/stable-diffusion-v1-5""" , revision="""onnx""" , safety_checker=UpperCamelCase__ , feature_extractor=UpperCamelCase__ , provider=self.gpu_provider , sess_options=self.gpu_options , )
pipe.set_progress_bar_config(disable=UpperCamelCase__ )
A_ = """Andromeda galaxy in a bottle"""
A_ = np.random.RandomState(0 )
pipe(
prompt=UpperCamelCase__ , num_inference_steps=5 , guidance_scale=7.5 , generator=UpperCamelCase__ , callback=UpperCamelCase__ , callback_steps=1 , )
assert test_callback_fn.has_been_called
assert number_of_steps == 6
def snake_case_ ( self ) -> Tuple:
'''simple docstring'''
A_ = OnnxStableDiffusionPipeline.from_pretrained(
"""runwayml/stable-diffusion-v1-5""" , revision="""onnx""" , safety_checker=UpperCamelCase__ , feature_extractor=UpperCamelCase__ , provider=self.gpu_provider , sess_options=self.gpu_options , )
assert isinstance(UpperCamelCase__ , UpperCamelCase__ )
assert pipe.safety_checker is None
A_ = pipe("""example prompt""" , num_inference_steps=2 ).images[0]
assert image is not None
# check that there's no error when saving a pipeline with one of the models being None
with tempfile.TemporaryDirectory() as tmpdirname:
pipe.save_pretrained(UpperCamelCase__ )
A_ = OnnxStableDiffusionPipeline.from_pretrained(UpperCamelCase__ )
# sanity check that the pipeline still works
assert pipe.safety_checker is None
A_ = pipe("""example prompt""" , num_inference_steps=2 ).images[0]
assert image is not None
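

# The prompt-embeds path the tests above exercise, as a hedged standalone sketch
# (added; "path/to/onnx-sd" is a placeholder for a local ONNX export):
def _demo_prompt_embeds():
    pipe = OnnxStableDiffusionPipeline.from_pretrained("path/to/onnx-sd", provider="CPUExecutionProvider")
    text_inputs = pipe.tokenizer(
        ["a photo of an astronaut"],
        padding="max_length",
        max_length=pipe.tokenizer.model_max_length,
        truncation=True,
        return_tensors="np",
    )
    prompt_embeds = pipe.text_encoder(input_ids=text_inputs.input_ids.astype(np.int32))[0]
    return pipe(prompt_embeds=prompt_embeds, num_inference_steps=2).images[0]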
| 162 | 0 |
from __future__ import annotations
def check_polygon(nums: list[float]) -> bool:
    """A polygon can exist only if its longest side is shorter than the sum of the rest."""
    if len(nums) < 2:
        raise ValueError("Monogons and Digons are not polygons in the Euclidean space")
    if any(i <= 0 for i in nums):
        raise ValueError("All values must be greater than 0")
    copy_nums = nums.copy()
    copy_nums.sort()
    return copy_nums[-1] < sum(copy_nums[:-1])
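

# Worked examples (added): the longest side must be strictly shorter than the
# sum of the remaining sides.
assert check_polygon([6, 10, 5])         # 10 < 6 + 5
assert not check_polygon([3, 7, 13, 2])  # 13 >= 3 + 7 + 2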
if __name__ == "__main__":
import doctest
    doctest.testmod()
| 359 |
from typing import TYPE_CHECKING
# rely on isort to merge the imports
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
'configuration_informer': [
'INFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP',
'InformerConfig',
],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_informer"] = [
'INFORMER_PRETRAINED_MODEL_ARCHIVE_LIST',
'InformerForPrediction',
'InformerModel',
'InformerPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_informer import INFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, InformerConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_informer import (
INFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
InformerForPrediction,
InformerModel,
InformerPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 151 | 0 |
"""simple docstring"""
from pathlib import Path
import fire
from tqdm import tqdm
def _lowerCAmelCase ( UpperCamelCase_="ro" , UpperCamelCase_="en" , UpperCamelCase_="wmt16" , UpperCamelCase_=None ):
try:
import datasets
except (ModuleNotFoundError, ImportError):
raise ImportError("""run pip install datasets""" )
__SCREAMING_SNAKE_CASE = f"{src_lang}-{tgt_lang}"
print(f"Converting {dataset}-{pair}" )
__SCREAMING_SNAKE_CASE = datasets.load_dataset(UpperCamelCase_ , UpperCamelCase_ )
if save_dir is None:
__SCREAMING_SNAKE_CASE = f"{dataset}-{pair}"
__SCREAMING_SNAKE_CASE = Path(UpperCamelCase_ )
save_dir.mkdir(exist_ok=UpperCamelCase_ )
for split in ds.keys():
print(f"Splitting {split} with {ds[split].num_rows} records" )
# to save to val.source, val.target like summary datasets
__SCREAMING_SNAKE_CASE = """val""" if split == """validation""" else split
__SCREAMING_SNAKE_CASE = save_dir.joinpath(f"{fn}.source" )
__SCREAMING_SNAKE_CASE = save_dir.joinpath(f"{fn}.target" )
__SCREAMING_SNAKE_CASE = src_path.open("""w+""" )
__SCREAMING_SNAKE_CASE = tgt_path.open("""w+""" )
# reader is the bottleneck so writing one record at a time doesn't slow things down
for x in tqdm(ds[split] ):
__SCREAMING_SNAKE_CASE = x["""translation"""]
src_fp.write(ex[src_lang] + """\n""" )
tgt_fp.write(ex[tgt_lang] + """\n""" )
print(f"Saved {dataset} dataset to {save_dir}" )
if __name__ == "__main__":
fire.Fire(download_wmt_dataset)
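
# Typical invocation (added), assuming the script above is saved as download_wmt.py;
# fire maps each keyword argument of download_wmt_dataset to a CLI flag:
#   python download_wmt.py --src_lang ro --tgt_lang en --dataset wmt16 --save_dir wmt16-ro-en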
| 100 |
"""simple docstring"""
__magic_name__ = "Tobias Carryer"
from time import time
class LinearCongruentialGenerator:
    """A pseudorandom number generator: x_{n+1} = (multiplier * x_n + increment) mod modulo."""

    def __init__(self, multiplier, increment, modulo, seed=int(time())):  # noqa: B008
        self.multiplier = multiplier
        self.increment = increment
        self.modulo = modulo
        self.seed = seed

    def next_number(self):
        self.seed = (self.multiplier * self.seed + self.increment) % self.modulo
        return self.seed
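

# Demo (added): with tiny parameters satisfying the Hull-Dobell theorem the
# generator has full period, i.e. it visits every residue mod 16 exactly once.
_demo = LinearCongruentialGenerator(5, 3, 16, seed=7)
assert sorted(_demo.next_number() for _ in range(16)) == list(range(16))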
if __name__ == "__main__":
# Show the LCG in action.
    lcg = LinearCongruentialGenerator(1664525, 1013904223, 2 << 31)
while True:
print(lcg.next_number())
| 100 | 1 |
from __future__ import annotations
from collections.abc import Iterator
class Node:
    """A binary tree node holding an integer value."""

    def __init__(self, value) -> None:
        self.value = value
        self.left: Node | None = None
        self.right: Node | None = None


class BinaryTreeNodeSum:
    """Iterating over this wrapper yields the sum of all node values in the tree."""

    def __init__(self, tree) -> None:
        self.tree = tree

    def depth_first_search(self, node) -> int:
        if node is None:
            return 0
        return node.value + (
            self.depth_first_search(node.left) + self.depth_first_search(node.right)
        )

    def __iter__(self) -> Iterator[int]:
        yield self.depth_first_search(self.tree)
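

# Worked example (added) for the classes above: 10 + 5 + (-3) + 12 == 24.
_root = Node(10)
_root.left, _root.right = Node(5), Node(-3)
_root.left.left = Node(12)
assert next(iter(BinaryTreeNodeSum(_root))) == 24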
if __name__ == "__main__":
import doctest
    doctest.testmod()
| 325 |
from __future__ import annotations
import os
import tempfile
import unittest
from transformers import ConvBertConfig, is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import (
TFConvBertForMaskedLM,
TFConvBertForMultipleChoice,
TFConvBertForQuestionAnswering,
TFConvBertForSequenceClassification,
TFConvBertForTokenClassification,
TFConvBertModel,
)
class TFConvBertModelTester:
'''simple docstring'''
    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_input_mask=True,
        use_token_type_ids=True,
        use_labels=True,
        vocab_size=99,
        hidden_size=32,
        num_hidden_layers=2,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        num_labels=3,
        num_choices=4,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = 13
        self.seq_length = 7
        self.is_training = True
        self.use_input_mask = True
        self.use_token_type_ids = True
        self.use_labels = True
        self.vocab_size = 99
        self.hidden_size = 384
        self.num_hidden_layers = 2
        self.num_attention_heads = 4
        self.intermediate_size = 37
        self.hidden_act = "gelu"
        self.hidden_dropout_prob = 0.1
        self.attention_probs_dropout_prob = 0.1
        self.max_position_embeddings = 512
        self.type_vocab_size = 16
        self.type_sequence_label_size = 2
        self.initializer_range = 0.02
        self.num_labels = 3
        self.num_choices = 4
        self.embedding_size = 128
        self.head_ratio = 2
        self.conv_kernel_size = 9
        self.num_groups = 1
        self.scope = None
    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)

        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)

        config = ConvBertConfig(
            vocab_size=self.vocab_size, hidden_size=self.hidden_size, num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads, intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act, hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings, type_vocab_size=self.type_vocab_size,
            initializer_range=self.initializer_range, return_dict=True,
        )

        return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
def UpperCamelCase_ ( self , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase ) -> str:
_SCREAMING_SNAKE_CASE : Any = TFConvBertModel(config=__lowerCamelCase )
_SCREAMING_SNAKE_CASE : Tuple = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids}
_SCREAMING_SNAKE_CASE : str = [input_ids, input_mask]
_SCREAMING_SNAKE_CASE : Any = model(__lowerCamelCase )
_SCREAMING_SNAKE_CASE : Dict = model(__lowerCamelCase )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def UpperCamelCase_ ( self , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase ) -> Optional[int]:
_SCREAMING_SNAKE_CASE : Dict = TFConvBertForMaskedLM(config=__lowerCamelCase )
_SCREAMING_SNAKE_CASE : str = {
"input_ids": input_ids,
"attention_mask": input_mask,
"token_type_ids": token_type_ids,
}
_SCREAMING_SNAKE_CASE : List[str] = model(__lowerCamelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def UpperCamelCase_ ( self , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase ) -> Union[str, Any]:
_SCREAMING_SNAKE_CASE : int = self.num_labels
_SCREAMING_SNAKE_CASE : str = TFConvBertForSequenceClassification(config=__lowerCamelCase )
_SCREAMING_SNAKE_CASE : Any = {
"input_ids": input_ids,
"attention_mask": input_mask,
"token_type_ids": token_type_ids,
}
_SCREAMING_SNAKE_CASE : Any = model(__lowerCamelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def UpperCamelCase_ ( self , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase ) -> str:
_SCREAMING_SNAKE_CASE : Optional[int] = self.num_choices
_SCREAMING_SNAKE_CASE : List[Any] = TFConvBertForMultipleChoice(config=__lowerCamelCase )
_SCREAMING_SNAKE_CASE : Optional[int] = tf.tile(tf.expand_dims(__lowerCamelCase , 1 ) , (1, self.num_choices, 1) )
_SCREAMING_SNAKE_CASE : Union[str, Any] = tf.tile(tf.expand_dims(__lowerCamelCase , 1 ) , (1, self.num_choices, 1) )
_SCREAMING_SNAKE_CASE : List[Any] = tf.tile(tf.expand_dims(__lowerCamelCase , 1 ) , (1, self.num_choices, 1) )
_SCREAMING_SNAKE_CASE : List[Any] = {
"input_ids": multiple_choice_inputs_ids,
"attention_mask": multiple_choice_input_mask,
"token_type_ids": multiple_choice_token_type_ids,
}
_SCREAMING_SNAKE_CASE : List[Any] = model(__lowerCamelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) )
def UpperCamelCase_ ( self , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase ) -> Optional[int]:
_SCREAMING_SNAKE_CASE : Dict = self.num_labels
_SCREAMING_SNAKE_CASE : Tuple = TFConvBertForTokenClassification(config=__lowerCamelCase )
_SCREAMING_SNAKE_CASE : Optional[Any] = {
"input_ids": input_ids,
"attention_mask": input_mask,
"token_type_ids": token_type_ids,
}
_SCREAMING_SNAKE_CASE : int = model(__lowerCamelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def UpperCamelCase_ ( self , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase ) -> int:
_SCREAMING_SNAKE_CASE : Optional[int] = TFConvBertForQuestionAnswering(config=__lowerCamelCase )
_SCREAMING_SNAKE_CASE : Any = {
"input_ids": input_ids,
"attention_mask": input_mask,
"token_type_ids": token_type_ids,
}
_SCREAMING_SNAKE_CASE : Any = model(__lowerCamelCase )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
def UpperCamelCase_ ( self ) -> Tuple:
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": input_mask}
        return config, inputs_dict
@require_tf
class lowerCAmelCase__( __lowercase , __lowercase , unittest.TestCase ):
'''simple docstring'''
__snake_case = (
(
TFConvBertModel,
TFConvBertForMaskedLM,
TFConvBertForQuestionAnswering,
TFConvBertForSequenceClassification,
TFConvBertForTokenClassification,
TFConvBertForMultipleChoice,
)
if is_tf_available()
else ()
)
__snake_case = (
{
'feature-extraction': TFConvBertModel,
'fill-mask': TFConvBertForMaskedLM,
'question-answering': TFConvBertForQuestionAnswering,
'text-classification': TFConvBertForSequenceClassification,
'token-classification': TFConvBertForTokenClassification,
'zero-shot': TFConvBertForSequenceClassification,
}
if is_tf_available()
else {}
)
__snake_case = False
__snake_case = False
__snake_case = False
def UpperCamelCase_ ( self ) -> str:
_SCREAMING_SNAKE_CASE : int = TFConvBertModelTester(self )
_SCREAMING_SNAKE_CASE : int = ConfigTester(self , config_class=__lowerCamelCase , hidden_size=3_7 )
def UpperCamelCase_ ( self ) -> List[Any]:
self.config_tester.run_common_tests()
def UpperCamelCase_ ( self ) -> Optional[int]:
_SCREAMING_SNAKE_CASE : int = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*__lowerCamelCase )
def UpperCamelCase_ ( self ) -> Dict:
_SCREAMING_SNAKE_CASE : Optional[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_lm(*__lowerCamelCase )
def UpperCamelCase_ ( self ) -> List[Any]:
_SCREAMING_SNAKE_CASE : List[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_multiple_choice(*__lowerCamelCase )
def UpperCamelCase_ ( self ) -> Dict:
_SCREAMING_SNAKE_CASE : List[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_question_answering(*__lowerCamelCase )
def UpperCamelCase_ ( self ) -> Optional[Any]:
_SCREAMING_SNAKE_CASE : List[str] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_sequence_classification(*__lowerCamelCase )
def UpperCamelCase_ ( self ) -> int:
_SCREAMING_SNAKE_CASE : int = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_token_classification(*__lowerCamelCase )
@slow
def UpperCamelCase_ ( self ) -> Optional[int]:
_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE : List[str] = self.model_tester.prepare_config_and_inputs_for_common()
_SCREAMING_SNAKE_CASE : Union[str, Any] = True
_SCREAMING_SNAKE_CASE : Any = True
if hasattr(__lowerCamelCase , "use_cache" ):
_SCREAMING_SNAKE_CASE : List[str] = True
_SCREAMING_SNAKE_CASE : Optional[int] = getattr(self.model_tester , "encoder_seq_length" , self.model_tester.seq_length )
_SCREAMING_SNAKE_CASE : Any = getattr(self.model_tester , "key_length" , __lowerCamelCase )
for model_class in self.all_model_classes:
_SCREAMING_SNAKE_CASE : Union[str, Any] = self._prepare_for_class(__lowerCamelCase , __lowerCamelCase )
_SCREAMING_SNAKE_CASE : Optional[Any] = model_class(__lowerCamelCase )
_SCREAMING_SNAKE_CASE : Union[str, Any] = len(model(__lowerCamelCase ) )
with tempfile.TemporaryDirectory() as tmpdirname:
model.save_pretrained(__lowerCamelCase , saved_model=__lowerCamelCase )
_SCREAMING_SNAKE_CASE : Dict = os.path.join(__lowerCamelCase , "saved_model" , "1" )
_SCREAMING_SNAKE_CASE : Optional[Any] = tf.keras.models.load_model(__lowerCamelCase )
_SCREAMING_SNAKE_CASE : int = model(__lowerCamelCase )
if self.is_encoder_decoder:
_SCREAMING_SNAKE_CASE : List[Any] = outputs["encoder_hidden_states"]
_SCREAMING_SNAKE_CASE : Union[str, Any] = outputs["encoder_attentions"]
else:
_SCREAMING_SNAKE_CASE : List[str] = outputs["hidden_states"]
_SCREAMING_SNAKE_CASE : Dict = outputs["attentions"]
self.assertEqual(len(__lowerCamelCase ) , __lowerCamelCase )
_SCREAMING_SNAKE_CASE : str = getattr(
self.model_tester , "expected_num_hidden_layers" , self.model_tester.num_hidden_layers + 1 )
self.assertEqual(len(__lowerCamelCase ) , __lowerCamelCase )
self.assertListEqual(
list(output_hidden_states[0].shape[-2:] ) , [self.model_tester.seq_length, self.model_tester.hidden_size] , )
self.assertEqual(len(__lowerCamelCase ) , self.model_tester.num_hidden_layers )
self.assertListEqual(
list(output_attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads / 2, encoder_seq_length, encoder_key_length] , )
@slow
def UpperCamelCase_ ( self ) -> str:
_SCREAMING_SNAKE_CASE : Any = TFConvBertModel.from_pretrained("YituTech/conv-bert-base" )
self.assertIsNotNone(__lowerCamelCase )
def UpperCamelCase_ ( self ) -> Dict:
_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE : List[str] = self.model_tester.prepare_config_and_inputs_for_common()
_SCREAMING_SNAKE_CASE : Dict = True
_SCREAMING_SNAKE_CASE : Dict = getattr(self.model_tester , "decoder_seq_length" , self.model_tester.seq_length )
_SCREAMING_SNAKE_CASE : Dict = getattr(self.model_tester , "encoder_seq_length" , self.model_tester.seq_length )
_SCREAMING_SNAKE_CASE : Any = getattr(self.model_tester , "key_length" , __lowerCamelCase )
_SCREAMING_SNAKE_CASE : Union[str, Any] = getattr(self.model_tester , "key_length" , __lowerCamelCase )
def check_decoder_attentions_output(__lowerCamelCase ):
_SCREAMING_SNAKE_CASE : Union[str, Any] = len(__lowerCamelCase )
self.assertEqual(out_len % 2 , 0 )
_SCREAMING_SNAKE_CASE : Optional[int] = outputs.decoder_attentions
self.assertEqual(len(__lowerCamelCase ) , self.model_tester.num_hidden_layers )
self.assertListEqual(
list(decoder_attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads / 2, decoder_seq_length, decoder_key_length] , )
def check_encoder_attentions_output(__lowerCamelCase ):
_SCREAMING_SNAKE_CASE : Optional[Any] = [
t.numpy() for t in (outputs.encoder_attentions if config.is_encoder_decoder else outputs.attentions)
]
self.assertEqual(len(__lowerCamelCase ) , self.model_tester.num_hidden_layers )
self.assertListEqual(
list(attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads / 2, encoder_seq_length, encoder_key_length] , )
for model_class in self.all_model_classes:
_SCREAMING_SNAKE_CASE : Any = True
_SCREAMING_SNAKE_CASE : Any = False
_SCREAMING_SNAKE_CASE : Optional[Any] = model_class(__lowerCamelCase )
_SCREAMING_SNAKE_CASE : str = model(self._prepare_for_class(__lowerCamelCase , __lowerCamelCase ) )
_SCREAMING_SNAKE_CASE : Any = len(__lowerCamelCase )
self.assertEqual(config.output_hidden_states , __lowerCamelCase )
check_encoder_attentions_output(__lowerCamelCase )
if self.is_encoder_decoder:
_SCREAMING_SNAKE_CASE : Tuple = model_class(__lowerCamelCase )
_SCREAMING_SNAKE_CASE : Dict = model(self._prepare_for_class(__lowerCamelCase , __lowerCamelCase ) )
self.assertEqual(config.output_hidden_states , __lowerCamelCase )
check_decoder_attentions_output(__lowerCamelCase )
# Check that output attentions can also be changed via the config
del inputs_dict["output_attentions"]
_SCREAMING_SNAKE_CASE : Dict = True
_SCREAMING_SNAKE_CASE : List[Any] = model_class(__lowerCamelCase )
_SCREAMING_SNAKE_CASE : Any = model(self._prepare_for_class(__lowerCamelCase , __lowerCamelCase ) )
self.assertEqual(config.output_hidden_states , __lowerCamelCase )
check_encoder_attentions_output(__lowerCamelCase )
# Check attention is always last and order is fine
_SCREAMING_SNAKE_CASE : Union[str, Any] = True
_SCREAMING_SNAKE_CASE : Any = True
_SCREAMING_SNAKE_CASE : Optional[int] = model_class(__lowerCamelCase )
_SCREAMING_SNAKE_CASE : List[str] = model(self._prepare_for_class(__lowerCamelCase , __lowerCamelCase ) )
self.assertEqual(out_len + (2 if self.is_encoder_decoder else 1) , len(__lowerCamelCase ) )
self.assertEqual(model.config.output_hidden_states , __lowerCamelCase )
check_encoder_attentions_output(__lowerCamelCase )
@require_tf
class lowerCAmelCase__( unittest.TestCase ):
'''simple docstring'''
@slow
def UpperCamelCase_ ( self ) -> List[Any]:
_SCREAMING_SNAKE_CASE : int = TFConvBertModel.from_pretrained("YituTech/conv-bert-base" )
_SCREAMING_SNAKE_CASE : Tuple = tf.constant([[0, 1, 2, 3, 4, 5]] )
_SCREAMING_SNAKE_CASE : str = model(__lowerCamelCase )[0]
_SCREAMING_SNAKE_CASE : int = [1, 6, 7_6_8]
self.assertEqual(output.shape , __lowerCamelCase )
_SCREAMING_SNAKE_CASE : Optional[int] = tf.constant(
[
[
[-0.0347_5493, -0.468_6034, -0.3063_8832],
[0.2263_7248, -0.2698_8646, -0.742_3424],
[0.1032_4868, -0.4501_3508, -0.5828_0784],
]
] )
        tf.debugging.assert_near(output[:, :3, :3] , __lowerCamelCase , atol=1E-4 )
| 325 | 1 |
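# Usage sketch matching the TFConvBert integration check above: load the same
# public checkpoint and inspect the last hidden state. This is a minimal
# illustration, assuming `transformers` and TensorFlow are installed and the
# "YituTech/conv-bert-base" checkpoint can be downloaded.
import tensorflow as tf
from transformers import TFConvBertModel

model = TFConvBertModel.from_pretrained("YituTech/conv-bert-base")
input_ids = tf.constant([[0, 1, 2, 3, 4, 5]])
outputs = model(input_ids)
print(outputs.last_hidden_state.shape)  # (1, 6, 768) for the base checkpoint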
"""simple docstring"""
import os
import shutil
import tempfile
import unittest
import numpy as np
from transformers import AutoTokenizer, BarkProcessor
from transformers.testing_utils import require_torch, slow
@require_torch
class snake_case ( unittest.TestCase ):
"""simple docstring"""
def __lowerCAmelCase ( self : Union[str, Any] ):
UpperCAmelCase__ = 'ylacombe/bark-small'
UpperCAmelCase__ = tempfile.mkdtemp()
UpperCAmelCase__ = 'en_speaker_1'
UpperCAmelCase__ = 'This is a test string'
UpperCAmelCase__ = 'speaker_embeddings_path.json'
UpperCAmelCase__ = 'speaker_embeddings'
def __lowerCAmelCase ( self : str ,**lowerCamelCase__ : Tuple ):
return AutoTokenizer.from_pretrained(self.checkpoint ,**lowerCamelCase__ )
def __lowerCAmelCase ( self : int ):
shutil.rmtree(self.tmpdirname )
def __lowerCAmelCase ( self : Optional[Any] ):
UpperCAmelCase__ = self.get_tokenizer()
UpperCAmelCase__ = BarkProcessor(tokenizer=lowerCamelCase__ )
processor.save_pretrained(self.tmpdirname )
UpperCAmelCase__ = BarkProcessor.from_pretrained(self.tmpdirname )
self.assertEqual(processor.tokenizer.get_vocab() ,tokenizer.get_vocab() )
@slow
def __lowerCAmelCase ( self : int ):
UpperCAmelCase__ = BarkProcessor.from_pretrained(
pretrained_processor_name_or_path=self.checkpoint ,speaker_embeddings_dict_path=self.speaker_embeddings_dict_path ,)
processor.save_pretrained(
self.tmpdirname ,speaker_embeddings_dict_path=self.speaker_embeddings_dict_path ,speaker_embeddings_directory=self.speaker_embeddings_directory ,)
UpperCAmelCase__ = self.get_tokenizer(bos_token='(BOS)' ,eos_token='(EOS)' )
UpperCAmelCase__ = BarkProcessor.from_pretrained(
self.tmpdirname ,self.speaker_embeddings_dict_path ,bos_token='(BOS)' ,eos_token='(EOS)' ,)
self.assertEqual(processor.tokenizer.get_vocab() ,tokenizer_add_kwargs.get_vocab() )
def __lowerCAmelCase ( self : Union[str, Any] ):
UpperCAmelCase__ = BarkProcessor.from_pretrained(
pretrained_processor_name_or_path=self.checkpoint ,speaker_embeddings_dict_path=self.speaker_embeddings_dict_path ,)
UpperCAmelCase__ = 35
UpperCAmelCase__ = 2
UpperCAmelCase__ = 8
UpperCAmelCase__ = {
'semantic_prompt': np.ones(lowerCamelCase__ ),
'coarse_prompt': np.ones((nb_codebooks_coarse, seq_len) ),
'fine_prompt': np.ones((nb_codebooks_total, seq_len) ),
}
# test providing already loaded voice_preset
UpperCAmelCase__ = processor(text=self.input_string ,voice_preset=lowerCamelCase__ )
UpperCAmelCase__ = inputs['history_prompt']
for key in voice_preset:
self.assertListEqual(voice_preset[key].tolist() ,processed_voice_preset.get(lowerCamelCase__ ,np.array([] ) ).tolist() )
# test loading voice preset from npz file
UpperCAmelCase__ = os.path.join(self.tmpdirname ,'file.npz' )
np.savez(lowerCamelCase__ ,**lowerCamelCase__ )
UpperCAmelCase__ = processor(text=self.input_string ,voice_preset=lowerCamelCase__ )
UpperCAmelCase__ = inputs['history_prompt']
for key in voice_preset:
self.assertListEqual(voice_preset[key].tolist() ,processed_voice_preset.get(lowerCamelCase__ ,np.array([] ) ).tolist() )
# test loading voice preset from the hub
UpperCAmelCase__ = processor(text=self.input_string ,voice_preset=self.voice_preset )
def __lowerCAmelCase ( self : Optional[int] ):
UpperCAmelCase__ = self.get_tokenizer()
UpperCAmelCase__ = BarkProcessor(tokenizer=lowerCamelCase__ )
UpperCAmelCase__ = processor(text=self.input_string )
UpperCAmelCase__ = tokenizer(
self.input_string ,padding='max_length' ,max_length=256 ,add_special_tokens=lowerCamelCase__ ,return_attention_mask=lowerCamelCase__ ,return_token_type_ids=lowerCamelCase__ ,)
for key in encoded_tok.keys():
self.assertListEqual(encoded_tok[key] ,encoded_processor[key].squeeze().tolist() )
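# Minimal usage sketch for BarkProcessor, mirroring the tests above. It assumes
# `transformers` is installed and the public "suno/bark-small" checkpoint is
# reachable; "v2/en_speaker_6" is one of the voice presets shipped with Bark.
from transformers import BarkProcessor

processor = BarkProcessor.from_pretrained("suno/bark-small")
inputs = processor("Hello, my dog is cute", voice_preset="v2/en_speaker_6")
print(inputs["input_ids"].shape)  # padded to the processor's max_length of 256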
| 98 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_tokenizers_available,
is_torch_available,
)
_UpperCamelCase: List[str] = {}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_UpperCamelCase: Union[str, Any] = ['NllbTokenizer']
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_UpperCamelCase: Optional[int] = ['NllbTokenizerFast']
if TYPE_CHECKING:
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_nllb import NllbTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_nllb_fast import NllbTokenizerFast
else:
import sys
_UpperCamelCase: Optional[int] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
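# The `_LazyModule` above defers the actual imports until first attribute
# access, so importing the package stays cheap even when sentencepiece is
# missing. A sketch of the downstream import it enables, assuming an
# environment with sentencepiece installed and hub access:
from transformers import NllbTokenizer

tokenizer = NllbTokenizer.from_pretrained("facebook/nllb-200-distilled-600M", src_lang="eng_Latn")
print(tokenizer("Hello world").input_ids)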
| 255 | 0 |
from __future__ import annotations
import time
from math import sqrt
# 1 for manhattan, 0 for euclidean
_snake_case : Dict = 0
_snake_case : Dict = [
[0, 0, 0, 0, 0, 0, 0],
[0, 1, 0, 0, 0, 0, 0], # 0 are free path whereas 1's are obstacles
[0, 0, 0, 0, 0, 0, 0],
[0, 0, 1, 0, 0, 0, 0],
[1, 0, 1, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 1, 0, 0],
]
_snake_case : List[str] = [[-1, 0], [0, -1], [1, 0], [0, 1]] # up, left, down, right
_snake_case : Tuple = tuple[int, int]
class _UpperCAmelCase :
"""simple docstring"""
def __init__( self : Dict , lowerCAmelCase_ : int , lowerCAmelCase_ : int , lowerCAmelCase_ : int , lowerCAmelCase_ : int , lowerCAmelCase_ : int , lowerCAmelCase_ : Node | None , ) -> None:
__lowerCAmelCase = pos_x
__lowerCAmelCase = pos_y
__lowerCAmelCase = (pos_y, pos_x)
__lowerCAmelCase = goal_x
__lowerCAmelCase = goal_y
__lowerCAmelCase = g_cost
__lowerCAmelCase = parent
__lowerCAmelCase = self.calculate_heuristic()
__lowerCAmelCase = self.g_cost + self.h_cost
def lowercase ( self : Any ) -> float:
__lowerCAmelCase = self.pos_x - self.goal_x
__lowerCAmelCase = self.pos_y - self.goal_y
if HEURISTIC == 1:
return abs(lowerCAmelCase_ ) + abs(lowerCAmelCase_ )
else:
return sqrt(dy**2 + dx**2 )
def __lt__( self : Union[str, Any] , lowerCAmelCase_ : Node ) -> bool:
return self.f_cost < other.f_cost
class _UpperCAmelCase :
"""simple docstring"""
def __init__( self : List[str] , lowerCAmelCase_ : TPosition , lowerCAmelCase_ : TPosition ) -> Tuple:
__lowerCAmelCase = Node(start[1] , start[0] , goal[1] , goal[0] , 0 , lowerCAmelCase_ )
__lowerCAmelCase = Node(goal[1] , goal[0] , goal[1] , goal[0] , 9_9_9_9_9 , lowerCAmelCase_ )
__lowerCAmelCase = [self.start]
__lowerCAmelCase = []
__lowerCAmelCase = False
def lowercase ( self : str ) -> list[TPosition]:
while self.open_nodes:
# Open Nodes are sorted using __lt__
self.open_nodes.sort()
__lowerCAmelCase = self.open_nodes.pop(0 )
if current_node.pos == self.target.pos:
return self.retrace_path(lowerCAmelCase_ )
self.closed_nodes.append(lowerCAmelCase_ )
__lowerCAmelCase = self.get_successors(lowerCAmelCase_ )
for child_node in successors:
if child_node in self.closed_nodes:
continue
if child_node not in self.open_nodes:
self.open_nodes.append(lowerCAmelCase_ )
else:
# retrieve the best current path
__lowerCAmelCase = self.open_nodes.pop(self.open_nodes.index(lowerCAmelCase_ ) )
if child_node.g_cost < better_node.g_cost:
self.open_nodes.append(lowerCAmelCase_ )
else:
self.open_nodes.append(lowerCAmelCase_ )
return [self.start.pos]
def lowercase ( self : Optional[Any] , lowerCAmelCase_ : Node ) -> list[Node]:
__lowerCAmelCase = []
for action in delta:
__lowerCAmelCase = parent.pos_x + action[1]
__lowerCAmelCase = parent.pos_y + action[0]
if not (0 <= pos_x <= len(grid[0] ) - 1 and 0 <= pos_y <= len(lowerCAmelCase_ ) - 1):
continue
if grid[pos_y][pos_x] != 0:
continue
successors.append(
Node(
lowerCAmelCase_ , lowerCAmelCase_ , self.target.pos_y , self.target.pos_x , parent.g_cost + 1 , lowerCAmelCase_ , ) )
return successors
def lowercase ( self : Tuple , lowerCAmelCase_ : Node | None ) -> list[TPosition]:
__lowerCAmelCase = node
__lowerCAmelCase = []
while current_node is not None:
path.append((current_node.pos_y, current_node.pos_x) )
__lowerCAmelCase = current_node.parent
path.reverse()
return path
class _UpperCAmelCase :
"""simple docstring"""
def __init__( self : int , lowerCAmelCase_ : TPosition , lowerCAmelCase_ : TPosition ) -> None:
__lowerCAmelCase = AStar(lowerCAmelCase_ , lowerCAmelCase_ )
__lowerCAmelCase = AStar(lowerCAmelCase_ , lowerCAmelCase_ )
__lowerCAmelCase = False
def lowercase ( self : Dict ) -> list[TPosition]:
while self.fwd_astar.open_nodes or self.bwd_astar.open_nodes:
self.fwd_astar.open_nodes.sort()
self.bwd_astar.open_nodes.sort()
__lowerCAmelCase = self.fwd_astar.open_nodes.pop(0 )
__lowerCAmelCase = self.bwd_astar.open_nodes.pop(0 )
if current_bwd_node.pos == current_fwd_node.pos:
return self.retrace_bidirectional_path(
lowerCAmelCase_ , lowerCAmelCase_ )
self.fwd_astar.closed_nodes.append(lowerCAmelCase_ )
self.bwd_astar.closed_nodes.append(lowerCAmelCase_ )
__lowerCAmelCase = current_bwd_node
__lowerCAmelCase = current_fwd_node
__lowerCAmelCase = {
self.fwd_astar: self.fwd_astar.get_successors(lowerCAmelCase_ ),
self.bwd_astar: self.bwd_astar.get_successors(lowerCAmelCase_ ),
}
for astar in [self.fwd_astar, self.bwd_astar]:
for child_node in successors[astar]:
if child_node in astar.closed_nodes:
continue
if child_node not in astar.open_nodes:
astar.open_nodes.append(lowerCAmelCase_ )
else:
# retrieve the best current path
__lowerCAmelCase = astar.open_nodes.pop(
astar.open_nodes.index(lowerCAmelCase_ ) )
if child_node.g_cost < better_node.g_cost:
astar.open_nodes.append(lowerCAmelCase_ )
else:
astar.open_nodes.append(lowerCAmelCase_ )
return [self.fwd_astar.start.pos]
def lowercase ( self : Dict , lowerCAmelCase_ : Node , lowerCAmelCase_ : Node ) -> list[TPosition]:
__lowerCAmelCase = self.fwd_astar.retrace_path(lowerCAmelCase_ )
__lowerCAmelCase = self.bwd_astar.retrace_path(lowerCAmelCase_ )
bwd_path.pop()
bwd_path.reverse()
__lowerCAmelCase = fwd_path + bwd_path
return path
if __name__ == "__main__":
# all coordinates are given in format [y,x]
_snake_case : List[Any] = (0, 0)
_snake_case : Union[str, Any] = (len(grid) - 1, len(grid[0]) - 1)
for elem in grid:
print(elem)
_snake_case : int = time.time()
_snake_case : Optional[int] = AStar(init, goal)
_snake_case : int = a_star.search()
_snake_case : Union[str, Any] = time.time() - start_time
print(F"""AStar execution time = {end_time:f} seconds""")
_snake_case : Any = time.time()
_snake_case : Dict = BidirectionalAStar(init, goal)
_snake_case : Optional[int] = time.time() - bd_start_time
print(F"""BidirectionalAStar execution time = {bd_end_time:f} seconds""")
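# Heuristic sanity check for the Node class above, referenced here by its
# conceptual name (the classes in this dump share placeholder names): the
# distance from (3, 4) to a goal at (0, 0) is 5.0 under the Euclidean
# heuristic (HEURISTIC == 0) and 7 under Manhattan (HEURISTIC == 1).
node = Node(3, 4, 0, 0, 0, None)  # pos_x, pos_y, goal_x, goal_y, g_cost, parent
print(node.h_cost)  # 5.0 with the default HEURISTIC == 0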
| 207 |
from argparse import ArgumentParser, Namespace
from typing import Any, List, Optional
from ..pipelines import Pipeline, get_supported_tasks, pipeline
from ..utils import logging
from . import BaseTransformersCLICommand
try:
from fastapi import Body, FastAPI, HTTPException
from fastapi.routing import APIRoute
from pydantic import BaseModel
from starlette.responses import JSONResponse
from uvicorn import run
_snake_case : int = True
except (ImportError, AttributeError):
_snake_case : int = object
def a_ ( *lowerCAmelCase_ : List[str], **lowerCAmelCase_ : Optional[Any] ):
pass
_snake_case : Union[str, Any] = False
_snake_case : int = logging.get_logger('transformers-cli/serving')
def a_ ( lowerCAmelCase_ : Namespace ):
__lowerCAmelCase = pipeline(
task=args.task, model=args.model if args.model else None, config=args.config, tokenizer=args.tokenizer, device=args.device, )
return ServeCommand(lowerCAmelCase_, args.host, args.port, args.workers )
class _UpperCAmelCase ( _UpperCamelCase ):
"""simple docstring"""
a_ = 42
class _UpperCAmelCase ( _UpperCamelCase ):
"""simple docstring"""
a_ = 42
a_ = 42
class _UpperCAmelCase ( _UpperCamelCase ):
"""simple docstring"""
a_ = 42
class _UpperCAmelCase ( _UpperCamelCase ):
"""simple docstring"""
a_ = 42
class _UpperCAmelCase ( _UpperCamelCase ):
"""simple docstring"""
@staticmethod
def lowercase ( lowerCAmelCase_ : ArgumentParser ) -> Union[str, Any]:
__lowerCAmelCase = parser.add_parser(
'serve' , help='CLI tool to run inference requests through REST and GraphQL endpoints.' )
serve_parser.add_argument(
'--task' , type=lowerCAmelCase_ , choices=get_supported_tasks() , help='The task to run the pipeline on' , )
serve_parser.add_argument('--host' , type=lowerCAmelCase_ , default='localhost' , help='Interface the server will listen on.' )
serve_parser.add_argument('--port' , type=lowerCAmelCase_ , default=8_8_8_8 , help='Port the serving will listen to.' )
serve_parser.add_argument('--workers' , type=lowerCAmelCase_ , default=1 , help='Number of http workers' )
serve_parser.add_argument('--model' , type=lowerCAmelCase_ , help='Model\'s name or path to stored model.' )
serve_parser.add_argument('--config' , type=lowerCAmelCase_ , help='Model\'s config name or path to stored model.' )
serve_parser.add_argument('--tokenizer' , type=lowerCAmelCase_ , help='Tokenizer name to use.' )
serve_parser.add_argument(
'--device' , type=lowerCAmelCase_ , default=-1 , help='Indicate the device to run onto, -1 indicates CPU, >= 0 indicates GPU (default: -1)' , )
serve_parser.set_defaults(func=lowerCAmelCase_ )
def __init__( self : List[str] , lowerCAmelCase_ : Pipeline , lowerCAmelCase_ : str , lowerCAmelCase_ : int , lowerCAmelCase_ : int ) -> str:
__lowerCAmelCase = pipeline
__lowerCAmelCase = host
__lowerCAmelCase = port
__lowerCAmelCase = workers
if not _serve_dependencies_installed:
raise RuntimeError(
'Using serve command requires FastAPI and uvicorn. '
                'Please install transformers with [serving]: pip install "transformers[serving]". '
'Or install FastAPI and uvicorn separately.' )
else:
logger.info(f"""Serving model over {host}:{port}""" )
__lowerCAmelCase = FastAPI(
routes=[
APIRoute(
'/' , self.model_info , response_model=lowerCAmelCase_ , response_class=lowerCAmelCase_ , methods=['GET'] , ),
APIRoute(
'/tokenize' , self.tokenize , response_model=lowerCAmelCase_ , response_class=lowerCAmelCase_ , methods=['POST'] , ),
APIRoute(
'/detokenize' , self.detokenize , response_model=lowerCAmelCase_ , response_class=lowerCAmelCase_ , methods=['POST'] , ),
APIRoute(
'/forward' , self.forward , response_model=lowerCAmelCase_ , response_class=lowerCAmelCase_ , methods=['POST'] , ),
] , timeout=6_0_0 , )
def lowercase ( self : Tuple ) -> str:
run(self._app , host=self.host , port=self.port , workers=self.workers )
def lowercase ( self : Any ) -> List[str]:
return ServeModelInfoResult(infos=vars(self._pipeline.model.config ) )
def lowercase ( self : int , lowerCAmelCase_ : str = Body(lowerCAmelCase_ , embed=lowerCAmelCase_ ) , lowerCAmelCase_ : bool = Body(lowerCAmelCase_ , embed=lowerCAmelCase_ ) ) -> Dict:
try:
__lowerCAmelCase = self._pipeline.tokenizer.tokenize(lowerCAmelCase_ )
if return_ids:
__lowerCAmelCase = self._pipeline.tokenizer.convert_tokens_to_ids(lowerCAmelCase_ )
return ServeTokenizeResult(tokens=lowerCAmelCase_ , tokens_ids=lowerCAmelCase_ )
else:
return ServeTokenizeResult(tokens=lowerCAmelCase_ )
except Exception as e:
raise HTTPException(status_code=5_0_0 , detail={'model': '', 'error': str(lowerCAmelCase_ )} )
def lowercase ( self : int , lowerCAmelCase_ : List[int] = Body(lowerCAmelCase_ , embed=lowerCAmelCase_ ) , lowerCAmelCase_ : bool = Body(lowerCAmelCase_ , embed=lowerCAmelCase_ ) , lowerCAmelCase_ : bool = Body(lowerCAmelCase_ , embed=lowerCAmelCase_ ) , ) -> Union[str, Any]:
try:
__lowerCAmelCase = self._pipeline.tokenizer.decode(lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ )
return ServeDeTokenizeResult(model='' , text=lowerCAmelCase_ )
except Exception as e:
raise HTTPException(status_code=5_0_0 , detail={'model': '', 'error': str(lowerCAmelCase_ )} )
async def lowercase ( self : List[Any] , lowerCAmelCase_ : Union[str, Any]=Body(lowerCAmelCase_ , embed=lowerCAmelCase_ ) ) -> int:
# Check we don't have empty string
if len(lowerCAmelCase_ ) == 0:
return ServeForwardResult(output=[] , attention=[] )
try:
# Forward through the model
__lowerCAmelCase = self._pipeline(lowerCAmelCase_ )
return ServeForwardResult(output=lowerCAmelCase_ )
except Exception as e:
raise HTTPException(5_0_0 , {'error': str(lowerCAmelCase_ )} )
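# Example client call against the /tokenize route above, assuming the server was
# started with `transformers-cli serve --task text-classification --port 8888`
# and that the Body field names match the upstream parameter names
# (`text_input`, `return_ids`); both are assumptions, since identifiers in this
# dump are placeholder-renamed.
import requests

response = requests.post(
    "http://localhost:8888/tokenize",
    json={"text_input": "Hello world", "return_ids": True},
)
print(response.json())  # {"tokens": [...], "tokens_ids": [...]}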
| 207 | 1 |
import copy
import inspect
import unittest
from transformers import AutoBackbone
from transformers.configuration_utils import PretrainedConfig
from transformers.testing_utils import require_timm, require_torch, torch_device
from transformers.utils.import_utils import is_torch_available
from ...test_backbone_common import BackboneTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor
if is_torch_available():
import torch
from transformers import TimmBackbone, TimmBackboneConfig
from ...test_pipeline_mixin import PipelineTesterMixin
class SCREAMING_SNAKE_CASE__ :
'''simple docstring'''
def __init__( self, lowerCamelCase__, lowerCamelCase__=None, lowerCamelCase__=None, lowerCamelCase__=None, lowerCamelCase__="resnet50", lowerCamelCase__=3, lowerCamelCase__=32, lowerCamelCase__=3, lowerCamelCase__=True, lowerCamelCase__=True, ):
A : Dict = parent
A : Tuple = out_indices if out_indices is not None else [4]
A : Optional[int] = stage_names
A : Any = out_features
A : List[str] = backbone
A : Union[str, Any] = batch_size
A : Optional[Any] = image_size
A : Dict = num_channels
A : Any = use_pretrained_backbone
A : Any = is_training
def _lowerCAmelCase ( self ):
A : List[str] = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
A : List[Any] = self.get_config()
return config, pixel_values
def _lowerCAmelCase ( self ):
return TimmBackboneConfig(
image_size=self.image_size, num_channels=self.num_channels, out_features=self.out_features, out_indices=self.out_indices, stage_names=self.stage_names, use_pretrained_backbone=self.use_pretrained_backbone, backbone=self.backbone, )
def _lowerCAmelCase ( self, lowerCamelCase__, lowerCamelCase__ ):
A : Any = TimmBackbone(config=lowercase_ )
model.to(lowercase_ )
model.eval()
with torch.no_grad():
A : List[Any] = model(lowercase_ )
self.parent.assertEqual(
            result.feature_maps[-1].shape, (self.batch_size, model.channels[-1], 14, 14), )
def _lowerCAmelCase ( self ):
        config_and_inputs = self.prepare_config_and_inputs()
        config , pixel_values = config_and_inputs
        inputs_dict = {'''pixel_values''': pixel_values}
        return config, inputs_dict
@require_torch
@require_timm
class SCREAMING_SNAKE_CASE__ ( UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ , unittest.TestCase ):
'''simple docstring'''
__lowerCamelCase : Dict = (TimmBackbone,) if is_torch_available() else ()
__lowerCamelCase : Optional[Any] = {"feature-extraction": TimmBackbone} if is_torch_available() else {}
__lowerCamelCase : Optional[int] = False
__lowerCamelCase : int = False
__lowerCamelCase : List[str] = False
__lowerCamelCase : str = False
def _lowerCAmelCase ( self ):
A : Dict = TimmBackboneModelTester(self )
A : Optional[int] = ConfigTester(self, config_class=lowercase_, has_text_modality=lowercase_ )
def _lowerCAmelCase ( self ):
self.config_tester.create_and_test_config_to_json_string()
self.config_tester.create_and_test_config_to_json_file()
self.config_tester.create_and_test_config_from_and_save_pretrained()
self.config_tester.create_and_test_config_with_num_labels()
self.config_tester.check_config_can_be_init_without_params()
self.config_tester.check_config_arguments_init()
def _lowerCAmelCase ( self ):
A : Union[str, Any] = '''resnet18'''
A : List[Any] = '''microsoft/resnet-18'''
A : str = AutoBackbone.from_pretrained(lowercase_, use_timm_backbone=lowercase_ )
A : str = AutoBackbone.from_pretrained(lowercase_ )
self.assertEqual(len(timm_model.out_features ), len(transformers_model.out_features ) )
self.assertEqual(len(timm_model.stage_names ), len(transformers_model.stage_names ) )
self.assertEqual(timm_model.channels, transformers_model.channels )
# Out indices are set to the last layer by default. For timm models, we don't know
# the number of layers in advance, so we set it to (-1,), whereas for transformers
# models, we set it to [len(stage_names) - 1] (kept for backward compatibility).
self.assertEqual(timm_model.out_indices, (-1,) )
self.assertEqual(transformers_model.out_indices, [len(timm_model.stage_names ) - 1] )
A : Optional[int] = AutoBackbone.from_pretrained(lowercase_, use_timm_backbone=lowercase_, out_indices=[1, 2, 3] )
A : Optional[Any] = AutoBackbone.from_pretrained(lowercase_, out_indices=[1, 2, 3] )
self.assertEqual(timm_model.out_indices, transformers_model.out_indices )
self.assertEqual(len(timm_model.out_features ), len(transformers_model.out_features ) )
self.assertEqual(timm_model.channels, transformers_model.channels )
@unittest.skip("""TimmBackbone doesn\'t support feed forward chunking""" )
def _lowerCAmelCase ( self ):
pass
@unittest.skip("""TimmBackbone doesn\'t have num_hidden_layers attribute""" )
def _lowerCAmelCase ( self ):
pass
@unittest.skip("""TimmBackbone initialization is managed on the timm side""" )
def _lowerCAmelCase ( self ):
pass
@unittest.skip("""TimmBackbone models doesn\'t have inputs_embeds""" )
def _lowerCAmelCase ( self ):
pass
@unittest.skip("""TimmBackbone models doesn\'t have inputs_embeds""" )
def _lowerCAmelCase ( self ):
pass
@unittest.skip("""TimmBackbone model cannot be created without specifying a backbone checkpoint""" )
def _lowerCAmelCase ( self ):
pass
@unittest.skip("""Only checkpoints on timm can be loaded into TimmBackbone""" )
def _lowerCAmelCase ( self ):
pass
@unittest.skip("""model weights aren\'t tied in TimmBackbone.""" )
def _lowerCAmelCase ( self ):
pass
@unittest.skip("""model weights aren\'t tied in TimmBackbone.""" )
def _lowerCAmelCase ( self ):
pass
@unittest.skip("""Only checkpoints on timm can be loaded into TimmBackbone""" )
def _lowerCAmelCase ( self ):
pass
@unittest.skip("""Only checkpoints on timm can be loaded into TimmBackbone""" )
def _lowerCAmelCase ( self ):
pass
@unittest.skip("""TimmBackbone doesn\'t have hidden size info in its configuration.""" )
def _lowerCAmelCase ( self ):
pass
@unittest.skip("""TimmBackbone doesn\'t support output_attentions.""" )
def _lowerCAmelCase ( self ):
pass
@unittest.skip("""Safetensors is not supported by timm.""" )
def _lowerCAmelCase ( self ):
pass
@unittest.skip("""Will be fixed soon by reducing the size of the model used for common tests.""" )
def _lowerCAmelCase ( self ):
pass
def _lowerCAmelCase ( self ):
A : Union[str, Any] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
A : Optional[int] = model_class(lowercase_ )
A : List[Any] = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
A : Optional[Any] = [*signature.parameters.keys()]
A : Optional[int] = ['''pixel_values''']
self.assertListEqual(arg_names[:1], lowercase_ )
def _lowerCAmelCase ( self ):
A : List[str] = self.model_tester.prepare_config_and_inputs_for_common()
A : List[str] = True
A : Optional[Any] = self.has_attentions
# no need to test all models as different heads yield the same functionality
A : Optional[int] = self.all_model_classes[0]
A : List[Any] = model_class(lowercase_ )
model.to(lowercase_ )
A : Optional[Any] = self._prepare_for_class(lowercase_, lowercase_ )
A : List[str] = model(**lowercase_ )
A : int = outputs[0][-1]
# Encoder-/Decoder-only models
A : List[str] = outputs.hidden_states[0]
hidden_states.retain_grad()
if self.has_attentions:
A : Any = outputs.attentions[0]
attentions.retain_grad()
output.flatten()[0].backward(retain_graph=lowercase_ )
self.assertIsNotNone(hidden_states.grad )
if self.has_attentions:
self.assertIsNotNone(attentions.grad )
def _lowerCAmelCase ( self ):
A : str = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
A : int = model_class(lowercase_ )
model.to(lowercase_ )
model.eval()
A : int = model(**lowercase_ )
self.assertEqual(len(result.feature_maps ), len(config.out_indices ) )
self.assertEqual(len(model.channels ), len(config.out_indices ) )
# Check output of last stage is taken if out_features=None, out_indices=None
A : List[str] = copy.deepcopy(lowercase_ )
A : int = None
A : Any = model_class(lowercase_ )
model.to(lowercase_ )
model.eval()
A : Union[str, Any] = model(**lowercase_ )
self.assertEqual(len(result.feature_maps ), 1 )
self.assertEqual(len(model.channels ), 1 )
# Check backbone can be initialized with fresh weights
A : int = copy.deepcopy(lowercase_ )
A : List[str] = False
A : Any = model_class(lowercase_ )
model.to(lowercase_ )
model.eval()
A : Dict = model(**lowercase_ )
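# Minimal usage sketch matching the timm/transformers parity test above: load a
# ResNet backbone through AutoBackbone and inspect its feature maps. Assumes
# `transformers`, `timm`, and hub access to "microsoft/resnet-18".
import torch
from transformers import AutoBackbone

backbone = AutoBackbone.from_pretrained("microsoft/resnet-18", out_indices=[1, 2, 3])
pixel_values = torch.randn(1, 3, 224, 224)
with torch.no_grad():
    features = backbone(pixel_values).feature_maps
print([tuple(f.shape) for f in features])  # one map per requested stage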
| 116 |
"""simple docstring"""
import random
from typing import Any
def _A (__a ) -> list[Any]:
"""simple docstring"""
for _ in range(len(__a ) ):
SCREAMING_SNAKE_CASE_ : Optional[int] = random.randint(0 , len(__a ) - 1 )
SCREAMING_SNAKE_CASE_ : Tuple = random.randint(0 , len(__a ) - 1 )
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ : Union[str, Any] = data[b], data[a]
return data
if __name__ == "__main__":
UpperCAmelCase_ : Dict = [0, 1, 2, 3, 4, 5, 6, 7]
UpperCAmelCase_ : Dict = ["""python""", """says""", """hello""", """!"""]
print("""Fisher-Yates Shuffle:""")
print("""List""", integers, strings)
print("""FY Shuffle""", fisher_yates_shuffle(integers), fisher_yates_shuffle(strings))
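# Note: the routine above performs len(data) random pair swaps (an in-place
# variant of the Fisher-Yates pass) and also returns the list. For reproducible
# output, seed the module-level RNG first; a sketch assuming the function is in
# scope under its conceptual name:
random.seed(42)
print(fisher_yates_shuffle([0, 1, 2, 3, 4, 5, 6, 7]))  # same order on every run with this seed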
| 91 | 0 |
import numpy as np
import torch
from torch.utils.data import DataLoader
from accelerate.utils.dataclasses import DistributedType
class SCREAMING_SNAKE_CASE__ :
def __init__( self , a=2 , a=3 , a=64 , a=None):
lowercase__ : List[str] = np.random.default_rng(a)
lowercase__ : Tuple = length
lowercase__ : List[str] = rng.normal(size=(length,)).astype(np.floataa)
lowercase__ : Any = a * self.x + b + rng.normal(scale=0.1 , size=(length,)).astype(np.floataa)
def __len__( self):
return self.length
def __getitem__( self , a):
return {"x": self.x[i], "y": self.y[i]}
class SCREAMING_SNAKE_CASE__ (torch.nn.Module ):
def __init__( self , a=0 , a=0 , a=False):
super().__init__()
lowercase__ : int = torch.nn.Parameter(torch.tensor([2, 3]).float())
lowercase__ : str = torch.nn.Parameter(torch.tensor([2, 3]).float())
lowercase__ : Dict = True
def snake_case_ ( self , a=None):
if self.first_batch:
print(f"""Model dtype: {self.a.dtype}, {self.b.dtype}. Input dtype: {x.dtype}""")
lowercase__ : Tuple = False
return x * self.a[0] + self.b[0]
class SCREAMING_SNAKE_CASE__ (torch.nn.Module ):
def __init__( self , a=0 , a=0 , a=False):
super().__init__()
lowercase__ : int = torch.nn.Parameter(torch.tensor(a).float())
lowercase__ : str = torch.nn.Parameter(torch.tensor(a).float())
lowercase__ : List[Any] = True
def snake_case_ ( self , a=None):
if self.first_batch:
print(f"""Model dtype: {self.a.dtype}, {self.b.dtype}. Input dtype: {x.dtype}""")
lowercase__ : Optional[Any] = False
return x * self.a + self.b
def snake_case__ ( SCREAMING_SNAKE_CASE_ : Dict , SCREAMING_SNAKE_CASE_ : int = 16 ):
'''simple docstring'''
from datasets import load_dataset
from transformers import AutoTokenizer
lowercase__ : List[Any] = AutoTokenizer.from_pretrained('bert-base-cased' )
lowercase__ : int = {'train': 'tests/test_samples/MRPC/train.csv', 'validation': 'tests/test_samples/MRPC/dev.csv'}
lowercase__ : Dict = load_dataset('csv' , data_files=SCREAMING_SNAKE_CASE_ )
lowercase__ : List[Any] = datasets['train'].unique('label' )
lowercase__ : int = {v: i for i, v in enumerate(SCREAMING_SNAKE_CASE_ )}
def tokenize_function(SCREAMING_SNAKE_CASE_ : List[Any] ):
# max_length=None => use the model max length (it's actually the default)
lowercase__ : Union[str, Any] = tokenizer(
examples['sentence1'] , examples['sentence2'] , truncation=SCREAMING_SNAKE_CASE_ , max_length=SCREAMING_SNAKE_CASE_ , padding='max_length' )
if "label" in examples:
lowercase__ : Tuple = [label_to_id[l] for l in examples['label']]
return outputs
# Apply the method we just defined to all the examples in all the splits of the dataset
lowercase__ : Dict = datasets.map(
SCREAMING_SNAKE_CASE_ , batched=SCREAMING_SNAKE_CASE_ , remove_columns=['sentence1', 'sentence2', 'label'] , )
def collate_fn(SCREAMING_SNAKE_CASE_ : Optional[int] ):
# On TPU it's best to pad everything to the same length or training will be very slow.
if accelerator.distributed_type == DistributedType.TPU:
return tokenizer.pad(SCREAMING_SNAKE_CASE_ , padding='max_length' , max_length=128 , return_tensors='pt' )
return tokenizer.pad(SCREAMING_SNAKE_CASE_ , padding='longest' , return_tensors='pt' )
# Instantiate dataloaders.
lowercase__ : Optional[int] = DataLoader(tokenized_datasets['train'] , shuffle=SCREAMING_SNAKE_CASE_ , collate_fn=SCREAMING_SNAKE_CASE_ , batch_size=2 )
lowercase__ : Union[str, Any] = DataLoader(tokenized_datasets['validation'] , shuffle=SCREAMING_SNAKE_CASE_ , collate_fn=SCREAMING_SNAKE_CASE_ , batch_size=1 )
return train_dataloader, eval_dataloader
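# Sketch of how the MRPC helper above is consumed in an Accelerate script. The
# function is referenced by its conceptual name `get_dataloaders` (identifiers
# in this dump are placeholder-renamed), and the csv paths it hard-codes must
# exist relative to the working directory.
from accelerate import Accelerator

accelerator = Accelerator()
train_dl, eval_dl = get_dataloaders(accelerator, batch_size=16)
train_dl, eval_dl = accelerator.prepare(train_dl, eval_dl)
print(next(iter(train_dl))["input_ids"].shape)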
| 216 |
import warnings
from typing import List, Optional, Union
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding, PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy
from ...utils import TensorType
class SCREAMING_SNAKE_CASE__ (__snake_case ):
__lowerCamelCase : Tuple = ["""image_processor""", """tokenizer"""]
__lowerCamelCase : int = """ViltImageProcessor"""
__lowerCamelCase : Union[str, Any] = ("""BertTokenizer""", """BertTokenizerFast""")
def __init__( self , a=None , a=None , **a):
lowercase__ : Any = None
if "feature_extractor" in kwargs:
warnings.warn(
'The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`'
' instead.' , a , )
lowercase__ : Union[str, Any] = kwargs.pop('feature_extractor')
lowercase__ : List[Any] = image_processor if image_processor is not None else feature_extractor
if image_processor is None:
raise ValueError('You need to specify an `image_processor`.')
if tokenizer is None:
raise ValueError('You need to specify a `tokenizer`.')
super().__init__(a , a)
lowercase__ : str = self.image_processor
def __call__( self , a , a = None , a = True , a = False , a = None , a = None , a = 0 , a = None , a = None , a = None , a = False , a = False , a = False , a = False , a = True , a = None , **a , ):
lowercase__ : Optional[Any] = self.tokenizer(
text=a , add_special_tokens=a , padding=a , truncation=a , max_length=a , stride=a , pad_to_multiple_of=a , return_token_type_ids=a , return_attention_mask=a , return_overflowing_tokens=a , return_special_tokens_mask=a , return_offsets_mapping=a , return_length=a , verbose=a , return_tensors=a , **a , )
# add pixel_values + pixel_mask
lowercase__ : str = self.image_processor(a , return_tensors=a)
encoding.update(a)
return encoding
def snake_case_ ( self , *a , **a):
return self.tokenizer.batch_decode(*a , **a)
def snake_case_ ( self , *a , **a):
return self.tokenizer.decode(*a , **a)
@property
def snake_case_ ( self):
lowercase__ : int = self.tokenizer.model_input_names
lowercase__ : List[str] = self.image_processor.model_input_names
return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names))
@property
def snake_case_ ( self):
warnings.warn(
'`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.' , a , )
return self.image_processor_class
@property
def snake_case_ ( self):
warnings.warn(
'`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead.' , a , )
return self.image_processor
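# Usage sketch for the processor above: ViltProcessor pairs image preprocessing
# with text tokenization for VQA-style inputs. Assumes `transformers`, Pillow,
# and hub access to "dandelin/vilt-b32-finetuned-vqa".
import requests
from PIL import Image
from transformers import ViltProcessor

processor = ViltProcessor.from_pretrained("dandelin/vilt-b32-finetuned-vqa")
url = "http://images.cocodataset.org/val2017/000000039769.jpg"
image = Image.open(requests.get(url, stream=True).raw)
encoding = processor(image, "How many cats are there?", return_tensors="pt")
print(sorted(encoding.keys()))  # includes input_ids, pixel_values and pixel_mask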
| 216 | 1 |
import argparse
import collections
import os
import re
from transformers.utils import direct_transformers_import
# All paths are set with the intent you should run this script from the root of the repo with the command
# python utils/check_table.py
_lowercase : int ="src/transformers"
_lowercase : Optional[Any] ="docs/source/en"
_lowercase : Any ="."
def lowerCAmelCase_ ( _lowercase : Dict , _lowercase : List[Any] , _lowercase : Optional[Any]) -> Dict:
"""simple docstring"""
with open(UpperCAmelCase_ , """r""" , encoding="""utf-8""" , newline="""\n""") as f:
a__ : str = f.readlines()
# Find the start prompt.
a__ : Dict = 0
while not lines[start_index].startswith(UpperCAmelCase_):
start_index += 1
start_index += 1
a__ : List[Any] = start_index
while not lines[end_index].startswith(UpperCAmelCase_):
end_index += 1
end_index -= 1
while len(lines[start_index]) <= 1:
start_index += 1
while len(lines[end_index]) <= 1:
end_index -= 1
end_index += 1
return "".join(lines[start_index:end_index]), start_index, end_index, lines
# Add here suffixes that are used to identify models, separated by |
_lowercase : Union[str, Any] ="Model|Encoder|Decoder|ForConditionalGeneration"
# Regexes that match TF/Flax/PT model names.
_lowercase : Any =re.compile(r"TF(.*)(?:Model|Encoder|Decoder|ForConditionalGeneration)")
_lowercase : Tuple =re.compile(r"Flax(.*)(?:Model|Encoder|Decoder|ForConditionalGeneration)")
# Will match any TF or Flax model too so need to be in an else branch after the two previous regexes.
_lowercase : List[Any] =re.compile(r"(.*)(?:Model|Encoder|Decoder|ForConditionalGeneration)")
# This is to make sure the transformers module imported is the one in the repo.
_lowercase : Optional[Any] =direct_transformers_import(TRANSFORMERS_PATH)
def lowerCAmelCase_ ( _lowercase : Union[str, Any]) -> List[str]:
"""simple docstring"""
a__ : str = re.finditer(""".+?(?:(?<=[a-z])(?=[A-Z])|(?<=[A-Z])(?=[A-Z][a-z])|$)""" , UpperCAmelCase_)
return [m.group(0) for m in matches]
def lowerCAmelCase_ ( _lowercase : Union[str, Any] , _lowercase : Optional[Any]) -> str:
"""simple docstring"""
a__ : Dict = 2 if text == '✅' or text == '❌' else len(UpperCAmelCase_)
a__ : Optional[int] = (width - text_length) // 2
a__ : Optional[int] = width - text_length - left_indent
return " " * left_indent + text + " " * right_indent
def lowerCAmelCase_ ( ) -> List[str]:
"""simple docstring"""
a__ : Optional[Any] = transformers_module.models.auto.configuration_auto.CONFIG_MAPPING_NAMES
a__ : Any = {
name: config_maping_names[code]
for code, name in transformers_module.MODEL_NAMES_MAPPING.items()
if code in config_maping_names
}
a__ : int = {name: config.replace("""Config""" , """""") for name, config in model_name_to_config.items()}
# Dictionaries flagging if each model prefix has a slow/fast tokenizer, backend in PT/TF/Flax.
a__ : Optional[Any] = collections.defaultdict(UpperCAmelCase_)
a__ : Union[str, Any] = collections.defaultdict(UpperCAmelCase_)
a__ : Any = collections.defaultdict(UpperCAmelCase_)
a__ : Union[str, Any] = collections.defaultdict(UpperCAmelCase_)
a__ : Dict = collections.defaultdict(UpperCAmelCase_)
# Let's lookup through all transformers object (once).
for attr_name in dir(UpperCAmelCase_):
a__ : Optional[Any] = None
if attr_name.endswith("""Tokenizer"""):
a__ : Optional[int] = slow_tokenizers
a__ : Optional[int] = attr_name[:-9]
elif attr_name.endswith("""TokenizerFast"""):
a__ : Tuple = fast_tokenizers
a__ : str = attr_name[:-13]
elif _re_tf_models.match(UpperCAmelCase_) is not None:
a__ : Tuple = tf_models
a__ : List[str] = _re_tf_models.match(UpperCAmelCase_).groups()[0]
elif _re_flax_models.match(UpperCAmelCase_) is not None:
a__ : Optional[int] = flax_models
a__ : Optional[int] = _re_flax_models.match(UpperCAmelCase_).groups()[0]
elif _re_pt_models.match(UpperCAmelCase_) is not None:
a__ : Dict = pt_models
a__ : int = _re_pt_models.match(UpperCAmelCase_).groups()[0]
if lookup_dict is not None:
while len(UpperCAmelCase_) > 0:
if attr_name in model_name_to_prefix.values():
a__ : Any = True
break
# Try again after removing the last word in the name
a__ : Optional[Any] = ''.join(camel_case_split(UpperCAmelCase_)[:-1])
# Let's build that table!
a__ : Any = list(model_name_to_config.keys())
model_names.sort(key=str.lower)
a__ : int = ['Model', 'Tokenizer slow', 'Tokenizer fast', 'PyTorch support', 'TensorFlow support', 'Flax Support']
# We'll need widths to properly display everything in the center (+2 is to leave one extra space on each side).
a__ : Tuple = [len(UpperCAmelCase_) + 2 for c in columns]
a__ : Any = max([len(UpperCAmelCase_) for name in model_names]) + 2
# Build the table per se
a__ : Dict = '|' + '|'.join([_center_text(UpperCAmelCase_ , UpperCAmelCase_) for c, w in zip(UpperCAmelCase_ , UpperCAmelCase_)]) + '|\n'
    # Use ":-----:" format to center-align table cell texts
table += "|" + "|".join([""":""" + """-""" * (w - 2) + """:""" for w in widths]) + "|\n"
a__ : Any = {True: '✅', False: '❌'}
for name in model_names:
a__ : Optional[int] = model_name_to_prefix[name]
a__ : Optional[Any] = [
name,
check[slow_tokenizers[prefix]],
check[fast_tokenizers[prefix]],
check[pt_models[prefix]],
check[tf_models[prefix]],
check[flax_models[prefix]],
]
table += "|" + "|".join([_center_text(UpperCAmelCase_ , UpperCAmelCase_) for l, w in zip(UpperCAmelCase_ , UpperCAmelCase_)]) + "|\n"
return table
def lowerCAmelCase_ ( _lowercase : Tuple=False) -> str:
"""simple docstring"""
a__ : Any = _find_text_in_file(
filename=os.path.join(UpperCAmelCase_ , """index.md""") , start_prompt="""<!--This table is updated automatically from the auto modules""" , end_prompt="""<!-- End table-->""" , )
a__ : Dict = get_model_table_from_auto_modules()
if current_table != new_table:
if overwrite:
with open(os.path.join(UpperCAmelCase_ , """index.md""") , """w""" , encoding="""utf-8""" , newline="""\n""") as f:
f.writelines(lines[:start_index] + [new_table] + lines[end_index:])
else:
raise ValueError(
"""The model table in the `index.md` has not been updated. Run `make fix-copies` to fix this.""")
if __name__ == "__main__":
_lowercase : str =argparse.ArgumentParser()
parser.add_argument("--fix_and_overwrite", action="store_true", help="Whether to fix inconsistencies.")
_lowercase : str =parser.parse_args()
check_model_table(args.fix_and_overwrite)
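# Quick checks of two helpers above, referenced by their conceptual names
# (`camel_case_split` and `_center_text`; identifiers in this dump are
# placeholder-renamed). The script itself is run from the repo root as
# `python utils/check_table.py [--fix_and_overwrite]`.
print(camel_case_split("TFConvBertForMaskedLM"))  # ['TF', 'Conv', 'Bert', 'For', 'Masked', 'LM']
print("|" + _center_text("PyTorch support", 20) + "|")  # text centered in a 20-wide cell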
| 170 |
'''simple docstring'''
from __future__ import annotations
def UpperCamelCase( UpperCAmelCase_ , UpperCAmelCase_ ):
if len(UpperCAmelCase_ ) < k or k < 0:
raise ValueError('Invalid Input' )
UpperCAmelCase : Tuple = sum(array[:k] )
for i in range(len(UpperCAmelCase_ ) - k ):
UpperCAmelCase : Optional[Any] = current_sum - array[i] + array[i + k]
UpperCAmelCase : List[Any] = max(UpperCAmelCase_ , UpperCAmelCase_ )
return max_sum
if __name__ == "__main__":
from doctest import testmod
from random import randint
testmod()
lowercase__ = [randint(-1000, 1000) for i in range(100)]
lowercase__ = randint(0, 110)
print(f'''The maximum sum of {k} consecutive elements is {max_sum_in_array(array,k)}''')
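# Deterministic worked example for the sliding-window maximum above: among all
# windows of length 4, [4, 2, 10, 23] has the largest sum.
print(max_sum_in_array([1, 4, 2, 10, 23, 3, 1, 0, 20], 4))  # 39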
| 151 | 0 |
def snake_case (__lowercase , __lowercase ) -> Any:
'''simple docstring'''
return int(input_a == input_a == 0 )
def snake_case () -> Any:
'''simple docstring'''
print("Truth Table of NOR Gate:" )
print("| Input 1 | Input 2 | Output |" )
print(F"""| 0 | 0 | {nor_gate(0 , 0 )} |""" )
print(F"""| 0 | 1 | {nor_gate(0 , 1 )} |""" )
print(F"""| 1 | 0 | {nor_gate(1 , 0 )} |""" )
print(F"""| 1 | 1 | {nor_gate(1 , 1 )} |""" )
if __name__ == "__main__":
import doctest
doctest.testmod()
    main()
| 353 |
from __future__ import annotations
import requests
__SCREAMING_SNAKE_CASE : Tuple = set(
'approved_at_utc approved_by author_flair_background_color\nauthor_flair_css_class author_flair_richtext author_flair_template_id author_fullname\nauthor_premium can_mod_post category clicked content_categories created_utc downs\nedited gilded gildings hidden hide_score is_created_from_ads_ui is_meta\nis_original_content is_reddit_media_domain is_video link_flair_css_class\nlink_flair_richtext link_flair_text link_flair_text_color media_embed mod_reason_title\nname permalink pwls quarantine saved score secure_media secure_media_embed selftext\nsubreddit subreddit_name_prefixed subreddit_type thumbnail title top_awarded_type\ntotal_awards_received ups upvote_ratio url user_reports'.split()
)
def snake_case (__lowercase , __lowercase = 1 , __lowercase = "new" , __lowercase = None ) -> dict:
'''simple docstring'''
_snake_case : Union[str, Any] = wanted_data or []
if invalid_search_terms := ", ".join(sorted(set(__lowercase ) - valid_terms ) ):
_snake_case : List[str] = F"""Invalid search term: {invalid_search_terms}"""
raise ValueError(__lowercase )
_snake_case : Any = requests.get(
F"""https://reddit.com/r/{subreddit}/{age}.json?limit={limit}""" , headers={"User-agent": "A random string"} , )
if response.status_code == 429:
raise requests.HTTPError
_snake_case : Optional[int] = response.json()
if not wanted_data:
return {id_: data["data"]["children"][id_] for id_ in range(__lowercase )}
_snake_case : Union[str, Any] = {}
for id_ in range(__lowercase ):
_snake_case : Dict = {
item: data["data"]["children"][id_]["data"][item] for item in wanted_data
}
return data_dict
if __name__ == "__main__":
# If you get Error 429, that means you are rate limited.Try after some time
    print(get_subreddit_data('learnpython', wanted_data=['title', 'url', 'selftext']))
| 284 | 0 |
import math
from collections import defaultdict
from typing import List, Optional, Tuple, Union
import numpy as np
import torch
from ..configuration_utils import ConfigMixin, register_to_config
from .scheduling_utils import KarrasDiffusionSchedulers, SchedulerMixin, SchedulerOutput
def __SCREAMING_SNAKE_CASE ( num_diffusion_timesteps : int , max_beta : float = 0.999 , alpha_transform_type : str = "cosine" , ) -> torch.Tensor:
    if alpha_transform_type == "cosine":

        def alpha_bar_fn(t : float ):
            return math.cos((t + 0.008) / 1.008 * math.pi / 2 ) ** 2

    elif alpha_transform_type == "exp":

        def alpha_bar_fn(t : float ):
            return math.exp(t * -12.0 )

    else:
        raise ValueError(F"""Unsupported alpha_transform_type: {alpha_transform_type}""" )
    betas = []
    for i in range(num_diffusion_timesteps ):
        t1 = i / num_diffusion_timesteps
        t2 = (i + 1) / num_diffusion_timesteps
        betas.append(min(1 - alpha_bar_fn(t2 ) / alpha_bar_fn(t1 ) , max_beta ) )
    return torch.tensor(betas , dtype=torch.float32 )
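# Quick sanity check of the helper above (its parameters are shown with
# restored names; the surrounding identifiers in this dump are
# placeholder-renamed): under the cosine schedule, betas grow toward the end
# of the diffusion process and are capped at max_beta.
betas = __SCREAMING_SNAKE_CASE(10, max_beta=0.999)
print(betas[0].item() < betas[-1].item())  # True; betas[-1] hits the 0.999 cap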
class A__ ( lowerCAmelCase__ , lowerCAmelCase__ ):
lowerCAmelCase__ : List[Any] = [e.name for e in KarrasDiffusionSchedulers]
lowerCAmelCase__ : int = 2
@register_to_config
def __init__( self : List[str] , _UpperCAmelCase : int = 10_00 , _UpperCAmelCase : float = 0.00_085 , _UpperCAmelCase : float = 0.012 , _UpperCAmelCase : str = "linear" , _UpperCAmelCase : Optional[Union[np.ndarray, List[float]]] = None , _UpperCAmelCase : str = "epsilon" , _UpperCAmelCase : Optional[bool] = False , _UpperCAmelCase : Optional[bool] = False , _UpperCAmelCase : float = 1.0 , _UpperCAmelCase : str = "linspace" , _UpperCAmelCase : int = 0 , ) -> str:
"""simple docstring"""
if trained_betas is not None:
__lowercase = torch.tensor(_UpperCAmelCase , dtype=torch.floataa )
elif beta_schedule == "linear":
__lowercase = torch.linspace(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , dtype=torch.floataa )
elif beta_schedule == "scaled_linear":
# this schedule is very specific to the latent diffusion model.
__lowercase = (
torch.linspace(beta_start**0.5 , beta_end**0.5 , _UpperCAmelCase , dtype=torch.floataa ) ** 2
)
elif beta_schedule == "squaredcos_cap_v2":
# Glide cosine schedule
__lowercase = betas_for_alpha_bar(_UpperCAmelCase , alpha_transform_type='cosine' )
elif beta_schedule == "exp":
__lowercase = betas_for_alpha_bar(_UpperCAmelCase , alpha_transform_type='exp' )
else:
            raise NotImplementedError(f"""{beta_schedule} is not implemented for {self.__class__}""" )
__lowercase = 1.0 - self.betas
__lowercase = torch.cumprod(self.alphas , dim=0 )
# set all values
self.set_timesteps(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase )
__lowercase = use_karras_sigmas
def a__ ( self : List[Any] , _UpperCAmelCase : Any , _UpperCAmelCase : Any=None ) -> Any:
"""simple docstring"""
if schedule_timesteps is None:
__lowercase = self.timesteps
__lowercase = (schedule_timesteps == timestep).nonzero()
# The sigma index that is taken for the **very** first `step`
# is always the second index (or the last index if there is only 1)
# This way we can ensure we don't accidentally skip a sigma in
# case we start in the middle of the denoising schedule (e.g. for image-to-image)
if len(self._index_counter ) == 0:
__lowercase = 1 if len(_UpperCAmelCase ) > 1 else 0
else:
__lowercase = timestep.cpu().item() if torch.is_tensor(_UpperCAmelCase ) else timestep
__lowercase = self._index_counter[timestep_int]
return indices[pos].item()
@property
def a__ ( self : Optional[int] ) -> Any:
"""simple docstring"""
if self.config.timestep_spacing in ["linspace", "trailing"]:
return self.sigmas.max()
return (self.sigmas.max() ** 2 + 1) ** 0.5
def a__ ( self : Optional[int] , _UpperCAmelCase : torch.FloatTensor , _UpperCAmelCase : Union[float, torch.FloatTensor] , ) -> torch.FloatTensor:
"""simple docstring"""
__lowercase = self.index_for_timestep(_UpperCAmelCase )
__lowercase = self.sigmas[step_index]
__lowercase = sample / ((sigma**2 + 1) ** 0.5)
return sample
def a__ ( self : Union[str, Any] , _UpperCAmelCase : int , _UpperCAmelCase : Union[str, torch.device] = None , _UpperCAmelCase : Optional[int] = None , ) -> Optional[int]:
"""simple docstring"""
__lowercase = num_inference_steps
__lowercase = num_train_timesteps or self.config.num_train_timesteps
# "linspace", "leading", "trailing" corresponds to annotation of Table 2. of https://arxiv.org/abs/2305.08891
if self.config.timestep_spacing == "linspace":
__lowercase = np.linspace(0 , num_train_timesteps - 1 , _UpperCAmelCase , dtype=_UpperCAmelCase )[::-1].copy()
elif self.config.timestep_spacing == "leading":
__lowercase = num_train_timesteps // self.num_inference_steps
# creates integer timesteps by multiplying by ratio
# casting to int to avoid issues when num_inference_step is power of 3
__lowercase = (np.arange(0 , _UpperCAmelCase ) * step_ratio).round()[::-1].copy().astype(_UpperCAmelCase )
timesteps += self.config.steps_offset
elif self.config.timestep_spacing == "trailing":
__lowercase = num_train_timesteps / self.num_inference_steps
# creates integer timesteps by multiplying by ratio
# casting to int to avoid issues when num_inference_step is power of 3
__lowercase = (np.arange(_UpperCAmelCase , 0 , -step_ratio )).round().copy().astype(_UpperCAmelCase )
timesteps -= 1
else:
raise ValueError(
f"""{self.config.timestep_spacing} is not supported. Please make sure to choose one of 'linspace', 'leading' or 'trailing'.""" )
__lowercase = np.array(((1 - self.alphas_cumprod) / self.alphas_cumprod) ** 0.5 )
__lowercase = np.log(_UpperCAmelCase )
__lowercase = np.interp(_UpperCAmelCase , np.arange(0 , len(_UpperCAmelCase ) ) , _UpperCAmelCase )
if self.config.use_karras_sigmas:
__lowercase = self._convert_to_karras(in_sigmas=_UpperCAmelCase , num_inference_steps=self.num_inference_steps )
__lowercase = np.array([self._sigma_to_t(_UpperCAmelCase , _UpperCAmelCase ) for sigma in sigmas] )
        __lowercase = np.concatenate([sigmas, [0.0]] ).astype(np.float32 )
__lowercase = torch.from_numpy(_UpperCAmelCase ).to(device=_UpperCAmelCase )
__lowercase = torch.cat([sigmas[:1], sigmas[1:-1].repeat_interleave(2 ), sigmas[-1:]] )
__lowercase = torch.from_numpy(_UpperCAmelCase )
__lowercase = torch.cat([timesteps[:1], timesteps[1:].repeat_interleave(2 )] )
if str(_UpperCAmelCase ).startswith('mps' ):
# mps does not support float64
            __lowercase = timesteps.to(_UpperCAmelCase , dtype=torch.float32 )
else:
__lowercase = timesteps.to(device=_UpperCAmelCase )
# empty dt and derivative
__lowercase = None
__lowercase = None
# for exp beta schedules, such as the one for `pipeline_shap_e.py`
# we need an index counter
__lowercase = defaultdict(_UpperCAmelCase )
def a__ ( self : str , _UpperCAmelCase : List[Any] , _UpperCAmelCase : str ) -> List[str]:
"""simple docstring"""
__lowercase = np.log(_UpperCAmelCase )
# get distribution
__lowercase = log_sigma - log_sigmas[:, np.newaxis]
# get sigmas range
__lowercase = np.cumsum((dists >= 0) , axis=0 ).argmax(axis=0 ).clip(max=log_sigmas.shape[0] - 2 )
__lowercase = low_idx + 1
__lowercase = log_sigmas[low_idx]
__lowercase = log_sigmas[high_idx]
# interpolate sigmas
__lowercase = (low - log_sigma) / (low - high)
__lowercase = np.clip(_UpperCAmelCase , 0 , 1 )
# transform interpolation to time range
__lowercase = (1 - w) * low_idx + w * high_idx
__lowercase = t.reshape(sigma.shape )
return t
def a__ ( self : Any , _UpperCAmelCase : torch.FloatTensor , _UpperCAmelCase : int ) -> torch.FloatTensor:
"""simple docstring"""
__lowercase = in_sigmas[-1].item()
__lowercase = in_sigmas[0].item()
__lowercase = 7.0 # 7.0 is the value used in the paper
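        # Karras et al. (2022): sigma_i = (sigma_max^(1/rho) + i/(N-1) *
        # (sigma_min^(1/rho) - sigma_max^(1/rho)))^rho, which spaces the noise
        # levels densely near sigma_min and sparsely near sigma_max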
__lowercase = np.linspace(0 , 1 , _UpperCAmelCase )
__lowercase = sigma_min ** (1 / rho)
__lowercase = sigma_max ** (1 / rho)
__lowercase = (max_inv_rho + ramp * (min_inv_rho - max_inv_rho)) ** rho
return sigmas
@property
def a__ ( self : Union[str, Any] ) -> Union[str, Any]:
"""simple docstring"""
return self.dt is None
def a__ ( self : List[str] , _UpperCAmelCase : Union[torch.FloatTensor, np.ndarray] , _UpperCAmelCase : Union[float, torch.FloatTensor] , _UpperCAmelCase : Union[torch.FloatTensor, np.ndarray] , _UpperCAmelCase : bool = True , ) -> Union[SchedulerOutput, Tuple]:
"""simple docstring"""
__lowercase = self.index_for_timestep(_UpperCAmelCase )
# advance index counter by 1
__lowercase = timestep.cpu().item() if torch.is_tensor(_UpperCAmelCase ) else timestep
self._index_counter[timestep_int] += 1
if self.state_in_first_order:
__lowercase = self.sigmas[step_index]
__lowercase = self.sigmas[step_index + 1]
else:
# 2nd order / Heun's method
__lowercase = self.sigmas[step_index - 1]
__lowercase = self.sigmas[step_index]
# currently only gamma=0 is supported. This usually works best anyways.
# We can support gamma in the future but then need to scale the timestep before
# passing it to the model which requires a change in API
__lowercase = 0
__lowercase = sigma * (gamma + 1) # Note: sigma_hat == sigma for now
# 1. compute predicted original sample (x_0) from sigma-scaled predicted noise
if self.config.prediction_type == "epsilon":
__lowercase = sigma_hat if self.state_in_first_order else sigma_next
__lowercase = sample - sigma_input * model_output
elif self.config.prediction_type == "v_prediction":
__lowercase = sigma_hat if self.state_in_first_order else sigma_next
__lowercase = model_output * (-sigma_input / (sigma_input**2 + 1) ** 0.5) + (
sample / (sigma_input**2 + 1)
)
elif self.config.prediction_type == "sample":
__lowercase = model_output
else:
            raise ValueError(
                f"""prediction_type given as {self.config.prediction_type} must be one of `epsilon`, `sample`, or `v_prediction`""" )
if self.config.clip_sample:
__lowercase = pred_original_sample.clamp(
-self.config.clip_sample_range , self.config.clip_sample_range )
if self.state_in_first_order:
# 2. Convert to an ODE derivative for 1st order
__lowercase = (sample - pred_original_sample) / sigma_hat
# 3. delta timestep
__lowercase = sigma_next - sigma_hat
# store for 2nd order step
__lowercase = derivative
__lowercase = dt
__lowercase = sample
else:
# 2. 2nd order / Heun's method
__lowercase = (sample - pred_original_sample) / sigma_next
__lowercase = (self.prev_derivative + derivative) / 2
# 3. take prev timestep & sample
__lowercase = self.dt
__lowercase = self.sample
# free dt and derivative
# Note, this puts the scheduler in "first order mode"
__lowercase = None
__lowercase = None
__lowercase = None
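        # shared update x_prev = x + derivative * dt: a plain Euler step on the first pass;
        # on the second pass `derivative` is the averaged slope, i.e. Heun's 2nd-order correction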
__lowercase = sample + derivative * dt
if not return_dict:
return (prev_sample,)
return SchedulerOutput(prev_sample=_UpperCAmelCase )
def a__ ( self : List[Any] , _UpperCAmelCase : torch.FloatTensor , _UpperCAmelCase : torch.FloatTensor , _UpperCAmelCase : torch.FloatTensor , ) -> torch.FloatTensor:
"""simple docstring"""
__lowercase = self.sigmas.to(device=original_samples.device , dtype=original_samples.dtype )
if original_samples.device.type == "mps" and torch.is_floating_point(_UpperCAmelCase ):
# mps does not support float64
            __lowercase = self.timesteps.to(original_samples.device , dtype=torch.float32 )
            __lowercase = timesteps.to(original_samples.device , dtype=torch.float32 )
else:
__lowercase = self.timesteps.to(original_samples.device )
__lowercase = timesteps.to(original_samples.device )
__lowercase = [self.index_for_timestep(_UpperCAmelCase , _UpperCAmelCase ) for t in timesteps]
__lowercase = sigmas[step_indices].flatten()
while len(sigma.shape ) < len(original_samples.shape ):
__lowercase = sigma.unsqueeze(-1 )
__lowercase = original_samples + noise * sigma
return noisy_samples
def __len__( self : int ) -> List[Any]:
"""simple docstring"""
return self.config.num_train_timesteps
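
# Minimal usage sketch (illustrative only; `model` is a hypothetical denoiser, and the
# method names follow the diffusers scheduler API that this class implements):
#
#     scheduler.set_timesteps(25)
#     sample = torch.randn(shape) * scheduler.init_noise_sigma
#     for t in scheduler.timesteps:
#         model_input = scheduler.scale_model_input(sample, t)
#         noise_pred = model(model_input, t)
#         sample = scheduler.step(noise_pred, t, sample).prev_sample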
| 325 |
import argparse
import os
import transformers
from .convert_slow_tokenizer import SLOW_TO_FAST_CONVERTERS
from .utils import logging
logging.set_verbosity_info()
SCREAMING_SNAKE_CASE__ = logging.get_logger(__name__)
SCREAMING_SNAKE_CASE__ = {name: getattr(transformers, name + """Fast""") for name in SLOW_TO_FAST_CONVERTERS}
def __SCREAMING_SNAKE_CASE ( SCREAMING_SNAKE_CASE : List[Any] , SCREAMING_SNAKE_CASE : Optional[int] , SCREAMING_SNAKE_CASE : List[Any] , SCREAMING_SNAKE_CASE : Union[str, Any] ) -> List[str]:
if tokenizer_name is not None and tokenizer_name not in TOKENIZER_CLASSES:
raise ValueError(F"""Unrecognized tokenizer name, should be one of {list(TOKENIZER_CLASSES.keys() )}.""" )
if tokenizer_name is None:
__lowercase = TOKENIZER_CLASSES
else:
__lowercase = {tokenizer_name: getattr(SCREAMING_SNAKE_CASE , tokenizer_name + 'Fast' )}
logger.info(F"""Loading tokenizer classes: {tokenizer_names}""" )
for tokenizer_name in tokenizer_names:
__lowercase = TOKENIZER_CLASSES[tokenizer_name]
__lowercase = True
if checkpoint_name is None:
__lowercase = list(tokenizer_class.max_model_input_sizes.keys() )
else:
__lowercase = [checkpoint_name]
logger.info(F"""For tokenizer {tokenizer_class.__class__.__name__} loading checkpoints: {checkpoint_names}""" )
for checkpoint in checkpoint_names:
logger.info(F"""Loading {tokenizer_class.__class__.__name__} {checkpoint}""" )
# Load tokenizer
__lowercase = tokenizer_class.from_pretrained(SCREAMING_SNAKE_CASE , force_download=SCREAMING_SNAKE_CASE )
# Save fast tokenizer
logger.info(F"""Save fast tokenizer to {dump_path} with prefix {checkpoint} add_prefix {add_prefix}""" )
# For organization names we create sub-directories
if "/" in checkpoint:
__lowercase , __lowercase = checkpoint.split('/' )
__lowercase = os.path.join(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
elif add_prefix:
__lowercase = checkpoint
__lowercase = dump_path
else:
__lowercase = None
__lowercase = dump_path
logger.info(F"""=> {dump_path_full} with prefix {checkpoint_prefix_name}, add_prefix {add_prefix}""" )
if checkpoint in list(tokenizer.pretrained_vocab_files_map.values() )[0]:
__lowercase = list(tokenizer.pretrained_vocab_files_map.values() )[0][checkpoint]
__lowercase = file_path.split(SCREAMING_SNAKE_CASE )[-1][0]
if next_char == "/":
__lowercase = os.path.join(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
__lowercase = None
logger.info(F"""=> {dump_path_full} with prefix {checkpoint_prefix_name}, add_prefix {add_prefix}""" )
__lowercase = tokenizer.save_pretrained(
SCREAMING_SNAKE_CASE , legacy_format=SCREAMING_SNAKE_CASE , filename_prefix=SCREAMING_SNAKE_CASE )
logger.info(F"""=> File names {file_names}""" )
for file_name in file_names:
if not file_name.endswith('tokenizer.json' ):
os.remove(SCREAMING_SNAKE_CASE )
logger.info(F"""=> removing {file_name}""" )
if __name__ == "__main__":
SCREAMING_SNAKE_CASE__ = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--dump_path""", default=None, type=str, required=True, help="""Path to output generated fast tokenizer files."""
)
parser.add_argument(
"""--tokenizer_name""",
default=None,
type=str,
help=(
F'''Optional tokenizer type selected in the list of {list(TOKENIZER_CLASSES.keys())}. If not given, will '''
"""download and convert all the checkpoints from AWS."""
),
)
parser.add_argument(
"""--checkpoint_name""",
default=None,
type=str,
help="""Optional checkpoint name. If not given, will download and convert the canonical checkpoints from AWS.""",
)
parser.add_argument(
"""--force_download""",
action="""store_true""",
help="""Re-download checkpoints.""",
)
SCREAMING_SNAKE_CASE__ = parser.parse_args()
convert_slow_checkpoint_to_fast(args.tokenizer_name, args.checkpoint_name, args.dump_path, args.force_download)
| 325 | 1 |
from abc import ABC, abstractmethod
from typing import List, Optional
class snake_case__(_UpperCamelCase ):
"""simple docstring"""
def __init__( self : List[str] ):
# test for the above condition
self.test()
def snake_case ( self : Union[str, Any] ):
lowercase__ : List[Any] = 0
lowercase__ : str = False
while not completed:
if counter == 1:
self.reset()
lowercase__ : List[Any] = self.advance()
if not self.does_advance(SCREAMING_SNAKE_CASE ):
raise Exception(
"Custom Constraint is not defined correctly. self.does_advance(self.advance()) must be true." )
lowercase__ , lowercase__ , lowercase__ : str = self.update(SCREAMING_SNAKE_CASE )
counter += 1
if counter > 10_000:
raise Exception("update() does not fulfill the constraint." )
if self.remaining() != 0:
raise Exception("Custom Constraint is not defined correctly." )
@abstractmethod
def snake_case ( self : Any ):
raise NotImplementedError(
f"""{self.__class__} is an abstract class. Only classes inheriting this class can be called.""" )
@abstractmethod
def snake_case ( self : Optional[Any] , SCREAMING_SNAKE_CASE : int ):
raise NotImplementedError(
f"""{self.__class__} is an abstract class. Only classes inheriting this class can be called.""" )
@abstractmethod
def snake_case ( self : Tuple , SCREAMING_SNAKE_CASE : int ):
raise NotImplementedError(
f"""{self.__class__} is an abstract class. Only classes inheriting this class can be called.""" )
@abstractmethod
def snake_case ( self : Dict ):
raise NotImplementedError(
f"""{self.__class__} is an abstract class. Only classes inheriting this class can be called.""" )
@abstractmethod
def snake_case ( self : List[Any] ):
raise NotImplementedError(
f"""{self.__class__} is an abstract class. Only classes inheriting this class can be called.""" )
@abstractmethod
def snake_case ( self : List[str] , SCREAMING_SNAKE_CASE : Tuple=False ):
raise NotImplementedError(
f"""{self.__class__} is an abstract class. Only classes inheriting this class can be called.""" )
class snake_case__(_UpperCamelCase ):
"""simple docstring"""
def __init__( self : Optional[Any] , SCREAMING_SNAKE_CASE : List[int] ):
super(SCREAMING_SNAKE_CASE , self ).__init__()
if not isinstance(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) or len(SCREAMING_SNAKE_CASE ) == 0:
raise ValueError(f"""`token_ids` has to be a non-empty list, but is {token_ids}.""" )
if any((not isinstance(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) or token_id < 0) for token_id in token_ids ):
raise ValueError(f"""Each list in `token_ids` has to be a list of positive integers, but is {token_ids}.""" )
lowercase__ : Optional[Any] = token_ids
lowercase__ : Tuple = len(self.token_ids )
lowercase__ : Dict = -1 # the index of the currently fulfilled step
lowercase__ : List[str] = False
def snake_case ( self : int ):
if self.completed:
return None
return self.token_ids[self.fulfilled_idx + 1]
def snake_case ( self : Dict , SCREAMING_SNAKE_CASE : int ):
if not isinstance(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ):
raise ValueError(f"""`token_id` has to be an `int`, but is {token_id} of type {type(SCREAMING_SNAKE_CASE )}""" )
if self.completed:
return False
return token_id == self.token_ids[self.fulfilled_idx + 1]
def snake_case ( self : Union[str, Any] , SCREAMING_SNAKE_CASE : int ):
if not isinstance(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ):
raise ValueError(f"""`token_id` has to be an `int`, but is {token_id} of type {type(SCREAMING_SNAKE_CASE )}""" )
lowercase__ : int = False
lowercase__ : Optional[Any] = False
lowercase__ : Union[str, Any] = False
if self.does_advance(SCREAMING_SNAKE_CASE ):
self.fulfilled_idx += 1
lowercase__ : Tuple = True
if self.fulfilled_idx == (self.seqlen - 1):
lowercase__ : Union[str, Any] = True
lowercase__ : Optional[Any] = completed
else:
# failed to make progress.
lowercase__ : List[Any] = True
self.reset()
return stepped, completed, reset
def snake_case ( self : Optional[int] ):
lowercase__ : Any = False
lowercase__ : List[str] = 0
def snake_case ( self : int ):
return self.seqlen - (self.fulfilled_idx + 1)
def snake_case ( self : str , SCREAMING_SNAKE_CASE : Any=False ):
lowercase__ : List[str] = PhrasalConstraint(self.token_ids )
if stateful:
lowercase__ : List[Any] = self.seqlen
lowercase__ : Dict = self.fulfilled_idx
lowercase__ : str = self.completed
return new_constraint
class snake_case__:
"""simple docstring"""
def __init__( self : Tuple , SCREAMING_SNAKE_CASE : List[List[int]] , SCREAMING_SNAKE_CASE : Optional[Any]=True ):
lowercase__ : Dict = max([len(SCREAMING_SNAKE_CASE ) for one in nested_token_ids] )
lowercase__ : str = {}
for token_ids in nested_token_ids:
lowercase__ : Union[str, Any] = root
for tidx, token_id in enumerate(SCREAMING_SNAKE_CASE ):
if token_id not in level:
lowercase__ : str = {}
lowercase__ : Optional[Any] = level[token_id]
if no_subsets and self.has_subsets(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ):
raise ValueError(
"Each list in `nested_token_ids` can't be a complete subset of another list, but is"
f""" {nested_token_ids}.""" )
lowercase__ : Any = root
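    # Illustrative example: nested_token_ids = [[1, 2, 3], [1, 2, 4]] builds the trie
    # {1: {2: {3: {}, 4: {}}}}; walking [1, 2] yields the candidates [3, 4], and a leaf
    # is reached once either full branch has been consumed.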
def snake_case ( self : Any , SCREAMING_SNAKE_CASE : int ):
lowercase__ : str = self.trie
for current_token in current_seq:
lowercase__ : Optional[Any] = start[current_token]
lowercase__ : Optional[Any] = list(start.keys() )
return next_tokens
def snake_case ( self : Tuple , SCREAMING_SNAKE_CASE : Any ):
lowercase__ : Optional[Any] = self.next_tokens(SCREAMING_SNAKE_CASE )
return len(SCREAMING_SNAKE_CASE ) == 0
def snake_case ( self : Tuple , SCREAMING_SNAKE_CASE : List[str] ):
lowercase__ : Optional[Any] = list(root.values() )
if len(SCREAMING_SNAKE_CASE ) == 0:
return 1
else:
return sum([self.count_leaves(SCREAMING_SNAKE_CASE ) for nn in next_nodes] )
def snake_case ( self : Any , SCREAMING_SNAKE_CASE : int , SCREAMING_SNAKE_CASE : int ):
lowercase__ : List[str] = self.count_leaves(SCREAMING_SNAKE_CASE )
return len(SCREAMING_SNAKE_CASE ) != leaf_count
class snake_case__(_UpperCamelCase ):
"""simple docstring"""
def __init__( self : Tuple , SCREAMING_SNAKE_CASE : List[List[int]] ):
super(SCREAMING_SNAKE_CASE , self ).__init__()
if not isinstance(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) or len(SCREAMING_SNAKE_CASE ) == 0:
raise ValueError(f"""`nested_token_ids` has to be a non-empty list, but is {nested_token_ids}.""" )
if any(not isinstance(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) for token_ids in nested_token_ids ):
raise ValueError(f"""`nested_token_ids` has to be a list of lists, but is {nested_token_ids}.""" )
if any(
any((not isinstance(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) or token_id < 0) for token_id in token_ids )
for token_ids in nested_token_ids ):
raise ValueError(
f"""Each list in `nested_token_ids` has to be a list of positive integers, but is {nested_token_ids}.""" )
lowercase__ : int = DisjunctiveTrie(SCREAMING_SNAKE_CASE )
lowercase__ : Optional[Any] = nested_token_ids
lowercase__ : Tuple = self.trie.max_height
lowercase__ : Tuple = []
lowercase__ : List[str] = False
def snake_case ( self : Tuple ):
lowercase__ : int = self.trie.next_tokens(self.current_seq )
if len(SCREAMING_SNAKE_CASE ) == 0:
return None
else:
return token_list
def snake_case ( self : Tuple , SCREAMING_SNAKE_CASE : int ):
if not isinstance(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ):
raise ValueError(f"""`token_id` is supposed to be type `int`, but is {token_id} of type {type(SCREAMING_SNAKE_CASE )}""" )
lowercase__ : Optional[int] = self.trie.next_tokens(self.current_seq )
return token_id in next_tokens
def snake_case ( self : Optional[Any] , SCREAMING_SNAKE_CASE : int ):
if not isinstance(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ):
raise ValueError(f"""`token_id` is supposed to be type `int`, but is {token_id} of type {type(SCREAMING_SNAKE_CASE )}""" )
lowercase__ : List[Any] = False
lowercase__ : Any = False
lowercase__ : Dict = False
if self.does_advance(SCREAMING_SNAKE_CASE ):
self.current_seq.append(SCREAMING_SNAKE_CASE )
lowercase__ : str = True
else:
lowercase__ : str = True
self.reset()
lowercase__ : Optional[int] = self.trie.reached_leaf(self.current_seq )
lowercase__ : Dict = completed
return stepped, completed, reset
def snake_case ( self : Dict ):
lowercase__ : List[str] = False
lowercase__ : Tuple = []
def snake_case ( self : Dict ):
if self.completed:
# since this can be completed without reaching max height
return 0
else:
return self.seqlen - len(self.current_seq )
def snake_case ( self : Tuple , SCREAMING_SNAKE_CASE : List[Any]=False ):
lowercase__ : Dict = DisjunctiveConstraint(self.token_ids )
if stateful:
lowercase__ : Any = self.seqlen
lowercase__ : Union[str, Any] = self.current_seq
lowercase__ : Dict = self.completed
return new_constraint
class snake_case__:
"""simple docstring"""
def __init__( self : int , SCREAMING_SNAKE_CASE : List[Constraint] ):
lowercase__ : Any = constraints
# max # of steps required to fulfill a given constraint
lowercase__ : List[str] = max([c.seqlen for c in constraints] )
lowercase__ : int = len(SCREAMING_SNAKE_CASE )
lowercase__ : Optional[Any] = False
self.init_state()
def snake_case ( self : Optional[Any] ):
lowercase__ : Optional[int] = []
lowercase__ : Tuple = None
lowercase__ : Optional[int] = [constraint.copy(stateful=SCREAMING_SNAKE_CASE ) for constraint in self.constraints]
def snake_case ( self : Optional[Any] ):
lowercase__ : List[Any] = 0
if self.inprogress_constraint:
# extra points for having a constraint mid-fulfilled
add += self.max_seqlen - self.inprogress_constraint.remaining()
return (len(self.complete_constraints ) * self.max_seqlen) + add
def snake_case ( self : str ):
lowercase__ : Any = []
if self.inprogress_constraint is None:
for constraint in self.pending_constraints: # "pending" == "unfulfilled yet"
lowercase__ : Optional[int] = constraint.advance()
if isinstance(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ):
token_list.append(SCREAMING_SNAKE_CASE )
elif isinstance(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ):
token_list.extend(SCREAMING_SNAKE_CASE )
else:
lowercase__ : Tuple = self.inprogress_constraint.advance()
if isinstance(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ):
token_list.append(SCREAMING_SNAKE_CASE )
elif isinstance(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ):
token_list.extend(SCREAMING_SNAKE_CASE )
if len(SCREAMING_SNAKE_CASE ) == 0:
return None
else:
return token_list
def snake_case ( self : Dict , SCREAMING_SNAKE_CASE : Optional[List[int]] ):
self.init_state()
if token_ids is not None:
for token in token_ids:
# completes or steps **one** constraint
lowercase__ , lowercase__ : Optional[int] = self.add(SCREAMING_SNAKE_CASE )
                # the entire list of constraints is fulfilled
if self.completed:
break
def snake_case ( self : Optional[int] , SCREAMING_SNAKE_CASE : int ):
if not isinstance(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ):
raise ValueError(f"""`token_id` should be an `int`, but is `{token_id}`.""" )
lowercase__ , lowercase__ : int = False, False
if self.completed:
lowercase__ : Optional[int] = True
lowercase__ : List[Any] = False
return complete, stepped
if self.inprogress_constraint is not None:
            # In the middle of fulfilling a constraint. If the `token_id` *does* make incremental progress on the
            # current job, simply update the state
lowercase__ , lowercase__ , lowercase__ : Dict = self.inprogress_constraint.update(SCREAMING_SNAKE_CASE )
if reset:
# 1. If the next token breaks the progress, then we must restart.
# e.g. constraint = "I love pies" and sequence so far is "I love" but `token_id` == "books".
# But that doesn't mean we self.init_state(), since we only reset the state for this particular
# constraint, not the full list of constraints.
self.pending_constraints.append(self.inprogress_constraint.copy(stateful=SCREAMING_SNAKE_CASE ) )
lowercase__ : Tuple = None
if complete:
# 2. If the next token completes the constraint, move it to completed list, set
# inprogress to None. If there are no pending constraints either, then this full list of constraints
# is complete.
self.complete_constraints.append(self.inprogress_constraint )
lowercase__ : Optional[int] = None
if len(self.pending_constraints ) == 0:
# we're done!
lowercase__ : Union[str, Any] = True
else:
            # Not in the middle of fulfilling a constraint. So does this `token_id` help us step towards any of our list
            # of constraints?
for cidx, pending_constraint in enumerate(self.pending_constraints ):
if pending_constraint.does_advance(SCREAMING_SNAKE_CASE ):
lowercase__ , lowercase__ , lowercase__ : Any = pending_constraint.update(SCREAMING_SNAKE_CASE )
if not stepped:
raise Exception(
"`constraint.update(token_id)` is not yielding incremental progress, "
"even though `constraint.does_advance(token_id)` is true." )
if complete:
self.complete_constraints.append(SCREAMING_SNAKE_CASE )
lowercase__ : int = None
if not complete and stepped:
lowercase__ : Dict = pending_constraint
if complete or stepped:
# If we made any progress at all, then it's at least not a "pending constraint".
lowercase__ : str = (
self.pending_constraints[:cidx] + self.pending_constraints[cidx + 1 :]
)
if len(self.pending_constraints ) == 0 and self.inprogress_constraint is None:
# If there's no longer any pending after this and no inprogress either, then we must be
# complete.
lowercase__ : List[str] = True
break # prevent accidentally stepping through multiple constraints with just one token.
return complete, stepped
def snake_case ( self : str , SCREAMING_SNAKE_CASE : Optional[Any]=True ):
        lowercase__ : Dict = ConstraintListState(self.constraints )  # we never actually touch the self.constraints
        # objects throughout this process, so they remain in their initialization state
if stateful:
lowercase__ : Optional[int] = [
constraint.copy(stateful=SCREAMING_SNAKE_CASE ) for constraint in self.complete_constraints
]
if self.inprogress_constraint is not None:
lowercase__ : Dict = self.inprogress_constraint.copy(stateful=SCREAMING_SNAKE_CASE )
lowercase__ : Optional[int] = [constraint.copy() for constraint in self.pending_constraints]
return new_state
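
# Illustrative usage (class names as referenced inside the methods above):
#
#     constraint = PhrasalConstraint([5, 6, 7])      # force the phrase 5 6 7
#     state = ConstraintListState([constraint])
#     state.add(5); state.add(6); state.add(7)       # each call returns (complete, stepped)
#     assert state.completed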
| 121 |
from __future__ import annotations
import math
import random
from collections.abc import Collection
from typing import overload
class snake_case__:
"""simple docstring"""
def __init__( self : Optional[Any] , SCREAMING_SNAKE_CASE : Collection[float] | None = None ):
if components is None:
lowercase__ : List[Any] = []
lowercase__ : str = list(SCREAMING_SNAKE_CASE )
def __len__( self : Tuple ):
return len(self.__components )
def __str__( self : int ):
return "(" + ",".join(map(SCREAMING_SNAKE_CASE , self.__components ) ) + ")"
def __add__( self : List[Any] , SCREAMING_SNAKE_CASE : Vector ):
lowercase__ : Optional[Any] = len(self )
if size == len(SCREAMING_SNAKE_CASE ):
lowercase__ : int = [self.__components[i] + other.component(SCREAMING_SNAKE_CASE ) for i in range(SCREAMING_SNAKE_CASE )]
return Vector(SCREAMING_SNAKE_CASE )
else:
raise Exception("must have the same size" )
def __sub__( self : Any , SCREAMING_SNAKE_CASE : Vector ):
lowercase__ : Any = len(self )
if size == len(SCREAMING_SNAKE_CASE ):
lowercase__ : Any = [self.__components[i] - other.component(SCREAMING_SNAKE_CASE ) for i in range(SCREAMING_SNAKE_CASE )]
return Vector(SCREAMING_SNAKE_CASE )
else: # error case
raise Exception("must have the same size" )
@overload
def __mul__( self : int , SCREAMING_SNAKE_CASE : float ):
...
@overload
def __mul__( self : Tuple , SCREAMING_SNAKE_CASE : Vector ):
...
def __mul__( self : Tuple , SCREAMING_SNAKE_CASE : float | Vector ):
if isinstance(SCREAMING_SNAKE_CASE , (float, int) ):
lowercase__ : Dict = [c * other for c in self.__components]
return Vector(SCREAMING_SNAKE_CASE )
elif isinstance(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) and len(self ) == len(SCREAMING_SNAKE_CASE ):
lowercase__ : List[str] = len(self )
lowercase__ : Union[str, Any] = [self.__components[i] * other.component(SCREAMING_SNAKE_CASE ) for i in range(SCREAMING_SNAKE_CASE )]
return sum(SCREAMING_SNAKE_CASE )
else: # error case
raise Exception("invalid operand!" )
def snake_case ( self : Any ):
return Vector(self.__components )
def snake_case ( self : Optional[int] , SCREAMING_SNAKE_CASE : int ):
if isinstance(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) and -len(self.__components ) <= i < len(self.__components ):
return self.__components[i]
else:
raise Exception("index out of range" )
def snake_case ( self : Optional[int] , SCREAMING_SNAKE_CASE : int , SCREAMING_SNAKE_CASE : float ):
assert -len(self.__components ) <= pos < len(self.__components )
lowercase__ : int = value
def snake_case ( self : Any ):
if len(self.__components ) == 0:
raise Exception("Vector is empty" )
lowercase__ : Union[str, Any] = [c**2 for c in self.__components]
return math.sqrt(sum(SCREAMING_SNAKE_CASE ) )
def snake_case ( self : Tuple , SCREAMING_SNAKE_CASE : Vector , SCREAMING_SNAKE_CASE : bool = False ):
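        # cos(theta) = (self . other) / (|self| * |other|), computed from the dot product
        # and the Euclidean lengths below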
lowercase__ : Optional[Any] = self * other
lowercase__ : Union[str, Any] = self.euclidean_length() * other.euclidean_length()
if deg:
return math.degrees(math.acos(num / den ) )
else:
return math.acos(num / den )
def __lowerCamelCase ( lowerCamelCase__ ):
"""simple docstring"""
assert isinstance(lowerCamelCase__ , lowerCamelCase__ )
return Vector([0] * dimension )
def __lowerCamelCase ( lowerCamelCase__ , lowerCamelCase__ ):
"""simple docstring"""
assert isinstance(lowerCamelCase__ , lowerCamelCase__ ) and (isinstance(lowerCamelCase__ , lowerCamelCase__ ))
lowercase__ : Tuple = [0] * dimension
lowercase__ : Union[str, Any] = 1
return Vector(lowerCamelCase__ )
def __lowerCamelCase ( lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ ):
"""simple docstring"""
assert (
isinstance(lowerCamelCase__ , lowerCamelCase__ )
and isinstance(lowerCamelCase__ , lowerCamelCase__ )
and (isinstance(lowerCamelCase__ , (int, float) ))
)
return x * scalar + y
def __lowerCamelCase ( lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ ):
"""simple docstring"""
random.seed(lowerCamelCase__ )
lowercase__ : Optional[Any] = [random.randint(lowerCamelCase__ , lowerCamelCase__ ) for _ in range(lowerCamelCase__ )]
return Vector(lowerCamelCase__ )
class snake_case__:
"""simple docstring"""
def __init__( self : Optional[Any] , SCREAMING_SNAKE_CASE : list[list[float]] , SCREAMING_SNAKE_CASE : int , SCREAMING_SNAKE_CASE : int ):
lowercase__ : List[str] = matrix
lowercase__ : int = w
lowercase__ : str = h
def __str__( self : int ):
lowercase__ : int = ""
for i in range(self.__height ):
ans += "|"
for j in range(self.__width ):
if j < self.__width - 1:
ans += str(self.__matrix[i][j] ) + ","
else:
ans += str(self.__matrix[i][j] ) + "|\n"
return ans
def __add__( self : str , SCREAMING_SNAKE_CASE : Matrix ):
if self.__width == other.width() and self.__height == other.height():
lowercase__ : Union[str, Any] = []
for i in range(self.__height ):
lowercase__ : int = [
self.__matrix[i][j] + other.component(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
for j in range(self.__width )
]
matrix.append(SCREAMING_SNAKE_CASE )
return Matrix(SCREAMING_SNAKE_CASE , self.__width , self.__height )
else:
raise Exception("matrix must have the same dimension!" )
def __sub__( self : Dict , SCREAMING_SNAKE_CASE : Matrix ):
if self.__width == other.width() and self.__height == other.height():
lowercase__ : Dict = []
for i in range(self.__height ):
lowercase__ : Optional[Any] = [
self.__matrix[i][j] - other.component(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
for j in range(self.__width )
]
matrix.append(SCREAMING_SNAKE_CASE )
return Matrix(SCREAMING_SNAKE_CASE , self.__width , self.__height )
else:
raise Exception("matrices must have the same dimension!" )
@overload
def __mul__( self : Any , SCREAMING_SNAKE_CASE : float ):
...
@overload
def __mul__( self : Tuple , SCREAMING_SNAKE_CASE : Vector ):
...
def __mul__( self : Optional[Any] , SCREAMING_SNAKE_CASE : float | Vector ):
if isinstance(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ): # matrix-vector
if len(SCREAMING_SNAKE_CASE ) == self.__width:
lowercase__ : Any = zero_vector(self.__height )
for i in range(self.__height ):
lowercase__ : Union[str, Any] = [
self.__matrix[i][j] * other.component(SCREAMING_SNAKE_CASE )
for j in range(self.__width )
]
ans.change_component(SCREAMING_SNAKE_CASE , sum(SCREAMING_SNAKE_CASE ) )
return ans
else:
raise Exception(
"vector must have the same size as the "
"number of columns of the matrix!" )
elif isinstance(SCREAMING_SNAKE_CASE , (int, float) ): # matrix-scalar
lowercase__ : str = [
[self.__matrix[i][j] * other for j in range(self.__width )]
for i in range(self.__height )
]
return Matrix(SCREAMING_SNAKE_CASE , self.__width , self.__height )
return None
def snake_case ( self : Union[str, Any] ):
return self.__height
def snake_case ( self : Optional[Any] ):
return self.__width
def snake_case ( self : int , SCREAMING_SNAKE_CASE : int , SCREAMING_SNAKE_CASE : int ):
if 0 <= x < self.__height and 0 <= y < self.__width:
return self.__matrix[x][y]
else:
raise Exception("change_component: indices out of bounds" )
def snake_case ( self : Optional[Any] , SCREAMING_SNAKE_CASE : int , SCREAMING_SNAKE_CASE : int , SCREAMING_SNAKE_CASE : float ):
if 0 <= x < self.__height and 0 <= y < self.__width:
lowercase__ : Union[str, Any] = value
else:
raise Exception("change_component: indices out of bounds" )
def snake_case ( self : int , SCREAMING_SNAKE_CASE : int , SCREAMING_SNAKE_CASE : int ):
if self.__height != self.__width:
raise Exception("Matrix is not square" )
lowercase__ : str = self.__matrix[:x] + self.__matrix[x + 1 :]
for i in range(len(SCREAMING_SNAKE_CASE ) ):
lowercase__ : Optional[int] = minor[i][:y] + minor[i][y + 1 :]
return Matrix(SCREAMING_SNAKE_CASE , self.__width - 1 , self.__height - 1 ).determinant()
def snake_case ( self : Dict , SCREAMING_SNAKE_CASE : int , SCREAMING_SNAKE_CASE : int ):
if self.__height != self.__width:
raise Exception("Matrix is not square" )
if 0 <= x < self.__height and 0 <= y < self.__width:
return (-1) ** (x + y) * self.minor(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
else:
raise Exception("Indices out of bounds" )
def snake_case ( self : List[str] ):
if self.__height != self.__width:
raise Exception("Matrix is not square" )
if self.__height < 1:
raise Exception("Matrix has no element" )
elif self.__height == 1:
return self.__matrix[0][0]
elif self.__height == 2:
return (
self.__matrix[0][0] * self.__matrix[1][1]
- self.__matrix[0][1] * self.__matrix[1][0]
)
else:
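            # Laplace expansion along the first row: det(A) = sum_y A[0][y] * cofactor(0, y)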
lowercase__ : Dict = [
self.__matrix[0][y] * self.cofactor(0 , SCREAMING_SNAKE_CASE ) for y in range(self.__width )
]
return sum(SCREAMING_SNAKE_CASE )
def __lowerCamelCase ( lowerCamelCase__ ):
"""simple docstring"""
lowercase__ : list[list[float]] = [[0] * n for _ in range(lowerCamelCase__ )]
return Matrix(lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ )
def __lowerCamelCase ( lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ ):
"""simple docstring"""
random.seed(lowerCamelCase__ )
lowercase__ : list[list[float]] = [
[random.randint(lowerCamelCase__ , lowerCamelCase__ ) for _ in range(lowerCamelCase__ )] for _ in range(lowerCamelCase__ )
]
return Matrix(lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ )
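
# Quick illustration (comment-only; names follow the Vector/Matrix references used
# inside the methods above):
#
#     v = Vector([1.0, 2.0, 3.0]); w = Vector([4.0, 5.0, 6.0])
#     v + w                                    # (5.0,7.0,9.0)
#     v * w                                    # 32.0, the dot product
#     Vector([3.0, 4.0]).euclidean_length()    # 5.0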
| 121 | 1 |
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DDIMScheduler,
DPMSolverMultistepScheduler,
TextToVideoSDPipeline,
    UNet3DConditionModel,
)
from diffusers.utils import is_xformers_available, load_numpy, skip_mps, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism
from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_PARAMS
from ..test_pipelines_common import PipelineTesterMixin
enable_full_determinism()
@skip_mps
class _UpperCAmelCase ( A__ ,unittest.TestCase ):
"""simple docstring"""
lowercase__ = TextToVideoSDPipeline
lowercase__ = TEXT_TO_IMAGE_PARAMS
lowercase__ = TEXT_TO_IMAGE_BATCH_PARAMS
# No `output_type`.
lowercase__ = frozenset(
[
"""num_inference_steps""",
"""generator""",
"""latents""",
"""return_dict""",
"""callback""",
"""callback_steps""",
] )
def lowercase__ ( self : str ):
'''simple docstring'''
torch.manual_seed(0 )
        lowercase__ = UNet3DConditionModel(
block_out_channels=(32, 64, 64, 64), layers_per_block=2, sample_size=32, in_channels=4, out_channels=4, down_block_types=('''CrossAttnDownBlock3D''', '''CrossAttnDownBlock3D''', '''CrossAttnDownBlock3D''', '''DownBlock3D'''), up_block_types=('''UpBlock3D''', '''CrossAttnUpBlock3D''', '''CrossAttnUpBlock3D''', '''CrossAttnUpBlock3D'''), cross_attention_dim=32, attention_head_dim=4, )
lowercase__ = DDIMScheduler(
beta_start=0.00085, beta_end=0.012, beta_schedule='''scaled_linear''', clip_sample=lowerCamelCase, set_alpha_to_one=lowerCamelCase, )
torch.manual_seed(0 )
lowercase__ = AutoencoderKL(
block_out_channels=[32, 64], in_channels=3, out_channels=3, down_block_types=['''DownEncoderBlock2D''', '''DownEncoderBlock2D'''], up_block_types=['''UpDecoderBlock2D''', '''UpDecoderBlock2D'''], latent_channels=4, sample_size=128, )
torch.manual_seed(0 )
lowercase__ = CLIPTextConfig(
bos_token_id=0, eos_token_id=2, hidden_size=32, intermediate_size=37, layer_norm_eps=1E-05, num_attention_heads=4, num_hidden_layers=5, pad_token_id=1, vocab_size=1_000, hidden_act='''gelu''', projection_dim=512, )
lowercase__ = CLIPTextModel(lowerCamelCase )
lowercase__ = CLIPTokenizer.from_pretrained('''hf-internal-testing/tiny-random-clip''' )
lowercase__ = {
'''unet''': unet,
'''scheduler''': scheduler,
'''vae''': vae,
'''text_encoder''': text_encoder,
'''tokenizer''': tokenizer,
}
return components
def lowercase__ ( self : int, lowerCamelCase : Union[str, Any], lowerCamelCase : int=0 ):
'''simple docstring'''
if str(lowerCamelCase ).startswith('''mps''' ):
lowercase__ = torch.manual_seed(lowerCamelCase )
else:
lowercase__ = torch.Generator(device=lowerCamelCase ).manual_seed(lowerCamelCase )
lowercase__ = {
'''prompt''': '''A painting of a squirrel eating a burger''',
'''generator''': generator,
'''num_inference_steps''': 2,
'''guidance_scale''': 6.0,
'''output_type''': '''pt''',
}
return inputs
def lowercase__ ( self : Union[str, Any] ):
'''simple docstring'''
lowercase__ = '''cpu''' # ensure determinism for the device-dependent torch.Generator
lowercase__ = self.get_dummy_components()
lowercase__ = TextToVideoSDPipeline(**lowerCamelCase )
lowercase__ = sd_pipe.to(lowerCamelCase )
sd_pipe.set_progress_bar_config(disable=lowerCamelCase )
lowercase__ = self.get_dummy_inputs(lowerCamelCase )
lowercase__ = '''np'''
lowercase__ = sd_pipe(**lowerCamelCase ).frames
lowercase__ = frames[0][-3:, -3:, -1]
assert frames[0].shape == (64, 64, 3)
lowercase__ = np.array([158.0, 160.0, 153.0, 125.0, 100.0, 121.0, 111.0, 93.0, 113.0] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
def lowercase__ ( self : str ):
'''simple docstring'''
self._test_attention_slicing_forward_pass(test_mean_pixel_difference=lowerCamelCase, expected_max_diff=3E-3 )
@unittest.skipIf(
torch_device != '''cuda''' or not is_xformers_available(), reason='''XFormers attention is only available with CUDA and `xformers` installed''', )
def lowercase__ ( self : Optional[int] ):
'''simple docstring'''
self._test_xformers_attention_forwardGenerator_pass(test_mean_pixel_difference=lowerCamelCase, expected_max_diff=1E-2 )
@unittest.skip(reason='''Batching needs to be properly figured out first for this pipeline.''' )
def lowercase__ ( self : Optional[int] ):
'''simple docstring'''
pass
@unittest.skip(reason='''Batching needs to be properly figured out first for this pipeline.''' )
def lowercase__ ( self : Optional[Any] ):
'''simple docstring'''
pass
@unittest.skip(reason='''`num_images_per_prompt` argument is not supported for this pipeline.''' )
def lowercase__ ( self : int ):
'''simple docstring'''
pass
def lowercase__ ( self : List[Any] ):
'''simple docstring'''
return super().test_progress_bar()
@slow
@skip_mps
class _UpperCAmelCase ( unittest.TestCase ):
"""simple docstring"""
def lowercase__ ( self : int ):
'''simple docstring'''
lowercase__ = load_numpy(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/text_to_video/video.npy''' )
lowercase__ = TextToVideoSDPipeline.from_pretrained('''damo-vilab/text-to-video-ms-1.7b''' )
lowercase__ = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config )
lowercase__ = pipe.to('''cuda''' )
lowercase__ = '''Spiderman is surfing'''
lowercase__ = torch.Generator(device='''cpu''' ).manual_seed(0 )
lowercase__ = pipe(lowerCamelCase, generator=lowerCamelCase, num_inference_steps=25, output_type='''pt''' ).frames
lowercase__ = video_frames.cpu().numpy()
assert np.abs(expected_video - video ).mean() < 5E-2
def lowercase__ ( self : int ):
'''simple docstring'''
lowercase__ = load_numpy(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/text_to_video/video_2step.npy''' )
lowercase__ = TextToVideoSDPipeline.from_pretrained('''damo-vilab/text-to-video-ms-1.7b''' )
lowercase__ = pipe.to('''cuda''' )
lowercase__ = '''Spiderman is surfing'''
lowercase__ = torch.Generator(device='''cpu''' ).manual_seed(0 )
lowercase__ = pipe(lowerCamelCase, generator=lowerCamelCase, num_inference_steps=2, output_type='''pt''' ).frames
lowercase__ = video_frames.cpu().numpy()
assert np.abs(expected_video - video ).mean() < 5E-2
| 207 |
import math
import time
from transformers import Trainer, is_torch_tpu_available
from transformers.trainer_utils import PredictionOutput, speed_metrics
if is_torch_tpu_available(check_device=False):
import torch_xla.core.xla_model as xm
import torch_xla.debug.metrics as met
class _UpperCAmelCase ( A__ ):
"""simple docstring"""
def __init__( self : Dict, *lowerCamelCase : Union[str, Any], lowerCamelCase : Union[str, Any]=None, lowerCamelCase : Any=None, **lowerCamelCase : str ):
'''simple docstring'''
super().__init__(*lowerCamelCase, **lowerCamelCase )
lowercase__ = eval_examples
lowercase__ = post_process_function
def lowercase__ ( self : int, lowerCamelCase : str=None, lowerCamelCase : Optional[Any]=None, lowerCamelCase : Union[str, Any]=None, lowerCamelCase : str = "eval" ):
'''simple docstring'''
lowercase__ = self.eval_dataset if eval_dataset is None else eval_dataset
lowercase__ = self.get_eval_dataloader(lowerCamelCase )
lowercase__ = self.eval_examples if eval_examples is None else eval_examples
# Temporarily disable metric computation, we will do it in the loop here.
lowercase__ = self.compute_metrics
lowercase__ = None
lowercase__ = self.prediction_loop if self.args.use_legacy_prediction_loop else self.evaluation_loop
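        # the selected loop returns an EvalLoopOutput with .predictions, .label_ids,
        # .metrics and .num_samples; its metrics are rebuilt below via the custom
        # post_process_function / compute_metrics pair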
lowercase__ = time.time()
try:
lowercase__ = eval_loop(
lowerCamelCase, description='''Evaluation''', prediction_loss_only=True if compute_metrics is None else None, ignore_keys=lowerCamelCase, metric_key_prefix=lowerCamelCase, )
finally:
lowercase__ = compute_metrics
lowercase__ = self.args.eval_batch_size * self.args.world_size
if F"""{metric_key_prefix}_jit_compilation_time""" in output.metrics:
start_time += output.metrics[F"""{metric_key_prefix}_jit_compilation_time"""]
output.metrics.update(
speed_metrics(
lowerCamelCase, lowerCamelCase, num_samples=output.num_samples, num_steps=math.ceil(output.num_samples / total_batch_size ), ) )
if self.post_process_function is not None and self.compute_metrics is not None and self.args.should_save:
# Only the main node write the results by default
lowercase__ = self.post_process_function(lowerCamelCase, lowerCamelCase, output.predictions )
lowercase__ = self.compute_metrics(lowerCamelCase )
# Prefix all keys with metric_key_prefix + '_'
for key in list(metrics.keys() ):
if not key.startswith(F"""{metric_key_prefix}_""" ):
lowercase__ = metrics.pop(lowerCamelCase )
metrics.update(output.metrics )
else:
lowercase__ = output.metrics
if self.args.should_log:
# Only the main node log the results by default
self.log(lowerCamelCase )
if self.args.tpu_metrics_debug or self.args.debug:
# tpu-comment: Logging debug metrics for PyTorch/XLA (compile, execute times, ops, etc.)
xm.master_print(met.metrics_report() )
lowercase__ = self.callback_handler.on_evaluate(self.args, self.state, self.control, lowerCamelCase )
return metrics
def lowercase__ ( self : List[Any], lowerCamelCase : Any, lowerCamelCase : Dict, lowerCamelCase : int=None, lowerCamelCase : str = "test" ):
'''simple docstring'''
lowercase__ = self.get_test_dataloader(lowerCamelCase )
# Temporarily disable metric computation, we will do it in the loop here.
lowercase__ = self.compute_metrics
lowercase__ = None
lowercase__ = self.prediction_loop if self.args.use_legacy_prediction_loop else self.evaluation_loop
lowercase__ = time.time()
try:
lowercase__ = eval_loop(
lowerCamelCase, description='''Prediction''', prediction_loss_only=True if compute_metrics is None else None, ignore_keys=lowerCamelCase, metric_key_prefix=lowerCamelCase, )
finally:
lowercase__ = compute_metrics
lowercase__ = self.args.eval_batch_size * self.args.world_size
if F"""{metric_key_prefix}_jit_compilation_time""" in output.metrics:
start_time += output.metrics[F"""{metric_key_prefix}_jit_compilation_time"""]
output.metrics.update(
speed_metrics(
lowerCamelCase, lowerCamelCase, num_samples=output.num_samples, num_steps=math.ceil(output.num_samples / total_batch_size ), ) )
if self.post_process_function is None or self.compute_metrics is None:
return output
lowercase__ = self.post_process_function(lowerCamelCase, lowerCamelCase, output.predictions, '''predict''' )
lowercase__ = self.compute_metrics(lowerCamelCase )
# Prefix all keys with metric_key_prefix + '_'
for key in list(metrics.keys() ):
if not key.startswith(F"""{metric_key_prefix}_""" ):
lowercase__ = metrics.pop(lowerCamelCase )
metrics.update(output.metrics )
return PredictionOutput(predictions=predictions.predictions, label_ids=predictions.label_ids, metrics=lowerCamelCase )
| 207 | 1 |
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import cached_download, hf_hub_download, hf_hub_url
from PIL import Image
from transformers import DetaConfig, DetaForObjectDetection, DetaImageProcessor, SwinConfig
from transformers.utils import logging
logging.set_verbosity_info()
lowercase_ = logging.get_logger(__name__)
def _snake_case( SCREAMING_SNAKE_CASE__ : Tuple ) -> Union[str, Any]:
'''simple docstring'''
A__ = SwinConfig(
embed_dim=192 , depths=(2, 2, 18, 2) , num_heads=(6, 12, 24, 48) , window_size=12 , out_features=['stage2', 'stage3', 'stage4'] , )
A__ = DetaConfig(
backbone_config=SCREAMING_SNAKE_CASE__ , num_queries=900 , encoder_ffn_dim=2048 , decoder_ffn_dim=2048 , num_feature_levels=5 , assign_first_stage=SCREAMING_SNAKE_CASE__ , with_box_refine=SCREAMING_SNAKE_CASE__ , two_stage=SCREAMING_SNAKE_CASE__ , )
# set labels
A__ = 'huggingface/label-files'
if "o365" in model_name:
A__ = 366
A__ = 'object365-id2label.json'
else:
A__ = 91
A__ = 'coco-detection-id2label.json'
A__ = num_labels
A__ = json.load(open(cached_download(hf_hub_url(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , repo_type='dataset' ) ) , 'r' ) )
A__ = {int(SCREAMING_SNAKE_CASE__ ): v for k, v in idalabel.items()}
A__ = idalabel
A__ = {v: k for k, v in idalabel.items()}
return config
def _snake_case( SCREAMING_SNAKE_CASE__ : str ) -> int:
'''simple docstring'''
A__ = []
# stem
# fmt: off
rename_keys.append(('backbone.0.body.patch_embed.proj.weight', 'model.backbone.model.embeddings.patch_embeddings.projection.weight') )
rename_keys.append(('backbone.0.body.patch_embed.proj.bias', 'model.backbone.model.embeddings.patch_embeddings.projection.bias') )
rename_keys.append(('backbone.0.body.patch_embed.norm.weight', 'model.backbone.model.embeddings.norm.weight') )
rename_keys.append(('backbone.0.body.patch_embed.norm.bias', 'model.backbone.model.embeddings.norm.bias') )
# stages
for i in range(len(config.backbone_config.depths ) ):
for j in range(config.backbone_config.depths[i] ):
rename_keys.append((f'backbone.0.body.layers.{i}.blocks.{j}.norm1.weight', f'model.backbone.model.encoder.layers.{i}.blocks.{j}.layernorm_before.weight') )
rename_keys.append((f'backbone.0.body.layers.{i}.blocks.{j}.norm1.bias', f'model.backbone.model.encoder.layers.{i}.blocks.{j}.layernorm_before.bias') )
rename_keys.append((f'backbone.0.body.layers.{i}.blocks.{j}.attn.relative_position_bias_table', f'model.backbone.model.encoder.layers.{i}.blocks.{j}.attention.self.relative_position_bias_table') )
rename_keys.append((f'backbone.0.body.layers.{i}.blocks.{j}.attn.relative_position_index', f'model.backbone.model.encoder.layers.{i}.blocks.{j}.attention.self.relative_position_index') )
rename_keys.append((f'backbone.0.body.layers.{i}.blocks.{j}.attn.proj.weight', f'model.backbone.model.encoder.layers.{i}.blocks.{j}.attention.output.dense.weight') )
rename_keys.append((f'backbone.0.body.layers.{i}.blocks.{j}.attn.proj.bias', f'model.backbone.model.encoder.layers.{i}.blocks.{j}.attention.output.dense.bias') )
rename_keys.append((f'backbone.0.body.layers.{i}.blocks.{j}.norm2.weight', f'model.backbone.model.encoder.layers.{i}.blocks.{j}.layernorm_after.weight') )
rename_keys.append((f'backbone.0.body.layers.{i}.blocks.{j}.norm2.bias', f'model.backbone.model.encoder.layers.{i}.blocks.{j}.layernorm_after.bias') )
rename_keys.append((f'backbone.0.body.layers.{i}.blocks.{j}.mlp.fc1.weight', f'model.backbone.model.encoder.layers.{i}.blocks.{j}.intermediate.dense.weight') )
rename_keys.append((f'backbone.0.body.layers.{i}.blocks.{j}.mlp.fc1.bias', f'model.backbone.model.encoder.layers.{i}.blocks.{j}.intermediate.dense.bias') )
rename_keys.append((f'backbone.0.body.layers.{i}.blocks.{j}.mlp.fc2.weight', f'model.backbone.model.encoder.layers.{i}.blocks.{j}.output.dense.weight') )
rename_keys.append((f'backbone.0.body.layers.{i}.blocks.{j}.mlp.fc2.bias', f'model.backbone.model.encoder.layers.{i}.blocks.{j}.output.dense.bias') )
if i < 3:
rename_keys.append((f'backbone.0.body.layers.{i}.downsample.reduction.weight', f'model.backbone.model.encoder.layers.{i}.downsample.reduction.weight') )
rename_keys.append((f'backbone.0.body.layers.{i}.downsample.norm.weight', f'model.backbone.model.encoder.layers.{i}.downsample.norm.weight') )
rename_keys.append((f'backbone.0.body.layers.{i}.downsample.norm.bias', f'model.backbone.model.encoder.layers.{i}.downsample.norm.bias') )
rename_keys.append(('backbone.0.body.norm1.weight', 'model.backbone.model.hidden_states_norms.stage2.weight') )
rename_keys.append(('backbone.0.body.norm1.bias', 'model.backbone.model.hidden_states_norms.stage2.bias') )
rename_keys.append(('backbone.0.body.norm2.weight', 'model.backbone.model.hidden_states_norms.stage3.weight') )
rename_keys.append(('backbone.0.body.norm2.bias', 'model.backbone.model.hidden_states_norms.stage3.bias') )
rename_keys.append(('backbone.0.body.norm3.weight', 'model.backbone.model.hidden_states_norms.stage4.weight') )
rename_keys.append(('backbone.0.body.norm3.bias', 'model.backbone.model.hidden_states_norms.stage4.bias') )
# transformer encoder
for i in range(config.encoder_layers ):
rename_keys.append((f'transformer.encoder.layers.{i}.self_attn.sampling_offsets.weight', f'model.encoder.layers.{i}.self_attn.sampling_offsets.weight') )
rename_keys.append((f'transformer.encoder.layers.{i}.self_attn.sampling_offsets.bias', f'model.encoder.layers.{i}.self_attn.sampling_offsets.bias') )
rename_keys.append((f'transformer.encoder.layers.{i}.self_attn.attention_weights.weight', f'model.encoder.layers.{i}.self_attn.attention_weights.weight') )
rename_keys.append((f'transformer.encoder.layers.{i}.self_attn.attention_weights.bias', f'model.encoder.layers.{i}.self_attn.attention_weights.bias') )
rename_keys.append((f'transformer.encoder.layers.{i}.self_attn.value_proj.weight', f'model.encoder.layers.{i}.self_attn.value_proj.weight') )
rename_keys.append((f'transformer.encoder.layers.{i}.self_attn.value_proj.bias', f'model.encoder.layers.{i}.self_attn.value_proj.bias') )
rename_keys.append((f'transformer.encoder.layers.{i}.self_attn.output_proj.weight', f'model.encoder.layers.{i}.self_attn.output_proj.weight') )
rename_keys.append((f'transformer.encoder.layers.{i}.self_attn.output_proj.bias', f'model.encoder.layers.{i}.self_attn.output_proj.bias') )
rename_keys.append((f'transformer.encoder.layers.{i}.norm1.weight', f'model.encoder.layers.{i}.self_attn_layer_norm.weight') )
rename_keys.append((f'transformer.encoder.layers.{i}.norm1.bias', f'model.encoder.layers.{i}.self_attn_layer_norm.bias') )
rename_keys.append((f'transformer.encoder.layers.{i}.linear1.weight', f'model.encoder.layers.{i}.fc1.weight') )
rename_keys.append((f'transformer.encoder.layers.{i}.linear1.bias', f'model.encoder.layers.{i}.fc1.bias') )
rename_keys.append((f'transformer.encoder.layers.{i}.linear2.weight', f'model.encoder.layers.{i}.fc2.weight') )
rename_keys.append((f'transformer.encoder.layers.{i}.linear2.bias', f'model.encoder.layers.{i}.fc2.bias') )
rename_keys.append((f'transformer.encoder.layers.{i}.norm2.weight', f'model.encoder.layers.{i}.final_layer_norm.weight') )
rename_keys.append((f'transformer.encoder.layers.{i}.norm2.bias', f'model.encoder.layers.{i}.final_layer_norm.bias') )
# transformer decoder
for i in range(config.decoder_layers ):
rename_keys.append((f'transformer.decoder.layers.{i}.cross_attn.sampling_offsets.weight', f'model.decoder.layers.{i}.encoder_attn.sampling_offsets.weight') )
rename_keys.append((f'transformer.decoder.layers.{i}.cross_attn.sampling_offsets.bias', f'model.decoder.layers.{i}.encoder_attn.sampling_offsets.bias') )
rename_keys.append((f'transformer.decoder.layers.{i}.cross_attn.attention_weights.weight', f'model.decoder.layers.{i}.encoder_attn.attention_weights.weight') )
rename_keys.append((f'transformer.decoder.layers.{i}.cross_attn.attention_weights.bias', f'model.decoder.layers.{i}.encoder_attn.attention_weights.bias') )
rename_keys.append((f'transformer.decoder.layers.{i}.cross_attn.value_proj.weight', f'model.decoder.layers.{i}.encoder_attn.value_proj.weight') )
rename_keys.append((f'transformer.decoder.layers.{i}.cross_attn.value_proj.bias', f'model.decoder.layers.{i}.encoder_attn.value_proj.bias') )
rename_keys.append((f'transformer.decoder.layers.{i}.cross_attn.output_proj.weight', f'model.decoder.layers.{i}.encoder_attn.output_proj.weight') )
rename_keys.append((f'transformer.decoder.layers.{i}.cross_attn.output_proj.bias', f'model.decoder.layers.{i}.encoder_attn.output_proj.bias') )
rename_keys.append((f'transformer.decoder.layers.{i}.norm1.weight', f'model.decoder.layers.{i}.encoder_attn_layer_norm.weight') )
rename_keys.append((f'transformer.decoder.layers.{i}.norm1.bias', f'model.decoder.layers.{i}.encoder_attn_layer_norm.bias') )
rename_keys.append((f'transformer.decoder.layers.{i}.self_attn.out_proj.weight', f'model.decoder.layers.{i}.self_attn.out_proj.weight') )
rename_keys.append((f'transformer.decoder.layers.{i}.self_attn.out_proj.bias', f'model.decoder.layers.{i}.self_attn.out_proj.bias') )
rename_keys.append((f'transformer.decoder.layers.{i}.norm2.weight', f'model.decoder.layers.{i}.self_attn_layer_norm.weight') )
rename_keys.append((f'transformer.decoder.layers.{i}.norm2.bias', f'model.decoder.layers.{i}.self_attn_layer_norm.bias') )
rename_keys.append((f'transformer.decoder.layers.{i}.linear1.weight', f'model.decoder.layers.{i}.fc1.weight') )
rename_keys.append((f'transformer.decoder.layers.{i}.linear1.bias', f'model.decoder.layers.{i}.fc1.bias') )
rename_keys.append((f'transformer.decoder.layers.{i}.linear2.weight', f'model.decoder.layers.{i}.fc2.weight') )
rename_keys.append((f'transformer.decoder.layers.{i}.linear2.bias', f'model.decoder.layers.{i}.fc2.bias') )
rename_keys.append((f'transformer.decoder.layers.{i}.norm3.weight', f'model.decoder.layers.{i}.final_layer_norm.weight') )
rename_keys.append((f'transformer.decoder.layers.{i}.norm3.bias', f'model.decoder.layers.{i}.final_layer_norm.bias') )
# fmt: on
return rename_keys
def _snake_case( SCREAMING_SNAKE_CASE__ : Optional[int] , SCREAMING_SNAKE_CASE__ : Any , SCREAMING_SNAKE_CASE__ : str ) -> Dict:
'''simple docstring'''
A__ = dct.pop(SCREAMING_SNAKE_CASE__ )
A__ = val
def _snake_case( SCREAMING_SNAKE_CASE__ : List[Any] , SCREAMING_SNAKE_CASE__ : str ) -> Optional[Any]:
'''simple docstring'''
A__ = [int(backbone_config.embed_dim * 2**i ) for i in range(len(backbone_config.depths ) )]
for i in range(len(backbone_config.depths ) ):
A__ = num_features[i]
for j in range(backbone_config.depths[i] ):
# fmt: off
# read in weights + bias of input projection layer (in original implementation, this is a single matrix + bias)
A__ = state_dict.pop(f'backbone.0.body.layers.{i}.blocks.{j}.attn.qkv.weight' )
A__ = state_dict.pop(f'backbone.0.body.layers.{i}.blocks.{j}.attn.qkv.bias' )
# next, add query, keys and values (in that order) to the state dict
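            # in_proj_weight has shape (3*dim, dim): rows [0:dim] are q, [dim:2*dim] are k,
            # [2*dim:3*dim] are v; the bias vector is sliced identically below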
A__ = in_proj_weight[:dim, :]
A__ = in_proj_bias[: dim]
A__ = in_proj_weight[
dim : dim * 2, :
]
A__ = in_proj_bias[
dim : dim * 2
]
A__ = in_proj_weight[
-dim :, :
]
A__ = in_proj_bias[-dim :]
# fmt: on
def _snake_case( SCREAMING_SNAKE_CASE__ : Optional[Any] , SCREAMING_SNAKE_CASE__ : Any ) -> Dict:
'''simple docstring'''
A__ = config.d_model
for i in range(config.decoder_layers ):
# read in weights + bias of input projection layer of self-attention
A__ = state_dict.pop(f'transformer.decoder.layers.{i}.self_attn.in_proj_weight' )
A__ = state_dict.pop(f'transformer.decoder.layers.{i}.self_attn.in_proj_bias' )
# next, add query, keys and values (in that order) to the state dict
A__ = in_proj_weight[:hidden_size, :]
A__ = in_proj_bias[:hidden_size]
A__ = in_proj_weight[
hidden_size : hidden_size * 2, :
]
A__ = in_proj_bias[hidden_size : hidden_size * 2]
A__ = in_proj_weight[-hidden_size:, :]
A__ = in_proj_bias[-hidden_size:]
def _snake_case( ) -> Dict:
'''simple docstring'''
A__ = 'http://images.cocodataset.org/val2017/000000039769.jpg'
A__ = Image.open(requests.get(SCREAMING_SNAKE_CASE__ , stream=SCREAMING_SNAKE_CASE__ ).raw )
return im
@torch.no_grad()
def convert_deta_checkpoint(model_name, pytorch_dump_folder_path, push_to_hub):
    config = get_deta_config(model_name)
    # load original state dict
    if model_name == "deta-swin-large":
        checkpoint_path = hf_hub_download(repo_id='nielsr/deta-checkpoints', filename='adet_swin_ft.pth')
    elif model_name == "deta-swin-large-o365":
        checkpoint_path = hf_hub_download(repo_id='jozhang97/deta-swin-l-o365', filename='deta_swin_pt_o365.pth')
    else:
        raise ValueError(f'Model name {model_name} not supported')
    state_dict = torch.load(checkpoint_path, map_location='cpu')['model']
    # print the original state dict
    for name, param in state_dict.items():
        print(name, param.shape)
    # rename keys
    rename_keys = create_rename_keys(config)
    for src, dest in rename_keys:
        rename_key(state_dict, src, dest)
    read_in_swin_q_k_v(state_dict, config.backbone_config)
    read_in_decoder_q_k_v(state_dict, config)
    # fix some prefixes (destination key patterns follow the HF DETA conversion convention)
    for key in state_dict.copy().keys():
        if "transformer.decoder.class_embed" in key or "transformer.decoder.bbox_embed" in key:
            val = state_dict.pop(key)
            state_dict["model." + key] = val
        if "input_proj" in key:
            val = state_dict.pop(key)
            state_dict["model." + key] = val
        if "level_embed" in key or "pos_trans" in key or "pix_trans" in key or "enc_output" in key:
            val = state_dict.pop(key)
            state_dict[key.replace("transformer", "model")] = val
    # finally, create HuggingFace model and load state dict
    model = DetaForObjectDetection(config)
    model.load_state_dict(state_dict)
    model.eval()
    device = 'cuda' if torch.cuda.is_available() else 'cpu'
    model.to(device)
    # load image processor
    processor = DetaImageProcessor(format='coco_detection')
    # verify our conversion on image
    img = prepare_img()
    encoding = processor(images=img, return_tensors='pt')
    pixel_values = encoding['pixel_values']
    outputs = model(pixel_values.to(device))
    # verify logits
    print('Logits:', outputs.logits[0, :3, :3])
    print('Boxes:', outputs.pred_boxes[0, :3, :3])
    if model_name == "deta-swin-large":
        expected_logits = torch.tensor(
            [[-7.6308, -2.8485, -5.3737], [-7.2037, -4.5505, -4.8027], [-7.2943, -4.2611, -4.6617]] )
        expected_boxes = torch.tensor([[0.4987, 0.4969, 0.9999], [0.2549, 0.5498, 0.4805], [0.5498, 0.2757, 0.0569]] )
    elif model_name == "deta-swin-large-o365":
        expected_logits = torch.tensor(
            [[-8.0122, -3.5720, -4.9717], [-8.1547, -3.6886, -4.6389], [-7.6610, -3.6194, -5.0134]] )
        expected_boxes = torch.tensor([[0.2523, 0.5549, 0.4881], [0.7715, 0.4149, 0.4601], [0.5503, 0.2753, 0.0575]] )
    assert torch.allclose(outputs.logits[0, :3, :3], expected_logits.to(device), atol=1E-4 )
    assert torch.allclose(outputs.pred_boxes[0, :3, :3], expected_boxes.to(device), atol=1E-4 )
    print('Everything ok!')
    if pytorch_dump_folder_path:
        # Save model and processor
        logger.info(f'Saving PyTorch model and processor to {pytorch_dump_folder_path}...')
        Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
        model.save_pretrained(pytorch_dump_folder_path)
        processor.save_pretrained(pytorch_dump_folder_path)
    # Push to hub
    if push_to_hub:
        print('Pushing model and processor to hub...')
        model.push_to_hub(f'jozhang97/{model_name}')
        processor.push_to_hub(f'jozhang97/{model_name}')
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument(
"--model_name",
type=str,
default="deta-swin-large",
choices=["deta-swin-large", "deta-swin-large-o365"],
help="Name of the model you'd like to convert.",
)
parser.add_argument(
"--pytorch_dump_folder_path",
default=None,
type=str,
help="Path to the folder to output PyTorch model.",
)
parser.add_argument(
"--push_to_hub", action="store_true", help="Whether or not to push the converted model to the 🤗 hub."
)
    args = parser.parse_args()
convert_deta_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
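    # --- Added usage sketch (hedged; not part of the original script) ---
    # Assuming this file is saved as convert_deta_swin_to_pytorch.py (the filename
    # is an assumption), a typical conversion run looks like:
    #
    #   python convert_deta_swin_to_pytorch.py \
    #       --model_name deta-swin-large \
    #       --pytorch_dump_folder_path ./deta-swin-large \
    #       --push_to_hub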
| 351 |
import random
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DDIMScheduler,
    UNet3DConditionModel,
VideoToVideoSDPipeline,
)
from diffusers.utils import floats_tensor, is_xformers_available, skip_mps
from diffusers.utils.testing_utils import enable_full_determinism, slow, torch_device
from ..pipeline_params import (
TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS,
TEXT_GUIDED_IMAGE_VARIATION_PARAMS,
)
from ..test_pipelines_common import PipelineTesterMixin
enable_full_determinism()
@skip_mps
class VideoToVideoSDPipelineFastTests(PipelineTesterMixin, unittest.TestCase):
    pipeline_class = VideoToVideoSDPipeline
    params = TEXT_GUIDED_IMAGE_VARIATION_PARAMS.union({'video'} ) - {'image', 'width', 'height'}
    batch_params = TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS.union({'video'} ) - {'image'}
    required_optional_params = PipelineTesterMixin.required_optional_params - {'latents'}
    test_attention_slicing = False

    # No `output_type`.
    required_optional_params = frozenset(
        [
            'num_inference_steps',
            'generator',
            'latents',
            'return_dict',
            'callback',
            'callback_steps',
        ] )
    def get_dummy_components(self):
        torch.manual_seed(0)
        unet = UNet3DConditionModel(
            block_out_channels=(32, 64, 64, 64), layers_per_block=2, sample_size=32, in_channels=4, out_channels=4, down_block_types=('CrossAttnDownBlock3D', 'CrossAttnDownBlock3D', 'CrossAttnDownBlock3D', 'DownBlock3D'), up_block_types=('UpBlock3D', 'CrossAttnUpBlock3D', 'CrossAttnUpBlock3D', 'CrossAttnUpBlock3D'), cross_attention_dim=32, attention_head_dim=4)
        scheduler = DDIMScheduler(
            beta_start=0.00_085, beta_end=0.012, beta_schedule='scaled_linear', clip_sample=False, set_alpha_to_one=False)
        torch.manual_seed(0)
        vae = AutoencoderKL(
            block_out_channels=[32, 64], in_channels=3, out_channels=3, down_block_types=['DownEncoderBlock2D', 'DownEncoderBlock2D'], up_block_types=['UpDecoderBlock2D', 'UpDecoderBlock2D'], latent_channels=4, sample_size=128)
        torch.manual_seed(0)
        text_encoder_config = CLIPTextConfig(
            bos_token_id=0, eos_token_id=2, hidden_size=32, intermediate_size=37, layer_norm_eps=1E-05, num_attention_heads=4, num_hidden_layers=5, pad_token_id=1, vocab_size=1000, hidden_act='gelu', projection_dim=512)
        text_encoder = CLIPTextModel(text_encoder_config)
        tokenizer = CLIPTokenizer.from_pretrained('hf-internal-testing/tiny-random-clip')
        components = {
            'unet': unet,
            'scheduler': scheduler,
            'vae': vae,
            'text_encoder': text_encoder,
            'tokenizer': tokenizer,
        }
        return components
    def get_dummy_inputs(self, device, seed=0):
        # 3 frames
        video = floats_tensor((1, 3, 3, 32, 32), rng=random.Random(seed)).to(device)
        if str(device).startswith('mps'):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            'prompt': 'A painting of a squirrel eating a burger',
            'video': video,
            'generator': generator,
            'num_inference_steps': 2,
            'guidance_scale': 6.0,
            'output_type': 'pt',
        }
        return inputs
    def test_text_to_video_default_case(self):
        device = 'cpu'  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components()
        sd_pipe = VideoToVideoSDPipeline(**components)
        sd_pipe = sd_pipe.to(device)
        sd_pipe.set_progress_bar_config(disable=None)
        inputs = self.get_dummy_inputs(device)
        inputs['output_type'] = 'np'
        frames = sd_pipe(**inputs).frames
        image_slice = frames[0][-3:, -3:, -1]
        assert frames[0].shape == (32, 32, 3)
        expected_slice = np.array([106, 117, 113, 174, 137, 112, 148, 151, 131])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1E-2
    @unittest.skipIf(
        torch_device != 'cuda' or not is_xformers_available(), reason='XFormers attention is only available with CUDA and `xformers` installed')
    def test_xformers_attention_forwardGenerator_pass(self):
        self._test_xformers_attention_forwardGenerator_pass(test_mean_pixel_difference=False, expected_max_diff=5E-3)

    @unittest.skip(reason='Batching needs to be properly figured out first for this pipeline.')
    def test_inference_batch_consistent(self):
        pass

    @unittest.skip(reason='Batching needs to be properly figured out first for this pipeline.')
    def test_inference_batch_single_identical(self):
        pass

    @unittest.skip(reason='`num_images_per_prompt` argument is not supported for this pipeline.')
    def test_num_images_per_prompt(self):
        pass

    def test_progress_bar(self):
        return super().test_progress_bar()
@slow
@skip_mps
class VideoToVideoSDPipelineSlowTests(unittest.TestCase):
    def test_two_step_model(self):
        # fp16 assumed here; the original dtype literal was garbled in extraction
        pipe = VideoToVideoSDPipeline.from_pretrained('cerspense/zeroscope_v2_XL', torch_dtype=torch.float16)
        pipe.enable_model_cpu_offload()
        # 10 frames
        generator = torch.Generator(device='cpu').manual_seed(0)
        video = torch.randn((1, 10, 3, 1024, 576), generator=generator)
        video = video.to('cuda')
        prompt = 'Spiderman is surfing'
        video_frames = pipe(prompt, video=video, generator=generator, num_inference_steps=3, output_type='pt').frames
        expected_array = np.array([-1.0_458_984, -1.1_279_297, -0.9_663_086, -0.91_503_906, -0.75_097_656])
        assert np.abs(video_frames.cpu().numpy()[0, 0, 0, 0, -5:] - expected_array).sum() < 1E-2
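# --- Added usage sketch (hedged; mirrors the slow test above, not part of it) ---
# Minimal standalone inference with the same checkpoint; frame layout is
# (batch, num_frames, channels, height, width). Left commented out because it
# downloads weights and needs a CUDA device.
#
#   import torch
#   from diffusers import VideoToVideoSDPipeline
#
#   pipe = VideoToVideoSDPipeline.from_pretrained("cerspense/zeroscope_v2_XL", torch_dtype=torch.float16)
#   pipe.enable_model_cpu_offload()
#   video = torch.randn(1, 10, 3, 1024, 576).to("cuda")
#   frames = pipe("Spiderman is surfing", video=video, num_inference_steps=3, output_type="pt").frames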
| 282 | 0 |
from collections import OrderedDict
from ...utils import logging
from .auto_factory import _BaseAutoModelClass, _LazyAutoMapping, auto_class_update
from .configuration_auto import CONFIG_MAPPING_NAMES
logger = logging.get_logger(__name__)
FLAX_MODEL_MAPPING_NAMES = OrderedDict(
[
# Base model mapping
('albert', 'FlaxAlbertModel'),
('bart', 'FlaxBartModel'),
('beit', 'FlaxBeitModel'),
('bert', 'FlaxBertModel'),
('big_bird', 'FlaxBigBirdModel'),
('blenderbot', 'FlaxBlenderbotModel'),
('blenderbot-small', 'FlaxBlenderbotSmallModel'),
('clip', 'FlaxCLIPModel'),
('distilbert', 'FlaxDistilBertModel'),
('electra', 'FlaxElectraModel'),
('gpt-sw3', 'FlaxGPT2Model'),
('gpt2', 'FlaxGPT2Model'),
('gpt_neo', 'FlaxGPTNeoModel'),
('gptj', 'FlaxGPTJModel'),
('longt5', 'FlaxLongT5Model'),
('marian', 'FlaxMarianModel'),
('mbart', 'FlaxMBartModel'),
('mt5', 'FlaxMT5Model'),
('opt', 'FlaxOPTModel'),
('pegasus', 'FlaxPegasusModel'),
('regnet', 'FlaxRegNetModel'),
('resnet', 'FlaxResNetModel'),
('roberta', 'FlaxRobertaModel'),
('roberta-prelayernorm', 'FlaxRobertaPreLayerNormModel'),
('roformer', 'FlaxRoFormerModel'),
('t5', 'FlaxT5Model'),
('vision-text-dual-encoder', 'FlaxVisionTextDualEncoderModel'),
('vit', 'FlaxViTModel'),
('wav2vec2', 'FlaxWav2Vec2Model'),
('whisper', 'FlaxWhisperModel'),
('xglm', 'FlaxXGLMModel'),
('xlm-roberta', 'FlaxXLMRobertaModel'),
]
)
FLAX_MODEL_FOR_PRETRAINING_MAPPING_NAMES = OrderedDict(
[
# Model for pre-training mapping
('albert', 'FlaxAlbertForPreTraining'),
('bart', 'FlaxBartForConditionalGeneration'),
('bert', 'FlaxBertForPreTraining'),
('big_bird', 'FlaxBigBirdForPreTraining'),
('electra', 'FlaxElectraForPreTraining'),
('longt5', 'FlaxLongT5ForConditionalGeneration'),
('mbart', 'FlaxMBartForConditionalGeneration'),
('mt5', 'FlaxMT5ForConditionalGeneration'),
('roberta', 'FlaxRobertaForMaskedLM'),
('roberta-prelayernorm', 'FlaxRobertaPreLayerNormForMaskedLM'),
('roformer', 'FlaxRoFormerForMaskedLM'),
('t5', 'FlaxT5ForConditionalGeneration'),
('wav2vec2', 'FlaxWav2Vec2ForPreTraining'),
('whisper', 'FlaxWhisperForConditionalGeneration'),
('xlm-roberta', 'FlaxXLMRobertaForMaskedLM'),
]
)
FLAX_MODEL_FOR_MASKED_LM_MAPPING_NAMES = OrderedDict(
[
# Model for Masked LM mapping
('albert', 'FlaxAlbertForMaskedLM'),
('bart', 'FlaxBartForConditionalGeneration'),
('bert', 'FlaxBertForMaskedLM'),
('big_bird', 'FlaxBigBirdForMaskedLM'),
('distilbert', 'FlaxDistilBertForMaskedLM'),
('electra', 'FlaxElectraForMaskedLM'),
('mbart', 'FlaxMBartForConditionalGeneration'),
('roberta', 'FlaxRobertaForMaskedLM'),
('roberta-prelayernorm', 'FlaxRobertaPreLayerNormForMaskedLM'),
('roformer', 'FlaxRoFormerForMaskedLM'),
('xlm-roberta', 'FlaxXLMRobertaForMaskedLM'),
]
)
FLAX_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING_NAMES = OrderedDict(
[
# Model for Seq2Seq Causal LM mapping
('bart', 'FlaxBartForConditionalGeneration'),
('blenderbot', 'FlaxBlenderbotForConditionalGeneration'),
('blenderbot-small', 'FlaxBlenderbotSmallForConditionalGeneration'),
('encoder-decoder', 'FlaxEncoderDecoderModel'),
('longt5', 'FlaxLongT5ForConditionalGeneration'),
('marian', 'FlaxMarianMTModel'),
('mbart', 'FlaxMBartForConditionalGeneration'),
('mt5', 'FlaxMT5ForConditionalGeneration'),
('pegasus', 'FlaxPegasusForConditionalGeneration'),
('t5', 'FlaxT5ForConditionalGeneration'),
]
)
FLAX_MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING_NAMES = OrderedDict(
[
# Model for Image-classsification
('beit', 'FlaxBeitForImageClassification'),
('regnet', 'FlaxRegNetForImageClassification'),
('resnet', 'FlaxResNetForImageClassification'),
('vit', 'FlaxViTForImageClassification'),
]
)
FLAX_MODEL_FOR_VISION_2_SEQ_MAPPING_NAMES = OrderedDict(
[
('vision-encoder-decoder', 'FlaxVisionEncoderDecoderModel'),
]
)
FLAX_MODEL_FOR_CAUSAL_LM_MAPPING_NAMES = OrderedDict(
[
# Model for Causal LM mapping
('bart', 'FlaxBartForCausalLM'),
('bert', 'FlaxBertForCausalLM'),
('big_bird', 'FlaxBigBirdForCausalLM'),
('electra', 'FlaxElectraForCausalLM'),
('gpt-sw3', 'FlaxGPT2LMHeadModel'),
('gpt2', 'FlaxGPT2LMHeadModel'),
('gpt_neo', 'FlaxGPTNeoForCausalLM'),
('gptj', 'FlaxGPTJForCausalLM'),
('opt', 'FlaxOPTForCausalLM'),
('roberta', 'FlaxRobertaForCausalLM'),
('roberta-prelayernorm', 'FlaxRobertaPreLayerNormForCausalLM'),
('xglm', 'FlaxXGLMForCausalLM'),
('xlm-roberta', 'FlaxXLMRobertaForCausalLM'),
]
)
FLAX_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING_NAMES = OrderedDict(
[
# Model for Sequence Classification mapping
('albert', 'FlaxAlbertForSequenceClassification'),
('bart', 'FlaxBartForSequenceClassification'),
('bert', 'FlaxBertForSequenceClassification'),
('big_bird', 'FlaxBigBirdForSequenceClassification'),
('distilbert', 'FlaxDistilBertForSequenceClassification'),
('electra', 'FlaxElectraForSequenceClassification'),
('mbart', 'FlaxMBartForSequenceClassification'),
('roberta', 'FlaxRobertaForSequenceClassification'),
('roberta-prelayernorm', 'FlaxRobertaPreLayerNormForSequenceClassification'),
('roformer', 'FlaxRoFormerForSequenceClassification'),
('xlm-roberta', 'FlaxXLMRobertaForSequenceClassification'),
]
)
FLAX_MODEL_FOR_QUESTION_ANSWERING_MAPPING_NAMES = OrderedDict(
[
# Model for Question Answering mapping
('albert', 'FlaxAlbertForQuestionAnswering'),
('bart', 'FlaxBartForQuestionAnswering'),
('bert', 'FlaxBertForQuestionAnswering'),
('big_bird', 'FlaxBigBirdForQuestionAnswering'),
('distilbert', 'FlaxDistilBertForQuestionAnswering'),
('electra', 'FlaxElectraForQuestionAnswering'),
('mbart', 'FlaxMBartForQuestionAnswering'),
('roberta', 'FlaxRobertaForQuestionAnswering'),
('roberta-prelayernorm', 'FlaxRobertaPreLayerNormForQuestionAnswering'),
('roformer', 'FlaxRoFormerForQuestionAnswering'),
('xlm-roberta', 'FlaxXLMRobertaForQuestionAnswering'),
]
)
FLAX_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING_NAMES = OrderedDict(
[
# Model for Token Classification mapping
('albert', 'FlaxAlbertForTokenClassification'),
('bert', 'FlaxBertForTokenClassification'),
('big_bird', 'FlaxBigBirdForTokenClassification'),
('distilbert', 'FlaxDistilBertForTokenClassification'),
('electra', 'FlaxElectraForTokenClassification'),
('roberta', 'FlaxRobertaForTokenClassification'),
('roberta-prelayernorm', 'FlaxRobertaPreLayerNormForTokenClassification'),
('roformer', 'FlaxRoFormerForTokenClassification'),
('xlm-roberta', 'FlaxXLMRobertaForTokenClassification'),
]
)
FLAX_MODEL_FOR_MULTIPLE_CHOICE_MAPPING_NAMES = OrderedDict(
[
# Model for Multiple Choice mapping
('albert', 'FlaxAlbertForMultipleChoice'),
('bert', 'FlaxBertForMultipleChoice'),
('big_bird', 'FlaxBigBirdForMultipleChoice'),
('distilbert', 'FlaxDistilBertForMultipleChoice'),
('electra', 'FlaxElectraForMultipleChoice'),
('roberta', 'FlaxRobertaForMultipleChoice'),
('roberta-prelayernorm', 'FlaxRobertaPreLayerNormForMultipleChoice'),
('roformer', 'FlaxRoFormerForMultipleChoice'),
('xlm-roberta', 'FlaxXLMRobertaForMultipleChoice'),
]
)
FLAX_MODEL_FOR_NEXT_SENTENCE_PREDICTION_MAPPING_NAMES = OrderedDict(
[
('bert', 'FlaxBertForNextSentencePrediction'),
]
)
FLAX_MODEL_FOR_SPEECH_SEQ_2_SEQ_MAPPING_NAMES = OrderedDict(
[
('speech-encoder-decoder', 'FlaxSpeechEncoderDecoderModel'),
('whisper', 'FlaxWhisperForConditionalGeneration'),
]
)
FLAX_MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING_NAMES = OrderedDict(
[
('whisper', 'FlaxWhisperForAudioClassification'),
]
)
FLAX_MODEL_MAPPING = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FLAX_MODEL_MAPPING_NAMES)
FLAX_MODEL_FOR_PRETRAINING_MAPPING = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_PRETRAINING_MAPPING_NAMES)
FLAX_MODEL_FOR_MASKED_LM_MAPPING = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_MASKED_LM_MAPPING_NAMES)
FLAX_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING = _LazyAutoMapping(
    CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING_NAMES
)
FLAX_MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING = _LazyAutoMapping(
    CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING_NAMES
)
FLAX_MODEL_FOR_VISION_2_SEQ_MAPPING = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_VISION_2_SEQ_MAPPING_NAMES)
FLAX_MODEL_FOR_CAUSAL_LM_MAPPING = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_CAUSAL_LM_MAPPING_NAMES)
FLAX_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING = _LazyAutoMapping(
    CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING_NAMES
)
FLAX_MODEL_FOR_QUESTION_ANSWERING_MAPPING = _LazyAutoMapping(
    CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_QUESTION_ANSWERING_MAPPING_NAMES
)
FLAX_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING = _LazyAutoMapping(
    CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING_NAMES
)
FLAX_MODEL_FOR_MULTIPLE_CHOICE_MAPPING = _LazyAutoMapping(
    CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_MULTIPLE_CHOICE_MAPPING_NAMES
)
FLAX_MODEL_FOR_NEXT_SENTENCE_PREDICTION_MAPPING = _LazyAutoMapping(
    CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_NEXT_SENTENCE_PREDICTION_MAPPING_NAMES
)
FLAX_MODEL_FOR_SPEECH_SEQ_2_SEQ_MAPPING = _LazyAutoMapping(
    CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_SPEECH_SEQ_2_SEQ_MAPPING_NAMES
)
FLAX_MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING = _LazyAutoMapping(
    CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING_NAMES
)
class FlaxAutoModel(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_MAPPING


FlaxAutoModel = auto_class_update(FlaxAutoModel)


class FlaxAutoModelForPreTraining(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_FOR_PRETRAINING_MAPPING


FlaxAutoModelForPreTraining = auto_class_update(FlaxAutoModelForPreTraining, head_doc='pretraining')


class FlaxAutoModelForCausalLM(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_FOR_CAUSAL_LM_MAPPING


FlaxAutoModelForCausalLM = auto_class_update(FlaxAutoModelForCausalLM, head_doc='causal language modeling')


class FlaxAutoModelForMaskedLM(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_FOR_MASKED_LM_MAPPING


FlaxAutoModelForMaskedLM = auto_class_update(FlaxAutoModelForMaskedLM, head_doc='masked language modeling')


class FlaxAutoModelForSeq2SeqLM(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING


FlaxAutoModelForSeq2SeqLM = auto_class_update(
    FlaxAutoModelForSeq2SeqLM, head_doc='sequence-to-sequence language modeling', checkpoint_for_example='t5-base'
)


class FlaxAutoModelForSequenceClassification(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING


FlaxAutoModelForSequenceClassification = auto_class_update(
    FlaxAutoModelForSequenceClassification, head_doc='sequence classification'
)


class FlaxAutoModelForQuestionAnswering(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_FOR_QUESTION_ANSWERING_MAPPING


FlaxAutoModelForQuestionAnswering = auto_class_update(FlaxAutoModelForQuestionAnswering, head_doc='question answering')


class FlaxAutoModelForTokenClassification(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING


FlaxAutoModelForTokenClassification = auto_class_update(
    FlaxAutoModelForTokenClassification, head_doc='token classification'
)


class FlaxAutoModelForMultipleChoice(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_FOR_MULTIPLE_CHOICE_MAPPING


FlaxAutoModelForMultipleChoice = auto_class_update(FlaxAutoModelForMultipleChoice, head_doc='multiple choice')


class FlaxAutoModelForNextSentencePrediction(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_FOR_NEXT_SENTENCE_PREDICTION_MAPPING


FlaxAutoModelForNextSentencePrediction = auto_class_update(
    FlaxAutoModelForNextSentencePrediction, head_doc='next sentence prediction'
)


class FlaxAutoModelForImageClassification(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING


FlaxAutoModelForImageClassification = auto_class_update(
    FlaxAutoModelForImageClassification, head_doc='image classification'
)


class FlaxAutoModelForVision2Seq(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_FOR_VISION_2_SEQ_MAPPING


FlaxAutoModelForVision2Seq = auto_class_update(FlaxAutoModelForVision2Seq, head_doc='vision-to-text modeling')


class FlaxAutoModelForSpeechSeq2Seq(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_FOR_SPEECH_SEQ_2_SEQ_MAPPING


FlaxAutoModelForSpeechSeq2Seq = auto_class_update(
    FlaxAutoModelForSpeechSeq2Seq, head_doc='sequence-to-sequence speech-to-text modeling'
)
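# --- Added usage sketch (hedged; for illustration only) ---
# The auto classes above pick the right Flax architecture from a checkpoint's
# config; "bert-base-cased" stands in for any checkpoint covered by the mappings.
#
#   from transformers import FlaxAutoModel
#
#   model = FlaxAutoModel.from_pretrained("bert-base-cased")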
| 216 |
import logging
import os
from logging import (
CRITICAL, # NOQA
DEBUG, # NOQA
ERROR, # NOQA
FATAL, # NOQA
INFO, # NOQA
NOTSET, # NOQA
WARN, # NOQA
WARNING, # NOQA
)
from typing import Optional
from tqdm import auto as tqdm_lib
log_levels = {
    'debug': logging.DEBUG,
    'info': logging.INFO,
    'warning': logging.WARNING,
    'error': logging.ERROR,
    'critical': logging.CRITICAL,
}

_default_log_level = logging.WARNING
def _get_default_logging_level():
    """
    If the DATASETS_VERBOSITY env var is set to one of the valid choices, return it as
    the new default level; otherwise fall back to `_default_log_level`.
    """
    env_level_str = os.getenv('DATASETS_VERBOSITY', None)
    if env_level_str:
        if env_level_str in log_levels:
            return log_levels[env_level_str]
        else:
            logging.getLogger().warning(
                f"Unknown option DATASETS_VERBOSITY={env_level_str}, "
                f"has to be one of: { ', '.join(log_levels.keys()) }" )
    return _default_log_level


def _get_library_name() -> str:
    return __name__.split('.')[0]


def _get_library_root_logger() -> logging.Logger:
    return logging.getLogger(_get_library_name())


def _configure_library_root_logger() -> None:
    # Apply our default configuration to the library root logger.
    library_root_logger = _get_library_root_logger()
    library_root_logger.setLevel(_get_default_logging_level())


def _reset_library_root_logger() -> None:
    library_root_logger = _get_library_root_logger()
    library_root_logger.setLevel(logging.NOTSET)


def get_logger(name: Optional[str] = None) -> logging.Logger:
    """Return a logger with the specified name."""
    if name is None:
        name = _get_library_name()
    return logging.getLogger(name)


def get_verbosity() -> int:
    """Return the current level for the library's root logger."""
    return _get_library_root_logger().getEffectiveLevel()


def set_verbosity(verbosity: int) -> None:
    """Set the level for the library's root logger."""
    _get_library_root_logger().setLevel(verbosity)


def set_verbosity_info():
    return set_verbosity(INFO)


def set_verbosity_warning():
    return set_verbosity(WARNING)


def set_verbosity_debug():
    return set_verbosity(DEBUG)


def set_verbosity_error():
    return set_verbosity(ERROR)


def disable_propagation() -> None:
    """Disable propagation of the library log outputs."""
    _get_library_root_logger().propagate = False


def enable_propagation() -> None:
    """Enable propagation of the library log outputs."""
    _get_library_root_logger().propagate = True
# Configure the library root logger at the module level (singleton-like)
_configure_library_root_logger()
class EmptyTqdm:
    """Dummy tqdm which doesn't do anything."""

    def __init__(self, *args, **kwargs):  # pylint: disable=unused-argument
        self._iterator = args[0] if args else None

    def __iter__(self):
        return iter(self._iterator)

    def __getattr__(self, _):
        """Return an empty function for any attribute access."""

        def empty_fn(*args, **kwargs):  # pylint: disable=unused-argument
            return

        return empty_fn

    def __enter__(self):
        return self

    def __exit__(self, type_, value, traceback):
        return


_tqdm_active = True


class _tqdm_cls:
    def __call__(self, *args, disable=False, **kwargs):
        if _tqdm_active and not disable:
            return tqdm_lib.tqdm(*args, **kwargs)
        else:
            return EmptyTqdm(*args, **kwargs)

    def set_lock(self, *args, **kwargs):
        self._lock = None
        if _tqdm_active:
            return tqdm_lib.tqdm.set_lock(*args, **kwargs)

    def get_lock(self):
        if _tqdm_active:
            return tqdm_lib.tqdm.get_lock()


tqdm = _tqdm_cls()
def is_progress_bar_enabled() -> bool:
    global _tqdm_active
    return bool(_tqdm_active)


def enable_progress_bar():
    global _tqdm_active
    _tqdm_active = True


def disable_progress_bar():
    global _tqdm_active
    _tqdm_active = False
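# --- Added usage sketch (hedged; for illustration only) ---
# Typical use of the helpers defined above from library code:
#
#   set_verbosity_info()            # or export DATASETS_VERBOSITY=info
#   logger = get_logger(__name__)
#   logger.info("effective level: %s", get_verbosity())
#   disable_progress_bar()          # tqdm(...) calls become no-ops
#   for _ in tqdm(range(3)):
#       pass
#   enable_progress_bar()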
| 216 | 1 |
"""simple docstring"""
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
logger = logging.get_logger(__name__)

ROFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP = {
"junnyu/roformer_chinese_small": "https://huggingface.co/junnyu/roformer_chinese_small/resolve/main/config.json",
"junnyu/roformer_chinese_base": "https://huggingface.co/junnyu/roformer_chinese_base/resolve/main/config.json",
"junnyu/roformer_chinese_char_small": (
"https://huggingface.co/junnyu/roformer_chinese_char_small/resolve/main/config.json"
),
"junnyu/roformer_chinese_char_base": (
"https://huggingface.co/junnyu/roformer_chinese_char_base/resolve/main/config.json"
),
"junnyu/roformer_small_discriminator": (
"https://huggingface.co/junnyu/roformer_small_discriminator/resolve/main/config.json"
),
"junnyu/roformer_small_generator": (
"https://huggingface.co/junnyu/roformer_small_generator/resolve/main/config.json"
),
# See all RoFormer models at https://huggingface.co/models?filter=roformer
}
class RoFormerConfig(PretrainedConfig):
    model_type = "roformer"

    def __init__(self, vocab_size=50000, embedding_size=None, hidden_size=768, num_hidden_layers=12, num_attention_heads=12, intermediate_size=3072, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=1536, type_vocab_size=2, initializer_range=0.02, layer_norm_eps=1E-1_2, pad_token_id=0, rotary_value=False, use_cache=True, **kwargs):
        super().__init__(pad_token_id=pad_token_id, **kwargs)

        self.vocab_size = vocab_size
        self.embedding_size = hidden_size if embedding_size is None else embedding_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.rotary_value = rotary_value
        self.use_cache = use_cache
class RoFormerOnnxConfig(OnnxConfig):
    @property
    def inputs(self):
        if self.task == "multiple-choice":
            dynamic_axis = {0: "batch", 1: "choice", 2: "sequence"}
        else:
            dynamic_axis = {0: "batch", 1: "sequence"}
        # note: this unconditional assignment (present in the original) overrides the branch above
        dynamic_axis = {0: "batch", 1: "sequence"}
        return OrderedDict(
            [
                ("input_ids", dynamic_axis),
                ("attention_mask", dynamic_axis),
                ("token_type_ids", dynamic_axis),
            ] )
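# --- Added usage sketch (hedged; for illustration only) ---
# Building the config and inspecting the ONNX input axes declared above:
#
#   config = RoFormerConfig(vocab_size=50000, hidden_size=768)
#   onnx_config = RoFormerOnnxConfig(config)
#   print(onnx_config.inputs)   # input_ids / attention_mask / token_type_ids axes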
| 369 |
import tempfile
import torch
from diffusers import IPNDMScheduler
from .test_schedulers import SchedulerCommonTest
class IPNDMSchedulerTest(SchedulerCommonTest):
    scheduler_classes = (IPNDMScheduler,)
    forward_default_kwargs = (('num_inference_steps', 50),)

    def get_scheduler_config(self, **kwargs):
        config = {"num_train_timesteps": 1000}
        config.update(**kwargs)
        return config
    def check_over_configs(self, time_step=0, **config):
        kwargs = dict(self.forward_default_kwargs)
        num_inference_steps = kwargs.pop("num_inference_steps", None)
        sample = self.dummy_sample
        residual = 0.1 * sample
        dummy_past_residuals = [residual + 0.2, residual + 0.15, residual + 0.1, residual + 0.05]

        for scheduler_class in self.scheduler_classes:
            scheduler_config = self.get_scheduler_config(**config)
            scheduler = scheduler_class(**scheduler_config)
            scheduler.set_timesteps(num_inference_steps)
            # copy over dummy past residuals
            scheduler.ets = dummy_past_residuals[:]

            if time_step is None:
                time_step = scheduler.timesteps[len(scheduler.timesteps) // 2]

            with tempfile.TemporaryDirectory() as tmpdirname:
                scheduler.save_config(tmpdirname)
                new_scheduler = scheduler_class.from_pretrained(tmpdirname)
                new_scheduler.set_timesteps(num_inference_steps)
                # copy over dummy past residuals
                new_scheduler.ets = dummy_past_residuals[:]

            output = scheduler.step(residual, time_step, sample, **kwargs).prev_sample
            new_output = new_scheduler.step(residual, time_step, sample, **kwargs).prev_sample

            assert torch.sum(torch.abs(output - new_output)) < 1E-5, "Scheduler outputs are not identical"

            output = scheduler.step(residual, time_step, sample, **kwargs).prev_sample
            new_output = new_scheduler.step(residual, time_step, sample, **kwargs).prev_sample

            assert torch.sum(torch.abs(output - new_output)) < 1E-5, "Scheduler outputs are not identical"

    def test_from_save_pretrained(self):
        pass
    def check_over_forward(self, time_step=0, **forward_kwargs):
        kwargs = dict(self.forward_default_kwargs)
        num_inference_steps = kwargs.pop("num_inference_steps", None)
        sample = self.dummy_sample
        residual = 0.1 * sample
        dummy_past_residuals = [residual + 0.2, residual + 0.15, residual + 0.1, residual + 0.05]

        for scheduler_class in self.scheduler_classes:
            scheduler_config = self.get_scheduler_config()
            scheduler = scheduler_class(**scheduler_config)
            scheduler.set_timesteps(num_inference_steps)

            # copy over dummy past residuals (must be after setting timesteps)
            scheduler.ets = dummy_past_residuals[:]

            if time_step is None:
                time_step = scheduler.timesteps[len(scheduler.timesteps) // 2]

            with tempfile.TemporaryDirectory() as tmpdirname:
                scheduler.save_config(tmpdirname)
                new_scheduler = scheduler_class.from_pretrained(tmpdirname)
                # copy over dummy past residuals
                new_scheduler.set_timesteps(num_inference_steps)

                # copy over dummy past residual (must be after setting timesteps)
                new_scheduler.ets = dummy_past_residuals[:]

            output = scheduler.step(residual, time_step, sample, **kwargs).prev_sample
            new_output = new_scheduler.step(residual, time_step, sample, **kwargs).prev_sample

            assert torch.sum(torch.abs(output - new_output)) < 1E-5, "Scheduler outputs are not identical"

            output = scheduler.step(residual, time_step, sample, **kwargs).prev_sample
            new_output = new_scheduler.step(residual, time_step, sample, **kwargs).prev_sample

            assert torch.sum(torch.abs(output - new_output)) < 1E-5, "Scheduler outputs are not identical"
    def full_loop(self, **config):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config(**config)
        scheduler = scheduler_class(**scheduler_config)

        num_inference_steps = 10
        model = self.dummy_model()
        sample = self.dummy_sample_deter
        scheduler.set_timesteps(num_inference_steps)

        for i, t in enumerate(scheduler.timesteps):
            residual = model(sample, t)
            sample = scheduler.step(residual, t, sample).prev_sample

        for i, t in enumerate(scheduler.timesteps):
            residual = model(sample, t)
            sample = scheduler.step(residual, t, sample).prev_sample

        return sample
    def test_step_shape(self):
        kwargs = dict(self.forward_default_kwargs)
        num_inference_steps = kwargs.pop("num_inference_steps", None)

        for scheduler_class in self.scheduler_classes:
            scheduler_config = self.get_scheduler_config()
            scheduler = scheduler_class(**scheduler_config)

            sample = self.dummy_sample
            residual = 0.1 * sample

            if num_inference_steps is not None and hasattr(scheduler, "set_timesteps"):
                scheduler.set_timesteps(num_inference_steps)
            elif num_inference_steps is not None and not hasattr(scheduler, "set_timesteps"):
                scheduler.num_inference_steps = num_inference_steps

            # copy over dummy past residuals (must be done after set_timesteps)
            dummy_past_residuals = [residual + 0.2, residual + 0.15, residual + 0.1, residual + 0.05]
            scheduler.ets = dummy_past_residuals[:]

            time_step_0 = scheduler.timesteps[5]
            time_step_1 = scheduler.timesteps[6]

            output_0 = scheduler.step(residual, time_step_0, sample, **kwargs).prev_sample
            output_1 = scheduler.step(residual, time_step_1, sample, **kwargs).prev_sample

            self.assertEqual(output_0.shape, sample.shape)
            self.assertEqual(output_0.shape, output_1.shape)

            output_0 = scheduler.step(residual, time_step_0, sample, **kwargs).prev_sample
            output_1 = scheduler.step(residual, time_step_1, sample, **kwargs).prev_sample

            self.assertEqual(output_0.shape, sample.shape)
            self.assertEqual(output_0.shape, output_1.shape)
    def test_timesteps(self):
        for timesteps in [100, 1000]:
            self.check_over_configs(num_train_timesteps=timesteps, time_step=None)

    def test_inference_steps(self):
        for t, num_inference_steps in zip([1, 5, 10], [10, 50, 100]):
            self.check_over_forward(num_inference_steps=num_inference_steps, time_step=None)

    def test_full_loop_no_noise(self):
        sample = self.full_loop()
        result_mean = torch.mean(torch.abs(sample))
        assert abs(result_mean.item() - 254_0529) < 10
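# --- Added usage sketch (hedged; the loop full_loop() exercises, outside the test harness) ---
# `model` stands in for any network mapping (sample, t) -> residual.
#
#   scheduler = IPNDMScheduler(num_train_timesteps=1000)
#   scheduler.set_timesteps(10)
#   for t in scheduler.timesteps:
#       residual = model(sample, t)
#       sample = scheduler.step(residual, t, sample).prev_sample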
| 347 | 0 |
"""simple docstring"""
import numpy
# List of input, output pairs
train_data = (
    ((5, 2, 3), 15),
    ((6, 5, 9), 25),
    ((11, 12, 13), 41),
    ((1, 1, 1), 8),
    ((11, 12, 13), 41),
)
test_data = (((515, 22, 13), 555), ((61, 35, 49), 150))
parameter_vector = [2, 4, 1, 5]
m = len(train_data)
LEARNING_RATE = 0.009
def _error(example_no, data_set="train"):
    """Error = hypothesis value - actual output for the given example."""
    return calculate_hypothesis_value(example_no, data_set) - output(
        example_no, data_set
    )


def _hypothesis_value(data_input_tuple):
    """Compute theta_0 + theta_1*x_1 + ... + theta_n*x_n for one input tuple."""
    hyp_val = 0
    for i in range(len(parameter_vector) - 1):
        hyp_val += data_input_tuple[i] * parameter_vector[i + 1]
    hyp_val += parameter_vector[0]
    return hyp_val


def output(example_no, data_set):
    """Return the actual output value for the given example."""
    if data_set == "train":
        return train_data[example_no][1]
    elif data_set == "test":
        return test_data[example_no][1]
    return None


def calculate_hypothesis_value(example_no, data_set):
    """Return the hypothesis value for the given example."""
    if data_set == "train":
        return _hypothesis_value(train_data[example_no][0])
    elif data_set == "test":
        return _hypothesis_value(test_data[example_no][0])
    return None


def summation_of_cost_derivative(index, end=m):
    """Sum the error terms for the derivative w.r.t. parameter `index` (-1 = bias)."""
    summation_value = 0
    for i in range(end):
        if index == -1:
            summation_value += _error(i)
        else:
            summation_value += _error(i) * train_data[i][0][index]
    return summation_value


def get_cost_derivative(index):
    cost_derivative_value = summation_of_cost_derivative(index, m) / m
    return cost_derivative_value


def run_gradient_descent():
    global parameter_vector
    # Tune these values to set a tolerance value for predicted output
    absolute_error_limit = 0.000002
    relative_error_limit = 0
    j = 0
    while True:
        j += 1
        temp_parameter_vector = [0, 0, 0, 0]
        for i in range(0, len(parameter_vector)):
            cost_derivative = get_cost_derivative(i - 1)
            temp_parameter_vector[i] = (
                parameter_vector[i] - LEARNING_RATE * cost_derivative
            )
        if numpy.allclose(
            parameter_vector, temp_parameter_vector, atol=absolute_error_limit, rtol=relative_error_limit, ):
            break
        parameter_vector = temp_parameter_vector
    print(("Number of iterations:", j))


def test_gradient_descent():
    for i in range(len(test_data)):
        print(("Actual output value:", output(i, "test")))
        print(("Hypothesis output:", calculate_hypothesis_value(i, "test")))
if __name__ == "__main__":
run_gradient_descent()
print('\nTesting gradient descent for a linear hypothesis function.\n')
test_gradient_descent()
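# --- Added illustration (hedged; not part of the original algorithm) ---
# The per-parameter loops above compute the gradient of the mean squared error;
# the same update can be written in vectorized numpy, assuming the train_data
# layout used in this file (bias term first in parameter_vector):
#
#   def vectorized_step(theta, lr=LEARNING_RATE):
#       X = numpy.array([[1, *inp] for inp, _ in train_data], dtype=float)
#       y = numpy.array([out for _, out in train_data], dtype=float)
#       grad = X.T @ (X @ theta - y) / m
#       return theta - lr * grad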
| 84 |
def is_power_of_two(number: int) -> bool:
    # descriptive name (the original identifier was lost); semantics unchanged
    if number < 0:
        raise ValueError('number must not be negative')
    return number & (number - 1) == 0
if __name__ == "__main__":
import doctest
doctest.testmod()
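# --- Added note and example (hedged) ---
# `number & (number - 1)` clears the lowest set bit, so the expression is 0
# exactly when at most one bit is set; note the check also accepts 0, which is
# not a power of two, so callers must exclude it if needed.
#
#   >>> [n for n in range(1, 20) if is_power_of_two(n)]
#   [1, 2, 4, 8, 16]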
| 284 | 0 |
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_squeezebert import SqueezeBertTokenizer
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {'vocab_file': 'vocab.txt', 'tokenizer_file': 'tokenizer.json'}

PRETRAINED_VOCAB_FILES_MAP = {
'vocab_file': {
'squeezebert/squeezebert-uncased': (
'https://huggingface.co/squeezebert/squeezebert-uncased/resolve/main/vocab.txt'
),
'squeezebert/squeezebert-mnli': 'https://huggingface.co/squeezebert/squeezebert-mnli/resolve/main/vocab.txt',
'squeezebert/squeezebert-mnli-headless': (
'https://huggingface.co/squeezebert/squeezebert-mnli-headless/resolve/main/vocab.txt'
),
},
'tokenizer_file': {
'squeezebert/squeezebert-uncased': (
'https://huggingface.co/squeezebert/squeezebert-uncased/resolve/main/tokenizer.json'
),
'squeezebert/squeezebert-mnli': (
'https://huggingface.co/squeezebert/squeezebert-mnli/resolve/main/tokenizer.json'
),
'squeezebert/squeezebert-mnli-headless': (
'https://huggingface.co/squeezebert/squeezebert-mnli-headless/resolve/main/tokenizer.json'
),
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
'squeezebert/squeezebert-uncased': 5_1_2,
'squeezebert/squeezebert-mnli': 5_1_2,
'squeezebert/squeezebert-mnli-headless': 5_1_2,
}
PRETRAINED_INIT_CONFIGURATION = {
'squeezebert/squeezebert-uncased': {'do_lower_case': True},
'squeezebert/squeezebert-mnli': {'do_lower_case': True},
'squeezebert/squeezebert-mnli-headless': {'do_lower_case': True},
}
class SqueezeBertTokenizerFast(PreTrainedTokenizerFast):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    pretrained_init_configuration = PRETRAINED_INIT_CONFIGURATION
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    slow_tokenizer_class = SqueezeBertTokenizer

    def __init__(self, vocab_file=None, tokenizer_file=None, do_lower_case=True, unk_token="[UNK]", sep_token="[SEP]", pad_token="[PAD]", cls_token="[CLS]", mask_token="[MASK]", tokenize_chinese_chars=True, strip_accents=None, **kwargs):
        super().__init__(
            vocab_file, tokenizer_file=tokenizer_file, do_lower_case=do_lower_case, unk_token=unk_token, sep_token=sep_token, pad_token=pad_token, cls_token=cls_token, mask_token=mask_token, tokenize_chinese_chars=tokenize_chinese_chars, strip_accents=strip_accents, **kwargs, )

        normalizer_state = json.loads(self.backend_tokenizer.normalizer.__getstate__())
        if (
            normalizer_state.get("lowercase", do_lower_case) != do_lower_case
            or normalizer_state.get("strip_accents", strip_accents) != strip_accents
            or normalizer_state.get("handle_chinese_chars", tokenize_chinese_chars) != tokenize_chinese_chars
        ):
            normalizer_class = getattr(normalizers, normalizer_state.pop("type"))
            normalizer_state["lowercase"] = do_lower_case
            normalizer_state["strip_accents"] = strip_accents
            normalizer_state["handle_chinese_chars"] = tokenize_chinese_chars
            self.backend_tokenizer.normalizer = normalizer_class(**normalizer_state)

        self.do_lower_case = do_lower_case
    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        output = [self.cls_token_id] + token_ids_0 + [self.sep_token_id]

        if token_ids_1:
            output += token_ids_1 + [self.sep_token_id]

        return output

    def create_token_type_ids_from_sequences(self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1]

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        files = self._tokenizer.model.save(save_directory, name=filename_prefix)
        return tuple(files)
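# --- Added usage sketch (hedged; for illustration only) ---
# Pair encoding exercises the token-type logic implemented above:
#
#   from transformers import SqueezeBertTokenizerFast
#
#   tokenizer = SqueezeBertTokenizerFast.from_pretrained("squeezebert/squeezebert-uncased")
#   enc = tokenizer("Hello world", "How are you?")
#   print(enc["token_type_ids"])   # 0s for the first segment, 1s for the second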
| 146 |
import warnings
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
logger = logging.get_logger(__name__)

SEGFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP = {
'nvidia/segformer-b0-finetuned-ade-512-512': (
'https://huggingface.co/nvidia/segformer-b0-finetuned-ade-512-512/resolve/main/config.json'
),
# See all SegFormer models at https://huggingface.co/models?filter=segformer
}
class SegformerConfig(PretrainedConfig):
    model_type = "segformer"

    def __init__(self, num_channels=3, num_encoder_blocks=4, depths=[2, 2, 2, 2], sr_ratios=[8, 4, 2, 1], hidden_sizes=[32, 64, 160, 256], patch_sizes=[7, 3, 3, 3], strides=[4, 2, 2, 2], num_attention_heads=[1, 2, 5, 8], mlp_ratios=[4, 4, 4, 4], hidden_act="gelu", hidden_dropout_prob=0.0, attention_probs_dropout_prob=0.0, classifier_dropout_prob=0.1, initializer_range=0.02, drop_path_rate=0.1, layer_norm_eps=1E-6, decoder_hidden_size=256, semantic_loss_ignore_index=255, **kwargs):
        super().__init__(**kwargs)

        if "reshape_last_stage" in kwargs and kwargs["reshape_last_stage"] is False:
            warnings.warn(
                "Reshape_last_stage is set to False in this config. This argument is deprecated and will soon be"
                " removed, as the behaviour will default to that of reshape_last_stage = True.", FutureWarning, )

        self.num_channels = num_channels
        self.num_encoder_blocks = num_encoder_blocks
        self.depths = depths
        self.sr_ratios = sr_ratios
        self.hidden_sizes = hidden_sizes
        self.patch_sizes = patch_sizes
        self.strides = strides
        self.mlp_ratios = mlp_ratios
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.classifier_dropout_prob = classifier_dropout_prob
        self.initializer_range = initializer_range
        self.drop_path_rate = drop_path_rate
        self.layer_norm_eps = layer_norm_eps
        self.decoder_hidden_size = decoder_hidden_size
        self.reshape_last_stage = kwargs.get("reshape_last_stage", True)
        self.semantic_loss_ignore_index = semantic_loss_ignore_index
class SegformerOnnxConfig(OnnxConfig):
    torch_onnx_minimum_version = version.parse("1.11")

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        return OrderedDict(
            [
                ("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}),
            ] )

    @property
    def atol_for_validation(self) -> float:
        return 1E-4

    @property
    def default_onnx_opset(self) -> int:
        return 12
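# --- Added usage sketch (hedged; for illustration only) ---
# The OnnxConfig above declares a single dynamic 4D image input, e.g.:
#
#   config = SegformerConfig()
#   onnx_config = SegformerOnnxConfig(config)
#   print(onnx_config.inputs)                # {'pixel_values': {0: 'batch', ...}}
#   print(onnx_config.default_onnx_opset)    # 12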
| 146 | 1 |
# Copyright 2023 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {'configuration_timm_backbone': ['TimmBackboneConfig']}


try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure['modeling_timm_backbone'] = ['TimmBackbone']


if TYPE_CHECKING:
    from .configuration_timm_backbone import TimmBackboneConfig

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_timm_backbone import TimmBackbone

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
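# --- Added note (hedged) ---
# With the _LazyModule indirection above, `from transformers.models.timm_backbone
# import TimmBackbone` defers the torch-dependent import until first attribute
# access, so merely importing the package stays cheap.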
| 121 |
from typing import Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature
from ...image_transforms import get_image_size, pad, rescale, to_channel_dimension_format
from ...image_utils import ChannelDimension, ImageInput, make_list_of_images, to_numpy_array, valid_images
from ...utils import TensorType, logging
logger = logging.get_logger(__name__)


# Class name restored descriptively (the original was lost); the rescale +
# symmetric-pad layout matches the Swin2SR image processor.
class Swin2SRImageProcessor(BaseImageProcessor):
    model_input_names = ['pixel_values']

    def __init__(self, do_rescale: bool = True, rescale_factor: Union[int, float] = 1 / 255, do_pad: bool = True, pad_size: int = 8, **kwargs) -> None:
        super().__init__(**kwargs)
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_pad = do_pad
        self.pad_size = pad_size

    def rescale(self, image: np.ndarray, scale: float, data_format: Optional[Union[str, ChannelDimension]] = None, **kwargs) -> np.ndarray:
        return rescale(image, scale=scale, data_format=data_format, **kwargs)

    def pad(self, image: np.ndarray, size: int, data_format: Optional[Union[str, ChannelDimension]] = None) -> np.ndarray:
        old_height, old_width = get_image_size(image)
        pad_height = (old_height // size + 1) * size - old_height
        pad_width = (old_width // size + 1) * size - old_width

        return pad(image, ((0, pad_height), (0, pad_width)), mode='symmetric', data_format=data_format)

    def preprocess(self, images: ImageInput, do_rescale: Optional[bool] = None, rescale_factor: Optional[float] = None, do_pad: Optional[bool] = None, pad_size: Optional[int] = None, return_tensors: Optional[Union[str, TensorType]] = None, data_format: Union[str, ChannelDimension] = ChannelDimension.FIRST, **kwargs):
        do_rescale = do_rescale if do_rescale is not None else self.do_rescale
        rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor
        do_pad = do_pad if do_pad is not None else self.do_pad
        pad_size = pad_size if pad_size is not None else self.pad_size

        images = make_list_of_images(images)

        if not valid_images(images):
            raise ValueError(
                'Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, '
                'torch.Tensor, tf.Tensor or jax.ndarray.' )

        if do_rescale and rescale_factor is None:
            raise ValueError('Rescale factor must be specified if do_rescale is True.')

        # All transformations expect numpy arrays.
        images = [to_numpy_array(image) for image in images]

        if do_rescale:
            images = [self.rescale(image=image, scale=rescale_factor) for image in images]

        if do_pad:
            images = [self.pad(image, size=pad_size) for image in images]

        images = [to_channel_dimension_format(image, data_format) for image in images]

        data = {'pixel_values': images}
        return BatchFeature(data=data, tensor_type=return_tensors)
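# --- Added usage sketch (hedged; for illustration only) ---
# The processor rescales to [0, 1] and symmetric-pads height/width up to the
# next multiple of `pad_size` (8 by default), as super-resolution models expect:
#
#   import numpy as np
#   processor = Swin2SRImageProcessor()   # name is the restored guess above
#   image = (np.random.rand(21, 30, 3) * 255).astype("uint8")
#   out = processor(image, return_tensors="np")
#   print(out["pixel_values"].shape)      # (1, 3, 24, 32): padded to multiples of 8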
| 121 | 1 |
"""simple docstring"""
from ...utils import (
OptionalDependencyNotAvailable,
is_torch_available,
is_transformers_available,
is_transformers_version,
)
try:
if not (is_transformers_available() and is_torch_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_torch_and_transformers_objects import (
ImageTextPipelineOutput,
UniDiffuserPipeline,
)
else:
from .modeling_text_decoder import UniDiffuserTextDecoder
    from .modeling_uvit import UniDiffuserModel, UTransformer2DModel
    from .pipeline_unidiffuser import ImageTextPipelineOutput, UniDiffuserPipeline
| 58 |
"""simple docstring"""
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_video_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import VivitImageProcessor
class VivitImageProcessingTester(unittest.TestCase):
    def __init__(self, parent, batch_size=7, num_channels=3, num_frames=10, image_size=18, min_resolution=30, max_resolution=400, do_resize=True, size=None, do_normalize=True, image_mean=[0.5, 0.5, 0.5], image_std=[0.5, 0.5, 0.5], crop_size=None):
        size = size if size is not None else {"shortest_edge": 18}
        crop_size = crop_size if crop_size is not None else {"height": 18, "width": 18}

        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.num_frames = num_frames
        self.image_size = image_size
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.size = size
        self.do_normalize = do_normalize
        self.image_mean = image_mean
        self.image_std = image_std
        self.crop_size = crop_size

    def prepare_image_processor_dict(self):
        return {
            "image_mean": self.image_mean,
            "image_std": self.image_std,
            "do_normalize": self.do_normalize,
            "do_resize": self.do_resize,
            "size": self.size,
            "crop_size": self.crop_size,
        }
@require_torch
@require_vision
class VivitImageProcessingTest(ImageProcessingSavingTestMixin, unittest.TestCase):
    image_processing_class = VivitImageProcessor if is_vision_available() else None

    def setUp(self):
        self.image_processor_tester = VivitImageProcessingTester(self)

    @property
    def image_processor_dict(self):
        return self.image_processor_tester.prepare_image_processor_dict()
    def test_image_processor_properties(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        self.assertTrue(hasattr(image_processing, "image_mean"))
        self.assertTrue(hasattr(image_processing, "image_std"))
        self.assertTrue(hasattr(image_processing, "do_normalize"))
        self.assertTrue(hasattr(image_processing, "do_resize"))
        self.assertTrue(hasattr(image_processing, "do_center_crop"))
        self.assertTrue(hasattr(image_processing, "size"))

    def test_image_processor_from_dict_with_kwargs(self):
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict)
        self.assertEqual(image_processor.size, {"shortest_edge": 18})
        self.assertEqual(image_processor.crop_size, {"height": 18, "width": 18})

        image_processor = self.image_processing_class.from_dict(self.image_processor_dict, size=42, crop_size=84)
        self.assertEqual(image_processor.size, {"shortest_edge": 42})
        self.assertEqual(image_processor.crop_size, {"height": 84, "width": 84})
    def test_call_pil(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PIL videos
        video_inputs = prepare_video_inputs(self.image_processor_tester, equal_resolution=False)
        for video in video_inputs:
            self.assertIsInstance(video, list)
            self.assertIsInstance(video[0], Image.Image)

        # Test not batched input
        encoded_videos = image_processing(video_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_videos.shape, (
                1,
                self.image_processor_tester.num_frames,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ), )

        # Test batched
        encoded_videos = image_processing(video_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_videos.shape, (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_frames,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ), )
    def test_call_numpy(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random numpy tensors
        video_inputs = prepare_video_inputs(self.image_processor_tester, equal_resolution=False, numpify=True)
        for video in video_inputs:
            self.assertIsInstance(video, list)
            self.assertIsInstance(video[0], np.ndarray)

        # Test not batched input
        encoded_videos = image_processing(video_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_videos.shape, (
                1,
                self.image_processor_tester.num_frames,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ), )

        # Test batched
        encoded_videos = image_processing(video_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_videos.shape, (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_frames,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ), )
    def test_call_pytorch(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PyTorch tensors
        video_inputs = prepare_video_inputs(self.image_processor_tester, equal_resolution=False, torchify=True)
        for video in video_inputs:
            self.assertIsInstance(video, list)
            self.assertIsInstance(video[0], torch.Tensor)

        # Test not batched input
        encoded_videos = image_processing(video_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_videos.shape, (
                1,
                self.image_processor_tester.num_frames,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ), )

        # Test batched
        encoded_videos = image_processing(video_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_videos.shape, (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_frames,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ), )
| 58 | 1 |
'''simple docstring'''
import math
import os
import re
import sys
import unittest
from pathlib import Path
from typing import Tuple
from unittest.mock import patch
from parameterized import parameterized
from transformers.testing_utils import (
CaptureStderr,
ExtendSysPath,
TestCasePlus,
execute_subprocess_async,
get_gpu_count,
get_torch_dist_unique_port,
require_apex,
require_bitsandbytes,
require_fairscale,
require_torch,
require_torch_gpu,
require_torch_multi_gpu,
require_torch_non_multi_gpu,
slow,
)
from transformers.trainer_callback import TrainerState
from transformers.trainer_utils import set_seed
bindir = os.path.abspath(os.path.dirname(__file__))
with ExtendSysPath(f'''{bindir}/../../examples/pytorch/translation'''):
    from run_translation import main  # noqa


set_seed(42)
MARIAN_MODEL = 'sshleifer/student_marian_en_ro_6_1'
MBART_TINY = 'sshleifer/tiny-mbart'


@require_torch
class TestTrainerExt(TestCasePlus):
    def run_seq2seq_quick(self, distributed=False, extra_args_str=None, predict_with_generate=True, do_train=True, do_eval=True, do_predict=True):
        output_dir = self.run_trainer(
            eval_steps=1, max_len=12, model_name=MBART_TINY, num_train_epochs=1, distributed=distributed, extra_args_str=extra_args_str, predict_with_generate=predict_with_generate, do_train=do_train, do_eval=do_eval, do_predict=do_predict, )
        logs = TrainerState.load_from_json(os.path.join(output_dir, 'trainer_state.json')).log_history

        if not do_eval:
            return

        eval_metrics = [log for log in logs if 'eval_loss' in log.keys()]

        first_step_stats = eval_metrics[0]
        if predict_with_generate:
            assert "eval_bleu" in first_step_stats

            last_step_stats = eval_metrics[-1]
            assert isinstance(last_step_stats['eval_bleu'], float)
            assert not math.isnan(float(last_step_stats['eval_loss'])), "eval_loss must not be `nan`"
    @require_torch_non_multi_gpu
    def test_run_seq2seq_no_dist(self):
        self.run_seq2seq_quick()

    # verify that the trainer can handle non-distributed with n_gpu > 1
    @require_torch_multi_gpu
    def test_run_seq2seq_dp(self):
        self.run_seq2seq_quick(distributed=False)

    # verify that the trainer can handle distributed with n_gpu > 1
    @require_torch_multi_gpu
    def test_run_seq2seq_ddp(self):
        self.run_seq2seq_quick(distributed=True)

    @unittest.skip('Requires an update of the env running those tests')
    @require_torch_multi_gpu
    @require_fairscale
    def test_run_seq2seq_sharded_ddp(self):
        self.run_seq2seq_quick(distributed=True, extra_args_str='--sharded_ddp simple')

    @unittest.skip('Requires an update of the env running those tests')
    @require_torch_multi_gpu
    @require_fairscale
    def test_run_seq2seq_sharded_ddp_fp16(self):
        self.run_seq2seq_quick(distributed=True, extra_args_str='--sharded_ddp simple --fp16')

    @unittest.skip('Requires an update of the env running those tests')
    @require_torch_multi_gpu
    @require_fairscale
    def test_run_seq2seq_fully_sharded_ddp(self):
        self.run_seq2seq_quick(distributed=True, extra_args_str='--sharded_ddp zero_dp_2', predict_with_generate=False)

    @unittest.skip('Requires an update of the env running those tests')
    @require_torch_multi_gpu
    @require_fairscale
    def test_run_seq2seq_fully_sharded_ddp_fp16(self):
        self.run_seq2seq_quick(
            distributed=True, extra_args_str='--sharded_ddp zero_dp_2 --fp16', predict_with_generate=False)

    @require_apex
    @require_torch_gpu
    def test_run_seq2seq_apex(self):
        self.run_seq2seq_quick(distributed=True, extra_args_str='--fp16 --fp16_backend=apex')
        # test 2nd time - was getting eval_loss': nan'
        # to reproduce the problem set distributed=False
        self.run_seq2seq_quick(distributed=True, extra_args_str='--fp16 --fp16_backend=apex')
@parameterized.expand(['base', 'low', 'high', 'mixed'] )
@require_torch_multi_gpu
def UpperCAmelCase_ ( self : Tuple , lowercase_ : Any ) -> Union[str, Any]:
UpperCAmelCase : Optional[int] = {
# test with the default log_level - should be info and thus log info once
'base': {'extra_args_str': '', 'n_matches': 1},
# test with low log_level and log_level_replica - should be noisy on all processes
# now the info string should appear twice on 2 processes
'low': {'extra_args_str': '--log_level debug --log_level_replica debug', 'n_matches': 2},
# test with high log_level and low log_level_replica
# now the info string should appear once only on the replica
'high': {'extra_args_str': '--log_level error --log_level_replica debug', 'n_matches': 1},
# test with high log_level and log_level_replica - should be quiet on all processes
'mixed': {'extra_args_str': '--log_level error --log_level_replica error', 'n_matches': 0},
}
UpperCAmelCase : List[str] = experiments[experiment_id]
UpperCAmelCase : List[Any] = {'distributed': True, 'predict_with_generate': False, 'do_eval': False, 'do_predict': False}
UpperCAmelCase : str = 'Running training'
with CaptureStderr() as cl:
self.run_seqaseq_quick(**lowercase_ , extra_args_str=data['extra_args_str'] )
UpperCAmelCase : List[Any] = len(re.findall(lowercase_ , cl.err ) )
self.assertEqual(lowercase_ , data['n_matches'] )
@slow
def UpperCAmelCase_ ( self : List[Any] ) -> Optional[int]:
UpperCAmelCase : List[Any] = self.run_trainer(
eval_steps=2 , max_len=128 , model_name=lowercase_ , learning_rate=3E-4 , num_train_epochs=10 , distributed=lowercase_ , )
# Check metrics
UpperCAmelCase : Union[str, Any] = TrainerState.load_from_json(os.path.join(lowercase_ , 'trainer_state.json' ) ).log_history
UpperCAmelCase : Optional[Any] = [log for log in logs if 'eval_loss' in log.keys()]
UpperCAmelCase : Any = eval_metrics[0]
UpperCAmelCase : List[str] = eval_metrics[-1]
assert first_step_stats["eval_loss"] > last_step_stats["eval_loss"], "model learned nothing"
assert isinstance(last_step_stats['eval_bleu'] , lowercase_ )
# test if do_predict saves generations and metrics
UpperCAmelCase : List[str] = os.listdir(lowercase_ )
UpperCAmelCase : List[str] = {os.path.basename(lowercase_ ) for p in contents}
assert "generated_predictions.txt" in contents
assert "predict_results.json" in contents
@slow
@require_bitsandbytes
def UpperCAmelCase_ ( self : Tuple ) -> int:
from transformers.training_args import OptimizerNames
def train_and_return_metrics(lowercase_ : str ) -> Tuple[int, float]:
UpperCAmelCase : Optional[Any] = '--skip_memory_metrics 0'
UpperCAmelCase : Tuple = self.run_trainer(
max_len=128 , model_name=lowercase_ , learning_rate=3E-4 , num_train_epochs=1 , optim=lowercase_ , distributed=lowercase_ , extra_args_str=lowercase_ , do_eval=lowercase_ , do_predict=lowercase_ , n_gpus_to_use=1 , )
# Check metrics
UpperCAmelCase : int = TrainerState.load_from_json(Path(lowercase_ , 'trainer_state.json' ) ).log_history
UpperCAmelCase : Tuple = int(logs[0]['train_mem_gpu_peaked_delta'] / 2**20 )
UpperCAmelCase : Union[str, Any] = int(logs[0]['train_mem_gpu_alloc_delta'] / 2**20 )
UpperCAmelCase : int = logs[0]['train_loss']
return gpu_peak_mem_mb, gpu_alloc_mem_mb, loss
UpperCAmelCase , UpperCAmelCase , UpperCAmelCase : Optional[Any] = train_and_return_metrics(OptimizerNames.ADAMW_TORCH.value )
UpperCAmelCase , UpperCAmelCase , UpperCAmelCase : Dict = train_and_return_metrics(OptimizerNames.ADAMW_BNB.value )
UpperCAmelCase : List[Any] = gpu_alloc_mem_orig - gpu_alloc_mem_bnb
UpperCAmelCase : Optional[Any] = gpu_peak_mem_orig + gpu_alloc_mem_orig
UpperCAmelCase : Optional[int] = gpu_peak_mem_bnb + gpu_alloc_mem_bnb
UpperCAmelCase : Dict = gpu_total_mem_orig - gpu_total_mem_bnb
        # sshleifer/student_marian_en_ro_6_1 has 54M parameters, 29M of which is `nn.Embedding` which
# doesn't get quantized and remains in fp32. Therefore we only have 25M parameters quantized
# in 2 bytes and the diff in optim memory usage is derived as so:
#
# - normal 25*8=~200MB (8 bytes per param)
# - bnb 25*2= ~50MB (2 bytes per param)
#
# Thus we should expect ~150MB total memory saved.
#
# Peak memory should be the same - the total should be different by about that same margin
#
# After leaving a small margin to accommodate for differences between gpus let's check
# that we have at least 120MB in savings
UpperCAmelCase : Tuple = 120
# uncomment the following if this test starts failing - requires py38 for a new print feature
# gpu_peak_mem_diff = gpu_peak_mem_orig - gpu_peak_mem_bnb
# print(f"{gpu_alloc_mem_orig=}MB {gpu_peak_mem_orig=}MB {gpu_alloc_mem_orig+gpu_peak_mem_orig=}MB")
# print(f" {gpu_alloc_mem_bnb=}MB {gpu_peak_mem_bnb=}MB {gpu_alloc_mem_bnb+gpu_peak_mem_bnb=}MB")
# print(f"{gpu_alloc_mem_diff=}MB")
# print(f"{gpu_peak_mem_diff=}MB")
# print(f"{gpu_total_mem_orig=}MB, {gpu_total_mem_bnb=}MB")
# print(f"{gpu_total_mem_diff=}MB, {gpu_total_mem_diff=}MB")
self.assertGreater(
lowercase_ , lowercase_ , 'should use ~150MB less alloc gpu memory with BNB, compared to without it for this model but got'
f""" a difference of {gpu_alloc_mem_diff}MB, with gpu_alloc_mem_orig={gpu_alloc_mem_orig}MB and"""
f""" gpu_alloc_mem_bnb={gpu_alloc_mem_bnb}MB""" , )
self.assertGreater(
lowercase_ , lowercase_ , 'should use ~150MB less total gpu memory with BNB, compared to without it for this model but got'
f""" a difference of {gpu_total_mem_diff}MB, with gpu_total_mem_orig={gpu_total_mem_orig}MB and"""
f""" gpu_total_mem_bnb={gpu_total_mem_bnb}MB""" , )
self.assertEqual(
lowercase_ , lowercase_ , f"""loss should be the same, but got loss_orig={loss_orig}, loss_bnb={loss_bnb}""" )
def UpperCAmelCase_ ( self : Optional[Any] , lowercase_ : int , lowercase_ : str , lowercase_ : int , lowercase_ : float = 3E-3 , lowercase_ : str = "adafactor" , lowercase_ : bool = False , lowercase_ : str = None , lowercase_ : int = 0 , lowercase_ : bool = True , lowercase_ : bool = True , lowercase_ : bool = True , lowercase_ : bool = True , lowercase_ : int = None , ) -> Tuple:
UpperCAmelCase : Tuple = self.test_file_dir / '../fixtures/tests_samples/wmt_en_ro'
UpperCAmelCase : str = self.get_auto_remove_tmp_dir()
UpperCAmelCase : Optional[int] = f"""
--model_name_or_path {model_name}
--train_file {data_dir}/train.json
--validation_file {data_dir}/val.json
--test_file {data_dir}/test.json
--output_dir {output_dir}
--overwrite_output_dir
--max_train_samples 8
            --max_source_length {max_len}
            --max_target_length {max_len}
--do_train
--num_train_epochs {str(lowercase_ )}
--per_device_train_batch_size 4
--learning_rate {learning_rate}
--warmup_steps 8
--logging_steps 0
--logging_strategy no
--save_steps {str(lowercase_ )}
--group_by_length
--label_smoothing_factor 0.1
--target_lang ro_RO
--source_lang en_XX
""".split()
UpperCAmelCase : Union[str, Any] = f"""
--do_eval
--per_device_eval_batch_size 4
--max_eval_samples 8
            --val_max_target_length {max_len}
--evaluation_strategy steps
--eval_steps {str(lowercase_ )}
""".split()
UpperCAmelCase : int = '\n --do_predict\n '.split()
UpperCAmelCase : Optional[int] = []
if do_train:
args += args_train
if do_eval:
args += args_eval
if do_predict:
args += args_predict
if predict_with_generate:
args += "--predict_with_generate".split()
if do_train:
if optim == "adafactor":
args += "--adafactor".split()
else:
args += f"""--optim {optim}""".split()
if extra_args_str is not None:
args += extra_args_str.split()
if distributed:
if n_gpus_to_use is None:
UpperCAmelCase : Any = get_gpu_count()
UpperCAmelCase : Any = get_torch_dist_unique_port()
UpperCAmelCase : Union[str, Any] = f"""
-m torch.distributed.run
--nproc_per_node={n_gpus_to_use}
--master_port={master_port}
{self.examples_dir_str}/pytorch/translation/run_translation.py
""".split()
UpperCAmelCase : Union[str, Any] = [sys.executable] + distributed_args + args
# keep for quick debug
# print(" ".join([f"\nPYTHONPATH={self.src_dir_str}"] +cmd)); die
execute_subprocess_async(lowercase_ , env=self.get_env() )
else:
UpperCAmelCase : int = ['run_translation.py'] + args
with patch.object(lowercase_ , 'argv' , lowercase_ ):
main()
return output_dir
import unittest
from typing import Tuple
import torch
from diffusers.utils import floats_tensor, randn_tensor, torch_all_close, torch_device
from diffusers.utils.testing_utils import require_torch
@require_torch
class SCREAMING_SNAKE_CASE__ :
'''simple docstring'''
@property
def A ( self : List[str] ):
'''simple docstring'''
return self.get_dummy_input()
@property
def A ( self : Any ):
'''simple docstring'''
if self.block_type == "down":
return (4, 32, 16, 16)
elif self.block_type == "mid":
return (4, 32, 32, 32)
elif self.block_type == "up":
return (4, 32, 64, 64)
raise ValueError(f'''\'{self.block_type}\' is not a supported block_type. Set it to \'up\', \'mid\', or \'down\'.''' )
def A ( self : Union[str, Any] , lowercase : Any=True , lowercase : List[Any]=False , lowercase : List[str]=False , lowercase : Dict=False , ):
'''simple docstring'''
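        # Fixed RNG seeds keep the dummy tensors deterministic, so per-block output slices are reproducible.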
_snake_case = 4
_snake_case = 32
_snake_case = (32, 32)
_snake_case = torch.manual_seed(0 )
_snake_case = torch.device(lowercase )
_snake_case = (batch_size, num_channels) + sizes
_snake_case = randn_tensor(lowercase , generator=lowercase , device=lowercase )
_snake_case = {'hidden_states': hidden_states}
if include_temb:
_snake_case = 128
_snake_case = randn_tensor((batch_size, temb_channels) , generator=lowercase , device=lowercase )
if include_res_hidden_states_tuple:
_snake_case = torch.manual_seed(1 )
_snake_case = (randn_tensor(lowercase , generator=lowercase , device=lowercase ),)
if include_encoder_hidden_states:
_snake_case = floats_tensor((batch_size, 32, 32) ).to(lowercase )
if include_skip_sample:
_snake_case = randn_tensor(((batch_size, 3) + sizes) , generator=lowercase , device=lowercase )
return dummy_input
def A ( self : Any ):
'''simple docstring'''
_snake_case = {
'in_channels': 32,
'out_channels': 32,
'temb_channels': 128,
}
if self.block_type == "up":
_snake_case = 32
if self.block_type == "mid":
init_dict.pop('out_channels' )
_snake_case = self.dummy_input
return init_dict, inputs_dict
def A ( self : Dict , lowercase : Optional[int] ):
'''simple docstring'''
_snake_case , _snake_case = self.prepare_init_args_and_inputs_for_common()
_snake_case = self.block_class(**lowercase )
unet_block.to(lowercase )
unet_block.eval()
with torch.no_grad():
_snake_case = unet_block(**lowercase )
if isinstance(lowercase , lowercase ):
_snake_case = output[0]
self.assertEqual(output.shape , self.output_shape )
_snake_case = output[0, -1, -3:, -3:]
_snake_case = torch.tensor(lowercase ).to(lowercase )
assert torch_all_close(output_slice.flatten() , lowercase , atol=5E-3 )
@unittest.skipIf(torch_device == 'mps' , 'Training is not supported in mps' )
def A ( self : Dict ):
'''simple docstring'''
_snake_case , _snake_case = self.prepare_init_args_and_inputs_for_common()
_snake_case = self.block_class(**lowercase )
model.to(lowercase )
model.train()
_snake_case = model(**lowercase )
if isinstance(lowercase , lowercase ):
_snake_case = output[0]
_snake_case = torch.device(lowercase )
_snake_case = randn_tensor(output.shape , device=lowercase )
_snake_case = torch.nn.functional.mse_loss(lowercase , lowercase )
        loss.backward()
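

# --- usage sketch (not in the original file) ---------------------------------
# A hedged example of how a concrete test case would consume the mixin above,
# mirroring diffusers' own block tests: pin `block_class` / `block_type` and
# inherit the shared helpers. `UNetMidBlock2D` is a real diffusers class; the
# test-class name is illustrative.
from diffusers.models.unet_2d_blocks import UNetMidBlock2D


class MidBlockUsageSketch(SCREAMING_SNAKE_CASE__, unittest.TestCase):
    block_class = UNetMidBlock2D
    block_type = "mid"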
import inspect
import unittest
from typing import List
import numpy as np
from transformers import EfficientFormerConfig
from transformers.testing_utils import require_tf, require_vision, slow
from transformers.utils import cached_property, is_tf_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import (
TFEfficientFormerForImageClassification,
TFEfficientFormerForImageClassificationWithTeacher,
TFEfficientFormerModel,
)
from transformers.models.efficientformer.modeling_tf_efficientformer import (
TF_EFFICIENTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
)
if is_vision_available():
from PIL import Image
from transformers import EfficientFormerImageProcessor
class lowercase_ :
def __init__( self , __UpperCamelCase , __UpperCamelCase = 1_3 , __UpperCamelCase = 6_4 , __UpperCamelCase = 2 , __UpperCamelCase = 3 , __UpperCamelCase = 3 , __UpperCamelCase = True , __UpperCamelCase = True , __UpperCamelCase = 1_2_8 , __UpperCamelCase=[1_6, 3_2, 6_4, 1_2_8] , __UpperCamelCase = 7 , __UpperCamelCase = 4 , __UpperCamelCase = 3_7 , __UpperCamelCase = "gelu" , __UpperCamelCase = 0.1 , __UpperCamelCase = 0.1 , __UpperCamelCase = 1_0 , __UpperCamelCase = 0.02 , __UpperCamelCase = 2 , __UpperCamelCase = 1 , __UpperCamelCase = 1_2_8 , __UpperCamelCase = [2, 2, 2, 2] , __UpperCamelCase = 2 , __UpperCamelCase = 2 , ):
"""simple docstring"""
UpperCamelCase_ = parent
UpperCamelCase_ = batch_size
UpperCamelCase_ = image_size
UpperCamelCase_ = patch_size
UpperCamelCase_ = num_channels
UpperCamelCase_ = is_training
UpperCamelCase_ = use_labels
UpperCamelCase_ = hidden_size
UpperCamelCase_ = num_hidden_layers
UpperCamelCase_ = num_attention_heads
UpperCamelCase_ = intermediate_size
UpperCamelCase_ = hidden_act
UpperCamelCase_ = hidden_dropout_prob
UpperCamelCase_ = attention_probs_dropout_prob
UpperCamelCase_ = type_sequence_label_size
UpperCamelCase_ = initializer_range
UpperCamelCase_ = encoder_stride
UpperCamelCase_ = num_attention_outputs
UpperCamelCase_ = embed_dim
UpperCamelCase_ = embed_dim + 1
UpperCamelCase_ = resolution
UpperCamelCase_ = depths
UpperCamelCase_ = hidden_sizes
UpperCamelCase_ = dim
UpperCamelCase_ = mlp_expansion_ratio
def lowerCamelCase_ ( self ):
"""simple docstring"""
UpperCamelCase_ = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
UpperCamelCase_ = None
if self.use_labels:
UpperCamelCase_ = ids_tensor([self.batch_size] , self.type_sequence_label_size )
UpperCamelCase_ = self.get_config()
return config, pixel_values, labels
def lowerCamelCase_ ( self ):
"""simple docstring"""
return EfficientFormerConfig(
image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=__UpperCamelCase , initializer_range=self.initializer_range , encoder_stride=self.encoder_stride , resolution=self.resolution , depths=self.depths , hidden_sizes=self.hidden_sizes , dim=self.dim , mlp_expansion_ratio=self.mlp_expansion_ratio , )
def lowerCamelCase_ ( self , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase ):
"""simple docstring"""
UpperCamelCase_ = TFEfficientFormerModel(config=__UpperCamelCase )
UpperCamelCase_ = model(__UpperCamelCase , training=__UpperCamelCase )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def lowerCamelCase_ ( self , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase ):
"""simple docstring"""
UpperCamelCase_ = self.type_sequence_label_size
UpperCamelCase_ = TFEfficientFormerForImageClassification(__UpperCamelCase )
UpperCamelCase_ = model(__UpperCamelCase , labels=__UpperCamelCase , training=__UpperCamelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
# test greyscale images
UpperCamelCase_ = 1
UpperCamelCase_ = TFEfficientFormerForImageClassification(__UpperCamelCase )
UpperCamelCase_ = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
UpperCamelCase_ = model(__UpperCamelCase , labels=__UpperCamelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
def lowerCamelCase_ ( self ):
"""simple docstring"""
UpperCamelCase_ = self.prepare_config_and_inputs()
UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ = config_and_inputs
UpperCamelCase_ = {"""pixel_values""": pixel_values}
return config, inputs_dict
@require_tf
class lowercase_ ( __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , unittest.TestCase ):
A__ : Dict = (
(
TFEfficientFormerModel,
TFEfficientFormerForImageClassificationWithTeacher,
TFEfficientFormerForImageClassification,
)
if is_tf_available()
else ()
)
A__ : str = (
{
"""feature-extraction""": TFEfficientFormerModel,
"""image-classification""": (
TFEfficientFormerForImageClassification,
TFEfficientFormerForImageClassificationWithTeacher,
),
}
if is_tf_available()
else {}
)
A__ : Tuple = False
A__ : Any = False
A__ : List[str] = False
A__ : Union[str, Any] = False
A__ : Dict = False
def lowerCamelCase_ ( self ):
"""simple docstring"""
UpperCamelCase_ = TFEfficientFormerModelTester(self )
UpperCamelCase_ = ConfigTester(
self , config_class=__UpperCamelCase , has_text_modality=__UpperCamelCase , hidden_size=3_7 )
def lowerCamelCase_ ( self ):
"""simple docstring"""
self.config_tester.run_common_tests()
@unittest.skip(reason="""EfficientFormer does not use inputs_embeds""" )
def lowerCamelCase_ ( self ):
"""simple docstring"""
pass
@unittest.skip(reason="""EfficientFormer does not support input and output embeddings""" )
def lowerCamelCase_ ( self ):
"""simple docstring"""
pass
def lowerCamelCase_ ( self ):
"""simple docstring"""
UpperCamelCase_ , UpperCamelCase_ = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
UpperCamelCase_ = model_class(__UpperCamelCase )
UpperCamelCase_ = inspect.signature(model.call )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
UpperCamelCase_ = [*signature.parameters.keys()]
UpperCamelCase_ = ["""pixel_values"""]
self.assertListEqual(arg_names[:1] , __UpperCamelCase )
def lowerCamelCase_ ( self ):
"""simple docstring"""
def check_hidden_states_output(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase ):
UpperCamelCase_ = model_class(__UpperCamelCase )
UpperCamelCase_ = model(**self._prepare_for_class(__UpperCamelCase , __UpperCamelCase ) , training=__UpperCamelCase )
UpperCamelCase_ = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states
UpperCamelCase_ = getattr(
self.model_tester , """expected_num_hidden_layers""" , self.model_tester.num_hidden_layers + 1 )
self.assertEqual(len(__UpperCamelCase ) , __UpperCamelCase )
if hasattr(self.model_tester , """encoder_seq_length""" ):
UpperCamelCase_ = self.model_tester.encoder_seq_length
if hasattr(self.model_tester , """chunk_length""" ) and self.model_tester.chunk_length > 1:
UpperCamelCase_ = seq_length * self.model_tester.chunk_length
else:
UpperCamelCase_ = self.model_tester.seq_length
self.assertListEqual(
list(hidden_states[-1].shape[-2:] ) , [seq_length, self.model_tester.hidden_size] , )
if config.is_encoder_decoder:
UpperCamelCase_ = outputs.decoder_hidden_states
                self.assertIsInstance(__UpperCamelCase , (list, tuple) )
self.assertEqual(len(__UpperCamelCase ) , __UpperCamelCase )
UpperCamelCase_ = getattr(self.model_tester , """seq_length""" , __UpperCamelCase )
UpperCamelCase_ = getattr(self.model_tester , """decoder_seq_length""" , __UpperCamelCase )
self.assertListEqual(
list(hidden_states[-1].shape[-2:] ) , [decoder_seq_length, self.model_tester.hidden_size] , )
UpperCamelCase_ , UpperCamelCase_ = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
UpperCamelCase_ = True
check_hidden_states_output(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
UpperCamelCase_ = True
check_hidden_states_output(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase )
def lowerCamelCase_ ( self , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase=False ):
"""simple docstring"""
UpperCamelCase_ = super()._prepare_for_class(__UpperCamelCase , __UpperCamelCase , return_labels=__UpperCamelCase )
if return_labels:
if model_class.__name__ == "TFEfficientFormerForImageClassificationWithTeacher":
del inputs_dict["labels"]
return inputs_dict
def lowerCamelCase_ ( self ):
"""simple docstring"""
UpperCamelCase_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*__UpperCamelCase )
@unittest.skip(reason="""EfficientFormer does not implement masked image modeling yet""" )
def lowerCamelCase_ ( self ):
"""simple docstring"""
UpperCamelCase_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_image_modeling(*__UpperCamelCase )
def lowerCamelCase_ ( self ):
"""simple docstring"""
UpperCamelCase_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*__UpperCamelCase )
@slow
def lowerCamelCase_ ( self ):
"""simple docstring"""
for model_name in TF_EFFICIENTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
UpperCamelCase_ = TFEfficientFormerModel.from_pretrained(__UpperCamelCase )
self.assertIsNotNone(__UpperCamelCase )
def lowerCamelCase_ ( self ):
"""simple docstring"""
UpperCamelCase_ , UpperCamelCase_ = self.model_tester.prepare_config_and_inputs_for_common()
UpperCamelCase_ = True
UpperCamelCase_ = getattr(self.model_tester , """seq_length""" , __UpperCamelCase )
UpperCamelCase_ = getattr(self.model_tester , """encoder_seq_length""" , __UpperCamelCase )
UpperCamelCase_ = getattr(self.model_tester , """key_length""" , __UpperCamelCase )
UpperCamelCase_ = getattr(self.model_tester , """chunk_length""" , __UpperCamelCase )
if chunk_length is not None and hasattr(self.model_tester , """num_hashes""" ):
UpperCamelCase_ = encoder_seq_length * self.model_tester.num_hashes
for model_class in self.all_model_classes:
UpperCamelCase_ = True
UpperCamelCase_ = False
UpperCamelCase_ = True
UpperCamelCase_ = model_class(__UpperCamelCase )
UpperCamelCase_ = model(**self._prepare_for_class(__UpperCamelCase , __UpperCamelCase ) , training=__UpperCamelCase )
UpperCamelCase_ = outputs.encoder_attentions if config.is_encoder_decoder else outputs.attentions
self.assertEqual(len(__UpperCamelCase ) , self.model_tester.num_attention_outputs )
# check that output_attentions also work using config
del inputs_dict["output_attentions"]
UpperCamelCase_ = True
UpperCamelCase_ = model_class(__UpperCamelCase )
UpperCamelCase_ = model(**self._prepare_for_class(__UpperCamelCase , __UpperCamelCase ) , training=__UpperCamelCase )
UpperCamelCase_ = outputs.encoder_attentions if config.is_encoder_decoder else outputs.attentions
self.assertEqual(len(__UpperCamelCase ) , self.model_tester.num_attention_outputs )
if chunk_length is not None:
self.assertListEqual(
list(attentions[0].shape[-4:] ) , [self.model_tester.num_attention_heads, encoder_seq_length, chunk_length, encoder_key_length] , )
else:
self.assertListEqual(
list(attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, encoder_seq_length, encoder_key_length] , )
def lowerCamelCase_ ( self ):
"""simple docstring"""
UpperCamelCase_ , UpperCamelCase_ = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
# Prepare our model
UpperCamelCase_ = model_class(__UpperCamelCase )
# These are maximally general inputs for the model, with multiple None dimensions
# Hopefully this will catch any conditionals that fail for flexible shapes
UpperCamelCase_ = {
key: tf.keras.Input(shape=val.shape[1:] , dtype=val.dtype , name=__UpperCamelCase )
for key, val in model.input_signature.items()
if key in model.dummy_inputs
}
UpperCamelCase_ = model(__UpperCamelCase )
self.assertTrue(outputs_dict is not None )
def lowerCamelCase__ ( ) -> Any:
UpperCamelCase_ = Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" )
return image
@require_tf
@require_vision
class lowercase_ ( unittest.TestCase ):
@cached_property
def lowerCamelCase_ ( self ):
"""simple docstring"""
return (
EfficientFormerImageProcessor.from_pretrained("""snap-research/efficientformer-l1-300""" )
if is_vision_available()
else None
)
@slow
def lowerCamelCase_ ( self ):
"""simple docstring"""
UpperCamelCase_ = TFEfficientFormerForImageClassification.from_pretrained("""snap-research/efficientformer-l1-300""" )
UpperCamelCase_ = self.default_image_processor
UpperCamelCase_ = prepare_img()
UpperCamelCase_ = image_processor(images=__UpperCamelCase , return_tensors="""tf""" )
# forward pass
UpperCamelCase_ = model(**__UpperCamelCase , training=__UpperCamelCase )
# verify the logits
UpperCamelCase_ = tf.TensorShape((1, 1_0_0_0) )
self.assertEqual(outputs.logits.shape , __UpperCamelCase )
UpperCamelCase_ = tf.constant([-0.0_555, 0.4_825, -0.0_852] )
self.assertTrue(np.allclose(outputs.logits[0, :3] , __UpperCamelCase , atol=1e-4 ) )
@slow
def lowerCamelCase_ ( self ):
"""simple docstring"""
UpperCamelCase_ = TFEfficientFormerForImageClassificationWithTeacher.from_pretrained(
"""snap-research/efficientformer-l1-300""" )
UpperCamelCase_ = self.default_image_processor
UpperCamelCase_ = prepare_img()
UpperCamelCase_ = image_processor(images=__UpperCamelCase , return_tensors="""tf""" )
# forward pass
UpperCamelCase_ = model(**__UpperCamelCase , training=__UpperCamelCase )
# verify the logits
UpperCamelCase_ = tf.TensorShape((1, 1_0_0_0) )
self.assertEqual(outputs.logits.shape , __UpperCamelCase )
UpperCamelCase_ = tf.constant([-0.1_312, 0.4_353, -1.0_499] )
self.assertTrue(np.allclose(outputs.logits[0, :3] , __UpperCamelCase , atol=1e-4 ) )
from typing import Dict, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
IMAGENET_DEFAULT_MEAN,
IMAGENET_DEFAULT_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
is_batched,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, logging
_A = logging.get_logger(__name__)
class lowercase_ ( __SCREAMING_SNAKE_CASE ):
A__ : Union[str, Any] = ["""pixel_values"""]
def __init__( self , __UpperCamelCase = True , __UpperCamelCase = None , __UpperCamelCase = PILImageResampling.BICUBIC , __UpperCamelCase = True , __UpperCamelCase = True , __UpperCamelCase = 1 / 2_5_5 , __UpperCamelCase = None , __UpperCamelCase = True , __UpperCamelCase = None , __UpperCamelCase = None , **__UpperCamelCase , ):
"""simple docstring"""
super().__init__(**__UpperCamelCase )
UpperCamelCase_ = size if size is not None else {"""height""": 2_2_4, """width""": 2_2_4}
UpperCamelCase_ = get_size_dict(__UpperCamelCase )
UpperCamelCase_ = crop_size if crop_size is not None else {"""height""": 2_2_4, """width""": 2_2_4}
UpperCamelCase_ = get_size_dict(__UpperCamelCase , default_to_square=__UpperCamelCase , param_name="""crop_size""" )
UpperCamelCase_ = do_resize
UpperCamelCase_ = do_rescale
UpperCamelCase_ = do_normalize
UpperCamelCase_ = do_center_crop
UpperCamelCase_ = crop_size
UpperCamelCase_ = size
UpperCamelCase_ = resample
UpperCamelCase_ = rescale_factor
UpperCamelCase_ = image_mean if image_mean is not None else IMAGENET_DEFAULT_MEAN
UpperCamelCase_ = image_std if image_std is not None else IMAGENET_DEFAULT_STD
def lowerCamelCase_ ( self , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase = PILImageResampling.BILINEAR , __UpperCamelCase = None , **__UpperCamelCase , ):
"""simple docstring"""
UpperCamelCase_ = get_size_dict(__UpperCamelCase )
if "shortest_edge" in size:
UpperCamelCase_ = get_resize_output_image_size(__UpperCamelCase , size=size["""shortest_edge"""] , default_to_square=__UpperCamelCase )
# size = get_resize_output_image_size(image, size["shortest_edge"], size["longest_edge"])
elif "height" in size and "width" in size:
UpperCamelCase_ = (size["""height"""], size["""width"""])
else:
raise ValueError(f'''Size must contain \'height\' and \'width\' keys or \'shortest_edge\' key. Got {size.keys()}''' )
return resize(__UpperCamelCase , size=__UpperCamelCase , resample=__UpperCamelCase , data_format=__UpperCamelCase , **__UpperCamelCase )
def lowerCamelCase_ ( self , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase = None , **__UpperCamelCase , ):
"""simple docstring"""
UpperCamelCase_ = get_size_dict(__UpperCamelCase )
if "height" not in size or "width" not in size:
raise ValueError(f'''The `size` parameter must contain the keys (height, width). Got {size.keys()}''' )
return center_crop(__UpperCamelCase , size=(size["""height"""], size["""width"""]) , data_format=__UpperCamelCase , **__UpperCamelCase )
def lowerCamelCase_ ( self , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase = None , **__UpperCamelCase ):
"""simple docstring"""
return rescale(__UpperCamelCase , scale=__UpperCamelCase , data_format=__UpperCamelCase , **__UpperCamelCase )
def lowerCamelCase_ ( self , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase = None , **__UpperCamelCase , ):
"""simple docstring"""
return normalize(__UpperCamelCase , mean=__UpperCamelCase , std=__UpperCamelCase , data_format=__UpperCamelCase , **__UpperCamelCase )
def lowerCamelCase_ ( self , __UpperCamelCase , __UpperCamelCase = None , __UpperCamelCase = None , __UpperCamelCase = None , __UpperCamelCase = None , __UpperCamelCase = None , __UpperCamelCase = None , __UpperCamelCase = None , __UpperCamelCase = None , __UpperCamelCase = None , __UpperCamelCase = None , __UpperCamelCase = None , __UpperCamelCase = ChannelDimension.FIRST , **__UpperCamelCase , ):
"""simple docstring"""
UpperCamelCase_ = do_resize if do_resize is not None else self.do_resize
UpperCamelCase_ = do_rescale if do_rescale is not None else self.do_rescale
UpperCamelCase_ = do_normalize if do_normalize is not None else self.do_normalize
UpperCamelCase_ = do_center_crop if do_center_crop is not None else self.do_center_crop
UpperCamelCase_ = crop_size if crop_size is not None else self.crop_size
UpperCamelCase_ = get_size_dict(__UpperCamelCase , param_name="""crop_size""" , default_to_square=__UpperCamelCase )
UpperCamelCase_ = resample if resample is not None else self.resample
UpperCamelCase_ = rescale_factor if rescale_factor is not None else self.rescale_factor
UpperCamelCase_ = image_mean if image_mean is not None else self.image_mean
UpperCamelCase_ = image_std if image_std is not None else self.image_std
UpperCamelCase_ = size if size is not None else self.size
UpperCamelCase_ = get_size_dict(__UpperCamelCase )
if not is_batched(__UpperCamelCase ):
UpperCamelCase_ = [images]
if not valid_images(__UpperCamelCase ):
raise ValueError(
"""Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, """
"""torch.Tensor, tf.Tensor or jax.ndarray.""" )
if do_resize and size is None:
raise ValueError("""Size must be specified if do_resize is True.""" )
if do_center_crop and crop_size is None:
raise ValueError("""Crop size must be specified if do_center_crop is True.""" )
if do_rescale and rescale_factor is None:
raise ValueError("""Rescale factor must be specified if do_rescale is True.""" )
# All transformations expect numpy arrays.
UpperCamelCase_ = [to_numpy_array(__UpperCamelCase ) for image in images]
if do_resize:
UpperCamelCase_ = [self.resize(image=__UpperCamelCase , size=__UpperCamelCase , resample=__UpperCamelCase ) for image in images]
if do_center_crop:
UpperCamelCase_ = [self.center_crop(image=__UpperCamelCase , size=__UpperCamelCase ) for image in images]
if do_rescale:
UpperCamelCase_ = [self.rescale(image=__UpperCamelCase , scale=__UpperCamelCase ) for image in images]
if do_normalize:
UpperCamelCase_ = [self.normalize(image=__UpperCamelCase , mean=__UpperCamelCase , std=__UpperCamelCase ) for image in images]
UpperCamelCase_ = [to_channel_dimension_format(__UpperCamelCase , __UpperCamelCase ) for image in images]
UpperCamelCase_ = {"""pixel_values""": images}
return BatchFeature(data=__UpperCamelCase , tensor_type=__UpperCamelCase )
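
# --- usage sketch (not in the original file) ---------------------------------
# Hedged example of driving the processor above; `lowercase_` is the obfuscated
# class name used in this file and 'example.jpg' is a placeholder path.
#
#     from PIL import Image
#     processor = lowercase_(size={'height': 224, 'width': 224})
#     batch = processor(Image.open('example.jpg'), return_tensors='np')
#     batch['pixel_values'].shape  # -> (1, 3, 224, 224)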
import importlib
import json
import os
from collections import OrderedDict
from typing import Dict, Optional, Union
# Build the list of all image processors
from ...configuration_utils import PretrainedConfig
from ...dynamic_module_utils import get_class_from_dynamic_module, resolve_trust_remote_code
from ...image_processing_utils import ImageProcessingMixin
from ...utils import CONFIG_NAME, IMAGE_PROCESSOR_NAME, get_file_from_repo, logging
from .auto_factory import _LazyAutoMapping
from .configuration_auto import (
CONFIG_MAPPING_NAMES,
AutoConfig,
model_type_to_module_name,
replace_list_option_in_docstrings,
)
A : List[Any] = logging.get_logger(__name__)
A : str = OrderedDict(
[
('align', 'EfficientNetImageProcessor'),
('beit', 'BeitImageProcessor'),
('bit', 'BitImageProcessor'),
('blip', 'BlipImageProcessor'),
('blip-2', 'BlipImageProcessor'),
('bridgetower', 'BridgeTowerImageProcessor'),
('chinese_clip', 'ChineseCLIPImageProcessor'),
('clip', 'CLIPImageProcessor'),
('clipseg', 'ViTImageProcessor'),
('conditional_detr', 'ConditionalDetrImageProcessor'),
('convnext', 'ConvNextImageProcessor'),
('convnextv2', 'ConvNextImageProcessor'),
('cvt', 'ConvNextImageProcessor'),
('data2vec-vision', 'BeitImageProcessor'),
('deformable_detr', 'DeformableDetrImageProcessor'),
('deit', 'DeiTImageProcessor'),
('deta', 'DetaImageProcessor'),
('detr', 'DetrImageProcessor'),
('dinat', 'ViTImageProcessor'),
('donut-swin', 'DonutImageProcessor'),
('dpt', 'DPTImageProcessor'),
('efficientformer', 'EfficientFormerImageProcessor'),
('efficientnet', 'EfficientNetImageProcessor'),
('flava', 'FlavaImageProcessor'),
('focalnet', 'BitImageProcessor'),
('git', 'CLIPImageProcessor'),
('glpn', 'GLPNImageProcessor'),
('groupvit', 'CLIPImageProcessor'),
('imagegpt', 'ImageGPTImageProcessor'),
('instructblip', 'BlipImageProcessor'),
('layoutlmv2', 'LayoutLMv2ImageProcessor'),
('layoutlmv3', 'LayoutLMv3ImageProcessor'),
('levit', 'LevitImageProcessor'),
('mask2former', 'Mask2FormerImageProcessor'),
('maskformer', 'MaskFormerImageProcessor'),
('mgp-str', 'ViTImageProcessor'),
('mobilenet_v1', 'MobileNetV1ImageProcessor'),
('mobilenet_v2', 'MobileNetV2ImageProcessor'),
('mobilevit', 'MobileViTImageProcessor'),
        ('mobilevit', 'MobileViTImageProcessor'),
('nat', 'ViTImageProcessor'),
('oneformer', 'OneFormerImageProcessor'),
('owlvit', 'OwlViTImageProcessor'),
('perceiver', 'PerceiverImageProcessor'),
('pix2struct', 'Pix2StructImageProcessor'),
('poolformer', 'PoolFormerImageProcessor'),
('regnet', 'ConvNextImageProcessor'),
('resnet', 'ConvNextImageProcessor'),
('sam', 'SamImageProcessor'),
('segformer', 'SegformerImageProcessor'),
('swiftformer', 'ViTImageProcessor'),
('swin', 'ViTImageProcessor'),
('swin2sr', 'Swin2SRImageProcessor'),
('swinv2', 'ViTImageProcessor'),
('table-transformer', 'DetrImageProcessor'),
('timesformer', 'VideoMAEImageProcessor'),
('tvlt', 'TvltImageProcessor'),
('upernet', 'SegformerImageProcessor'),
('van', 'ConvNextImageProcessor'),
('videomae', 'VideoMAEImageProcessor'),
('vilt', 'ViltImageProcessor'),
('vit', 'ViTImageProcessor'),
('vit_hybrid', 'ViTHybridImageProcessor'),
('vit_mae', 'ViTImageProcessor'),
('vit_msn', 'ViTImageProcessor'),
('xclip', 'CLIPImageProcessor'),
('yolos', 'YolosImageProcessor'),
]
)
A : Dict = _LazyAutoMapping(CONFIG_MAPPING_NAMES, IMAGE_PROCESSOR_MAPPING_NAMES)
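# Resolve an image-processor class from its string name: search the per-model
# modules first, then any extra registered classes, then fall back to the main
# transformers init (which may hold a dummy object that raises a helpful error).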
def UpperCamelCase ( __magic_name__ : Union[str, Any] ) -> Optional[int]:
"""simple docstring"""
for module_name, extractors in IMAGE_PROCESSOR_MAPPING_NAMES.items():
if class_name in extractors:
lowercase__ = model_type_to_module_name(_SCREAMING_SNAKE_CASE )
lowercase__ = importlib.import_module(f'''.{module_name}''' , """transformers.models""" )
try:
return getattr(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
except AttributeError:
continue
for _, extractor in IMAGE_PROCESSOR_MAPPING._extra_content.items():
if getattr(_SCREAMING_SNAKE_CASE , """__name__""" , _SCREAMING_SNAKE_CASE ) == class_name:
return extractor
    # We did not find the class, but maybe it's because a dep is missing. In that case, the class will be in the main
    # init and we return the proper dummy to get an appropriate error message.
lowercase__ = importlib.import_module("""transformers""" )
if hasattr(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ):
return getattr(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
return None
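# Load the raw image-processor config dict for a checkpoint; returns {} when no
# config file is found so callers can fall back to the model config instead.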
def UpperCamelCase ( __magic_name__ : int , __magic_name__ : Dict = None , __magic_name__ : Optional[Any] = False , __magic_name__ : List[Any] = False , __magic_name__ : Dict = None , __magic_name__ : int = None , __magic_name__ : Any = None , __magic_name__ : Tuple = False , **__magic_name__ : Union[str, Any] , ) -> Union[str, Any]:
"""simple docstring"""
lowercase__ = get_file_from_repo(
_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , cache_dir=_SCREAMING_SNAKE_CASE , force_download=_SCREAMING_SNAKE_CASE , resume_download=_SCREAMING_SNAKE_CASE , proxies=_SCREAMING_SNAKE_CASE , use_auth_token=_SCREAMING_SNAKE_CASE , revision=_SCREAMING_SNAKE_CASE , local_files_only=_SCREAMING_SNAKE_CASE , )
if resolved_config_file is None:
logger.info(
"""Could not locate the image processor configuration file, will try to use the model config instead.""" )
return {}
with open(_SCREAMING_SNAKE_CASE , encoding="""utf-8""" ) as reader:
return json.load(_SCREAMING_SNAKE_CASE )
class A :
'''simple docstring'''
def __init__(self : Tuple ) -> Optional[Any]:
"""simple docstring"""
raise EnvironmentError(
"""AutoImageProcessor is designed to be instantiated """
"""using the `AutoImageProcessor.from_pretrained(pretrained_model_name_or_path)` method.""" )
@classmethod
@replace_list_option_in_docstrings(UpperCAmelCase_ )
def lowerCamelCase__ (cls : Optional[int] , _UpperCAmelCase : List[Any] , **_UpperCAmelCase : List[Any] ) -> List[str]:
"""simple docstring"""
lowercase__ = kwargs.pop("""config""" , UpperCAmelCase_ )
lowercase__ = kwargs.pop("""trust_remote_code""" , UpperCAmelCase_ )
lowercase__ = True
lowercase__ , lowercase__ = ImageProcessingMixin.get_image_processor_dict(UpperCAmelCase_ , **UpperCAmelCase_ )
lowercase__ = config_dict.get("""image_processor_type""" , UpperCAmelCase_ )
lowercase__ = None
if "AutoImageProcessor" in config_dict.get("""auto_map""" , {} ):
lowercase__ = config_dict["""auto_map"""]["""AutoImageProcessor"""]
# If we still don't have the image processor class, check if we're loading from a previous feature extractor config
# and if so, infer the image processor class from there.
if image_processor_class is None and image_processor_auto_map is None:
lowercase__ = config_dict.pop("""feature_extractor_type""" , UpperCAmelCase_ )
if feature_extractor_class is not None:
logger.warning(
"""Could not find image processor class in the image processor config or the model config. Loading"""
""" based on pattern matching with the model's feature extractor configuration.""" )
lowercase__ = feature_extractor_class.replace("""FeatureExtractor""" , """ImageProcessor""" )
if "AutoFeatureExtractor" in config_dict.get("""auto_map""" , {} ):
lowercase__ = config_dict["""auto_map"""]["""AutoFeatureExtractor"""]
lowercase__ = feature_extractor_auto_map.replace("""FeatureExtractor""" , """ImageProcessor""" )
logger.warning(
"""Could not find image processor auto map in the image processor config or the model config."""
""" Loading based on pattern matching with the model's feature extractor configuration.""" )
# If we don't find the image processor class in the image processor config, let's try the model config.
if image_processor_class is None and image_processor_auto_map is None:
if not isinstance(UpperCAmelCase_ , UpperCAmelCase_ ):
lowercase__ = AutoConfig.from_pretrained(UpperCAmelCase_ , **UpperCAmelCase_ )
# It could be in `config.image_processor_type``
lowercase__ = getattr(UpperCAmelCase_ , """image_processor_type""" , UpperCAmelCase_ )
if hasattr(UpperCAmelCase_ , """auto_map""" ) and "AutoImageProcessor" in config.auto_map:
lowercase__ = config.auto_map["""AutoImageProcessor"""]
if image_processor_class is not None:
lowercase__ = image_processor_class_from_name(UpperCAmelCase_ )
lowercase__ = image_processor_auto_map is not None
lowercase__ = image_processor_class is not None or type(UpperCAmelCase_ ) in IMAGE_PROCESSOR_MAPPING
lowercase__ = resolve_trust_remote_code(
UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ )
if has_remote_code and trust_remote_code:
lowercase__ = get_class_from_dynamic_module(
UpperCAmelCase_ , UpperCAmelCase_ , **UpperCAmelCase_ )
lowercase__ = kwargs.pop("""code_revision""" , UpperCAmelCase_ )
if os.path.isdir(UpperCAmelCase_ ):
image_processor_class.register_for_auto_class()
return image_processor_class.from_dict(UpperCAmelCase_ , **UpperCAmelCase_ )
elif image_processor_class is not None:
return image_processor_class.from_dict(UpperCAmelCase_ , **UpperCAmelCase_ )
# Last try: we use the IMAGE_PROCESSOR_MAPPING.
elif type(UpperCAmelCase_ ) in IMAGE_PROCESSOR_MAPPING:
lowercase__ = IMAGE_PROCESSOR_MAPPING[type(UpperCAmelCase_ )]
return image_processor_class.from_dict(UpperCAmelCase_ , **UpperCAmelCase_ )
raise ValueError(
f'''Unrecognized image processor in {pretrained_model_name_or_path}. Should have a '''
f'''`image_processor_type` key in its {IMAGE_PROCESSOR_NAME} of {CONFIG_NAME}, or one of the following '''
f'''`model_type` keys in its {CONFIG_NAME}: {", ".join(c for c in IMAGE_PROCESSOR_MAPPING_NAMES.keys() )}''' )
@staticmethod
def lowerCamelCase__ (_UpperCAmelCase : Dict , _UpperCAmelCase : List[str] ) -> Dict:
"""simple docstring"""
IMAGE_PROCESSOR_MAPPING.register(UpperCAmelCase_ , UpperCAmelCase_ )
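
# --- usage sketch (not in the original file) ---------------------------------
# Hedged example: `A` is this file's (obfuscated) AutoImageProcessor class and
# the checkpoint id, while a real hub repo, is illustrative only.
#
#     processor = A.from_pretrained('google/vit-base-patch16-224')
#     # resolves to ViTImageProcessor through IMAGE_PROCESSOR_MAPPING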
"""simple docstring"""
from transformers import BertTokenizer, EncoderDecoderModel, SeqaSeqTrainer, SeqaSeqTrainingArguments
from transformers.testing_utils import TestCasePlus, require_torch, slow
from transformers.utils import is_datasets_available
if is_datasets_available():
import datasets
class __A (snake_case__):
'''simple docstring'''
@slow
@require_torch
def lowerCAmelCase ( self : Union[str, Any] ) ->Dict:
"""simple docstring"""
snake_case_ = EncoderDecoderModel.from_encoder_decoder_pretrained("""prajjwal1/bert-tiny""" , """prajjwal1/bert-tiny""" )
snake_case_ = BertTokenizer.from_pretrained("""bert-base-uncased""" )
snake_case_ = bertabert.config.encoder.vocab_size
snake_case_ = tokenizer.sep_token_id
snake_case_ = tokenizer.cls_token_id
snake_case_ = 128
snake_case_ = datasets.load_dataset("""cnn_dailymail""" , """3.0.0""" , split="""train[:1%]""" )
snake_case_ = datasets.load_dataset("""cnn_dailymail""" , """3.0.0""" , split="""validation[:1%]""" )
snake_case_ = train_dataset.select(range(32 ) )
snake_case_ = val_dataset.select(range(16 ) )
snake_case_ = 4
def _map_to_encoder_decoder_inputs(UpperCAmelCase_ : int ):
# Tokenizer will automatically set [BOS] <text> [EOS]
snake_case_ = tokenizer(batch["""article"""] , padding="""max_length""" , truncation=UpperCAmelCase_ , max_length=512 )
snake_case_ = tokenizer(batch["""highlights"""] , padding="""max_length""" , truncation=UpperCAmelCase_ , max_length=128 )
snake_case_ = inputs.input_ids
snake_case_ = inputs.attention_mask
snake_case_ = outputs.input_ids
snake_case_ = outputs.input_ids.copy()
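            # Mask padding positions with -100 so PyTorch's cross-entropy loss ignores them.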
snake_case_ = [
[-100 if token == tokenizer.pad_token_id else token for token in labels] for labels in batch["""labels"""]
]
snake_case_ = outputs.attention_mask
assert all(len(UpperCAmelCase_ ) == 512 for x in inputs.input_ids )
assert all(len(UpperCAmelCase_ ) == 128 for x in outputs.input_ids )
return batch
def _compute_metrics(UpperCAmelCase_ : Union[str, Any] ):
snake_case_ = pred.label_ids
snake_case_ = pred.predictions
# all unnecessary tokens are removed
snake_case_ = tokenizer.batch_decode(UpperCAmelCase_ , skip_special_tokens=UpperCAmelCase_ )
snake_case_ = tokenizer.batch_decode(UpperCAmelCase_ , skip_special_tokens=UpperCAmelCase_ )
snake_case_ = sum([int(pred_str[i] == label_str[i] ) for i in range(len(UpperCAmelCase_ ) )] ) / len(UpperCAmelCase_ )
return {"accuracy": accuracy}
# map train dataset
snake_case_ = train_dataset.map(
_map_to_encoder_decoder_inputs , batched=UpperCAmelCase_ , batch_size=UpperCAmelCase_ , remove_columns=["""article""", """highlights"""] , )
train_dataset.set_format(
type="""torch""" , columns=["""input_ids""", """attention_mask""", """decoder_input_ids""", """decoder_attention_mask""", """labels"""] , )
# same for validation dataset
snake_case_ = val_dataset.map(
_map_to_encoder_decoder_inputs , batched=UpperCAmelCase_ , batch_size=UpperCAmelCase_ , remove_columns=["""article""", """highlights"""] , )
val_dataset.set_format(
type="""torch""" , columns=["""input_ids""", """attention_mask""", """decoder_input_ids""", """decoder_attention_mask""", """labels"""] , )
snake_case_ = self.get_auto_remove_tmp_dir()
snake_case_ = SeqaSeqTrainingArguments(
output_dir=UpperCAmelCase_ , per_device_train_batch_size=UpperCAmelCase_ , per_device_eval_batch_size=UpperCAmelCase_ , predict_with_generate=UpperCAmelCase_ , evaluation_strategy="""steps""" , do_train=UpperCAmelCase_ , do_eval=UpperCAmelCase_ , warmup_steps=0 , eval_steps=2 , logging_steps=2 , )
# instantiate trainer
snake_case_ = SeqaSeqTrainer(
model=UpperCAmelCase_ , args=UpperCAmelCase_ , compute_metrics=_compute_metrics , train_dataset=UpperCAmelCase_ , eval_dataset=UpperCAmelCase_ , tokenizer=UpperCAmelCase_ , )
# start training
trainer.train()
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_video_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import VivitImageProcessor
class _a (unittest.TestCase ):
'''simple docstring'''
def __init__( self , A__ , A__=7 , A__=3 , A__=10 , A__=18 , A__=30 , A__=400 , A__=True , A__=None , A__=True , A__=[0.5, 0.5, 0.5] , A__=[0.5, 0.5, 0.5] , A__=None , ):
A__ : List[str] = size if size is not None else {'shortest_edge': 18}
A__ : Union[str, Any] = crop_size if crop_size is not None else {'height': 18, 'width': 18}
A__ : List[str] = parent
A__ : Dict = batch_size
A__ : Tuple = num_channels
A__ : Optional[Any] = num_frames
A__ : List[Any] = image_size
A__ : Optional[int] = min_resolution
A__ : Dict = max_resolution
A__ : Dict = do_resize
A__ : Any = size
A__ : int = do_normalize
A__ : Optional[int] = image_mean
A__ : List[str] = image_std
A__ : Optional[int] = crop_size
def __A ( self ):
return {
"image_mean": self.image_mean,
"image_std": self.image_std,
"do_normalize": self.do_normalize,
"do_resize": self.do_resize,
"size": self.size,
"crop_size": self.crop_size,
}
@require_torch
@require_vision
class _a (__UpperCamelCase , unittest.TestCase ):
'''simple docstring'''
UpperCAmelCase__: Any = VivitImageProcessor if is_vision_available() else None
def __A ( self ):
A__ : str = VivitImageProcessingTester(self )
@property
def __A ( self ):
return self.image_processor_tester.prepare_image_processor_dict()
def __A ( self ):
A__ : Union[str, Any] = self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(A__ , """image_mean""" ) )
self.assertTrue(hasattr(A__ , """image_std""" ) )
self.assertTrue(hasattr(A__ , """do_normalize""" ) )
self.assertTrue(hasattr(A__ , """do_resize""" ) )
self.assertTrue(hasattr(A__ , """do_center_crop""" ) )
self.assertTrue(hasattr(A__ , """size""" ) )
def __A ( self ):
A__ : List[Any] = self.image_processing_class.from_dict(self.image_processor_dict )
self.assertEqual(image_processor.size , {"""shortest_edge""": 18} )
self.assertEqual(image_processor.crop_size , {"""height""": 18, """width""": 18} )
A__ : Optional[Any] = self.image_processing_class.from_dict(self.image_processor_dict , size=42 , crop_size=84 )
self.assertEqual(image_processor.size , {"""shortest_edge""": 42} )
self.assertEqual(image_processor.crop_size , {"""height""": 84, """width""": 84} )
def __A ( self ):
# Initialize image_processing
A__ : int = self.image_processing_class(**self.image_processor_dict )
# create random PIL videos
A__ : Optional[int] = prepare_video_inputs(self.image_processor_tester , equal_resolution=A__ )
for video in video_inputs:
self.assertIsInstance(A__ , A__ )
self.assertIsInstance(video[0] , Image.Image )
# Test not batched input
A__ : int = image_processing(video_inputs[0] , return_tensors="""pt""" ).pixel_values
self.assertEqual(
encoded_videos.shape , (
1,
self.image_processor_tester.num_frames,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["""height"""],
self.image_processor_tester.crop_size["""width"""],
) , )
# Test batched
A__ : Any = image_processing(A__ , return_tensors="""pt""" ).pixel_values
self.assertEqual(
encoded_videos.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_frames,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["""height"""],
self.image_processor_tester.crop_size["""width"""],
) , )
def __A ( self ):
# Initialize image_processing
A__ : Dict = self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
A__ : str = prepare_video_inputs(self.image_processor_tester , equal_resolution=A__ , numpify=A__ )
for video in video_inputs:
self.assertIsInstance(A__ , A__ )
self.assertIsInstance(video[0] , np.ndarray )
# Test not batched input
A__ : Optional[int] = image_processing(video_inputs[0] , return_tensors="""pt""" ).pixel_values
self.assertEqual(
encoded_videos.shape , (
1,
self.image_processor_tester.num_frames,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["""height"""],
self.image_processor_tester.crop_size["""width"""],
) , )
# Test batched
A__ : List[Any] = image_processing(A__ , return_tensors="""pt""" ).pixel_values
self.assertEqual(
encoded_videos.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_frames,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["""height"""],
self.image_processor_tester.crop_size["""width"""],
) , )
def __A ( self ):
# Initialize image_processing
A__ : Tuple = self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
A__ : int = prepare_video_inputs(self.image_processor_tester , equal_resolution=A__ , torchify=A__ )
for video in video_inputs:
self.assertIsInstance(A__ , A__ )
self.assertIsInstance(video[0] , torch.Tensor )
# Test not batched input
A__ : Tuple = image_processing(video_inputs[0] , return_tensors="""pt""" ).pixel_values
self.assertEqual(
encoded_videos.shape , (
1,
self.image_processor_tester.num_frames,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["""height"""],
self.image_processor_tester.crop_size["""width"""],
) , )
# Test batched
A__ : str = image_processing(A__ , return_tensors="""pt""" ).pixel_values
self.assertEqual(
encoded_videos.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_frames,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["""height"""],
self.image_processor_tester.crop_size["""width"""],
) , )
import warnings
from contextlib import contextmanager
from ....processing_utils import ProcessorMixin
class _a (__magic_name__ ):
'''simple docstring'''
UpperCAmelCase__: Optional[Any] = '''MCTCTFeatureExtractor'''
UpperCAmelCase__: Optional[int] = '''AutoTokenizer'''
def __init__( self , A__ , A__ ):
super().__init__(A__ , A__ )
A__ : List[str] = self.feature_extractor
A__ : Optional[int] = False
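        # When True, calls are routed straight to the tokenizer (legacy `as_target_processor` mode).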
def __call__( self , *A__ , **A__ ):
# For backward compatibility
if self._in_target_context_manager:
return self.current_processor(*A__ , **A__ )
if "raw_speech" in kwargs:
warnings.warn("""Using `raw_speech` as a keyword argument is deprecated. Use `audio` instead.""" )
A__ : Dict = kwargs.pop("""raw_speech""" )
else:
A__ : Tuple = kwargs.pop("""audio""" , A__ )
A__ : Union[str, Any] = kwargs.pop("""sampling_rate""" , A__ )
A__ : int = kwargs.pop("""text""" , A__ )
if len(A__ ) > 0:
A__ : Optional[int] = args[0]
A__ : Dict = args[1:]
if audio is None and text is None:
raise ValueError("""You need to specify either an `audio` or `text` input to process.""" )
if audio is not None:
A__ : List[str] = self.feature_extractor(A__ , *A__ , sampling_rate=A__ , **A__ )
if text is not None:
A__ : Optional[Any] = self.tokenizer(A__ , **A__ )
if text is None:
return inputs
elif audio is None:
return encodings
else:
A__ : List[Any] = encodings["""input_ids"""]
return inputs
def __A ( self , *A__ , **A__ ):
return self.tokenizer.batch_decode(*A__ , **A__ )
def __A ( self , *A__ , **A__ ):
# For backward compatibility
if self._in_target_context_manager:
return self.current_processor.pad(*A__ , **A__ )
A__ : Optional[Any] = kwargs.pop("""input_features""" , A__ )
A__ : Union[str, Any] = kwargs.pop("""labels""" , A__ )
if len(A__ ) > 0:
A__ : List[Any] = args[0]
A__ : Optional[int] = args[1:]
if input_features is not None:
A__ : Union[str, Any] = self.feature_extractor.pad(A__ , *A__ , **A__ )
if labels is not None:
A__ : List[Any] = self.tokenizer.pad(A__ , **A__ )
if labels is None:
return input_features
elif input_features is None:
return labels
else:
A__ : Dict = labels["""input_ids"""]
return input_features
def __A ( self , *A__ , **A__ ):
return self.tokenizer.decode(*A__ , **A__ )
@contextmanager
def __A ( self ):
warnings.warn(
"""`as_target_processor` is deprecated and will be removed in v5 of Transformers. You can process your """
"""labels by using the argument `text` of the regular `__call__` method (either in the same call as """
"""your audio inputs, or in a separate call.""" )
A__ : int = True
A__ : List[Any] = self.tokenizer
yield
A__ : Tuple = self.feature_extractor
A__ : Dict = False
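
# --- usage sketch (not in the original file) ---------------------------------
# Hedged example: `_a` is this file's (obfuscated) MCTCT processor. `audio`
# stands for a 1-D float array sampled at 16 kHz; the checkpoint id is the
# real M-CTC-T repo but is illustrative here.
#
#     processor = _a.from_pretrained('speechbrain/m-ctc-t-large')
#     inputs = processor(audio=audio, sampling_rate=16000, return_tensors='pt')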
from collections import deque
from math import floor
from random import random
from time import time
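# Directed, weighted graph stored as an adjacency dict: graph[u] is a list of
# [weight, v] pairs, one per edge u -> v.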
class __magic_name__ :
def __init__( self : Optional[int] ) -> str:
'''simple docstring'''
UpperCamelCase__ : str = {}
def UpperCAmelCase__ ( self : Any , lowerCamelCase__ : Optional[int] , lowerCamelCase__ : List[str] , lowerCamelCase__ : Optional[int]=1 ) -> Any:
'''simple docstring'''
if self.graph.get(lowerCamelCase__ ):
if self.graph[u].count([w, v] ) == 0:
self.graph[u].append([w, v] )
else:
UpperCamelCase__ : List[Any] = [[w, v]]
if not self.graph.get(lowerCamelCase__ ):
UpperCamelCase__ : Any = []
def UpperCAmelCase__ ( self : Optional[int] ) -> Any:
'''simple docstring'''
return list(self.graph )
def UpperCAmelCase__ ( self : List[str] , lowerCamelCase__ : Optional[int] , lowerCamelCase__ : Tuple ) -> Optional[int]:
'''simple docstring'''
if self.graph.get(lowerCamelCase__ ):
for _ in self.graph[u]:
if _[1] == v:
self.graph[u].remove(lowerCamelCase__ )
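    # Iterative DFS from s; when a destination d is given, the visited list is returned as soon as d is found.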
def UpperCAmelCase__ ( self : Tuple , lowerCamelCase__ : int=-2 , lowerCamelCase__ : int=-1 ) -> List[Any]:
'''simple docstring'''
if s == d:
return []
UpperCamelCase__ : List[str] = []
UpperCamelCase__ : Dict = []
if s == -2:
UpperCamelCase__ : Optional[Any] = list(self.graph )[0]
stack.append(lowerCamelCase__ )
visited.append(lowerCamelCase__ )
UpperCamelCase__ : Optional[int] = s
while True:
# check if there are any non-isolated nodes
if len(self.graph[s] ) != 0:
UpperCamelCase__ : Union[str, Any] = s
for node in self.graph[s]:
if visited.count(node[1] ) < 1:
if node[1] == d:
visited.append(lowerCamelCase__ )
return visited
else:
stack.append(node[1] )
visited.append(node[1] )
UpperCamelCase__ : str = node[1]
break
# check if all the children are visited
if s == ss:
stack.pop()
if len(lowerCamelCase__ ) != 0:
UpperCamelCase__ : Optional[int] = stack[len(lowerCamelCase__ ) - 1]
else:
UpperCamelCase__ : int = ss
# check if we have reached the starting point
if len(lowerCamelCase__ ) == 0:
return visited
def UpperCAmelCase__ ( self : str , lowerCamelCase__ : Optional[int]=-1 ) -> Optional[Any]:
'''simple docstring'''
if c == -1:
UpperCamelCase__ : int = floor(random() * 10000 ) + 10
for i in range(lowerCamelCase__ ):
# every vertex has at most 102 edges
for _ in range(floor(random() * 102 ) + 1 ):
UpperCamelCase__ : Optional[Any] = floor(random() * c ) + 1
if n != i:
self.add_pair(lowerCamelCase__ , lowerCamelCase__ , 1 )
def UpperCAmelCase__ ( self : List[str] , lowerCamelCase__ : Tuple=-2 ) -> str:
'''simple docstring'''
UpperCamelCase__ : Union[str, Any] = deque()
UpperCamelCase__ : Optional[Any] = []
if s == -2:
UpperCamelCase__ : Optional[int] = list(self.graph )[0]
d.append(lowerCamelCase__ )
visited.append(lowerCamelCase__ )
while d:
UpperCamelCase__ : List[str] = d.popleft()
if len(self.graph[s] ) != 0:
for node in self.graph[s]:
if visited.count(node[1] ) < 1:
d.append(node[1] )
visited.append(node[1] )
return visited
def UpperCAmelCase__ ( self : int , lowerCamelCase__ : List[Any] ) -> int:
'''simple docstring'''
UpperCamelCase__ : List[str] = 0
for x in self.graph:
for y in self.graph[x]:
if y[1] == u:
count += 1
return count
def UpperCAmelCase__ ( self : Any , lowerCamelCase__ : List[str] ) -> int:
'''simple docstring'''
return len(self.graph[u] )
def UpperCAmelCase__ ( self : Dict , lowerCamelCase__ : List[str]=-2 ) -> Dict:
'''simple docstring'''
UpperCamelCase__ : int = []
UpperCamelCase__ : Optional[int] = []
if s == -2:
UpperCamelCase__ : Dict = list(self.graph )[0]
stack.append(lowerCamelCase__ )
visited.append(lowerCamelCase__ )
UpperCamelCase__ : Optional[Any] = s
UpperCamelCase__ : Dict = []
while True:
# check if there are any non-isolated nodes
if len(self.graph[s] ) != 0:
UpperCamelCase__ : Optional[int] = s
for node in self.graph[s]:
if visited.count(node[1] ) < 1:
stack.append(node[1] )
visited.append(node[1] )
UpperCamelCase__ : Tuple = node[1]
break
# check if all the children are visited
if s == ss:
sorted_nodes.append(stack.pop() )
if len(lowerCamelCase__ ) != 0:
UpperCamelCase__ : List[Any] = stack[len(lowerCamelCase__ ) - 1]
else:
UpperCamelCase__ : Union[str, Any] = ss
# check if we have reached the starting point
if len(lowerCamelCase__ ) == 0:
return sorted_nodes
def UpperCAmelCase__ ( self : Optional[int] ) -> Tuple:
'''simple docstring'''
UpperCamelCase__ : Optional[int] = []
UpperCamelCase__ : int = []
UpperCamelCase__ : List[Any] = list(self.graph )[0]
stack.append(lowerCamelCase__ )
visited.append(lowerCamelCase__ )
UpperCamelCase__ : Dict = -2
UpperCamelCase__ : int = []
UpperCamelCase__ : Tuple = s
UpperCamelCase__ : str = False
UpperCamelCase__ : Optional[int] = set()
while True:
# check if there are any non-isolated nodes
if len(self.graph[s] ) != 0:
UpperCamelCase__ : Dict = s
for node in self.graph[s]:
if (
visited.count(node[1] ) > 0
and node[1] != parent
and indirect_parents.count(node[1] ) > 0
and not on_the_way_back
):
UpperCamelCase__ : Union[str, Any] = len(lowerCamelCase__ ) - 1
while len_stack >= 0:
if stack[len_stack] == node[1]:
anticipating_nodes.add(node[1] )
break
else:
anticipating_nodes.add(stack[len_stack] )
len_stack -= 1
if visited.count(node[1] ) < 1:
stack.append(node[1] )
visited.append(node[1] )
UpperCamelCase__ : Tuple = node[1]
break
# check if all the children are visited
if s == ss:
stack.pop()
UpperCamelCase__ : Optional[int] = True
if len(lowerCamelCase__ ) != 0:
UpperCamelCase__ : List[Any] = stack[len(lowerCamelCase__ ) - 1]
else:
UpperCamelCase__ : Optional[Any] = False
indirect_parents.append(lowerCamelCase__ )
UpperCamelCase__ : Optional[int] = s
UpperCamelCase__ : Optional[Any] = ss
# check if we have reached the starting point
if len(lowerCamelCase__ ) == 0:
return list(lowerCamelCase__ )
def UpperCAmelCase__ ( self : Tuple ) -> Dict:
'''simple docstring'''
UpperCamelCase__ : List[Any] = []
UpperCamelCase__ : Any = []
UpperCamelCase__ : Tuple = list(self.graph )[0]
stack.append(lowerCamelCase__ )
visited.append(lowerCamelCase__ )
UpperCamelCase__ : int = -2
UpperCamelCase__ : Optional[int] = []
UpperCamelCase__ : Tuple = s
UpperCamelCase__ : List[str] = False
UpperCamelCase__ : Tuple = set()
while True:
# check if there are any non-isolated nodes
if len(self.graph[s] ) != 0:
UpperCamelCase__ : Any = s
for node in self.graph[s]:
if (
visited.count(node[1] ) > 0
and node[1] != parent
and indirect_parents.count(node[1] ) > 0
and not on_the_way_back
):
UpperCamelCase__ : List[str] = len(lowerCamelCase__ ) - 1
while len_stack_minus_one >= 0:
if stack[len_stack_minus_one] == node[1]:
anticipating_nodes.add(node[1] )
break
else:
return True
if visited.count(node[1] ) < 1:
stack.append(node[1] )
visited.append(node[1] )
UpperCamelCase__ : int = node[1]
break
# check if all the children are visited
if s == ss:
stack.pop()
UpperCamelCase__ : List[str] = True
if len(lowerCamelCase__ ) != 0:
UpperCamelCase__ : Optional[Any] = stack[len(lowerCamelCase__ ) - 1]
else:
UpperCamelCase__ : List[str] = False
indirect_parents.append(lowerCamelCase__ )
UpperCamelCase__ : Tuple = s
UpperCamelCase__ : List[Any] = ss
# check if we have reached the starting point
if len(lowerCamelCase__ ) == 0:
return False
def UpperCAmelCase__ ( self : List[Any] , lowerCamelCase__ : Union[str, Any]=-2 , lowerCamelCase__ : Union[str, Any]=-1 ) -> Any:
'''simple docstring'''
UpperCamelCase__ : Optional[int] = time()
self.dfs(lowerCamelCase__ , lowerCamelCase__ )
UpperCamelCase__ : int = time()
return end - begin
def UpperCAmelCase__ ( self : Tuple , lowerCamelCase__ : int=-2 ) -> Optional[int]:
'''simple docstring'''
UpperCamelCase__ : List[str] = time()
self.bfs(lowerCamelCase__ )
UpperCamelCase__ : Optional[Any] = time()
return end - begin
class __magic_name__ :
def __init__( self : Optional[Any] ) -> Any:
'''simple docstring'''
UpperCamelCase__ : Dict = {}
def UpperCAmelCase__ ( self : int , lowerCamelCase__ : Tuple , lowerCamelCase__ : Optional[int] , lowerCamelCase__ : Tuple=1 ) -> Dict:
'''simple docstring'''
if self.graph.get(lowerCamelCase__ ):
# if there already is an edge
if self.graph[u].count([w, v] ) == 0:
self.graph[u].append([w, v] )
else:
# if u does not exist
UpperCamelCase__ : Union[str, Any] = [[w, v]]
# add the other way
if self.graph.get(lowerCamelCase__ ):
# if there already is an edge
if self.graph[v].count([w, u] ) == 0:
self.graph[v].append([w, u] )
else:
# if v does not exist
UpperCamelCase__ : int = [[w, u]]
def UpperCAmelCase__ ( self : Any , lowerCamelCase__ : Any , lowerCamelCase__ : List[Any] ) -> Tuple:
'''simple docstring'''
if self.graph.get(lowerCamelCase__ ):
for _ in self.graph[u]:
if _[1] == v:
self.graph[u].remove(lowerCamelCase__ )
# the other way round
if self.graph.get(lowerCamelCase__ ):
for _ in self.graph[v]:
if _[1] == u:
self.graph[v].remove(lowerCamelCase__ )
def UpperCAmelCase__ ( self : int , lowerCamelCase__ : Tuple=-2 , lowerCamelCase__ : Tuple=-1 ) -> str:
'''simple docstring'''
if s == d:
return []
UpperCamelCase__ : List[str] = []
UpperCamelCase__ : Tuple = []
if s == -2:
UpperCamelCase__ : str = list(self.graph )[0]
stack.append(lowerCamelCase__ )
visited.append(lowerCamelCase__ )
UpperCamelCase__ : int = s
while True:
# check if there are any non-isolated nodes
if len(self.graph[s] ) != 0:
UpperCamelCase__ : int = s
for node in self.graph[s]:
if visited.count(node[1] ) < 1:
if node[1] == d:
visited.append(lowerCamelCase__ )
return visited
else:
stack.append(node[1] )
visited.append(node[1] )
UpperCamelCase__ : Any = node[1]
break
# check if all the children are visited
if s == ss:
stack.pop()
if len(lowerCamelCase__ ) != 0:
UpperCamelCase__ : Optional[Any] = stack[len(lowerCamelCase__ ) - 1]
else:
UpperCamelCase__ : List[str] = ss
# check if we have reached the starting point
if len(lowerCamelCase__ ) == 0:
return visited
def UpperCAmelCase__ ( self : Dict , lowerCamelCase__ : Optional[int]=-1 ) -> Optional[Any]:
'''simple docstring'''
if c == -1:
UpperCamelCase__ : List[Any] = floor(random() * 10000 ) + 10
for i in range(lowerCamelCase__ ):
# every vertex has at most 102 edges
for _ in range(floor(random() * 102 ) + 1 ):
UpperCamelCase__ : str = floor(random() * c ) + 1
if n != i:
self.add_pair(lowerCamelCase__ , lowerCamelCase__ , 1 )
def UpperCAmelCase__ ( self : Optional[int] , lowerCamelCase__ : int=-2 ) -> Tuple:
'''simple docstring'''
UpperCamelCase__ : List[Any] = deque()
UpperCamelCase__ : int = []
if s == -2:
UpperCamelCase__ : Dict = list(self.graph )[0]
d.append(lowerCamelCase__ )
visited.append(lowerCamelCase__ )
while d:
UpperCamelCase__ : List[str] = d.popleft()
if len(self.graph[s] ) != 0:
for node in self.graph[s]:
if visited.count(node[1] ) < 1:
d.append(node[1] )
visited.append(node[1] )
return visited
def UpperCAmelCase__ ( self : int , lowerCamelCase__ : str ) -> List[Any]:
'''simple docstring'''
return len(self.graph[u] )
def UpperCAmelCase__ ( self : Dict ) -> int:
'''simple docstring'''
UpperCamelCase__ : Optional[Any] = []
UpperCamelCase__ : Tuple = []
UpperCamelCase__ : str = list(self.graph )[0]
stack.append(lowerCamelCase__ )
visited.append(lowerCamelCase__ )
UpperCamelCase__ : Dict = -2
UpperCamelCase__ : Optional[Any] = []
UpperCamelCase__ : Optional[int] = s
UpperCamelCase__ : int = False
UpperCamelCase__ : str = set()
while True:
# check if there are any non-isolated nodes
if len(self.graph[s] ) != 0:
UpperCamelCase__ : Tuple = s
for node in self.graph[s]:
if (
visited.count(node[1] ) > 0
and node[1] != parent
and indirect_parents.count(node[1] ) > 0
and not on_the_way_back
):
UpperCamelCase__ : Optional[int] = len(lowerCamelCase__ ) - 1
while len_stack >= 0:
if stack[len_stack] == node[1]:
anticipating_nodes.add(node[1] )
break
else:
anticipating_nodes.add(stack[len_stack] )
len_stack -= 1
if visited.count(node[1] ) < 1:
stack.append(node[1] )
visited.append(node[1] )
UpperCamelCase__ : str = node[1]
break
# check if all the children are visited
if s == ss:
stack.pop()
UpperCamelCase__ : Optional[Any] = True
if len(lowerCamelCase__ ) != 0:
UpperCamelCase__ : List[str] = stack[len(lowerCamelCase__ ) - 1]
else:
UpperCamelCase__ : Optional[Any] = False
indirect_parents.append(lowerCamelCase__ )
UpperCamelCase__ : Optional[int] = s
UpperCamelCase__ : Dict = ss
# check if we have reached the starting point
if len(lowerCamelCase__ ) == 0:
return list(lowerCamelCase__ )
def UpperCAmelCase__ ( self : Any ) -> str:
'''simple docstring'''
UpperCamelCase__ : int = []
UpperCamelCase__ : str = []
UpperCamelCase__ : Optional[int] = list(self.graph )[0]
stack.append(lowerCamelCase__ )
visited.append(lowerCamelCase__ )
UpperCamelCase__ : Optional[int] = -2
UpperCamelCase__ : Union[str, Any] = []
UpperCamelCase__ : Optional[int] = s
UpperCamelCase__ : str = False
UpperCamelCase__ : Any = set()
while True:
# check if there are any non-isolated nodes
if len(self.graph[s] ) != 0:
UpperCamelCase__ : Optional[int] = s
for node in self.graph[s]:
if (
visited.count(node[1] ) > 0
and node[1] != parent
and indirect_parents.count(node[1] ) > 0
and not on_the_way_back
):
UpperCamelCase__ : Optional[Any] = len(lowerCamelCase__ ) - 1
while len_stack_minus_one >= 0:
if stack[len_stack_minus_one] == node[1]:
anticipating_nodes.add(node[1] )
break
else:
return True
if visited.count(node[1] ) < 1:
stack.append(node[1] )
visited.append(node[1] )
UpperCamelCase__ : int = node[1]
break
# check if all the children are visited
if s == ss:
stack.pop()
UpperCamelCase__ : Optional[Any] = True
if len(lowerCamelCase__ ) != 0:
UpperCamelCase__ : Optional[int] = stack[len(lowerCamelCase__ ) - 1]
else:
UpperCamelCase__ : Tuple = False
indirect_parents.append(lowerCamelCase__ )
UpperCamelCase__ : Union[str, Any] = s
UpperCamelCase__ : Dict = ss
# check if we have reached the starting point
if len(lowerCamelCase__ ) == 0:
return False
def UpperCAmelCase__ ( self : Dict ) -> Optional[int]:
'''simple docstring'''
return list(self.graph )
def UpperCAmelCase__ ( self : List[str] , lowerCamelCase__ : Any=-2 , lowerCamelCase__ : str=-1 ) -> Union[str, Any]:
'''simple docstring'''
UpperCamelCase__ : List[str] = time()
self.dfs(lowerCamelCase__ , lowerCamelCase__ )
UpperCamelCase__ : Dict = time()
return end - begin
def UpperCAmelCase__ ( self : List[Any] , lowerCamelCase__ : str=-2 ) -> Optional[Any]:
'''simple docstring'''
UpperCamelCase__ : List[str] = time()
self.bfs(lowerCamelCase__ )
UpperCamelCase__ : Any = time()
return end - begin
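# The traversals above are iterative (explicit stack / deque) rather than
# recursive. A compact, runnable restatement of the depth-first walk on a plain
# adjacency dict; the classes above store edges as [weight, vertex] pairs, while
# this sketch uses bare vertex ids for brevity:
def iterative_dfs(graph: dict, start) -> list:
    stack, visited = [start], []
    while stack:
        node = stack.pop()
        if node in visited:
            continue
        visited.append(node)
        stack.extend(reversed(graph.get(node, [])))
    return visited


if __name__ == "__main__":
    assert iterative_dfs({0: [1, 2], 1: [3], 2: [], 3: []}, 0) == [0, 1, 3, 2]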
| 146 |
from collections.abc import Callable


def bisection(function: Callable[[float], float], a: float, b: float) -> float:
    """simple docstring"""
    start: float = a
    end: float = b
    if function(a) == 0:  # one of the a or b is a root for the function
        return a
    elif function(b) == 0:
        return b
    elif (
        function(a) * function(b) > 0
    ):  # if none of these are root and they are both positive or negative,
        # then this algorithm can't find the root
        raise ValueError("could not find root in given interval.")
    else:
        mid: float = start + (end - start) / 2.0
        while abs(start - mid) > 10**-7:  # until the interval is narrower than 10^-7
            if function(mid) == 0:
                return mid
            elif function(mid) * function(start) < 0:
                end = mid
            else:
                start = mid
            mid = start + (end - start) / 2.0
        return mid


def f(x: float) -> float:
    """simple docstring"""
    return x**3 - 2 * x - 5


if __name__ == "__main__":
    print(bisection(f, 1, 1000))

    import doctest

    doctest.testmod()
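# Sanity check for the bisection above on a root known in closed form:
# x**2 - 2 vanishes at sqrt(2) inside [0, 2].
if __name__ == "__main__":
    assert abs(bisection(lambda x: x * x - 2, 0, 2) - 2**0.5) < 1e-6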
| 146 | 1 |
'''simple docstring'''
from __future__ import annotations
from pprint import pformat
from typing import Generic, TypeVar
T = TypeVar("T")


class GraphAdjacencyList(Generic[T]):
    def __init__(self, directed: bool = True) -> None:
        """simple docstring"""
        self.adj_list: dict[T, list[T]] = {}  # dictionary of lists
        self.directed = directed

    def add_edge(self, source_vertex: T, destination_vertex: T) -> GraphAdjacencyList[T]:
        """simple docstring"""
        if not self.directed:  # For undirected graphs
            # if both source vertex and destination vertex are both present in the
            # adjacency list, add destination vertex to source vertex list of adjacent
            # vertices and add source vertex to destination vertex list of adjacent
            # vertices.
            if source_vertex in self.adj_list and destination_vertex in self.adj_list:
                self.adj_list[source_vertex].append(destination_vertex)
                self.adj_list[destination_vertex].append(source_vertex)
            # if only source vertex is present in adjacency list, add destination vertex
            # to source vertex list of adjacent vertices, then create a new vertex with
            # destination vertex as key and assign a list containing the source vertex
            # as its first adjacent vertex.
            elif source_vertex in self.adj_list:
                self.adj_list[source_vertex].append(destination_vertex)
                self.adj_list[destination_vertex] = [source_vertex]
            # if only destination vertex is present in adjacency list, add source vertex
            # to destination vertex list of adjacent vertices, then create a new vertex
            # with source vertex as key and assign a list containing the destination
            # vertex as its first adjacent vertex.
            elif destination_vertex in self.adj_list:
                self.adj_list[destination_vertex].append(source_vertex)
                self.adj_list[source_vertex] = [destination_vertex]
            # if both source vertex and destination vertex are not present in adjacency
            # list, create a new vertex with source vertex as key and assign a list
            # containing the destination vertex as its first adjacent vertex; also
            # create a new vertex with destination vertex as key and assign a list
            # containing the source vertex as its first adjacent vertex.
            else:
                self.adj_list[source_vertex] = [destination_vertex]
                self.adj_list[destination_vertex] = [source_vertex]
        else:  # For directed graphs
            # if both source vertex and destination vertex are present in adjacency
            # list, add destination vertex to source vertex list of adjacent vertices.
            if source_vertex in self.adj_list and destination_vertex in self.adj_list:
                self.adj_list[source_vertex].append(destination_vertex)
            # if only source vertex is present in adjacency list, add destination
            # vertex to source vertex list of adjacent vertices and create a new vertex
            # with destination vertex as key, which has no adjacent vertex
            elif source_vertex in self.adj_list:
                self.adj_list[source_vertex].append(destination_vertex)
                self.adj_list[destination_vertex] = []
            # if only destination vertex is present in adjacency list, create a new
            # vertex with source vertex as key and assign a list containing destination
            # vertex as first adjacent vertex
            elif destination_vertex in self.adj_list:
                self.adj_list[source_vertex] = [destination_vertex]
            # if both source vertex and destination vertex are not present in adjacency
            # list, create a new vertex with source vertex as key and a list containing
            # destination vertex as its first adjacent vertex. Then create a new vertex
            # with destination vertex as key, which has no adjacent vertex
            else:
                self.adj_list[source_vertex] = [destination_vertex]
                self.adj_list[destination_vertex] = []
        return self

    def __repr__(self) -> str:
        """simple docstring"""
        return pformat(self.adj_list)
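# Usage sketch: the same edge list yields different adjacency structures
# depending on the `directed` flag, and `add_edge` returns `self` so calls chain.
if __name__ == "__main__":
    directed = GraphAdjacencyList()
    directed.add_edge(1, 2).add_edge(2, 3)
    assert directed.adj_list == {1: [2], 2: [3], 3: []}

    undirected = GraphAdjacencyList(directed=False)
    undirected.add_edge(1, 2)
    assert undirected.adj_list == {1: [2], 2: [1]}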
| 365 |
'''simple docstring'''
def solution() -> int:
    return [
        a * b * (1_000 - a - b)
        for a in range(1, 999)
        for b in range(a, 999)
        if (a * a + b * b == (1_000 - a - b) ** 2)
    ][0]


if __name__ == "__main__":
    print(F'''{solution() = }''')
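# The comprehension above searches a + b + c = 1000 with a² + b² = c²; the
# (known) triplet it finds is (200, 375, 425), checked directly here:
if __name__ == "__main__":
    a, b, c = 200, 375, 425
    assert a + b + c == 1_000 and a * a + b * b == c * c
    assert a * b * c == 31_875_000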
| 160 | 0 |
'''simple docstring'''
from __future__ import annotations
def kmp(pattern: str, text: str) -> bool:
    # 1) Construct the failure array
    failure = get_failure_array(pattern)

    # 2) Step through text searching for pattern
    i, j = 0, 0  # index into text, pattern
    while i < len(text):
        if pattern[j] == text[i]:
            if j == (len(pattern) - 1):
                return True
            j += 1
        # if this is a prefix in our pattern
        # just go back far enough to continue
        elif j > 0:
            j = failure[j - 1]
            continue
        i += 1
    return False


def get_failure_array(pattern: str) -> list[int]:
    failure = [0]
    i = 0
    j = 1
    while j < len(pattern):
        if pattern[i] == pattern[j]:
            i += 1
        elif i > 0:
            i = failure[i - 1]
            continue
        j += 1
        failure.append(i)
    return failure


if __name__ == "__main__":
    # Test 1)
    pattern = "abc1abc12"
    text1 = "alskfjaldsabc1abc1abc12k23adsfabcabc"
    text2 = "alskfjaldsk23adsfabcabc"
    assert kmp(pattern, text1) and not kmp(pattern, text2)

    # Test 2)
    pattern = "ABABX"
    text = "ABABZABABYABABX"
    assert kmp(pattern, text)

    # Test 3)
    pattern = "AAAB"
    text = "ABAAAAAB"
    assert kmp(pattern, text)

    # Test 4)
    pattern = "abcdabcy"
    text = "abcxabcdabxabcdabcdabcy"
    assert kmp(pattern, text)

    # Test 5)
    pattern = "aabaabaaa"
    assert get_failure_array(pattern) == [0, 1, 0, 1, 2, 3, 4, 5, 2]
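# KMP agrees with Python's built-in substring search, and the failure array
# records, for each prefix, the length of its longest proper border:
if __name__ == "__main__":
    for p, t in [("aba", "cabab"), ("xyz", "abc")]:
        assert kmp(p, t) == (p in t)
    assert get_failure_array("aaab") == [0, 1, 2, 0]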
| 58 |
'''simple docstring'''
import collections.abc
from typing import Optional, Tuple, Union
import torch
import torch.utils.checkpoint
from torch import nn
from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss
from ...activations import ACT2FN
from ...modeling_outputs import BaseModelOutputWithNoAttention, ImageClassifierOutputWithNoAttention
from ...modeling_utils import PreTrainedModel
from ...utils import add_code_sample_docstrings, add_start_docstrings, add_start_docstrings_to_model_forward, logging
from .configuration_poolformer import PoolFormerConfig
lowercase_ = logging.get_logger(__name__)
# General docstring
lowercase_ = """PoolFormerConfig"""
# Base docstring
lowercase_ = """sail/poolformer_s12"""
lowercase_ = [1, 512, 7, 7]
# Image classification docstring
lowercase_ = """sail/poolformer_s12"""
lowercase_ = """tabby, tabby cat"""
lowercase_ = [
"""sail/poolformer_s12""",
# See all PoolFormer models at https://huggingface.co/models?filter=poolformer
]
def drop_path(input: torch.Tensor, drop_prob: float = 0.0, training: bool = False) -> torch.Tensor:
    if drop_prob == 0.0 or not training:
        return input
    keep_prob = 1 - drop_prob
    shape = (input.shape[0],) + (1,) * (input.ndim - 1)  # work with diff dim tensors, not just 2D ConvNets
    random_tensor = keep_prob + torch.rand(shape, dtype=input.dtype, device=input.device)
    random_tensor.floor_()  # binarize
    output = input.div(keep_prob) * random_tensor
    return output
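# Stochastic depth keeps the activation scale unbiased in expectation: each
# sample survives with probability `keep_prob` and survivors are rescaled by
# 1 / keep_prob. Illustrative check (left commented so this module stays free
# of import-time side effects):
#
#     x = torch.ones(10_000, 4)
#     y = drop_path(x, drop_prob=0.3, training=True)
#     assert abs(y.mean().item() - 1.0) < 0.05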
class a_ ( nn.Module ):
'''simple docstring'''
def __init__( self , A = None ) -> None:
super().__init__()
_SCREAMING_SNAKE_CASE = drop_prob
def snake_case_( self , A ) -> torch.Tensor:
return drop_path(A , self.drop_prob , self.training )
def snake_case_( self ) -> str:
return "p={}".format(self.drop_prob )
class a_ ( nn.Module ):
'''simple docstring'''
def __init__( self , A , A , A , A , A , A=None ) -> Union[str, Any]:
super().__init__()
_SCREAMING_SNAKE_CASE = patch_size if isinstance(A , collections.abc.Iterable ) else (patch_size, patch_size)
_SCREAMING_SNAKE_CASE = stride if isinstance(A , collections.abc.Iterable ) else (stride, stride)
_SCREAMING_SNAKE_CASE = padding if isinstance(A , collections.abc.Iterable ) else (padding, padding)
_SCREAMING_SNAKE_CASE = nn.Conv2d(A , A , kernel_size=A , stride=A , padding=A )
_SCREAMING_SNAKE_CASE = norm_layer(A ) if norm_layer else nn.Identity()
def snake_case_( self , A ) -> Optional[Any]:
_SCREAMING_SNAKE_CASE = self.projection(A )
_SCREAMING_SNAKE_CASE = self.norm(A )
return embeddings
class a_ ( nn.GroupNorm ):
'''simple docstring'''
def __init__( self , A , **A ) -> Union[str, Any]:
super().__init__(1 , A , **A )
class a_ ( nn.Module ):
'''simple docstring'''
def __init__( self , A ) -> Union[str, Any]:
super().__init__()
_SCREAMING_SNAKE_CASE = nn.AvgPool2d(A , stride=1 , padding=pool_size // 2 , count_include_pad=A )
def snake_case_( self , A ) -> Union[str, Any]:
return self.pool(A ) - hidden_states
class a_ ( nn.Module ):
'''simple docstring'''
def __init__( self , A , A , A , A ) -> List[Any]:
super().__init__()
_SCREAMING_SNAKE_CASE = nn.Conv2d(A , A , 1 )
_SCREAMING_SNAKE_CASE = nn.Conv2d(A , A , 1 )
_SCREAMING_SNAKE_CASE = PoolFormerDropPath(A )
if isinstance(config.hidden_act , A ):
_SCREAMING_SNAKE_CASE = ACT2FN[config.hidden_act]
else:
_SCREAMING_SNAKE_CASE = config.hidden_act
def snake_case_( self , A ) -> Optional[int]:
_SCREAMING_SNAKE_CASE = self.conva(A )
_SCREAMING_SNAKE_CASE = self.act_fn(A )
_SCREAMING_SNAKE_CASE = self.drop(A )
_SCREAMING_SNAKE_CASE = self.conva(A )
_SCREAMING_SNAKE_CASE = self.drop(A )
return hidden_states
class a_ ( nn.Module ):
'''simple docstring'''
def __init__( self , A , A , A , A , A , A ) -> Union[str, Any]:
super().__init__()
_SCREAMING_SNAKE_CASE = PoolFormerPooling(A )
_SCREAMING_SNAKE_CASE = PoolFormerOutput(A , A , A , A )
_SCREAMING_SNAKE_CASE = PoolFormerGroupNorm(A )
_SCREAMING_SNAKE_CASE = PoolFormerGroupNorm(A )
# Useful for training neural nets
_SCREAMING_SNAKE_CASE = PoolFormerDropPath(A ) if drop_path > 0.0 else nn.Identity()
_SCREAMING_SNAKE_CASE = config.use_layer_scale
if config.use_layer_scale:
_SCREAMING_SNAKE_CASE = nn.Parameter(
config.layer_scale_init_value * torch.ones((A) ) , requires_grad=A )
_SCREAMING_SNAKE_CASE = nn.Parameter(
config.layer_scale_init_value * torch.ones((A) ) , requires_grad=A )
def snake_case_( self , A ) -> Optional[Any]:
if self.use_layer_scale:
_SCREAMING_SNAKE_CASE = self.pooling(self.before_norm(A ) )
_SCREAMING_SNAKE_CASE = self.layer_scale_a.unsqueeze(-1 ).unsqueeze(-1 ) * pooling_output
# First residual connection
_SCREAMING_SNAKE_CASE = hidden_states + self.drop_path(A )
_SCREAMING_SNAKE_CASE = ()
_SCREAMING_SNAKE_CASE = self.output(self.after_norm(A ) )
_SCREAMING_SNAKE_CASE = self.layer_scale_a.unsqueeze(-1 ).unsqueeze(-1 ) * layer_output
# Second residual connection
_SCREAMING_SNAKE_CASE = hidden_states + self.drop_path(A )
_SCREAMING_SNAKE_CASE = (output,) + outputs
return outputs
else:
_SCREAMING_SNAKE_CASE = self.drop_path(self.pooling(self.before_norm(A ) ) )
# First residual connection
_SCREAMING_SNAKE_CASE = pooling_output + hidden_states
_SCREAMING_SNAKE_CASE = ()
# Second residual connection inside the PoolFormerOutput block
_SCREAMING_SNAKE_CASE = self.drop_path(self.output(self.after_norm(A ) ) )
_SCREAMING_SNAKE_CASE = hidden_states + layer_output
_SCREAMING_SNAKE_CASE = (output,) + outputs
return outputs
class a_ ( nn.Module ):
'''simple docstring'''
def __init__( self , A ) -> Any:
super().__init__()
_SCREAMING_SNAKE_CASE = config
# stochastic depth decay rule
_SCREAMING_SNAKE_CASE = [x.item() for x in torch.linspace(0 , config.drop_path_rate , sum(config.depths ) )]
# patch embeddings
_SCREAMING_SNAKE_CASE = []
for i in range(config.num_encoder_blocks ):
embeddings.append(
PoolFormerEmbeddings(
patch_size=config.patch_sizes[i] , stride=config.strides[i] , padding=config.padding[i] , num_channels=config.num_channels if i == 0 else config.hidden_sizes[i - 1] , hidden_size=config.hidden_sizes[i] , ) )
_SCREAMING_SNAKE_CASE = nn.ModuleList(A )
# Transformer blocks
_SCREAMING_SNAKE_CASE = []
_SCREAMING_SNAKE_CASE = 0
for i in range(config.num_encoder_blocks ):
# each block consists of layers
_SCREAMING_SNAKE_CASE = []
if i != 0:
cur += config.depths[i - 1]
for j in range(config.depths[i] ):
layers.append(
PoolFormerLayer(
A , num_channels=config.hidden_sizes[i] , pool_size=config.pool_size , hidden_size=config.hidden_sizes[i] , intermediate_size=int(config.hidden_sizes[i] * config.mlp_ratio ) , drop_path=dpr[cur + j] , ) )
blocks.append(nn.ModuleList(A ) )
_SCREAMING_SNAKE_CASE = nn.ModuleList(A )
def snake_case_( self , A , A=False , A=True ) -> List[Any]:
_SCREAMING_SNAKE_CASE = () if output_hidden_states else None
_SCREAMING_SNAKE_CASE = pixel_values
for idx, layers in enumerate(zip(self.patch_embeddings , self.block ) ):
_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE = layers
# Get patch embeddings from hidden_states
_SCREAMING_SNAKE_CASE = embedding_layer(A )
# Send the embeddings through the blocks
for _, blk in enumerate(A ):
_SCREAMING_SNAKE_CASE = blk(A )
_SCREAMING_SNAKE_CASE = layer_outputs[0]
if output_hidden_states:
_SCREAMING_SNAKE_CASE = all_hidden_states + (hidden_states,)
if not return_dict:
return tuple(v for v in [hidden_states, all_hidden_states] if v is not None )
return BaseModelOutputWithNoAttention(last_hidden_state=A , hidden_states=A )
class a_ ( snake_case_ ):
'''simple docstring'''
UpperCamelCase = PoolFormerConfig
UpperCamelCase = '''poolformer'''
UpperCamelCase = '''pixel_values'''
UpperCamelCase = True
def snake_case_( self , A ) -> int:
if isinstance(A , (nn.Linear, nn.Conv2d) ):
module.weight.data.normal_(mean=0.0 , std=self.config.initializer_range )
if module.bias is not None:
module.bias.data.zero_()
elif isinstance(A , nn.LayerNorm ):
module.bias.data.zero_()
module.weight.data.fill_(1.0 )
def snake_case_( self , A , A=False ) -> Dict:
if isinstance(A , A ):
_SCREAMING_SNAKE_CASE = value
lowercase_ = R"""
This model is a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) sub-class. Use
it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and
behavior.
Parameters:
config ([`PoolFormerConfig`]): Model configuration class with all the parameters of the model.
Initializing with a config file does not load the weights associated with the model, only the
configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights.
"""
lowercase_ = R"""
Args:
pixel_values (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)`):
Pixel values. Pixel values can be obtained using [`AutoImageProcessor`]. See
[`PoolFormerImageProcessor.__call__`] for details.
"""
@add_start_docstrings(
'''The bare PoolFormer Model transformer outputting raw hidden-states without any specific head on top.''' , snake_case_ , )
class a_ ( snake_case_ ):
'''simple docstring'''
def __init__( self , A ) -> int:
super().__init__(A )
_SCREAMING_SNAKE_CASE = config
_SCREAMING_SNAKE_CASE = PoolFormerEncoder(A )
# Initialize weights and apply final processing
self.post_init()
def snake_case_( self ) -> Any:
return self.embeddings.patch_embeddings
@add_start_docstrings_to_model_forward(A )
@add_code_sample_docstrings(
checkpoint=_CHECKPOINT_FOR_DOC , output_type=A , config_class=_CONFIG_FOR_DOC , modality="""vision""" , expected_output=_EXPECTED_OUTPUT_SHAPE , )
def snake_case_( self , A = None , A = None , A = None , ) -> Union[Tuple, BaseModelOutputWithNoAttention]:
_SCREAMING_SNAKE_CASE = (
output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
)
_SCREAMING_SNAKE_CASE = return_dict if return_dict is not None else self.config.use_return_dict
if pixel_values is None:
raise ValueError("""You have to specify pixel_values""" )
_SCREAMING_SNAKE_CASE = self.encoder(
A , output_hidden_states=A , return_dict=A , )
_SCREAMING_SNAKE_CASE = encoder_outputs[0]
if not return_dict:
return (sequence_output, None) + encoder_outputs[1:]
return BaseModelOutputWithNoAttention(
last_hidden_state=A , hidden_states=encoder_outputs.hidden_states , )
class a_ ( nn.Module ):
'''simple docstring'''
def __init__( self , A ) -> Dict:
super().__init__()
_SCREAMING_SNAKE_CASE = nn.Linear(config.hidden_size , config.hidden_size )
def snake_case_( self , A ) -> str:
_SCREAMING_SNAKE_CASE = self.dense(A )
return output
@add_start_docstrings(
'''
PoolFormer Model transformer with an image classification head on top
''' , snake_case_ , )
class a_ ( snake_case_ ):
'''simple docstring'''
def __init__( self , A ) -> Optional[Any]:
super().__init__(A )
_SCREAMING_SNAKE_CASE = config.num_labels
_SCREAMING_SNAKE_CASE = PoolFormerModel(A )
# Final norm
_SCREAMING_SNAKE_CASE = PoolFormerGroupNorm(config.hidden_sizes[-1] )
# Classifier head
_SCREAMING_SNAKE_CASE = (
nn.Linear(config.hidden_sizes[-1] , config.num_labels ) if config.num_labels > 0 else nn.Identity()
)
# Initialize weights and apply final processing
self.post_init()
@add_start_docstrings_to_model_forward(A )
@add_code_sample_docstrings(
checkpoint=_IMAGE_CLASS_CHECKPOINT , output_type=A , config_class=_CONFIG_FOR_DOC , expected_output=_IMAGE_CLASS_EXPECTED_OUTPUT , )
def snake_case_( self , A = None , A = None , A = None , A = None , ) -> Union[Tuple, ImageClassifierOutputWithNoAttention]:
_SCREAMING_SNAKE_CASE = return_dict if return_dict is not None else self.config.use_return_dict
_SCREAMING_SNAKE_CASE = self.poolformer(
A , output_hidden_states=A , return_dict=A , )
_SCREAMING_SNAKE_CASE = outputs[0]
_SCREAMING_SNAKE_CASE = self.classifier(self.norm(A ).mean([-2, -1] ) )
_SCREAMING_SNAKE_CASE = None
if labels is not None:
if self.config.problem_type is None:
if self.num_labels == 1:
_SCREAMING_SNAKE_CASE = """regression"""
elif self.num_labels > 1 and (labels.dtype == torch.long or labels.dtype == torch.int):
_SCREAMING_SNAKE_CASE = """single_label_classification"""
else:
_SCREAMING_SNAKE_CASE = """multi_label_classification"""
if self.config.problem_type == "regression":
_SCREAMING_SNAKE_CASE = MSELoss()
if self.num_labels == 1:
_SCREAMING_SNAKE_CASE = loss_fct(logits.squeeze() , labels.squeeze() )
else:
_SCREAMING_SNAKE_CASE = loss_fct(A , A )
elif self.config.problem_type == "single_label_classification":
_SCREAMING_SNAKE_CASE = CrossEntropyLoss()
_SCREAMING_SNAKE_CASE = loss_fct(logits.view(-1 , self.num_labels ) , labels.view(-1 ) )
elif self.config.problem_type == "multi_label_classification":
_SCREAMING_SNAKE_CASE = BCEWithLogitsLoss()
_SCREAMING_SNAKE_CASE = loss_fct(A , A )
if not return_dict:
_SCREAMING_SNAKE_CASE = (logits,) + outputs[2:]
return ((loss,) + output) if loss is not None else output
return ImageClassifierOutputWithNoAttention(loss=A , logits=A , hidden_states=outputs.hidden_states )
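# The loss selection above infers `problem_type` from `num_labels` and the label
# dtype when the config leaves it unset. A standalone restatement of that rule
# (`_infer_problem_type` is a hypothetical helper; `torch` is already imported
# at the top of this module):
def _infer_problem_type(num_labels: int, labels: torch.Tensor) -> str:
    if num_labels == 1:
        return "regression"
    if num_labels > 1 and labels.dtype in (torch.long, torch.int):
        return "single_label_classification"
    return "multi_label_classification"


if __name__ == "__main__":
    assert _infer_problem_type(1, torch.tensor([0.5])) == "regression"
    assert _infer_problem_type(3, torch.tensor([2])) == "single_label_classification"
    assert _infer_problem_type(3, torch.tensor([[0.0, 1.0, 1.0]])) == "multi_label_classification"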
| 58 | 1 |
"""simple docstring"""
from .imports import is_rich_available
if is_rich_available():
from rich.traceback import install
install(show_locals=False)
else:
raise ModuleNotFoundError("""To use the rich extension, install rich with `pip install rich`""")
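# The guard above is the standard optional-dependency pattern: probe for the
# package, wire it in if present, fail loudly with an install hint otherwise.
# The same check works without importing the package, via importlib (generic
# sketch, not part of the module above):
import importlib.util


def require(package: str) -> None:
    if importlib.util.find_spec(package) is None:
        raise ModuleNotFoundError(f"To use this extension, install {package} with `pip install {package}`.")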
| 351 |
"""simple docstring"""
import warnings
from ...utils import logging
from .image_processing_flava import FlavaImageProcessor
lowerCamelCase = logging.get_logger(__name__)
class FlavaFeatureExtractor(FlavaImageProcessor):
    def __init__(self, *args, **kwargs) -> None:
        warnings.warn(
            "The class FlavaFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please"
            " use FlavaImageProcessor instead.",
            FutureWarning,
        )
        super().__init__(*args, **kwargs)
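# The shim above is the usual rename-with-warning pattern: the deprecated class
# keeps working by subclassing its replacement, but emits a FutureWarning on
# construction. In miniature (toy class names, for illustration only):
#
#     class OldName(NewName):
#         def __init__(self, *args, **kwargs):
#             warnings.warn("OldName is deprecated; use NewName instead.", FutureWarning)
#             super().__init__(*args, **kwargs)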
| 241 | 0 |
"""simple docstring"""
def split(string: str, separator: str = " ") -> list:
    split_words = []

    last_index = 0
    for index, char in enumerate(string):
        if char == separator:
            split_words.append(string[last_index:index])
            last_index = index + 1
        elif index + 1 == len(string):
            split_words.append(string[last_index : index + 1])
    return split_words
if __name__ == "__main__":
from doctest import testmod
testmod()
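# Cross-check of the helper above (the name `split` is a reconstruction;
# nothing in this file calls it) against str.split; note that it drops a
# trailing empty field where str.split("#") would keep one (e.g. for "a#"):
if __name__ == "__main__":
    assert split("apple#banana#cherry", "#") == ["apple", "banana", "cherry"]
    assert split("Hello there") == "Hello there".split()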
| 261 |
"""simple docstring"""
import unittest
from transformers import BertGenerationConfig, is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import BertGenerationDecoder, BertGenerationEncoder
class BertGenerationEncoderTester:
    def __init__(self, parent, batch_size=13, seq_length=7, is_training=True, use_input_mask=True, vocab_size=99, hidden_size=32, num_hidden_layers=5, num_attention_heads=4, intermediate_size=37, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=50, initializer_range=0.02, use_labels=True, scope=None, ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.initializer_range = initializer_range
        self.use_labels = use_labels
        self.scope = scope
    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])

        if self.use_labels:
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        config = self.get_config()

        return config, input_ids, input_mask, token_labels
    def get_config(self):
        return BertGenerationConfig(
            vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , is_decoder=False , initializer_range=self.initializer_range , )
    def prepare_config_and_inputs_for_decoder(self):
        (
            config,
            input_ids,
            input_mask,
            token_labels,
        ) = self.prepare_config_and_inputs()

        config.is_decoder = True
        encoder_hidden_states = floats_tensor([self.batch_size, self.seq_length, self.hidden_size])
        encoder_attention_mask = ids_tensor([self.batch_size, self.seq_length], vocab_size=2)
return (
config,
input_ids,
input_mask,
token_labels,
encoder_hidden_states,
encoder_attention_mask,
)
    def create_and_check_model(self, config, input_ids, input_mask, token_labels, **kwargs):
        model = BertGenerationEncoder(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask)
        result = model(input_ids)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))
    def create_and_check_model_as_decoder(
        self, config, input_ids, input_mask, token_labels, encoder_hidden_states, encoder_attention_mask, **kwargs
    ):
        config.add_cross_attention = True
        model = BertGenerationEncoder(config=config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids,
            attention_mask=input_mask,
            encoder_hidden_states=encoder_hidden_states,
            encoder_attention_mask=encoder_attention_mask,
        )
        result = model(
            input_ids,
            attention_mask=input_mask,
            encoder_hidden_states=encoder_hidden_states,
        )
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))
    def create_and_check_decoder_model_past_large_inputs(
        self, config, input_ids, input_mask, token_labels, encoder_hidden_states, encoder_attention_mask, **kwargs
    ):
        config.is_decoder = True
        config.add_cross_attention = True
        model = BertGenerationDecoder(config=config).to(torch_device).eval()

        # first forward pass
        outputs = model(
            input_ids,
            attention_mask=input_mask,
            encoder_hidden_states=encoder_hidden_states,
            encoder_attention_mask=encoder_attention_mask,
            use_cache=True,
        )
        past_key_values = outputs.past_key_values

        # create hypothetical multiple next tokens and extend to next_input_ids
        next_tokens = ids_tensor((self.batch_size, 3), config.vocab_size)
        next_mask = ids_tensor((self.batch_size, 3), vocab_size=2)

        # append to next input_ids and attention_mask
        next_input_ids = torch.cat([input_ids, next_tokens], dim=-1)
        next_attention_mask = torch.cat([input_mask, next_mask], dim=-1)

        output_from_no_past = model(
            next_input_ids,
            attention_mask=next_attention_mask,
            encoder_hidden_states=encoder_hidden_states,
            encoder_attention_mask=encoder_attention_mask,
            output_hidden_states=True,
        )["hidden_states"][0]
        output_from_past = model(
            next_tokens,
            attention_mask=next_attention_mask,
            encoder_hidden_states=encoder_hidden_states,
            encoder_attention_mask=encoder_attention_mask,
            past_key_values=past_key_values,
            output_hidden_states=True,
        )["hidden_states"][0]

        # select random slice
        random_slice_idx = ids_tensor((1,), output_from_past.shape[-1]).item()
        output_from_no_past_slice = output_from_no_past[:, -3:, random_slice_idx].detach()
        output_from_past_slice = output_from_past[:, :, random_slice_idx].detach()

        self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1])

        # test that outputs are equal for slice
        self.parent.assertTrue(torch.allclose(output_from_past_slice, output_from_no_past_slice, atol=1e-3))
    def create_and_check_for_causal_lm(self, config, input_ids, input_mask, token_labels, *args):
        model = BertGenerationDecoder(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))
    def prepare_config_and_inputs_for_common(self):
        config, input_ids, input_mask, token_labels = self.prepare_config_and_inputs()
        inputs_dict = {"input_ids": input_ids, "attention_mask": input_mask}
        return config, inputs_dict
@require_torch
class BertGenerationEncoderTest(ModelTesterMixin, GenerationTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (BertGenerationEncoder, BertGenerationDecoder) if is_torch_available() else ()
    all_generative_model_classes = (BertGenerationDecoder,) if is_torch_available() else ()
    pipeline_model_mapping = (
        {"feature-extraction": BertGenerationEncoder, "text-generation": BertGenerationDecoder}
        if is_torch_available()
        else {}
    )
    def setUp(self):
        self.model_tester = BertGenerationEncoderTester(self)
        self.config_tester = ConfigTester(self, config_class=BertGenerationConfig, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()
    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)
    def test_model_as_bert(self):
        config, input_ids, input_mask, token_labels = self.model_tester.prepare_config_and_inputs()
        config.model_type = "bert"
        self.model_tester.create_and_check_model(config, input_ids, input_mask, token_labels)
    def test_model_as_decoder(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs_for_decoder()
        self.model_tester.create_and_check_model_as_decoder(*config_and_inputs)
    def test_decoder_model_past_with_large_inputs(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs_for_decoder()
        self.model_tester.create_and_check_decoder_model_past_large_inputs(*config_and_inputs)
    def test_model_as_decoder_with_default_input_mask(self):
        # This regression test was failing with PyTorch < 1.3
        (
            config,
            input_ids,
            input_mask,
            token_labels,
            encoder_hidden_states,
            encoder_attention_mask,
        ) = self.model_tester.prepare_config_and_inputs_for_decoder()

        input_mask = None

        self.model_tester.create_and_check_model_as_decoder(
            config,
            input_ids,
            input_mask,
            token_labels,
            encoder_hidden_states,
            encoder_attention_mask,
        )
    def test_for_causal_lm(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs_for_decoder()
        self.model_tester.create_and_check_for_causal_lm(*config_and_inputs)
    @slow
    def test_model_from_pretrained(self):
        model = BertGenerationEncoder.from_pretrained("google/bert_for_seq_generation_L-24_bbc_encoder")
        self.assertIsNotNone(model)
@require_torch
class BertGenerationEncoderIntegrationTest(unittest.TestCase):
    @slow
    def test_inference_no_head_absolute_embedding(self):
        model = BertGenerationEncoder.from_pretrained("google/bert_for_seq_generation_L-24_bbc_encoder")
        input_ids = torch.tensor([[101, 7592, 1010, 2026, 3899, 2003, 10140, 102]])
        with torch.no_grad():
            output = model(input_ids)[0]
        expected_shape = torch.Size([1, 8, 1024])
        self.assertEqual(output.shape, expected_shape)
        expected_slice = torch.tensor(
            [[[0.1775, 0.0083, -0.0321], [1.6002, 0.1287, 0.3912], [2.1473, 0.5791, 0.6066]]]
        )
        self.assertTrue(torch.allclose(output[:, :3, :3], expected_slice, atol=1e-4))
@require_torch
class BertGenerationDecoderIntegrationTest(unittest.TestCase):
    @slow
    def test_inference_no_head_absolute_embedding(self):
        model = BertGenerationDecoder.from_pretrained("google/bert_for_seq_generation_L-24_bbc_encoder")
        input_ids = torch.tensor([[101, 7592, 1010, 2026, 3899, 2003, 10140, 102]])
        with torch.no_grad():
            output = model(input_ids)[0]
        expected_shape = torch.Size([1, 8, 50358])
        self.assertEqual(output.shape, expected_shape)
        expected_slice = torch.tensor(
            [[[-0.5788, -2.5994, -3.7054], [0.0438, 4.7997, 1.8795], [1.5862, 6.6409, 4.4638]]]
        )
        self.assertTrue(torch.allclose(output[:, :3, :3], expected_slice, atol=1e-4))
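# The cache test above checks the incremental-decoding invariant: running the
# full sequence in one pass and running it in two chunks with a cache must agree
# on the new positions. Toy restatement with a cumulative sum standing in for
# the model:
if __name__ == "__main__" and is_torch_available():
    full = torch.cumsum(torch.arange(8.0), dim=0)
    cached = torch.cumsum(torch.arange(8.0)[:5], dim=0)[-1] + torch.cumsum(torch.arange(8.0)[5:], dim=0)
    assert torch.allclose(full[-3:], cached)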
| 261 | 1 |
from typing import List, Union
from ..utils import (
add_end_docstrings,
is_tf_available,
is_torch_available,
is_vision_available,
logging,
requires_backends,
)
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_vision_available():
from PIL import Image
from ..image_utils import load_image
if is_tf_available():
import tensorflow as tf
from ..models.auto.modeling_tf_auto import TF_MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING
from ..tf_utils import stable_softmax
if is_torch_available():
from ..models.auto.modeling_auto import MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING
__A : Optional[Any] = logging.get_logger(__name__)
@add_end_docstrings(PIPELINE_INIT_ARGS)
class ImageClassificationPipeline(Pipeline):
    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
requires_backends(self , """vision""" )
self.check_model_type(
TF_MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING
if self.framework == """tf"""
else MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING )
    def _sanitize_parameters(self, top_k=None):
        postprocess_params = {}
        if top_k is not None:
            postprocess_params["top_k"] = top_k
        return {}, {}, postprocess_params
    def __call__(self, images, **kwargs):
        return super().__call__(images, **kwargs)
    def preprocess(self, image):
        image = load_image(image)
        model_inputs = self.image_processor(images=image, return_tensors=self.framework)
        return model_inputs
    def _forward(self, model_inputs):
        model_outputs = self.model(**model_inputs)
        return model_outputs
    def postprocess(self, model_outputs, top_k=5):
        if top_k > self.model.config.num_labels:
            top_k = self.model.config.num_labels

        if self.framework == "pt":
            probs = model_outputs.logits.softmax(-1)[0]
            scores, ids = probs.topk(top_k)
        elif self.framework == "tf":
            probs = stable_softmax(model_outputs.logits, axis=-1)[0]
            topk = tf.math.top_k(probs, k=top_k)
            scores, ids = topk.values.numpy(), topk.indices.numpy()
        else:
            raise ValueError(f'Unsupported framework: {self.framework}')

        scores = scores.tolist()
        ids = ids.tolist()
        return [{"score": score, "label": self.model.config.id2label[_id]} for score, _id in zip(scores, ids)]
| 355 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
_import_structure = {'configuration_glpn': ['GLPN_PRETRAINED_CONFIG_ARCHIVE_MAP', 'GLPNConfig']}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['feature_extraction_glpn'] = ['GLPNFeatureExtractor']
    _import_structure['image_processing_glpn'] = ['GLPNImageProcessor']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_glpn'] = [
        'GLPN_PRETRAINED_MODEL_ARCHIVE_LIST',
        'GLPNForDepthEstimation',
        'GLPNLayer',
        'GLPNModel',
        'GLPNPreTrainedModel',
    ]
if TYPE_CHECKING:
from .configuration_glpn import GLPN_PRETRAINED_CONFIG_ARCHIVE_MAP, GLPNConfig
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_glpn import GLPNFeatureExtractor
from .image_processing_glpn import GLPNImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_glpn import (
GLPN_PRETRAINED_MODEL_ARCHIVE_LIST,
GLPNForDepthEstimation,
GLPNLayer,
GLPNModel,
GLPNPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
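# _LazyModule defers the heavy torch/vision imports until an attribute is first
# accessed. The core mechanism in miniature (a generic sketch using importlib,
# not the transformers implementation):
import importlib


class _Lazy:
    def __init__(self, name):
        self._name, self._mod = name, None

    def __getattr__(self, attr):
        if self._mod is None:
            self._mod = importlib.import_module(self._name)
        return getattr(self._mod, attr)


# _Lazy("json").dumps({"a": 1})  # "json" is only imported on first attribute use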
| 49 | 0 |
'''simple docstring'''
from dataclasses import dataclass
from typing import Optional, Tuple, Union
import torch
import torch.nn as nn
from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import BaseOutput
from .embeddings import GaussianFourierProjection, TimestepEmbedding, Timesteps
from .modeling_utils import ModelMixin
from .unet_1d_blocks import get_down_block, get_mid_block, get_out_block, get_up_block
@dataclass
class UNet1DOutput(BaseOutput):
    sample: torch.FloatTensor


class UNet1DModel(ModelMixin, ConfigMixin):
@register_to_config
def __init__( self , lowerCAmelCase_ = 6_55_36 , lowerCAmelCase_ = None , lowerCAmelCase_ = 2 , lowerCAmelCase_ = 2 , lowerCAmelCase_ = 0 , lowerCAmelCase_ = "fourier" , lowerCAmelCase_ = True , lowerCAmelCase_ = False , lowerCAmelCase_ = 0.0 , lowerCAmelCase_ = ("DownBlock1DNoSkip", "DownBlock1D", "AttnDownBlock1D") , lowerCAmelCase_ = ("AttnUpBlock1D", "UpBlock1D", "UpBlock1DNoSkip") , lowerCAmelCase_ = "UNetMidBlock1D" , lowerCAmelCase_ = None , lowerCAmelCase_ = (32, 32, 64) , lowerCAmelCase_ = None , lowerCAmelCase_ = 8 , lowerCAmelCase_ = 1 , lowerCAmelCase_ = False , ):
"""simple docstring"""
super().__init__()
_snake_case = sample_size
# time
if time_embedding_type == "fourier":
_snake_case = GaussianFourierProjection(
embedding_size=8 , set_W_to_weight=lowerCAmelCase_ , log=lowerCAmelCase_ , flip_sin_to_cos=lowerCAmelCase_ )
_snake_case = 2 * block_out_channels[0]
elif time_embedding_type == "positional":
_snake_case = Timesteps(
block_out_channels[0] , flip_sin_to_cos=lowerCAmelCase_ , downscale_freq_shift=lowerCAmelCase_ )
_snake_case = block_out_channels[0]
if use_timestep_embedding:
_snake_case = block_out_channels[0] * 4
_snake_case = TimestepEmbedding(
in_channels=lowerCAmelCase_ , time_embed_dim=lowerCAmelCase_ , act_fn=lowerCAmelCase_ , out_dim=block_out_channels[0] , )
_snake_case = nn.ModuleList([] )
_snake_case = None
_snake_case = nn.ModuleList([] )
_snake_case = None
# down
_snake_case = in_channels
for i, down_block_type in enumerate(lowerCAmelCase_ ):
_snake_case = output_channel
_snake_case = block_out_channels[i]
if i == 0:
input_channel += extra_in_channels
_snake_case = i == len(lowerCAmelCase_ ) - 1
_snake_case = get_down_block(
lowerCAmelCase_ , num_layers=lowerCAmelCase_ , in_channels=lowerCAmelCase_ , out_channels=lowerCAmelCase_ , temb_channels=block_out_channels[0] , add_downsample=not is_final_block or downsample_each_block , )
self.down_blocks.append(lowerCAmelCase_ )
# mid
_snake_case = get_mid_block(
lowerCAmelCase_ , in_channels=block_out_channels[-1] , mid_channels=block_out_channels[-1] , out_channels=block_out_channels[-1] , embed_dim=block_out_channels[0] , num_layers=lowerCAmelCase_ , add_downsample=lowerCAmelCase_ , )
# up
_snake_case = list(reversed(lowerCAmelCase_ ) )
_snake_case = reversed_block_out_channels[0]
if out_block_type is None:
_snake_case = out_channels
else:
_snake_case = block_out_channels[0]
for i, up_block_type in enumerate(lowerCAmelCase_ ):
_snake_case = output_channel
_snake_case = (
reversed_block_out_channels[i + 1] if i < len(lowerCAmelCase_ ) - 1 else final_upsample_channels
)
_snake_case = i == len(lowerCAmelCase_ ) - 1
_snake_case = get_up_block(
lowerCAmelCase_ , num_layers=lowerCAmelCase_ , in_channels=lowerCAmelCase_ , out_channels=lowerCAmelCase_ , temb_channels=block_out_channels[0] , add_upsample=not is_final_block , )
self.up_blocks.append(lowerCAmelCase_ )
_snake_case = output_channel
# out
_snake_case = norm_num_groups if norm_num_groups is not None else min(block_out_channels[0] // 4 , 32 )
_snake_case = get_out_block(
out_block_type=lowerCAmelCase_ , num_groups_out=lowerCAmelCase_ , embed_dim=block_out_channels[0] , out_channels=lowerCAmelCase_ , act_fn=lowerCAmelCase_ , fc_dim=block_out_channels[-1] // 4 , )
def lowerCamelCase ( self , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ = True , ):
"""simple docstring"""
_snake_case = timestep
if not torch.is_tensor(lowerCAmelCase_ ):
_snake_case = torch.tensor([timesteps] , dtype=torch.long , device=sample.device )
elif torch.is_tensor(lowerCAmelCase_ ) and len(timesteps.shape ) == 0:
_snake_case = timesteps[None].to(sample.device )
_snake_case = self.time_proj(lowerCAmelCase_ )
if self.config.use_timestep_embedding:
_snake_case = self.time_mlp(lowerCAmelCase_ )
else:
_snake_case = timestep_embed[..., None]
_snake_case = timestep_embed.repeat([1, 1, sample.shape[2]] ).to(sample.dtype )
_snake_case = timestep_embed.broadcast_to((sample.shape[:1] + timestep_embed.shape[1:]) )
# 2. down
_snake_case = ()
for downsample_block in self.down_blocks:
_snake_case , _snake_case = downsample_block(hidden_states=lowerCAmelCase_ , temb=lowerCAmelCase_ )
down_block_res_samples += res_samples
# 3. mid
if self.mid_block:
_snake_case = self.mid_block(lowerCAmelCase_ , lowerCAmelCase_ )
# 4. up
for i, upsample_block in enumerate(self.up_blocks ):
_snake_case = down_block_res_samples[-1:]
_snake_case = down_block_res_samples[:-1]
_snake_case = upsample_block(lowerCAmelCase_ , res_hidden_states_tuple=lowerCAmelCase_ , temb=lowerCAmelCase_ )
# 5. post-process
if self.out_block:
_snake_case = self.out_block(lowerCAmelCase_ , lowerCAmelCase_ )
if not return_dict:
return (sample,)
return UNet1DOutput(sample=lowerCAmelCase_ )
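# The timestep handling at the top of the forward pass above normalises python
# scalars and 0-d tensors into a batched 1-d tensor before projection; the same
# guard in isolation (hypothetical helper name):
def _as_batched_timesteps(timestep, sample: torch.Tensor) -> torch.Tensor:
    if not torch.is_tensor(timestep):
        return torch.tensor([timestep], dtype=torch.long, device=sample.device)
    if len(timestep.shape) == 0:
        return timestep[None].to(sample.device)
    return timestep


if __name__ == "__main__":
    assert _as_batched_timesteps(5, torch.zeros(2, 3)).shape == (1,)
    assert _as_batched_timesteps(torch.tensor(5), torch.zeros(2, 3)).shape == (1,)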
| 42 |
'''simple docstring'''
def solution(limit: int = 28_123) -> int:
    '''simple docstring'''
    sum_divs = [1] * (limit + 1)

    for i in range(2, int(limit**0.5) + 1):
        sum_divs[i * i] += i
        for k in range(i + 1, limit // i + 1):
            sum_divs[k * i] += k + i

    abundants = set()
    res = 0

    for n in range(1, limit + 1):
        if sum_divs[n] > n:
            abundants.add(n)

        if not any((n - a) in abundants for a in abundants):
            res += n

    return res


if __name__ == "__main__":
    print(solution())
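# Cross-check of the sieve-style proper-divisor sums above against the naive
# definition; 12 is the smallest abundant number (1 + 2 + 3 + 4 + 6 = 16 > 12).
if __name__ == "__main__":
    assert sum(i for i in range(1, 12) if 12 % i == 0) == 16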
| 141 | 0 |
import os
from typing import Any, Callable, Dict, List, Optional, Tuple, Union
import torch
from torch import nn
from ...models.controlnet import ControlNetModel, ControlNetOutput
from ...models.modeling_utils import ModelMixin
from ...utils import logging
logger = logging.get_logger(__name__)
class MultiControlNetModel(ModelMixin):
    def __init__(self, controlnets) -> None:
        super().__init__()
        self.nets = nn.ModuleList(controlnets)

    def forward(
        self,
        sample,
        timestep,
        encoder_hidden_states,
        controlnet_cond,
        conditioning_scale,
        class_labels=None,
        timestep_cond=None,
        attention_mask=None,
        cross_attention_kwargs=None,
        guess_mode=False,
        return_dict=True,
    ) -> Union[ControlNetOutput, Tuple]:
        for i, (image, scale, controlnet) in enumerate(zip(controlnet_cond, conditioning_scale, self.nets)):
            down_samples, mid_sample = controlnet(
                sample,
                timestep,
                encoder_hidden_states,
                image,
                scale,
                class_labels,
                timestep_cond,
                attention_mask,
                cross_attention_kwargs,
                guess_mode,
                return_dict,
            )

            # merge samples
            if i == 0:
                down_block_res_samples, mid_block_res_sample = down_samples, mid_sample
            else:
                down_block_res_samples = [
                    samples_prev + samples_curr
                    for samples_prev, samples_curr in zip(down_block_res_samples, down_samples)
                ]
                mid_block_res_sample += mid_sample

        return down_block_res_samples, mid_block_res_sample

    def save_pretrained(
        self,
        save_directory,
        is_main_process=True,
        save_function=None,
        safe_serialization=False,
        variant=None,
    ) -> None:
        idx = 0
        model_path_to_save = save_directory
        for controlnet in self.nets:
            controlnet.save_pretrained(
                model_path_to_save,
                is_main_process=is_main_process,
                save_function=save_function,
                safe_serialization=safe_serialization,
                variant=variant,
            )

            idx += 1
            model_path_to_save = model_path_to_save + f'_{idx}'

    @classmethod
    def from_pretrained(cls, pretrained_model_path, **kwargs):
        idx = 0
        controlnets = []

        # load controlnet and append to list until no controlnet directory exists anymore
        # first controlnet has to be saved under `./mydirectory/controlnet` to be compliant with `DiffusionPipeline.from_pretrained`
        # second, third, ... controlnets have to be saved under `./mydirectory/controlnet_1`, `./mydirectory/controlnet_2`, ...
        model_path_to_load = pretrained_model_path
        while os.path.isdir(model_path_to_load):
            controlnet = ControlNetModel.from_pretrained(model_path_to_load, **kwargs)
            controlnets.append(controlnet)

            idx += 1
            model_path_to_load = pretrained_model_path + f'_{idx}'

        logger.info(f'{len(controlnets)} controlnets loaded from {pretrained_model_path}.')

        if len(controlnets) == 0:
            raise ValueError(
                f'No ControlNets found under {os.path.dirname(pretrained_model_path)}. Expected at least {pretrained_model_path + "_0"}.'
            )

        return cls(controlnets)
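# MultiControlNet merges the per-net residuals by elementwise summation — one
# list of down-block tensors plus one mid-block tensor per controlnet. The same
# fold on plain tensors:
if __name__ == "__main__":
    per_net = [[torch.ones(2, 2), torch.ones(2, 2)] for _ in range(3)]
    merged = per_net[0]
    for down in per_net[1:]:
        merged = [prev + cur for prev, cur in zip(merged, down)]
    assert torch.equal(merged[0], torch.full((2, 2), 3.0))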
| 351 |
import hashlib
import unittest
from transformers import MODEL_FOR_DEPTH_ESTIMATION_MAPPING, is_torch_available, is_vision_available
from transformers.pipelines import DepthEstimationPipeline, pipeline
from transformers.testing_utils import (
is_pipeline_test,
nested_simplify,
require_tf,
require_timm,
require_torch,
require_vision,
slow,
)
from .test_pipelines_common import ANY
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
else:
    class Image:
        @staticmethod
        def open(*args, **kwargs):
            pass
def hashimage(image: Image) -> str:
    """simple docstring"""
    m = hashlib.md5(image.tobytes())
    return m.hexdigest()
@is_pipeline_test
@require_vision
@require_timm
@require_torch
class DepthEstimationPipelineTests(unittest.TestCase):
    model_mapping = MODEL_FOR_DEPTH_ESTIMATION_MAPPING
    def get_test_pipeline(self, model, tokenizer, processor):
        depth_estimator = DepthEstimationPipeline(model=model, image_processor=processor)
        return depth_estimator, [
            "./tests/fixtures/tests_samples/COCO/000000039769.png",
            "./tests/fixtures/tests_samples/COCO/000000039769.png",
        ]
    def run_pipeline_test(self, depth_estimator, examples):
        outputs = depth_estimator('./tests/fixtures/tests_samples/COCO/000000039769.png')
        self.assertEqual({'predicted_depth': ANY(torch.Tensor), 'depth': ANY(Image.Image)}, outputs)
        import datasets

        dataset = datasets.load_dataset('hf-internal-testing/fixtures_image_utils', 'image', split='test')
        outputs = depth_estimator(
            [
                Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png'),
                'http://images.cocodataset.org/val2017/000000039769.jpg',
                # RGBA
                dataset[0]['file'],
                # LA
                dataset[1]['file'],
                # L
                dataset[2]['file'],
            ]
        )
        self.assertEqual(
            [
                {'predicted_depth': ANY(torch.Tensor), 'depth': ANY(Image.Image)},
                {'predicted_depth': ANY(torch.Tensor), 'depth': ANY(Image.Image)},
                {'predicted_depth': ANY(torch.Tensor), 'depth': ANY(Image.Image)},
                {'predicted_depth': ANY(torch.Tensor), 'depth': ANY(Image.Image)},
                {'predicted_depth': ANY(torch.Tensor), 'depth': ANY(Image.Image)},
            ],
            outputs,
        )
@require_tf
@unittest.skip('Depth estimation is not implemented in TF' )
    def test_small_model_tf( self ):
'''simple docstring'''
pass
@slow
@require_torch
    def test_large_model_pt( self ):
'''simple docstring'''
        model_id = 'Intel/dpt-large'
        depth_estimator = pipeline('depth-estimation' , model=model_id )
        outputs = depth_estimator('http://images.cocodataset.org/val2017/000000039769.jpg' )
        outputs['depth'] = hashimage(outputs['depth'] )
# This seems flaky.
# self.assertEqual(outputs["depth"], "1a39394e282e9f3b0741a90b9f108977")
self.assertEqual(nested_simplify(outputs['predicted_depth'].max().item() ) , 29.304 )
self.assertEqual(nested_simplify(outputs['predicted_depth'].min().item() ) , 2.662 )
@require_torch
    def test_small_model_pt( self ):
'''simple docstring'''
# This is highly irregular to have no small tests.
self.skipTest('There is not hf-internal-testing tiny model for either GLPN nor DPT' )
| 308 | 0 |
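# A hedged usage sketch of the pipeline exercised by the tests above; it assumes network
# access plus torch/timm installed, and reuses the "Intel/dpt-large" checkpoint from the
# slow test.
from transformers import pipeline

depth_estimator = pipeline("depth-estimation", model="Intel/dpt-large")
outputs = depth_estimator("http://images.cocodataset.org/val2017/000000039769.jpg")

print(type(outputs["depth"]))            # PIL.Image.Image holding the rendered depth map
print(outputs["predicted_depth"].shape)  # raw torch.Tensor of predicted depths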
'''simple docstring'''
import math
import time
from transformers import Trainer, is_torch_tpu_available
from transformers.trainer_utils import PredictionOutput, speed_metrics
if is_torch_tpu_available(check_device=False):
import torch_xla.core.xla_model as xm
import torch_xla.debug.metrics as met
class _a ( Trainer ):
    def __init__( self , *args , eval_examples=None , post_process_function=None , **kwargs ):
        '''simple docstring'''
        super().__init__(*args , **kwargs )
        self.eval_examples = eval_examples
        self.post_process_function = post_process_function
    def evaluate( self , eval_dataset=None , eval_examples=None , ignore_keys=None , metric_key_prefix: str = "eval" ):
        '''simple docstring'''
        eval_dataset = self.eval_dataset if eval_dataset is None else eval_dataset
        eval_dataloader = self.get_eval_dataloader(eval_dataset )
        eval_examples = self.eval_examples if eval_examples is None else eval_examples

        # Temporarily disable metric computation, we will do it in the loop here.
        compute_metrics = self.compute_metrics
        self.compute_metrics = None
        eval_loop = self.prediction_loop if self.args.use_legacy_prediction_loop else self.evaluation_loop
        start_time = time.time()
        try:
            output = eval_loop(
                eval_dataloader , description='''Evaluation''' , prediction_loss_only=True if compute_metrics is None else None , ignore_keys=ignore_keys , metric_key_prefix=metric_key_prefix , )
        finally:
            self.compute_metrics = compute_metrics
        total_batch_size = self.args.eval_batch_size * self.args.world_size
        if f"{metric_key_prefix}_jit_compilation_time" in output.metrics:
            start_time += output.metrics[f"{metric_key_prefix}_jit_compilation_time"]
        output.metrics.update(
            speed_metrics(
                metric_key_prefix , start_time , num_samples=output.num_samples , num_steps=math.ceil(output.num_samples / total_batch_size ) , ) )
        if self.post_process_function is not None and self.compute_metrics is not None and self.args.should_save:
            # Only the main node write the results by default
            eval_preds = self.post_process_function(eval_examples , eval_dataset , output.predictions )
            metrics = self.compute_metrics(eval_preds )

            # Prefix all keys with metric_key_prefix + '_'
            for key in list(metrics.keys() ):
                if not key.startswith(f"{metric_key_prefix}_" ):
                    metrics[f"{metric_key_prefix}_{key}"] = metrics.pop(key )
            metrics.update(output.metrics )
        else:
            metrics = output.metrics

        if self.args.should_log:
            # Only the main node log the results by default
            self.log(metrics )

        if self.args.tpu_metrics_debug or self.args.debug:
            # tpu-comment: Logging debug metrics for PyTorch/XLA (compile, execute times, ops, etc.)
            xm.master_print(met.metrics_report() )

        self.control = self.callback_handler.on_evaluate(self.args , self.state , self.control , metrics )
        return metrics
    def predict( self , predict_dataset , predict_examples , ignore_keys=None , metric_key_prefix: str = "test" ):
        '''simple docstring'''
        predict_dataloader = self.get_test_dataloader(predict_dataset )

        # Temporarily disable metric computation, we will do it in the loop here.
        compute_metrics = self.compute_metrics
        self.compute_metrics = None
        eval_loop = self.prediction_loop if self.args.use_legacy_prediction_loop else self.evaluation_loop
        start_time = time.time()
        try:
            output = eval_loop(
                predict_dataloader , description='''Prediction''' , prediction_loss_only=True if compute_metrics is None else None , ignore_keys=ignore_keys , metric_key_prefix=metric_key_prefix , )
        finally:
            self.compute_metrics = compute_metrics
        total_batch_size = self.args.eval_batch_size * self.args.world_size
        if f"{metric_key_prefix}_jit_compilation_time" in output.metrics:
            start_time += output.metrics[f"{metric_key_prefix}_jit_compilation_time"]
        output.metrics.update(
            speed_metrics(
                metric_key_prefix , start_time , num_samples=output.num_samples , num_steps=math.ceil(output.num_samples / total_batch_size ) , ) )

        if self.post_process_function is None or self.compute_metrics is None:
            return output

        predictions = self.post_process_function(predict_examples , predict_dataset , output.predictions , '''predict''' )
        metrics = self.compute_metrics(predictions )

        # Prefix all keys with metric_key_prefix + '_'
        for key in list(metrics.keys() ):
            if not key.startswith(f"{metric_key_prefix}_" ):
                metrics[f"{metric_key_prefix}_{key}"] = metrics.pop(key )
        metrics.update(output.metrics )
        return PredictionOutput(predictions=predictions.predictions , label_ids=predictions.label_ids , metrics=metrics )
| 34 |
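# Both overrides above time the evaluation loop with `speed_metrics`. A small isolated
# sketch of that helper (the sample counts are invented for illustration):
import math
import time

from transformers.trainer_utils import speed_metrics

start_time = time.time()
time.sleep(0.1)  # stand-in for the actual evaluation loop

num_samples, total_batch_size = 1000, 32
print(
    speed_metrics(
        "eval",
        start_time,
        num_samples=num_samples,
        num_steps=math.ceil(num_samples / total_batch_size),
    )
)  # e.g. {'eval_runtime': ..., 'eval_samples_per_second': ..., 'eval_steps_per_second': ...}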
"""simple docstring"""
import unittest
import numpy as np
from transformers import MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING, TF_MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING
from transformers.pipelines import AudioClassificationPipeline, pipeline
from transformers.testing_utils import (
is_pipeline_test,
nested_simplify,
require_tf,
require_torch,
require_torchaudio,
slow,
)
from .test_pipelines_common import ANY
@is_pipeline_test
class __lowercase ( unittest.TestCase ):
'''simple docstring'''
    model_mapping = MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING
    tf_model_mapping = TF_MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING
    def get_test_pipeline( self , model , tokenizer , processor ):
        audio_classifier = AudioClassificationPipeline(model=model , feature_extractor=processor )

        # test with a raw waveform
        audio = np.zeros((34000,) )
        audioa = np.zeros((14000,) )
        return audio_classifier, [audioa, audio]
    def run_pipeline_test( self , audio_classifier , examples ):
        audioa , audio = examples
        output = audio_classifier(audioa )
        # by default a model is initialized with num_labels=2
        self.assertEqual(
            output , [
                {'''score''': ANY(float ), '''label''': ANY(str )},
                {'''score''': ANY(float ), '''label''': ANY(str )},
            ] , )
        output = audio_classifier(audioa , top_k=1 )
        self.assertEqual(
            output , [
                {'''score''': ANY(float ), '''label''': ANY(str )},
            ] , )

        self.run_torchaudio(audio_classifier )
@require_torchaudio
    def run_torchaudio( self , audio_classifier ):
        import datasets

        # test with a local file
        dataset = datasets.load_dataset('''hf-internal-testing/librispeech_asr_dummy''' , '''clean''' , split='''validation''' )
        audio = dataset[0]['''audio''']['''array''']
        output = audio_classifier(audio )
        self.assertEqual(
            output , [
                {'''score''': ANY(float ), '''label''': ANY(str )},
                {'''score''': ANY(float ), '''label''': ANY(str )},
            ] , )
@require_torch
    def test_small_model_pt( self ):
        model_id = '''anton-l/wav2vec2-random-tiny-classifier'''

        audio_classifier = pipeline('''audio-classification''' , model=model_id )

        audio = np.ones((8000,) )
        output = audio_classifier(audio , top_k=4 )
        EXPECTED_OUTPUT = [
            {'''score''': 0.0842, '''label''': '''no'''},
            {'''score''': 0.0838, '''label''': '''up'''},
            {'''score''': 0.0837, '''label''': '''go'''},
            {'''score''': 0.0834, '''label''': '''right'''},
]
        EXPECTED_OUTPUT_PT_2 = [
            {'''score''': 0.0845, '''label''': '''stop'''},
            {'''score''': 0.0844, '''label''': '''on'''},
            {'''score''': 0.0841, '''label''': '''right'''},
            {'''score''': 0.0834, '''label''': '''left'''},
]
        self.assertIn(nested_simplify(output , decimals=4 ) , [EXPECTED_OUTPUT, EXPECTED_OUTPUT_PT_2] )

        audio_dict = {'''array''': np.ones((8000,) ), '''sampling_rate''': audio_classifier.feature_extractor.sampling_rate}
        output = audio_classifier(audio_dict , top_k=4 )
        self.assertIn(nested_simplify(output , decimals=4 ) , [EXPECTED_OUTPUT, EXPECTED_OUTPUT_PT_2] )
@require_torch
@slow
    def test_large_model_pt( self ):
        import datasets

        model_id = '''superb/wav2vec2-base-superb-ks'''

        audio_classifier = pipeline('''audio-classification''' , model=model_id )
        dataset = datasets.load_dataset('''anton-l/superb_dummy''' , '''ks''' , split='''test''' )

        audio = np.array(dataset[3]['''speech'''] , dtype=np.float32 )
        output = audio_classifier(audio , top_k=4 )
self.assertEqual(
            nested_simplify(output , decimals=3 ) , [
                {'''score''': 0.981, '''label''': '''go'''},
                {'''score''': 0.007, '''label''': '''up'''},
                {'''score''': 0.006, '''label''': '''_unknown_'''},
                {'''score''': 0.001, '''label''': '''down'''},
] , )
@require_tf
@unittest.skip('''Audio classification is not implemented for TF''' )
    def test_small_model_tf( self ):
        pass
| 160 | 0 |
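# A hedged end-to-end sketch of the audio-classification pipeline tested above, reusing
# the "superb/wav2vec2-base-superb-ks" checkpoint from the slow test; assumes network
# access and torch installed.
import numpy as np
from transformers import pipeline

classifier = pipeline("audio-classification", model="superb/wav2vec2-base-superb-ks")
waveform = np.zeros((16000,), dtype=np.float32)  # one second of silence at 16 kHz
print(classifier(waveform, top_k=2))  # list of {"score": float, "label": str} dicts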
"""simple docstring"""
def print_max_activities( start: list , finish: list ) -> None:
    n = len(finish )
    print('The following activities are selected:' )

    # The first activity is always selected
    i = 0
    print(i , end=',' )

    # Consider rest of the activities
    for j in range(n ):

        # If this activity has start time greater than
        # or equal to the finish time of previously
        # selected activity, then select it
        if start[j] >= finish[i]:
            print(j , end=',' )
            i = j
if __name__ == "__main__":
import doctest
doctest.testmod()
    start = [1, 3, 0, 5, 8, 5]
    finish = [2, 4, 6, 7, 9, 9]
print_max_activities(start, finish)
| 371 |
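# The same greedy rule (pick the next activity whose start is >= the finish of the last
# selected one) returning indices instead of printing, checked on the demo data above:
def max_activities(start, finish):
    selected, i = [0], 0
    for j in range(1, len(finish)):
        if start[j] >= finish[i]:
            selected.append(j)
            i = j
    return selected

assert max_activities([1, 3, 0, 5, 8, 5], [2, 4, 6, 7, 9, 9]) == [0, 1, 3, 4]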
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
"""configuration_table_transformer""": [
"""TABLE_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP""",
"""TableTransformerConfig""",
"""TableTransformerOnnxConfig""",
]
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["""modeling_table_transformer"""] = [
"""TABLE_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""TableTransformerForObjectDetection""",
"""TableTransformerModel""",
"""TableTransformerPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_table_transformer import (
TABLE_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP,
TableTransformerConfig,
TableTransformerOnnxConfig,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_table_transformer import (
TABLE_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
TableTransformerForObjectDetection,
TableTransformerModel,
TableTransformerPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 80 | 0 |
"""simple docstring"""
from typing import List
from .keymap import KEYMAP, get_character
def mark( key: str ):
    '''simple docstring'''

    def decorator(func ):
        handle = getattr(func , "handle_key" , [] )
        handle += [key]
        setattr(func , "handle_key" , handle )
        return func

    return decorator


def mark_multiple( *keys: List[str] ):
    '''simple docstring'''

    def decorator(func ):
        handle = getattr(func , "handle_key" , [] )
        handle += keys
        setattr(func , "handle_key" , handle )
        return func

    return decorator
class KeyHandler( type ):
    def __new__( cls , name , bases , attrs ):
        new_cls = super().__new__(cls , name , bases , attrs )
        if not hasattr(new_cls , "key_handler" ):
            setattr(new_cls , "key_handler" , {} )
        setattr(new_cls , "handle_input" , KeyHandler.handle_input )

        for value in attrs.values():
            handled_keys = getattr(value , "handle_key" , [] )
            for key in handled_keys:
                new_cls.key_handler[key] = value
        return new_cls

    @staticmethod
    def handle_input( cls ):
        char = get_character()
        if char != KEYMAP["undefined"]:
            char = ord(char )
        handler = cls.key_handler.get(char )
        if handler:
            cls.current_selection = char
            return handler(cls )
        else:
            return None


def register( cls ):
    '''simple docstring'''
    return KeyHandler(cls.__name__ , cls.__bases__ , cls.__dict__.copy() )
| 105 |
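# Quick demonstration of the fixed decorator: it only tags the function with a
# `handle_key` attribute that the KeyHandler metaclass later indexes; "q" is an
# arbitrary key chosen for the demo.
@mark("q")
def quit_menu():
    return "quit"

print(quit_menu.handle_key)  # ['q']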
"""simple docstring"""
import inspect
import os
import sys
import unittest
import accelerate
from accelerate.test_utils import execute_subprocess_async, require_tpu
class __lowerCamelCase ( unittest.TestCase ):
'''simple docstring'''
    def setUp( self ):
        mod_file = inspect.getfile(accelerate.test_utils )
        self.test_file_path = os.path.sep.join(mod_file.split(os.path.sep )[:-1] + ["scripts", "test_script.py"] )
        self.test_dir = os.path.sep.join(inspect.getfile(self.__class__ ).split(os.path.sep )[:-1] )
@require_tpu
    def test_tpu( self ):
        distributed_args = f'''
{self.test_dir}/xla_spawn.py
--num_cores 8
{self.test_file_path}
'''.split()
        cmd = [sys.executable] + distributed_args
        execute_subprocess_async(cmd , env=os.environ.copy() )
| 241 | 0 |
"""simple docstring"""
from __future__ import annotations
class BoyerMooreSearch:
    """simple docstring"""

    def __init__( self , text: str , pattern: str ) -> None:
        self.text , self.pattern = text, pattern
        self.textLen , self.patLen = len(text ), len(pattern )

    def match_in_pattern( self , char: str ) -> int:
        for i in range(self.patLen - 1 , -1 , -1 ):
            if char == self.pattern[i]:
                return i
        return -1

    def mismatch_in_text( self , current_pos: int ) -> int:
        for i in range(self.patLen - 1 , -1 , -1 ):
            if self.pattern[i] != self.text[current_pos + i]:
                return current_pos + i
        return -1

    def bad_character_heuristic( self ) -> list[int]:
        # searches pattern in text and returns index positions
        positions = []
        for i in range(self.textLen - self.patLen + 1 ):
            mismatch_index = self.mismatch_in_text(i )
            if mismatch_index == -1:
                positions.append(i )
            else:
                match_index = self.match_in_pattern(self.text[mismatch_index] )
                i = (
                    mismatch_index - match_index
                )  # shifting index lgtm [py/multiple-definition]
        return positions
text = '''ABAABA'''
pattern = '''AB'''

bms = BoyerMooreSearch(text, pattern)
positions = bms.bad_character_heuristic()
if len(positions) == 0:
print('''No match found''')
else:
print('''Pattern found in following positions: ''')
print(positions)
| 195 |
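# Sanity check of the bad-character heuristic on the same demo inputs; "AB" occurs in
# "ABAABA" at indices 0 and 3.
assert BoyerMooreSearch("ABAABA", "AB").bad_character_heuristic() == [0, 3]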
"""simple docstring"""
from typing import List
from ...configuration_utils import PretrainedConfig
from ...utils import logging
a__ : Tuple = logging.get_logger(__name__)
a__ : List[Any] = {
'''snap-research/efficientformer-l1-300''': (
'''https://huggingface.co/snap-research/efficientformer-l1-300/resolve/main/config.json'''
),
}
class UpperCamelCase_ (PretrainedConfig ):
"""simple docstring"""
snake_case__ : Any = "efficientformer"
    def __init__( self , depths : List[int] = [3, 2, 6, 4] , hidden_sizes : List[int] = [4_8, 9_6, 2_2_4, 4_4_8] , downsamples : List[bool] = [True, True, True, True] , dim : int = 4_4_8 , key_dim : int = 3_2 , attention_ratio : int = 4 , resolution : int = 7 , num_hidden_layers : int = 5 , num_attention_heads : int = 8 , mlp_expansion_ratio : int = 4 , hidden_dropout_prob : float = 0.0 , patch_size : int = 1_6 , num_channels : int = 3 , pool_size : int = 3 , downsample_patch_size : int = 3 , downsample_stride : int = 2 , downsample_pad : int = 1 , drop_path_rate : float = 0.0 , num_meta3d_blocks : int = 1 , distillation : bool = True , use_layer_scale : bool = True , layer_scale_init_value : float = 1E-5 , hidden_act : str = "gelu" , initializer_range : float = 0.02 , layer_norm_eps : float = 1E-12 , image_size : int = 2_2_4 , batch_norm_eps : float = 1E-05 , **kwargs , ) -> None:
        super().__init__(**kwargs )
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.hidden_sizes = hidden_sizes
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.depths = depths
        self.mlp_expansion_ratio = mlp_expansion_ratio
        self.downsamples = downsamples
        self.dim = dim
        self.key_dim = key_dim
        self.attention_ratio = attention_ratio
        self.resolution = resolution
        self.pool_size = pool_size
        self.downsample_patch_size = downsample_patch_size
        self.downsample_stride = downsample_stride
        self.downsample_pad = downsample_pad
        self.drop_path_rate = drop_path_rate
        self.num_meta3d_blocks = num_meta3d_blocks
        self.distillation = distillation
        self.use_layer_scale = use_layer_scale
        self.layer_scale_init_value = layer_scale_init_value
        self.image_size = image_size
        self.batch_norm_eps = batch_norm_eps
| 195 | 1 |
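# A hedged instantiation sketch: with no arguments the config mirrors the defaults in
# the signature above (requires transformers installed).
from transformers import EfficientFormerConfig

config = EfficientFormerConfig()
print(config.depths, config.num_meta3d_blocks)  # [3, 2, 6, 4] 1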
"""simple docstring"""
import warnings
from ...utils import logging
from .image_processing_mobilevit import MobileViTImageProcessor
__A : Tuple = logging.get_logger(__name__)
class _a ( MobileViTImageProcessor):
    """simple docstring"""

    def __init__( self , *args , **kwargs ):
        warnings.warn(
            '''The class MobileViTFeatureExtractor is deprecated and will be removed in version 5 of Transformers.'''
            ''' Please use MobileViTImageProcessor instead.''' , FutureWarning , )
        super().__init__(*args , **kwargs )
| 260 |
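# Migrating off the deprecated shim is a one-line change; the "apple/mobilevit-small"
# checkpoint name is an illustrative choice, not taken from this file.
from transformers import MobileViTImageProcessor

processor = MobileViTImageProcessor.from_pretrained("apple/mobilevit-small")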
from collections import defaultdict
from graphs.minimum_spanning_tree_prims import prisms_algorithm as mst
def __snake_case ( ):
    num_nodes , num_edges = 9, 14  # noqa: F841
    edges = [
[0, 1, 4],
[0, 7, 8],
[1, 2, 8],
[7, 8, 7],
[7, 6, 1],
[2, 8, 2],
[8, 6, 6],
[2, 3, 7],
[2, 5, 4],
[6, 5, 2],
[3, 5, 14],
[3, 4, 9],
[5, 4, 10],
[1, 7, 11],
]
    adjancency = defaultdict(list )
    for nodea, nodeb, cost in edges:
        adjancency[nodea].append([nodeb, cost] )
        adjancency[nodeb].append([nodea, cost] )
    result = mst(adjancency )

    expected = [
[7, 6, 1],
[2, 8, 2],
[6, 5, 2],
[0, 1, 4],
[2, 5, 4],
[2, 3, 7],
[0, 7, 8],
[3, 4, 9],
]
for answer in expected:
        edge = tuple(answer[:2] )
        reverse = tuple(edge[::-1] )
assert edge in result or reverse in result
| 49 | 0 |
'''simple docstring'''
from typing import List, Union
import numpy as np
from ..utils import add_end_docstrings, is_torch_available, is_vision_available, logging, requires_backends
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_vision_available():
from PIL import Image
from ..image_utils import load_image
if is_torch_available():
import torch
from ..models.auto.modeling_auto import MODEL_FOR_DEPTH_ESTIMATION_MAPPING
_SCREAMING_SNAKE_CASE : List[Any] = logging.get_logger(__name__)
@add_end_docstrings(PIPELINE_INIT_ARGS )
class _snake_case ( Pipeline ):
    def __init__( self , *args , **kwargs ):
        '''simple docstring'''
        super().__init__(*args , **kwargs )
        requires_backends(self , "vision" )
        self.check_model_type(MODEL_FOR_DEPTH_ESTIMATION_MAPPING )

    def __call__( self , images , **kwargs ):
        '''simple docstring'''
        return super().__call__(images , **kwargs )

    def _sanitize_parameters( self , **kwargs ):
        '''simple docstring'''
        return {}, {}, {}

    def preprocess( self , image ):
        '''simple docstring'''
        image = load_image(image )
        self.image_size = image.size
        model_inputs = self.image_processor(images=image , return_tensors=self.framework )
        return model_inputs

    def _forward( self , model_inputs ):
        '''simple docstring'''
        model_outputs = self.model(**model_inputs )
        return model_outputs

    def postprocess( self , model_outputs ):
        '''simple docstring'''
        predicted_depth = model_outputs.predicted_depth
        prediction = torch.nn.functional.interpolate(
            predicted_depth.unsqueeze(1 ) , size=self.image_size[::-1] , mode="bicubic" , align_corners=False )
        output = prediction.squeeze().cpu().numpy()
        formatted = (output * 255 / np.max(output )).astype("uint8" )
        depth = Image.fromarray(formatted )

        output_dict = {}
        output_dict["predicted_depth"] = predicted_depth
        output_dict["depth"] = depth
        return output_dict
| 371 |
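# The postprocess step above is bicubic resizing plus 8-bit normalization; a
# self-contained sketch with a random tensor standing in for a real model output:
import numpy as np
import torch

pred = torch.rand(24, 32)  # stand-in for a (H, W) predicted-depth map
resized = torch.nn.functional.interpolate(
    pred.unsqueeze(0).unsqueeze(0),  # add batch and channel dims
    size=(480, 640), mode="bicubic", align_corners=False,
).squeeze().numpy()
resized = np.clip(resized, 0, None)  # clip tiny bicubic undershoots before casting
depth_u8 = (resized * 255 / np.max(resized)).astype("uint8")
print(depth_u8.shape, depth_u8.dtype)  # (480, 640) uint8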
'''simple docstring'''
import unittest
import numpy as np
import torch
from diffusers import VersatileDiffusionImageVariationPipeline
from diffusers.utils.testing_utils import load_image, require_torch_gpu, slow, torch_device
_SCREAMING_SNAKE_CASE : Any = False
class _snake_case ( unittest.TestCase ):
pass
@slow
@require_torch_gpu
class _snake_case ( unittest.TestCase ):
    def test_inference_image_variations( self ):
'''simple docstring'''
        pipe = VersatileDiffusionImageVariationPipeline.from_pretrained("shi-labs/versatile-diffusion" )
        pipe.to(torch_device )
        pipe.set_progress_bar_config(disable=None )

        image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/versatile_diffusion/benz.jpg" )
        generator = torch.manual_seed(0 )
        image = pipe(
            image=image , generator=generator , guidance_scale=7.5 , num_inference_steps=50 , output_type="numpy" , ).images

        image_slice = image[0, 253:256, 253:256, -1]
assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array([0.0441, 0.0469, 0.0507, 0.0575, 0.0632, 0.0650, 0.0865, 0.0909, 0.0945] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
| 92 | 0 |
from typing import Callable, List, Optional, Tuple, Union
import torch
from transformers import CLIPTextModel, CLIPTokenizer
from ...configuration_utils import ConfigMixin, register_to_config
from ...models import ModelMixin, Transformer2DModel, VQModel
from ...schedulers import VQDiffusionScheduler
from ...utils import logging
from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput
logger = logging.get_logger(__name__) # pylint: disable=invalid-name
class LearnedClassifierFreeSamplingEmbeddings( ModelMixin , ConfigMixin ):
    @register_to_config
    def __init__( self , learnable : bool , hidden_size : Optional[int] = None , length : Optional[int] = None ):
        """simple docstring"""
        super().__init__()
        self.learnable = learnable

        if self.learnable:
            assert hidden_size is not None, "learnable=True requires `hidden_size` to be set"
            assert length is not None, "learnable=True requires `length` to be set"

            embeddings = torch.zeros(length , hidden_size )
        else:
            embeddings = None

        self.embeddings = torch.nn.Parameter(embeddings )
class VQDiffusionPipeline( DiffusionPipeline ):
    vqvae : VQModel
    text_encoder : CLIPTextModel
    tokenizer : CLIPTokenizer
    transformer : Transformer2DModel
    learned_classifier_free_sampling_embeddings : LearnedClassifierFreeSamplingEmbeddings
    scheduler : VQDiffusionScheduler

    def __init__( self , vqvae : VQModel , text_encoder : CLIPTextModel , tokenizer : CLIPTokenizer , transformer : Transformer2DModel , scheduler : VQDiffusionScheduler , learned_classifier_free_sampling_embeddings : LearnedClassifierFreeSamplingEmbeddings , ):
        """simple docstring"""
        super().__init__()

        self.register_modules(
            vqvae=vqvae , transformer=transformer , text_encoder=text_encoder , tokenizer=tokenizer , scheduler=scheduler , learned_classifier_free_sampling_embeddings=learned_classifier_free_sampling_embeddings , )
    def _encode_prompt( self , prompt , num_images_per_prompt , do_classifier_free_guidance ):
        """simple docstring"""
        batch_size = len(prompt ) if isinstance(prompt , list ) else 1

        # get prompt text embeddings
        text_inputs = self.tokenizer(
            prompt , padding='''max_length''' , max_length=self.tokenizer.model_max_length , return_tensors='''pt''' , )
        text_input_ids = text_inputs.input_ids

        if text_input_ids.shape[-1] > self.tokenizer.model_max_length:
            removed_text = self.tokenizer.batch_decode(text_input_ids[:, self.tokenizer.model_max_length :] )
            logger.warning(
                '''The following part of your input was truncated because CLIP can only handle sequences up to'''
                f""" {self.tokenizer.model_max_length} tokens: {removed_text}""" )
            text_input_ids = text_input_ids[:, : self.tokenizer.model_max_length]
        prompt_embeds = self.text_encoder(text_input_ids.to(self.device ) )[0]

        # NOTE: This additional step of normalizing the text embeddings is from VQ-Diffusion.
        # While CLIP does normalize the pooled output of the text transformer when combining
        # the image and text embeddings, CLIP does not directly normalize the last hidden state.
        #
        # CLIP normalizing the pooled output.
        # https://github.com/huggingface/transformers/blob/d92e22d1f28324f513f3080e5c47c071a3916721/src/transformers/models/clip/modeling_clip.py#L1052-L1053
        prompt_embeds = prompt_embeds / prompt_embeds.norm(dim=-1 , keepdim=True )

        # duplicate text embeddings for each generation per prompt
        prompt_embeds = prompt_embeds.repeat_interleave(num_images_per_prompt , dim=0 )

        if do_classifier_free_guidance:
            if self.learned_classifier_free_sampling_embeddings.learnable:
                negative_prompt_embeds = self.learned_classifier_free_sampling_embeddings.embeddings
                negative_prompt_embeds = negative_prompt_embeds.unsqueeze(0 ).repeat(batch_size , 1 , 1 )
            else:
                uncond_tokens = [''''''] * batch_size

                max_length = text_input_ids.shape[-1]
                uncond_input = self.tokenizer(
                    uncond_tokens , padding='''max_length''' , max_length=max_length , truncation=True , return_tensors='''pt''' , )
                negative_prompt_embeds = self.text_encoder(uncond_input.input_ids.to(self.device ) )[0]
                # See comment for normalizing text embeddings
                negative_prompt_embeds = negative_prompt_embeds / negative_prompt_embeds.norm(dim=-1 , keepdim=True )

            # duplicate unconditional embeddings for each generation per prompt, using mps friendly method
            seq_len = negative_prompt_embeds.shape[1]
            negative_prompt_embeds = negative_prompt_embeds.repeat(1 , num_images_per_prompt , 1 )
            negative_prompt_embeds = negative_prompt_embeds.view(batch_size * num_images_per_prompt , seq_len , -1 )

            # For classifier free guidance, we need to do two forward passes.
            # Here we concatenate the unconditional and text embeddings into a single batch
            # to avoid doing two forward passes
            prompt_embeds = torch.cat([negative_prompt_embeds, prompt_embeds] )

        return prompt_embeds
@torch.no_grad()
    def __call__( self , prompt : Union[str, List[str]] , num_inference_steps : int = 100 , guidance_scale : float = 5.0 , truncation_rate : float = 1.0 , num_images_per_prompt : int = 1 , generator : Optional[Union[torch.Generator, List[torch.Generator]]] = None , latents : Optional[torch.FloatTensor] = None , output_type : Optional[str] = "pil" , return_dict : bool = True , callback : Optional[Callable[[int, int, torch.FloatTensor], None]] = None , callback_steps : int = 1 , ) -> Union[ImagePipelineOutput, Tuple]:
        """simple docstring"""
        if isinstance(prompt , str ):
            batch_size = 1
        elif isinstance(prompt , list ):
            batch_size = len(prompt )
        else:
            raise ValueError(f"""`prompt` has to be of type `str` or `list` but is {type(prompt )}""" )

        batch_size = batch_size * num_images_per_prompt

        do_classifier_free_guidance = guidance_scale > 1.0

        prompt_embeds = self._encode_prompt(prompt , num_images_per_prompt , do_classifier_free_guidance )

        if (callback_steps is None) or (
            callback_steps is not None and (not isinstance(callback_steps , int ) or callback_steps <= 0)
        ):
            raise ValueError(
                f"""`callback_steps` has to be a positive integer but is {callback_steps} of type"""
                f""" {type(callback_steps )}.""" )

        # get the initial completely masked latents unless the user supplied it
        latents_shape = (batch_size, self.transformer.num_latent_pixels)
        if latents is None:
            mask_class = self.transformer.num_vector_embeds - 1
            latents = torch.full(latents_shape , mask_class ).to(self.device )
        else:
            if latents.shape != latents_shape:
                raise ValueError(f"""Unexpected latents shape, got {latents.shape}, expected {latents_shape}""" )
            if (latents < 0).any() or (latents >= self.transformer.num_vector_embeds).any():
                raise ValueError(
                    '''Unexpected latents value(s). All latents be valid embedding indices i.e. in the range 0,'''
                    f""" {self.transformer.num_vector_embeds - 1} (inclusive).""" )
            latents = latents.to(self.device )

        # set timesteps
        self.scheduler.set_timesteps(num_inference_steps , device=self.device )

        timesteps_tensor = self.scheduler.timesteps.to(self.device )

        sample = latents

        for i, t in enumerate(self.progress_bar(timesteps_tensor ) ):
            # expand the sample if we are doing classifier free guidance
            latent_model_input = torch.cat([sample] * 2 ) if do_classifier_free_guidance else sample

            # predict the un-noised image
            # model_output == `log_p_x_0`
            model_output = self.transformer(latent_model_input , encoder_hidden_states=prompt_embeds , timestep=t ).sample

            if do_classifier_free_guidance:
                model_output_uncond , model_output_text = model_output.chunk(2 )
                model_output = model_output_uncond + guidance_scale * (model_output_text - model_output_uncond)
                model_output -= torch.logsumexp(model_output , dim=1 , keepdim=True )

            model_output = self.truncate(model_output , truncation_rate )

            # remove `log(0)`'s (`-inf`s)
            model_output = model_output.clamp(-70 )

            # compute the previous noisy sample x_t -> x_t-1
            sample = self.scheduler.step(model_output , timestep=t , sample=sample , generator=generator ).prev_sample

            # call the callback, if provided
            if callback is not None and i % callback_steps == 0:
                callback(i , t , sample )

        embedding_channels = self.vqvae.config.vq_embed_dim
        embeddings_shape = (batch_size, self.transformer.height, self.transformer.width, embedding_channels)
        embeddings = self.vqvae.quantize.get_codebook_entry(sample , shape=embeddings_shape )
        image = self.vqvae.decode(embeddings , force_not_quantize=True ).sample

        image = (image / 2 + 0.5).clamp(0 , 1 )
        image = image.cpu().permute(0 , 2 , 3 , 1 ).numpy()

        if output_type == "pil":
            image = self.numpy_to_pil(image )

        if not return_dict:
            return (image,)

        return ImagePipelineOutput(images=image )
    def truncate( self , log_p_x_0 : torch.FloatTensor , truncation_rate : float ) -> torch.FloatTensor:
        """simple docstring"""
        sorted_log_p_x_0 , indices = torch.sort(log_p_x_0 , 1 , descending=True )
        sorted_p_x_0 = torch.exp(sorted_log_p_x_0 )
        keep_mask = sorted_p_x_0.cumsum(dim=1 ) < truncation_rate

        # Ensure that at least the largest probability is not zeroed out
        all_true = torch.full_like(keep_mask[:, 0:1, :] , True )
        keep_mask = torch.cat((all_true, keep_mask) , dim=1 )
        keep_mask = keep_mask[:, :-1, :]

        keep_mask = keep_mask.gather(1 , indices.argsort(1 ) )

        rv = log_p_x_0.clone()

        rv[~keep_mask] = -torch.inf  # -inf = log(0)

        return rv
| 308 |
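# A toy (batch=1, vocab=4, pixels=1) walk-through of the keep-mask trick in `truncate`:
# shifting the cumulative mask down by one keeps every token whose *preceding*
# cumulative mass is below the truncation rate, so the token that crosses it survives.
import torch

log_p = torch.log(torch.tensor([[0.5, 0.3, 0.15, 0.05]])).unsqueeze(-1)  # already sorted
sorted_lp, indices = torch.sort(log_p, 1, descending=True)
keep = torch.exp(sorted_lp).cumsum(dim=1) < 0.9              # [True, True, False, False]
keep = torch.cat((torch.full_like(keep[:, 0:1, :], True), keep), dim=1)[:, :-1, :]
print(keep.squeeze(-1))                                      # [[True, True, True, False]]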
import os
import tempfile
import unittest
from transformers.models.marian.convert_marian_tatoeba_to_pytorch import DEFAULT_REPO, TatoebaConverter
from transformers.testing_utils import slow
from transformers.utils import cached_property
@unittest.skipUnless(os.path.exists(DEFAULT_REPO ) , '''Tatoeba directory does not exist.''' )
class _A ( unittest.TestCase ):
@cached_property
    def resolver( self ):
"""simple docstring"""
        tmp_dir = tempfile.mkdtemp()
        return TatoebaConverter(save_dir=tmp_dir )
@slow
    def test_resolver( self ):
"""simple docstring"""
self.resolver.convert_models(['''heb-eng'''] )
@slow
    def test_model_card( self ):
        """simple docstring"""
        content , mmeta = self.resolver.write_model_card('''opus-mt-he-en''' , dry_run=True )
        assert mmeta["long_pair"] == "heb-eng"
| 308 | 1 |
def infix_2_postfix( infix ):
    '''simple docstring'''
    stack = []
    post_fix = []
    priority = {
        '''^''': 3,
        '''*''': 2,
        '''/''': 2,
        '''%''': 2,
        '''+''': 1,
        '''-''': 1,
    }  # Priority of each operator
    print_width = len(infix ) if (len(infix ) > 7) else 7

    # Print table header for output
    print(
        '''Symbol'''.center(8 ) , '''Stack'''.center(print_width ) , '''Postfix'''.center(print_width ) , sep=''' | ''' , )
    print('''-''' * (print_width * 3 + 7) )

    for x in infix:
        if x.isalpha() or x.isdigit():
            post_fix.append(x )  # if x is Alphabet / Digit, add it to Postfix
        elif x == "(":
            stack.append(x )  # if x is "(" push to Stack
        elif x == ")":  # if x is ")" pop stack until "(" is encountered
            while stack[-1] != "(":
                post_fix.append(stack.pop() )  # Pop stack & add the content to Postfix
            stack.pop()
        else:
            if len(stack ) == 0:
                stack.append(x )  # If stack is empty, push x to stack
            else:  # while priority of x is not > priority of element in the stack
                while len(stack ) > 0 and priority[x] <= priority[stack[-1]]:
                    post_fix.append(stack.pop() )  # pop stack & add to Postfix
                stack.append(x )  # push x to stack
        print(
            x.center(8 ) , (''''''.join(stack )).ljust(print_width ) , (''''''.join(post_fix )).ljust(print_width ) , sep=''' | ''' , )  # Output in tabular format

    while len(stack ) > 0:  # while stack is not empty
        post_fix.append(stack.pop() )  # pop stack & add to Postfix
        print(
            ''' '''.center(8 ) , (''''''.join(stack )).ljust(print_width ) , (''''''.join(post_fix )).ljust(print_width ) , sep=''' | ''' , )  # Output in tabular format

    return "".join(post_fix )  # return Postfix as str


def infix_2_prefix( infix ):
    '''simple docstring'''
    infix = list(infix[::-1] )  # reverse the infix equation

    for i in range(len(infix ) ):
        if infix[i] == "(":
            infix[i] = ''')'''  # change "(" to ")"
        elif infix[i] == ")":
            infix[i] = '''('''  # change ")" to "("

    return (infix_2_postfix(''''''.join(infix ) ))[
        ::-1
    ]  # call infix_2_postfix on Infix, return reverse of Postfix
if __name__ == "__main__":
__magic_name__ = input("\nEnter an Infix Equation = ") # Input an Infix equation
__magic_name__ = "".join(Infix.split()) # Remove spaces from the input
print("\n\t", Infix, "(Infix) -> ", infix_2_prefix(Infix), "(Prefix)")
| 152 |
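# Quick correctness check of the two conversions (both functions also print their
# trace tables as a side effect):
assert infix_2_postfix("a+b*c") == "abc*+"
assert infix_2_prefix("a+b*c") == "+a*bc"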
import collections
import json
import os
import re
from typing import TYPE_CHECKING, List, Optional, Tuple
import numpy as np
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
if TYPE_CHECKING:
from transformers.pipelines.conversational import Conversation
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "vocab.txt", "emoji_file": "emoji.json"}

PRETRAINED_VOCAB_FILES_MAP = {
"vocab_file": {
"abeja/gpt-neox-japanese-2.7b": "https://huggingface.co/abeja/gpt-neox-japanese-2.7b/resolve/main/vocab.txt",
},
"emoji_file": {
"abeja/gpt-neox-japanese-2.7b": "https://huggingface.co/abeja/gpt-neox-japanese-2.7b/resolve/main/emoji.json",
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
"abeja/gpt-neox-japanese-2.7b": 2048,
}
def load_vocab_and_emoji( vocab_file , emoji_file ):
    '''simple docstring'''
    with open(emoji_file , '''r''' , encoding='''utf-8''' ) as f:
        emoji = json.loads(f.read() )

    vocab = collections.OrderedDict()
    raw_vocab = collections.OrderedDict()
    ids_to_tokens = collections.OrderedDict()
    with open(vocab_file , '''r''' , encoding='''utf-8''' ) as f:
        token = f.readlines()
    token = [[t.rstrip('''\n''' )] if (t == ''',''' or ''',''' not in t) else t.rstrip('''\n''' ).split(''',''' ) for t in token]
    for idx, b in enumerate(token ):
        ids_to_tokens[idx] = b
        raw_vocab[''','''.join(b )] = idx
        for wd in b:
            vocab[wd] = idx

    return vocab, raw_vocab, ids_to_tokens, emoji
class lowercase ( PreTrainedTokenizer ):
'''simple docstring'''
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["""input_ids""", """attention_mask"""]
def __init__( self , _snake_case , _snake_case , _snake_case="<|endoftext|>" , _snake_case="<|endoftext|>" , _snake_case="<|startoftext|>" , _snake_case="<|endoftext|>" , _snake_case=False , **_snake_case , ) -> Tuple:
"""simple docstring"""
super().__init__(
unk_token=_snake_case , pad_token=_snake_case , bos_token=_snake_case , eos_token=_snake_case , do_clean_text=_snake_case , **_snake_case , )
if not os.path.isfile(_snake_case ):
raise ValueError(
f"""Can't find a vocabulary file at path '{vocab_file}'. To load the vocabulary from a Google pretrained"""
''' model use `tokenizer = GPTNeoXJapaneseokenizer.from_pretrained(PRETRAINED_MODEL_NAME)`''' )
if not os.path.isfile(_snake_case ):
raise ValueError(
f"""Can't find a emoji file at path '{emoji_file}'. To load the emoji information from a Google"""
''' pretrained model use `tokenizer = GPTNeoXJapaneseokenizer.from_pretrained(PRETRAINED_MODEL_NAME)`''' )
UpperCAmelCase = do_clean_text
UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase = load_vocab_and_emoji(_snake_case , _snake_case )
UpperCAmelCase = SubWordJapaneseTokenizer(
vocab=self.vocab , ids_to_tokens=self.ids_to_tokens , emoji=self.emoji )
    @property
    def vocab_size( self ):
        """simple docstring"""
        # self.vocab contains support for character fluctuation unique to Japanese, and has a large number of vocab
        return len(self.raw_vocab )

    def get_vocab( self ):
        """simple docstring"""
        return dict(self.raw_vocab , **self.added_tokens_encoder )

    def _tokenize( self , text ):
        """simple docstring"""
        return self.subword_tokenizer.tokenize(text , clean=self.do_clean_text )

    def _convert_token_to_id( self , token ):
        """simple docstring"""
        return self.vocab.get(token , self.vocab.get(self.unk_token ) )

    def _convert_id_to_token( self , index ):
        """simple docstring"""
        return self.subword_tokenizer.convert_id_to_token(index )

    def convert_tokens_to_string( self , tokens ):
        """simple docstring"""
        out_string = ''''''.join(tokens ).strip()
        return out_string

    def _build_conversation_input_ids( self , conversation ) -> List[int]:
        """simple docstring"""
        input_ids = []
        for is_user, text in conversation.iter_texts():
            input_ids.extend(self.encode(text , add_special_tokens=False ) + [self.eos_token_id] )

        if len(input_ids ) > self.model_max_length:
            input_ids = input_ids[-self.model_max_length :]
        return input_ids

    def save_vocabulary( self , save_directory , filename_prefix = None ) -> Tuple[str]:
        """simple docstring"""
        index = 0
        if os.path.isdir(save_directory ):
            vocab_file = os.path.join(
                save_directory , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''vocab_file'''] )
            emoji_file = os.path.join(
                save_directory , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''emoji_file'''] )
        else:
            vocab_file = (
                (filename_prefix + '''-''' if filename_prefix else '''''') + save_directory + VOCAB_FILES_NAMES['''vocab_file''']
            )
            emoji_file = (
                (filename_prefix + '''-''' if filename_prefix else '''''') + save_directory + VOCAB_FILES_NAMES['''emoji_file''']
            )
        with open(vocab_file , '''w''' , encoding='''utf-8''' ) as writer:
            for token_index, token in self.ids_to_tokens.items():
                if index != token_index:
                    logger.warning(
                        f"""Saving vocabulary to {vocab_file}: vocabulary indices are not consecutive."""
                        ''' Please check that the vocabulary is not corrupted!''' )
                    index = token_index
                writer.write(''','''.join(token ) + '''\n''' )
                index += 1
        with open(emoji_file , '''w''' , encoding='''utf-8''' ) as writer:
            json.dump(self.emoji , writer )

        return vocab_file, emoji_file
class SubWordJapaneseTokenizer( object ):
    '''simple docstring'''

    def __init__( self , vocab , ids_to_tokens , emoji ):
        """simple docstring"""
        self.vocab = vocab  # same as swe
        self.ids_to_tokens = ids_to_tokens  # same as bpe
        self.emoji = emoji
        self.maxlen = np.max([len(w ) for w in self.vocab.keys()] )
        self.content_repatter1 = re.compile(R'''(https?|ftp)(:\/\/[-_\.!~*\'()a-zA-Z0-9;\/?:\@&=\+$,%#]+)''' )
        self.content_repatter2 = re.compile(R'''[A-Za-z0-9\._+]*@[\-_0-9A-Za-z]+(\.[A-Za-z]+)*''' )
        self.content_repatter3 = re.compile(R'''[\(]{0,1}[0-9]{2,4}[\)\-\(]{0,1}[0-9]{2,4}[\)\-]{0,1}[0-9]{3,4}''' )
        self.content_repatter4 = re.compile(
            R'''([12]\d{3}[/\-年])*(0?[1-9]|1[0-2])[/\-月]((0?[1-9]|[12][0-9]|3[01])日?)*(\d{1,2}|:|\d{1,2}時|\d{1,2}分|\(日\)|\(月\)|\(火\)|\(水\)|\(木\)|\(金\)|\(土\)|㈰|㈪|㈫|㈬|㈭|㈮|㈯)*''' )
        self.content_repatter5 = re.compile(
            R'''(明治|大正|昭和|平成|令和|㍾|㍽|㍼|㍻|\u32ff)\d{1,2}年(0?[1-9]|1[0-2])月(0?[1-9]|[12][0-9]|3[01])日(\d{1,2}|:|\d{1,2}時|\d{1,2}分|\(日\)|\(月\)|\(火\)|\(水\)|\(木\)|\(金\)|\(土\)|㈰|㈪|㈫|㈬|㈭|㈮|㈯)*''' )
        self.content_repatter6 = re.compile(
            R'''((0|[1-9]\d*|[1-9]\d{0,2}(,\d{3})+)*億)*((0|[1-9]\d*|[1-9]\d{0,2}(,\d{3})+)*万)*((0|[1-9]\d*|[1-9]\d{0,2}(,\d{3})+)*千)*(0|[1-9]\d*|[1-9]\d{0,2}(,\d{3})+)*(千円|万円|千万円|円|千ドル|万ドル|千万ドル|ドル|千ユーロ|万ユーロ|千万ユーロ|ユーロ)+(\(税込\)|\(税抜\)|\+tax)*''' )
        keisen = '''─━│┃┄┅┆┇┈┉┊┋┌┍┎┏┐┑┒┓└┕┖┗┘┙┚┛├┝┞┟┠┡┢┣┤┥┦┧┨┩┪┫┬┭┮┯┰┱┲┳┴┵┶┷┸┹┺┻┼┽┾┿╀╁╂╃╄╅╆╇╈╉╊╋╌╍╎╏═║╒╓╔╕╖╗╘╙╚╛╜╝╞╟╠╡╢╣╤╥╦╧╨╩╪╫╬╭╮╯╰╱╲╳╴╵╶╷╸╹╺╻╼╽╾╿'''
        blocks = '''▀▁▂▃▄▅▆▇█▉▊▋▌▍▎▏▐░▒▓▔▕▖▗▘▙▚▛▜▝▞▟'''
        self.content_trans1 = str.maketrans({k: '''<BLOCK>''' for k in keisen + blocks} )

    def __len__( self ):
        """simple docstring"""
        return len(self.ids_to_tokens )
    def clean_text( self , content ) -> str:
        """simple docstring"""
        content = self.content_repatter1.sub('''<URL>''' , content )
        content = self.content_repatter2.sub('''<EMAIL>''' , content )
        content = self.content_repatter3.sub('''<TEL>''' , content )
        content = self.content_repatter4.sub('''<DATE>''' , content )
        content = self.content_repatter5.sub('''<DATE>''' , content )
        content = self.content_repatter6.sub('''<PRICE>''' , content )
        content = content.translate(self.content_trans1 )
        while "<BLOCK><BLOCK>" in content:
            content = content.replace('''<BLOCK><BLOCK>''' , '''<BLOCK>''' )
        return content
    def tokenize( self , text , clean=False ):
        """simple docstring"""
        text = text.replace(''' ''' , '''<SP>''' )
        text = text.replace('''　''' , '''<SP>''' )
        text = text.replace('''\r\n''' , '''<BR>''' )
        text = text.replace('''\n''' , '''<BR>''' )
        text = text.replace('''\r''' , '''<BR>''' )
        text = text.replace('''\t''' , '''<TAB>''' )
        text = text.replace('''—''' , '''ー''' )
        text = text.replace('''−''' , '''ー''' )
        for k, v in self.emoji['''emoji'''].items():
            if k in text:
                text = text.replace(k , v )
        if clean:
            text = self.clean_text(text )

        def check_simbol(x ):
            e = x.encode()
            if len(x ) == 1 and len(e ) == 2:
                c = (int(e[0] ) << 8) + int(e[1] )
                if (
                    (c >= 0xC2A1 and c <= 0xC2BF)
                    or (c >= 0xC780 and c <= 0xC783)
                    or (c >= 0xCAB9 and c <= 0xCBBF)
                    or (c >= 0xCC80 and c <= 0xCDA2)
                ):
                    return True
            return False

        def checku2e(x ):
            e = x.encode()
            if len(x ) == 1 and len(e ) == 3:
                c = (int(e[0] ) << 16) + (int(e[1] ) << 8) + int(e[2] )
                if c >= 0xE28080 and c <= 0xE2B07F:
                    return True
            return False

        pos = 0
        result = []
        while pos < len(text ):
            end = min(len(text ) , pos + self.maxlen + 1 ) if text[pos] == '''<''' else pos + 3
            candidates = []  # (token_id, token, pos)
            for e in range(end , pos , -1 ):
                wd = text[pos:e]
                if wd in self.vocab:
                    if wd[0] == "<" and len(wd ) > 2:
                        candidates = [(self.vocab[wd], wd, e)]
                        break
                    else:
                        candidates.append((self.vocab[wd], wd, e) )
            if len(candidates ) > 0:
                # the smallest token_id is adopted
                _ , wd , e = sorted(candidates , key=lambda x: x[0] )[0]
                result.append(wd )
                pos = e
            else:
                end = pos + 1
                wd = text[pos:end]
                if check_simbol(wd ):
                    result.append('''<KIGOU>''' )
                elif checku2e(wd ):
                    result.append('''<U2000U2BFF>''' )
                else:
                    for i in wd.encode('''utf-8''' ):
                        result.append('''<|byte%d|>''' % i )
                pos = end
        return result
def snake_case_ ( self , _snake_case , _snake_case="\n" ) -> Union[str, Any]:
"""simple docstring"""
UpperCAmelCase = []
UpperCAmelCase = []
UpperCAmelCase = self.ids_to_tokens[index][0]
if word[:6] == "<|byte" and word[-2:] == "|>":
byte_tokens.append(int(word[6:-2] ) )
else:
if len(_snake_case ) > 0:
words.append(bytearray(_snake_case ).decode('''utf-8''' , errors='''replace''' ) )
UpperCAmelCase = []
if word[:7] == "<|emoji" and word[-2:] == "|>":
words.append(self.emoji['''emoji_inv'''][word] )
elif word == "<SP>":
words.append(''' ''' )
elif word == "<BR>":
words.append(_snake_case )
elif word == "<TAB>":
words.append('''\t''' )
elif word == "<BLOCK>":
words.append('''▀''' )
elif word == "<KIGOU>":
words.append('''ǀ''' )
elif word == "<U2000U2BFF>":
words.append('''‖''' )
else:
words.append(_snake_case )
if len(_snake_case ) > 0:
words.append(bytearray(_snake_case ).decode('''utf-8''' , errors='''replace''' ) )
UpperCAmelCase = ''''''.join(_snake_case )
return text
| 152 | 1 |
'''simple docstring'''
def _lowerCamelCase ( input_str : str ) -> str:
    return " ".join(input_str.split()[::-1] )
if __name__ == "__main__":
import doctest
doctest.testmod()
| 63 |
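# The one-liner above reverses word order, not characters:
assert " ".join("I love Python".split()[::-1]) == "Python love I"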
'''simple docstring'''
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from tokenizers.pre_tokenizers import BertPreTokenizer, PreTokenizer
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_roformer import RoFormerTokenizer
from .tokenization_utils import JiebaPreTokenizer
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {'vocab_file': 'vocab.txt', 'tokenizer_file': 'tokenizer.json'}

PRETRAINED_VOCAB_FILES_MAP = {
'vocab_file': {
'junnyu/roformer_chinese_small': 'https://huggingface.co/junnyu/roformer_chinese_small/resolve/main/vocab.txt',
'junnyu/roformer_chinese_base': 'https://huggingface.co/junnyu/roformer_chinese_base/resolve/main/vocab.txt',
'junnyu/roformer_chinese_char_small': (
'https://huggingface.co/junnyu/roformer_chinese_char_small/resolve/main/vocab.txt'
),
'junnyu/roformer_chinese_char_base': (
'https://huggingface.co/junnyu/roformer_chinese_char_base/resolve/main/vocab.txt'
),
'junnyu/roformer_small_discriminator': (
'https://huggingface.co/junnyu/roformer_small_discriminator/resolve/main/vocab.txt'
),
'junnyu/roformer_small_generator': (
'https://huggingface.co/junnyu/roformer_small_generator/resolve/main/vocab.txt'
),
}
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
'junnyu/roformer_chinese_small': 1_5_3_6,
'junnyu/roformer_chinese_base': 1_5_3_6,
'junnyu/roformer_chinese_char_small': 5_1_2,
'junnyu/roformer_chinese_char_base': 5_1_2,
'junnyu/roformer_small_discriminator': 1_2_8,
'junnyu/roformer_small_generator': 1_2_8,
}
PRETRAINED_INIT_CONFIGURATION = {
'junnyu/roformer_chinese_small': {'do_lower_case': True},
'junnyu/roformer_chinese_base': {'do_lower_case': True},
'junnyu/roformer_chinese_char_small': {'do_lower_case': True},
'junnyu/roformer_chinese_char_base': {'do_lower_case': True},
'junnyu/roformer_small_discriminator': {'do_lower_case': True},
'junnyu/roformer_small_generator': {'do_lower_case': True},
}
class lowercase_ ( PreTrainedTokenizerFast ):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    pretrained_init_configuration = PRETRAINED_INIT_CONFIGURATION
    slow_tokenizer_class = RoFormerTokenizer
    def __init__( self , vocab_file=None , tokenizer_file=None , do_lower_case=True , unk_token="[UNK]" , sep_token="[SEP]" , pad_token="[PAD]" , cls_token="[CLS]" , mask_token="[MASK]" , tokenize_chinese_chars=True , strip_accents=None , **kwargs , ):
        super().__init__(
            vocab_file , tokenizer_file=tokenizer_file , do_lower_case=do_lower_case , unk_token=unk_token , sep_token=sep_token , pad_token=pad_token , cls_token=cls_token , mask_token=mask_token , tokenize_chinese_chars=tokenize_chinese_chars , strip_accents=strip_accents , **kwargs , )

        pre_tok_state = json.loads(self.backend_tokenizer.normalizer.__getstate__() )
        if (
            pre_tok_state.get("lowercase" , do_lower_case ) != do_lower_case
            or pre_tok_state.get("strip_accents" , strip_accents ) != strip_accents
        ):
            pre_tok_class = getattr(normalizers , pre_tok_state.pop("type" ) )
            pre_tok_state["lowercase"] = do_lower_case
            pre_tok_state["strip_accents"] = strip_accents
            self.backend_tokenizer.normalizer = pre_tok_class(**pre_tok_state )

        self.do_lower_case = do_lower_case

    def __getstate__( self ):
        state = self.__dict__.copy()
        state["_tokenizer"].pre_tokenizer = BertPreTokenizer()
        return state

    def __setstate__( self , d ):
        self.__dict__ = d
        vocab = self.__dict__["_tokenizer"].get_vocab()
        self.__dict__["_tokenizer"].pre_tokenizer = PreTokenizer.custom(JiebaPreTokenizer(vocab ) )

    def build_inputs_with_special_tokens( self , token_ids_a , token_ids_b=None ):
        output = [self.cls_token_id] + token_ids_a + [self.sep_token_id]
        if token_ids_b:
            output += token_ids_b + [self.sep_token_id]
        return output

    def create_token_type_ids_from_sequences( self , token_ids_a , token_ids_b = None ):
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_b is None:
            return len(cls + token_ids_a + sep ) * [0]
        return len(cls + token_ids_a + sep ) * [0] + len(token_ids_b + sep ) * [1]

    def save_vocabulary( self , save_directory , filename_prefix = None ):
        files = self._tokenizer.model.save(save_directory , name=filename_prefix )
        return tuple(files )

    def save_pretrained( self , save_directory , legacy_format=None , filename_prefix=None , push_to_hub=False , **kwargs , ):
        self.backend_tokenizer.pre_tokenizer = BertPreTokenizer()
        return super().save_pretrained(save_directory , legacy_format , filename_prefix , push_to_hub , **kwargs )
| 80 | 0 |
'''simple docstring'''
import unittest
from datasets import load_dataset
from transformers.pipelines import pipeline
from transformers.testing_utils import is_pipeline_test, nested_simplify, require_torch, slow
@is_pipeline_test
@require_torch
class UpperCamelCase_ (unittest.TestCase ):
"""simple docstring"""
@require_torch
    def test_small_model_pt( self ):
        """simple docstring"""
        audio_classifier = pipeline(
            task='''zero-shot-audio-classification''' , model='''hf-internal-testing/tiny-clap-htsat-unfused''' )
        dataset = load_dataset('''ashraq/esc50''' )
        audio = dataset['''train''']['''audio'''][-1]['''array''']
        output = audio_classifier(audio , candidate_labels=['''Sound of a dog''', '''Sound of vaccum cleaner'''] )
        self.assertEqual(
            nested_simplify(output ) , [{'''score''': 0.501, '''label''': '''Sound of a dog'''}, {'''score''': 0.499, '''label''': '''Sound of vaccum cleaner'''}] , )
@unittest.skip('''No models are available in TF''' )
    def test_small_model_tf( self ):
"""simple docstring"""
pass
@slow
@require_torch
    def test_large_model_pt( self ):
        """simple docstring"""
        audio_classifier = pipeline(
            task='''zero-shot-audio-classification''' , model='''laion/clap-htsat-unfused''' , )
        # This is an audio of a dog
        dataset = load_dataset('''ashraq/esc50''' )
        audio = dataset['''train''']['''audio'''][-1]['''array''']
        output = audio_classifier(audio , candidate_labels=['''Sound of a dog''', '''Sound of vaccum cleaner'''] )

        self.assertEqual(
            nested_simplify(output ) , [
                {'''score''': 0.999, '''label''': '''Sound of a dog'''},
                {'''score''': 0.001, '''label''': '''Sound of vaccum cleaner'''},
            ] , )

        output = audio_classifier([audio] * 5 , candidate_labels=['''Sound of a dog''', '''Sound of vaccum cleaner'''] )
        self.assertEqual(
            nested_simplify(output ) , [
                [
                    {'''score''': 0.999, '''label''': '''Sound of a dog'''},
                    {'''score''': 0.001, '''label''': '''Sound of vaccum cleaner'''},
                ],
            ]
            * 5 , )
        output = audio_classifier(
            [audio] * 5 , candidate_labels=['''Sound of a dog''', '''Sound of vaccum cleaner'''] , batch_size=5 )
        self.assertEqual(
            nested_simplify(output ) , [
                [
                    {'''score''': 0.999, '''label''': '''Sound of a dog'''},
                    {'''score''': 0.001, '''label''': '''Sound of vaccum cleaner'''},
                ],
            ]
            * 5 , )
@unittest.skip('''No models are available in TF''' )
def _a ( self : str ):
"""simple docstring"""
pass
| 4 |
'''simple docstring'''
import json
import os
from pathlib import Path
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple, Union
import sentencepiece
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)

SPIECE_UNDERLINE = """▁"""

VOCAB_FILES_NAMES = {
"""vocab_file""": """vocab.json""",
"""spm_file""": """sentencepiece.bpe.model""",
}
PRETRAINED_VOCAB_FILES_MAP = {
"""vocab_file""": {
"""facebook/s2t-small-librispeech-asr""": (
"""https://huggingface.co/facebook/s2t-small-librispeech-asr/resolve/main/vocab.json"""
),
},
"""spm_file""": {
"""facebook/s2t-small-librispeech-asr""": (
"""https://huggingface.co/facebook/s2t-small-librispeech-asr/resolve/main/sentencepiece.bpe.model"""
)
},
}
MAX_MODEL_INPUT_SIZES = {
"""facebook/s2t-small-librispeech-asr""": 10_24,
}
snake_case__ = ["""pt""", """fr""", """ru""", """nl""", """ro""", """it""", """es""", """de"""]
snake_case__ = {"""mustc""": MUSTC_LANGS}
class UpperCamelCase_ (PreTrainedTokenizer ):
"""simple docstring"""
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = MAX_MODEL_INPUT_SIZES
    model_input_names = ['input_ids', 'attention_mask']
    prefix_tokens: List[int] = []
    def __init__( self , vocab_file : str , spm_file : str , bos_token : str="<s>" , eos_token : str="</s>" , pad_token : str="<pad>" , unk_token : str="<unk>" , do_upper_case : bool=False , do_lower_case : bool=False , tgt_lang : Any=None , lang_codes : Any=None , sp_model_kwargs : Optional[Dict[str, Any]] = None , **kwargs : Optional[int] , ):
        """simple docstring"""
        self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs
        super().__init__(
            bos_token=bos_token , eos_token=eos_token , unk_token=unk_token , pad_token=pad_token , do_upper_case=do_upper_case , do_lower_case=do_lower_case , tgt_lang=tgt_lang , lang_codes=lang_codes , sp_model_kwargs=self.sp_model_kwargs , **kwargs , )
        self.do_upper_case = do_upper_case
        self.do_lower_case = do_lower_case

        self.encoder = load_json(vocab_file )
        self.decoder = {v: k for k, v in self.encoder.items()}
        self.spm_file = spm_file
        self.sp_model = load_spm(spm_file , self.sp_model_kwargs )

        if lang_codes is not None:
            self.lang_codes = lang_codes
            self.langs = LANGUAGES[lang_codes]
            self.lang_tokens = [f'<lang:{lang}>' for lang in self.langs]
            self.lang_code_to_id = {lang: self.sp_model.PieceToId(f'<lang:{lang}>' ) for lang in self.langs}

            self._additional_special_tokens = self.lang_tokens
            self._tgt_lang = tgt_lang if tgt_lang is not None else self.langs[0]

            self.set_tgt_lang_special_tokens(self._tgt_lang )
        else:
            self.lang_code_to_id = {}
    @property
    def vocab_size( self ) -> int:
        """simple docstring"""
        return len(self.encoder )

    @property
    def tgt_lang( self ) -> str:
        """simple docstring"""
        return self._tgt_lang

    @tgt_lang.setter
    def tgt_lang( self , new_tgt_lang ) -> None:
        """simple docstring"""
        self._tgt_lang = new_tgt_lang
        self.set_tgt_lang_special_tokens(new_tgt_lang )

    def set_tgt_lang_special_tokens( self , tgt_lang : str ) -> None:
        """simple docstring"""
        lang_code_id = self.lang_code_to_id[tgt_lang]
        self.prefix_tokens = [lang_code_id]

    def _tokenize( self , text : str ):
        """simple docstring"""
        return self.sp_model.encode(text , out_type=str )

    def _convert_token_to_id( self , token ):
        """simple docstring"""
        return self.encoder.get(token , self.encoder[self.unk_token] )

    def _convert_id_to_token( self , index : int ):
        """simple docstring"""
        return self.decoder.get(index , self.unk_token )

    def convert_tokens_to_string( self , tokens : List[str] ):
        """simple docstring"""
        current_sub_tokens = []
        out_string = ''''''
        for token in tokens:
            # make sure that special tokens are not decoded using sentencepiece model
            if token in self.all_special_tokens:
                decoded = self.sp_model.decode(current_sub_tokens )
                out_string += (decoded.upper() if self.do_upper_case else decoded) + token + " "
                current_sub_tokens = []
            else:
                current_sub_tokens.append(token )
        decoded = self.sp_model.decode(current_sub_tokens )
        out_string += decoded.upper() if self.do_upper_case else decoded
        return out_string.strip()

    def build_inputs_with_special_tokens( self , token_ids_a , token_ids_b=None ):
        """simple docstring"""
        if token_ids_b is None:
            return self.prefix_tokens + token_ids_a + [self.eos_token_id]
        # We don't expect to process pairs, but leave the pair logic for API consistency
        return self.prefix_tokens + token_ids_a + token_ids_b + [self.eos_token_id]

    def get_special_tokens_mask( self , token_ids_a : List[int] , token_ids_b : Optional[List[int]] = None , already_has_special_tokens : bool = False ):
        """simple docstring"""
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_a , token_ids_1=token_ids_b , already_has_special_tokens=True )
        prefix_ones = [1] * len(self.prefix_tokens )
        suffix_ones = [1]
        if token_ids_b is None:
            return prefix_ones + ([0] * len(token_ids_a )) + suffix_ones
        return prefix_ones + ([0] * len(token_ids_a )) + ([0] * len(token_ids_b )) + suffix_ones

    def get_vocab( self ):
        """simple docstring"""
        vocab = self.encoder.copy()
        vocab.update(self.added_tokens_encoder )
        return vocab

    def __getstate__( self ):
        """simple docstring"""
        state = self.__dict__.copy()
        state["sp_model"] = None
        return state

    def __setstate__( self , d : Dict ):
        """simple docstring"""
        self.__dict__ = d

        # for backward compatibility
        if not hasattr(self , '''sp_model_kwargs''' ):
            self.sp_model_kwargs = {}

        self.sp_model = load_spm(self.spm_file , self.sp_model_kwargs )

    def save_vocabulary( self , save_directory : str , filename_prefix : Optional[str] = None ) -> Tuple[str]:
        """simple docstring"""
        save_dir = Path(save_directory )
        assert save_dir.is_dir(), f'{save_directory} should be a directory'
        vocab_save_path = save_dir / (
            (filename_prefix + '''-''' if filename_prefix else '''''') + self.vocab_files_names['''vocab_file''']
        )
        spm_save_path = save_dir / (
            (filename_prefix + '''-''' if filename_prefix else '''''') + self.vocab_files_names['''spm_file''']
        )

        save_json(self.encoder , vocab_save_path )

        if os.path.abspath(self.spm_file ) != os.path.abspath(spm_save_path ) and os.path.isfile(self.spm_file ):
            copyfile(self.spm_file , spm_save_path )
        elif not os.path.isfile(self.spm_file ):
            with open(spm_save_path , '''wb''' ) as fi:
                content_spiece_model = self.sp_model.serialized_model_proto()
                fi.write(content_spiece_model )

        return (str(vocab_save_path ), str(spm_save_path ))
def snake_case__ ( lowerCamelCase__ : str , lowerCamelCase__ : Dict[str, Any] ) -> sentencepiece.SentencePieceProcessor:
A_ : Tuple = sentencepiece.SentencePieceProcessor(**lowerCamelCase__ )
spm.Load(str(lowerCamelCase__ ) )
return spm
def snake_case__ ( lowerCamelCase__ : str ) -> Union[Dict, List]:
with open(lowerCamelCase__ , '''r''' ) as f:
return json.load(lowerCamelCase__ )
def snake_case__ ( lowerCamelCase__ : Any , lowerCamelCase__ : str ) -> None:
with open(lowerCamelCase__ , '''w''' ) as f:
json.dump(lowerCamelCase__ , lowerCamelCase__ , indent=2 )
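
# Illustrative round-trip through the helpers above (file names are assumptions, not
# shipped fixtures): save_json writes with indent=2 and load_json reads it back, while
# load_spm needs a real SentencePiece model file on disk.
#   save_json({"<s>": 0, "<pad>": 1}, "vocab.json")
#   assert load_json("vocab.json") == {"<s>": 0, "<pad>": 1}
#   sp = load_spm("sentencepiece.bpe.model", {})  # -> sentencepiece.SentencePieceProcessor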
| 4 | 1 |
from ..utils import DummyObject, requires_backends
class A_ ( metaclass=__lowerCamelCase ):
'''simple docstring'''
_UpperCamelCase : Optional[int] = ["""flax"""]
def __init__( self , *snake_case , **snake_case ):
requires_backends(self , ['flax'] )
@classmethod
def SCREAMING_SNAKE_CASE__ ( cls , *snake_case , **snake_case ):
requires_backends(cls , ['flax'] )
@classmethod
def SCREAMING_SNAKE_CASE__ ( cls , *snake_case , **snake_case ):
requires_backends(cls , ['flax'] )
class A_ ( metaclass=__lowerCamelCase ):
'''simple docstring'''
_UpperCamelCase : Any = ["""flax"""]
def __init__( self , *snake_case , **snake_case ):
requires_backends(self , ['flax'] )
@classmethod
def SCREAMING_SNAKE_CASE__ ( cls , *snake_case , **snake_case ):
requires_backends(cls , ['flax'] )
@classmethod
def SCREAMING_SNAKE_CASE__ ( cls , *snake_case , **snake_case ):
requires_backends(cls , ['flax'] )
class A_ ( metaclass=__lowerCamelCase ):
'''simple docstring'''
_UpperCamelCase : Tuple = ["""flax"""]
def __init__( self , *snake_case , **snake_case ):
requires_backends(self , ['flax'] )
@classmethod
def SCREAMING_SNAKE_CASE__ ( cls , *snake_case , **snake_case ):
requires_backends(cls , ['flax'] )
@classmethod
def SCREAMING_SNAKE_CASE__ ( cls , *snake_case , **snake_case ):
requires_backends(cls , ['flax'] )
class A_ ( metaclass=__lowerCamelCase ):
'''simple docstring'''
_UpperCamelCase : Optional[Any] = ["""flax"""]
def __init__( self , *snake_case , **snake_case ):
requires_backends(self , ['flax'] )
@classmethod
def SCREAMING_SNAKE_CASE__ ( cls , *snake_case , **snake_case ):
requires_backends(cls , ['flax'] )
@classmethod
def SCREAMING_SNAKE_CASE__ ( cls , *snake_case , **snake_case ):
requires_backends(cls , ['flax'] )
class A_ ( metaclass=__lowerCamelCase ):
'''simple docstring'''
_UpperCamelCase : Dict = ["""flax"""]
def __init__( self , *snake_case , **snake_case ):
requires_backends(self , ['flax'] )
@classmethod
def SCREAMING_SNAKE_CASE__ ( cls , *snake_case , **snake_case ):
requires_backends(cls , ['flax'] )
@classmethod
def SCREAMING_SNAKE_CASE__ ( cls , *snake_case , **snake_case ):
requires_backends(cls , ['flax'] )
class A_ ( metaclass=__lowerCamelCase ):
'''simple docstring'''
_UpperCamelCase : List[Any] = ["""flax"""]
def __init__( self , *snake_case , **snake_case ):
requires_backends(self , ['flax'] )
@classmethod
def SCREAMING_SNAKE_CASE__ ( cls , *snake_case , **snake_case ):
requires_backends(cls , ['flax'] )
@classmethod
def SCREAMING_SNAKE_CASE__ ( cls , *snake_case , **snake_case ):
requires_backends(cls , ['flax'] )
class A_ ( metaclass=__lowerCamelCase ):
'''simple docstring'''
_UpperCamelCase : Any = ["""flax"""]
def __init__( self , *snake_case , **snake_case ):
requires_backends(self , ['flax'] )
@classmethod
def SCREAMING_SNAKE_CASE__ ( cls , *snake_case , **snake_case ):
requires_backends(cls , ['flax'] )
@classmethod
def SCREAMING_SNAKE_CASE__ ( cls , *snake_case , **snake_case ):
requires_backends(cls , ['flax'] )
class A_ ( metaclass=__lowerCamelCase ):
'''simple docstring'''
_UpperCamelCase : Dict = ["""flax"""]
def __init__( self , *snake_case , **snake_case ):
requires_backends(self , ['flax'] )
@classmethod
def SCREAMING_SNAKE_CASE__ ( cls , *snake_case , **snake_case ):
requires_backends(cls , ['flax'] )
@classmethod
def SCREAMING_SNAKE_CASE__ ( cls , *snake_case , **snake_case ):
requires_backends(cls , ['flax'] )
class A_ ( metaclass=__lowerCamelCase ):
'''simple docstring'''
_UpperCamelCase : int = ["""flax"""]
def __init__( self , *snake_case , **snake_case ):
requires_backends(self , ['flax'] )
@classmethod
def SCREAMING_SNAKE_CASE__ ( cls , *snake_case , **snake_case ):
requires_backends(cls , ['flax'] )
@classmethod
def SCREAMING_SNAKE_CASE__ ( cls , *snake_case , **snake_case ):
requires_backends(cls , ['flax'] )
class A_ ( metaclass=__lowerCamelCase ):
'''simple docstring'''
_UpperCamelCase : Union[str, Any] = ["""flax"""]
def __init__( self , *snake_case , **snake_case ):
requires_backends(self , ['flax'] )
@classmethod
def SCREAMING_SNAKE_CASE__ ( cls , *snake_case , **snake_case ):
requires_backends(cls , ['flax'] )
@classmethod
def SCREAMING_SNAKE_CASE__ ( cls , *snake_case , **snake_case ):
requires_backends(cls , ['flax'] )
class A_ ( metaclass=__lowerCamelCase ):
'''simple docstring'''
_UpperCamelCase : Tuple = ["""flax"""]
def __init__( self , *snake_case , **snake_case ):
requires_backends(self , ['flax'] )
@classmethod
def SCREAMING_SNAKE_CASE__ ( cls , *snake_case , **snake_case ):
requires_backends(cls , ['flax'] )
@classmethod
def SCREAMING_SNAKE_CASE__ ( cls , *snake_case , **snake_case ):
requires_backends(cls , ['flax'] )
class A_ ( metaclass=__lowerCamelCase ):
'''simple docstring'''
_UpperCamelCase : Union[str, Any] = ["""flax"""]
def __init__( self , *snake_case , **snake_case ):
requires_backends(self , ['flax'] )
@classmethod
def SCREAMING_SNAKE_CASE__ ( cls , *snake_case , **snake_case ):
requires_backends(cls , ['flax'] )
@classmethod
def SCREAMING_SNAKE_CASE__ ( cls , *snake_case , **snake_case ):
requires_backends(cls , ['flax'] )
class A_ ( metaclass=__lowerCamelCase ):
'''simple docstring'''
_UpperCamelCase : Optional[int] = ["""flax"""]
def __init__( self , *snake_case , **snake_case ):
requires_backends(self , ['flax'] )
@classmethod
def SCREAMING_SNAKE_CASE__ ( cls , *snake_case , **snake_case ):
requires_backends(cls , ['flax'] )
@classmethod
def SCREAMING_SNAKE_CASE__ ( cls , *snake_case , **snake_case ):
requires_backends(cls , ['flax'] )
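
# Behavior note (hedged, based on how requires_backends is typically used by these
# dummy objects): instantiating any of the placeholder classes above without flax
# installed raises an ImportError directing the user to install flax, rather than
# failing later with an opaque ModuleNotFoundError deep inside a pipeline.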
| 195 |
import inspect
import unittest
import warnings
from math import ceil, floor
from transformers import LevitConfig
from transformers.file_utils import cached_property, is_torch_available, is_vision_available
from transformers.models.auto import get_values
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING,
MODEL_MAPPING,
LevitForImageClassification,
LevitForImageClassificationWithTeacher,
LevitModel,
)
from transformers.models.levit.modeling_levit import LEVIT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import LevitImageProcessor
class A_ ( __lowerCamelCase ):
'''simple docstring'''
def SCREAMING_SNAKE_CASE__ ( self ):
lowercase = self.config_class(**self.inputs_dict )
self.parent.assertTrue(hasattr(snake_case , 'hidden_sizes' ) )
self.parent.assertTrue(hasattr(snake_case , 'num_attention_heads' ) )
class A_ :
'''simple docstring'''
def __init__( self , snake_case , snake_case=13 , snake_case=64 , snake_case=3 , snake_case=3 , snake_case=2 , snake_case=1 , snake_case=16 , snake_case=[128, 256, 384] , snake_case=[4, 6, 8] , snake_case=[2, 3, 4] , snake_case=[16, 16, 16] , snake_case=0 , snake_case=[2, 2, 2] , snake_case=[2, 2, 2] , snake_case=0.02 , snake_case=True , snake_case=True , snake_case=2 , ):
lowercase = parent
lowercase = batch_size
lowercase = image_size
lowercase = num_channels
lowercase = kernel_size
lowercase = stride
lowercase = padding
lowercase = hidden_sizes
lowercase = num_attention_heads
lowercase = depths
lowercase = key_dim
lowercase = drop_path_rate
lowercase = patch_size
lowercase = attention_ratio
lowercase = mlp_ratio
lowercase = initializer_range
lowercase = [
['Subsample', key_dim[0], hidden_sizes[0] // key_dim[0], 4, 2, 2],
['Subsample', key_dim[0], hidden_sizes[1] // key_dim[0], 4, 2, 2],
]
lowercase = is_training
lowercase = use_labels
lowercase = num_labels
lowercase = initializer_range
def SCREAMING_SNAKE_CASE__ ( self ):
lowercase = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
lowercase = None
if self.use_labels:
lowercase = ids_tensor([self.batch_size] , self.num_labels )
lowercase = self.get_config()
return config, pixel_values, labels
def SCREAMING_SNAKE_CASE__ ( self ):
return LevitConfig(
image_size=self.image_size , num_channels=self.num_channels , kernel_size=self.kernel_size , stride=self.stride , padding=self.padding , patch_size=self.patch_size , hidden_sizes=self.hidden_sizes , num_attention_heads=self.num_attention_heads , depths=self.depths , key_dim=self.key_dim , drop_path_rate=self.drop_path_rate , mlp_ratio=self.mlp_ratio , attention_ratio=self.attention_ratio , initializer_range=self.initializer_range , down_ops=self.down_ops , )
def SCREAMING_SNAKE_CASE__ ( self , snake_case , snake_case , snake_case ):
lowercase = LevitModel(config=snake_case )
model.to(snake_case )
model.eval()
lowercase = model(snake_case )
lowercase = (self.image_size, self.image_size)
lowercase , lowercase = image_size[0], image_size[1]
for _ in range(4 ):
lowercase = floor(((height + 2 * self.padding - self.kernel_size) / self.stride) + 1 )
lowercase = floor(((width + 2 * self.padding - self.kernel_size) / self.stride) + 1 )
self.parent.assertEqual(
result.last_hidden_state.shape , (self.batch_size, ceil(height / 4 ) * ceil(width / 4 ), self.hidden_sizes[-1]) , )
def SCREAMING_SNAKE_CASE__ ( self , snake_case , snake_case , snake_case ):
lowercase = self.num_labels
lowercase = LevitForImageClassification(snake_case )
model.to(snake_case )
model.eval()
lowercase = model(snake_case , labels=snake_case )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def SCREAMING_SNAKE_CASE__ ( self ):
lowercase = self.prepare_config_and_inputs()
lowercase , lowercase , lowercase = config_and_inputs
lowercase = {'pixel_values': pixel_values}
return config, inputs_dict
@require_torch
class A_ ( __lowerCamelCase , __lowerCamelCase , unittest.TestCase ):
'''simple docstring'''
_UpperCamelCase : Tuple = (
(LevitModel, LevitForImageClassification, LevitForImageClassificationWithTeacher)
if is_torch_available()
else ()
)
_UpperCamelCase : Dict = (
{
"""feature-extraction""": LevitModel,
"""image-classification""": (LevitForImageClassification, LevitForImageClassificationWithTeacher),
}
if is_torch_available()
else {}
)
_UpperCamelCase : Dict = False
_UpperCamelCase : List[str] = False
_UpperCamelCase : List[str] = False
_UpperCamelCase : str = False
_UpperCamelCase : List[str] = False
def SCREAMING_SNAKE_CASE__ ( self ):
lowercase = LevitModelTester(self )
lowercase = ConfigTester(self , config_class=snake_case , has_text_modality=snake_case , hidden_size=37 )
def SCREAMING_SNAKE_CASE__ ( self ):
self.create_and_test_config_common_properties()
self.config_tester.create_and_test_config_to_json_string()
self.config_tester.create_and_test_config_to_json_file()
self.config_tester.create_and_test_config_from_and_save_pretrained()
self.config_tester.create_and_test_config_with_num_labels()
self.config_tester.check_config_can_be_init_without_params()
self.config_tester.check_config_arguments_init()
def SCREAMING_SNAKE_CASE__ ( self ):
return
@unittest.skip(reason='Levit does not use inputs_embeds' )
def SCREAMING_SNAKE_CASE__ ( self ):
pass
@unittest.skip(reason='Levit does not support input and output embeddings' )
def SCREAMING_SNAKE_CASE__ ( self ):
pass
@unittest.skip(reason='Levit does not output attentions' )
def SCREAMING_SNAKE_CASE__ ( self ):
pass
def SCREAMING_SNAKE_CASE__ ( self ):
lowercase , lowercase = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
lowercase = model_class(snake_case )
lowercase = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
lowercase = [*signature.parameters.keys()]
lowercase = ['pixel_values']
self.assertListEqual(arg_names[:1] , snake_case )
def SCREAMING_SNAKE_CASE__ ( self ):
def check_hidden_states_output(snake_case , snake_case , snake_case ):
lowercase = model_class(snake_case )
model.to(snake_case )
model.eval()
with torch.no_grad():
lowercase = model(**self._prepare_for_class(snake_case , snake_case ) )
lowercase = outputs.hidden_states
lowercase = len(self.model_tester.depths ) + 1
self.assertEqual(len(snake_case ) , snake_case )
lowercase = (self.model_tester.image_size, self.model_tester.image_size)
lowercase , lowercase = image_size[0], image_size[1]
for _ in range(4 ):
lowercase = floor(
(
(height + 2 * self.model_tester.padding - self.model_tester.kernel_size)
/ self.model_tester.stride
)
+ 1 )
lowercase = floor(
(
(width + 2 * self.model_tester.padding - self.model_tester.kernel_size)
/ self.model_tester.stride
)
+ 1 )
# verify the first hidden states (first block)
self.assertListEqual(
list(hidden_states[0].shape[-2:] ) , [
height * width,
self.model_tester.hidden_sizes[0],
] , )
lowercase , lowercase = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
lowercase = True
check_hidden_states_output(snake_case , snake_case , snake_case )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
lowercase = True
check_hidden_states_output(snake_case , snake_case , snake_case )
@unittest.skip('Will be fixed soon by reducing the size of the model used for common tests.' )
def SCREAMING_SNAKE_CASE__ ( self ):
pass
def SCREAMING_SNAKE_CASE__ ( self , snake_case , snake_case , snake_case=False ):
lowercase = super()._prepare_for_class(snake_case , snake_case , return_labels=snake_case )
if return_labels:
if model_class.__name__ == "LevitForImageClassificationWithTeacher":
del inputs_dict["labels"]
return inputs_dict
def SCREAMING_SNAKE_CASE__ ( self ):
lowercase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*snake_case )
def SCREAMING_SNAKE_CASE__ ( self ):
lowercase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*snake_case )
def SCREAMING_SNAKE_CASE__ ( self ):
if not self.model_tester.is_training:
return
lowercase , lowercase = self.model_tester.prepare_config_and_inputs_for_common()
lowercase = True
for model_class in self.all_model_classes:
# LevitForImageClassificationWithTeacher supports inference-only
if (
model_class in get_values(snake_case )
or model_class.__name__ == "LevitForImageClassificationWithTeacher"
):
continue
lowercase = model_class(snake_case )
model.to(snake_case )
model.train()
lowercase = self._prepare_for_class(snake_case , snake_case , return_labels=snake_case )
lowercase = model(**snake_case ).loss
loss.backward()
def SCREAMING_SNAKE_CASE__ ( self ):
lowercase , lowercase = self.model_tester.prepare_config_and_inputs_for_common()
if not self.model_tester.is_training:
return
lowercase = False
lowercase = True
for model_class in self.all_model_classes:
if model_class in get_values(snake_case ) or not model_class.supports_gradient_checkpointing:
continue
# LevitForImageClassificationWithTeacher supports inference-only
if model_class.__name__ == "LevitForImageClassificationWithTeacher":
continue
lowercase = model_class(snake_case )
model.gradient_checkpointing_enable()
model.to(snake_case )
model.train()
lowercase = self._prepare_for_class(snake_case , snake_case , return_labels=snake_case )
lowercase = model(**snake_case ).loss
loss.backward()
def SCREAMING_SNAKE_CASE__ ( self ):
lowercase , lowercase = self.model_tester.prepare_config_and_inputs_for_common()
lowercase = [
{'title': 'multi_label_classification', 'num_labels': 2, 'dtype': torch.float},
{'title': 'single_label_classification', 'num_labels': 1, 'dtype': torch.long},
{'title': 'regression', 'num_labels': 1, 'dtype': torch.float},
]
for model_class in self.all_model_classes:
if (
model_class
not in [
*get_values(snake_case ),
]
or model_class.__name__ == "LevitForImageClassificationWithTeacher"
):
continue
for problem_type in problem_types:
with self.subTest(msg=F'''Testing {model_class} with {problem_type['title']}''' ):
lowercase = problem_type['title']
lowercase = problem_type['num_labels']
lowercase = model_class(snake_case )
model.to(snake_case )
model.train()
lowercase = self._prepare_for_class(snake_case , snake_case , return_labels=snake_case )
if problem_type["num_labels"] > 1:
lowercase = inputs['labels'].unsqueeze(1 ).repeat(1 , problem_type['num_labels'] )
lowercase = inputs['labels'].to(problem_type['dtype'] )
                    # This tests that we do not trigger the warning from PyTorch "Using a target size that is different
                    # to the input size. This will likely lead to incorrect results due to broadcasting. Please ensure
                    # they have the same size." which is a symptom that something is wrong with the regression problem.
# See https://github.com/huggingface/transformers/issues/11780
with warnings.catch_warnings(record=snake_case ) as warning_list:
lowercase = model(**snake_case ).loss
for w in warning_list:
if "Using a target size that is different to the input size" in str(w.message ):
raise ValueError(
F'''Something is going wrong in the regression problem: intercepted {w.message}''' )
loss.backward()
@slow
def SCREAMING_SNAKE_CASE__ ( self ):
for model_name in LEVIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
lowercase = LevitModel.from_pretrained(snake_case )
self.assertIsNotNone(snake_case )
def UpperCAmelCase_ ( ):
lowercase = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' )
return image
@require_torch
@require_vision
class A_ ( unittest.TestCase ):
'''simple docstring'''
@cached_property
def SCREAMING_SNAKE_CASE__ ( self ):
return LevitImageProcessor.from_pretrained(LEVIT_PRETRAINED_MODEL_ARCHIVE_LIST[0] )
@slow
def SCREAMING_SNAKE_CASE__ ( self ):
lowercase = LevitForImageClassificationWithTeacher.from_pretrained(LEVIT_PRETRAINED_MODEL_ARCHIVE_LIST[0] ).to(
snake_case )
lowercase = self.default_image_processor
lowercase = prepare_img()
lowercase = image_processor(images=snake_case , return_tensors='pt' ).to(snake_case )
# forward pass
with torch.no_grad():
lowercase = model(**snake_case )
# verify the logits
lowercase = torch.Size((1, 1000) )
self.assertEqual(outputs.logits.shape , snake_case )
lowercase = torch.tensor([1.0_448, -0.3_745, -1.8_317] ).to(snake_case )
self.assertTrue(torch.allclose(outputs.logits[0, :3] , snake_case , atol=1E-4 ) )
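
# Minimal inference sketch mirroring the integration test above. The checkpoint id is an
# assumption (the first entry of LEVIT_PRETRAINED_MODEL_ARCHIVE_LIST, e.g. "facebook/levit-128S"):
#   processor = LevitImageProcessor.from_pretrained("facebook/levit-128S")
#   model = LevitForImageClassificationWithTeacher.from_pretrained("facebook/levit-128S")
#   image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
#   inputs = processor(images=image, return_tensors="pt")
#   with torch.no_grad():
#       logits = model(**inputs).logits  # shape (1, 1000): ImageNet-1k classes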
| 195 | 1 |
"""simple docstring"""
from maths.prime_factors import prime_factors
def __lowerCAmelCase (_UpperCamelCase ):
if not isinstance(_UpperCamelCase , _UpperCamelCase ):
__lowerCAmelCase : Any = F"Input value of [number={number}] must be an integer"
raise TypeError(_UpperCamelCase )
if number < 1:
raise ValueError('Input must be a positive integer' )
return -1 if len(prime_factors(_UpperCamelCase ) ) % 2 else 1
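# Examples: mobius(1) == 1 (empty factor list), mobius(30) == -1 (30 = 2 * 3 * 5, an odd
# count of primes). Note the parity test counts prime factors with multiplicity, so a
# squareful input such as 4 (= 2 * 2) returns 1 here rather than the 0 of the textbook
# Möbius function.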
if __name__ == "__main__":
import doctest
doctest.testmod() | 359 |
"""simple docstring"""
import argparse
import datetime
import json
import time
import warnings
from logging import getLogger
from pathlib import Path
from typing import Dict, List
import torch
from tqdm import tqdm
from transformers import AutoModelForSeqaSeqLM, AutoTokenizer
from utils import calculate_bleu, calculate_rouge, chunks, parse_numeric_n_bool_cl_kwargs, use_task_specific_params
lowerCamelCase__ = getLogger(__name__)
lowerCamelCase__ = """cuda""" if torch.cuda.is_available() else """cpu"""
def __lowerCAmelCase (_UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase = 8 , _UpperCamelCase = DEFAULT_DEVICE , _UpperCamelCase=False , _UpperCamelCase="summarization" , _UpperCamelCase=None , **_UpperCamelCase , ):
__lowerCAmelCase : str = Path(_UpperCamelCase ).open('w' , encoding='utf-8' )
__lowerCAmelCase : Union[str, Any] = str(_UpperCamelCase )
__lowerCAmelCase : List[str] = AutoModelForSeqaSeqLM.from_pretrained(_UpperCamelCase ).to(_UpperCamelCase )
if fpaa:
__lowerCAmelCase : Optional[Any] = model.half()
__lowerCAmelCase : List[str] = AutoTokenizer.from_pretrained(_UpperCamelCase )
logger.info(F"Inferred tokenizer type: {tokenizer.__class__}" ) # if this is wrong, check config.model_type.
__lowerCAmelCase : List[Any] = time.time()
# update config with task specific params
use_task_specific_params(_UpperCamelCase , _UpperCamelCase )
if prefix is None:
__lowerCAmelCase : Optional[int] = prefix or getattr(model.config , 'prefix' , '' ) or ''
for examples_chunk in tqdm(list(chunks(_UpperCamelCase , _UpperCamelCase ) ) ):
__lowerCAmelCase : List[str] = [prefix + text for text in examples_chunk]
__lowerCAmelCase : List[str] = tokenizer(_UpperCamelCase , return_tensors='pt' , truncation=_UpperCamelCase , padding='longest' ).to(_UpperCamelCase )
__lowerCAmelCase : str = model.generate(
input_ids=batch.input_ids , attention_mask=batch.attention_mask , **_UpperCamelCase , )
__lowerCAmelCase : str = tokenizer.batch_decode(_UpperCamelCase , skip_special_tokens=_UpperCamelCase , clean_up_tokenization_spaces=_UpperCamelCase )
for hypothesis in dec:
fout.write(hypothesis + '\n' )
fout.flush()
fout.close()
__lowerCAmelCase : Optional[int] = int(time.time() - start_time ) # seconds
__lowerCAmelCase : List[Any] = len(_UpperCamelCase )
return {"n_obs": n_obs, "runtime": runtime, "seconds_per_sample": round(runtime / n_obs , 4 )}
def __lowerCAmelCase ():
return datetime.datetime.now().strftime('%Y-%m-%d %H:%M:%S' )
def __lowerCAmelCase (_UpperCamelCase=True ):
__lowerCAmelCase : List[Any] = argparse.ArgumentParser()
parser.add_argument('model_name' , type=_UpperCamelCase , help='like facebook/bart-large-cnn,t5-base, etc.' )
parser.add_argument('input_path' , type=_UpperCamelCase , help='like cnn_dm/test.source' )
parser.add_argument('save_path' , type=_UpperCamelCase , help='where to save summaries' )
parser.add_argument('--reference_path' , type=_UpperCamelCase , required=_UpperCamelCase , help='like cnn_dm/test.target' )
parser.add_argument('--score_path' , type=_UpperCamelCase , required=_UpperCamelCase , default='metrics.json' , help='where to save metrics' )
parser.add_argument('--device' , type=_UpperCamelCase , required=_UpperCamelCase , default=_UpperCamelCase , help='cuda, cuda:1, cpu etc.' )
parser.add_argument(
        '--prefix' , type=_UpperCamelCase , required=_UpperCamelCase , default=_UpperCamelCase , help='will be added to the beginning of src examples' )
parser.add_argument('--task' , type=_UpperCamelCase , default='summarization' , help='used for task_specific_params + metrics' )
parser.add_argument('--bs' , type=_UpperCamelCase , default=8 , required=_UpperCamelCase , help='batch size' )
parser.add_argument(
'--n_obs' , type=_UpperCamelCase , default=-1 , required=_UpperCamelCase , help='How many observations. Defaults to all.' )
parser.add_argument('--fp16' , action='store_true' )
parser.add_argument('--dump-args' , action='store_true' , help='print the custom hparams with the results' )
parser.add_argument(
'--info' , nargs='?' , type=_UpperCamelCase , const=datetime_now() , help=(
'use in conjunction w/ --dump-args to print with the results whatever other info you\'d like, e.g.'
' lang=en-ru. If no value is passed, the current datetime string will be used.'
) , )
# Unspecified args like --num_beams=2 --decoder_start_token_id=4 are passed to model.generate
__lowerCAmelCase , __lowerCAmelCase : Optional[int] = parser.parse_known_args()
__lowerCAmelCase : Optional[int] = parse_numeric_n_bool_cl_kwargs(_UpperCamelCase )
if parsed_args and verbose:
print(F"parsed the following generate kwargs: {parsed_args}" )
__lowerCAmelCase : Dict = [' ' + x.rstrip() if 't5' in args.model_name else x.rstrip() for x in open(args.input_path ).readlines()]
if args.n_obs > 0:
__lowerCAmelCase : int = examples[: args.n_obs]
Path(args.save_path ).parent.mkdir(exist_ok=_UpperCamelCase )
if args.reference_path is None and Path(args.score_path ).exists():
warnings.warn(F"score_path {args.score_path} will be overwritten unless you type ctrl-c." )
if args.device == "cpu" and args.fpaa:
# this mix leads to RuntimeError: "threshold_cpu" not implemented for 'Half'
raise ValueError('Can\'t mix --fp16 and --device cpu' )
__lowerCAmelCase : Optional[Any] = generate_summaries_or_translations(
_UpperCamelCase , args.save_path , args.model_name , batch_size=args.bs , device=args.device , fpaa=args.fpaa , task=args.task , prefix=args.prefix , **_UpperCamelCase , )
if args.reference_path is None:
return {}
# Compute scores
__lowerCAmelCase : str = calculate_bleu if 'translation' in args.task else calculate_rouge
__lowerCAmelCase : Dict = [x.rstrip() for x in open(args.save_path ).readlines()]
__lowerCAmelCase : Dict = [x.rstrip() for x in open(args.reference_path ).readlines()][: len(_UpperCamelCase )]
__lowerCAmelCase : dict = score_fn(_UpperCamelCase , _UpperCamelCase )
scores.update(_UpperCamelCase )
if args.dump_args:
scores.update(_UpperCamelCase )
if args.info:
__lowerCAmelCase : Optional[Any] = args.info
if verbose:
print(_UpperCamelCase )
if args.score_path is not None:
json.dump(_UpperCamelCase , open(args.score_path , 'w' ) )
return scores
if __name__ == "__main__":
# Usage for MT:
# python run_eval.py MODEL_NAME $DATA_DIR/test.source $save_dir/test_translations.txt --reference_path $DATA_DIR/test.target --score_path $save_dir/test_bleu.json --task translation $@
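    # Usage for summarization (illustrative paths; flags match the argparser above):
    # python run_eval.py t5-base cnn_dm/test.source preds.txt --reference_path cnn_dm/test.target --score_path rouge.json --task summarization --bs 8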
run_generate(verbose=True) | 182 | 0 |
"""simple docstring"""
def __lowerCAmelCase ( lowercase : list ) -> List[str]:
"""simple docstring"""
if len(SCREAMING_SNAKE_CASE_ ) <= 1:
return [tuple(SCREAMING_SNAKE_CASE_ )]
snake_case : str = []
def generate(lowercase : int , lowercase : list ):
if k == 1:
res.append(tuple(arr[:] ) )
return
generate(k - 1 , SCREAMING_SNAKE_CASE_ )
for i in range(k - 1 ):
if k % 2 == 0: # k is even
snake_case ,snake_case : List[Any] = arr[k - 1], arr[i]
else: # k is odd
snake_case ,snake_case : Dict = arr[k - 1], arr[0]
generate(k - 1 , SCREAMING_SNAKE_CASE_ )
generate(len(SCREAMING_SNAKE_CASE_ ) , SCREAMING_SNAKE_CASE_ )
return res
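# Worked example for the standard Heap's algorithm this function implements: for
# arr = [1, 2, 3] the expected visiting order is
# (1, 2, 3), (2, 1, 3), (3, 1, 2), (1, 3, 2), (2, 3, 1), (3, 2, 1) -- all 3! = 6 tuples.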
if __name__ == "__main__":
__snake_case = input("""Enter numbers separated by a comma:\n""").strip()
__snake_case = [int(item) for item in user_input.split(""",""")]
print(heaps(arr))
| 203 |
import unittest
from transformers import CamembertTokenizer, CamembertTokenizerFast
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow
from transformers.utils import is_torch_available
from ...test_tokenization_common import TokenizerTesterMixin
UpperCamelCase__ = get_tests_dir("""fixtures/test_sentencepiece.model""")
UpperCamelCase__ = get_tests_dir("""fixtures/test_sentencepiece_bpe.model""")
UpperCamelCase__ = """pt""" if is_torch_available() else """tf"""
@require_sentencepiece
@require_tokenizers
class a__ ( snake_case__ , unittest.TestCase ):
_a : int = CamembertTokenizer
_a : Dict = CamembertTokenizerFast
_a : Tuple = True
_a : List[Any] = True
def __SCREAMING_SNAKE_CASE( self ):
"""simple docstring"""
super().setUp()
# We have a SentencePiece fixture for testing
__lowerCAmelCase = CamembertTokenizer(_A )
tokenizer.save_pretrained(self.tmpdirname )
def __SCREAMING_SNAKE_CASE( self ):
"""simple docstring"""
__lowerCAmelCase = "<pad>"
__lowerCAmelCase = 1
self.assertEqual(self.get_tokenizer()._convert_token_to_id(_A ) , _A )
self.assertEqual(self.get_tokenizer()._convert_id_to_token(_A ) , _A )
def __SCREAMING_SNAKE_CASE( self ):
"""simple docstring"""
__lowerCAmelCase = list(self.get_tokenizer().get_vocab().keys() )
self.assertEqual(vocab_keys[0] , "<s>NOTUSED" )
self.assertEqual(vocab_keys[1] , "<pad>" )
self.assertEqual(vocab_keys[-1] , "<mask>" )
self.assertEqual(len(_A ) , 1_0_0_4 )
def __SCREAMING_SNAKE_CASE( self ):
"""simple docstring"""
self.assertEqual(self.get_tokenizer().vocab_size , 1_0_0_5 )
def __SCREAMING_SNAKE_CASE( self ):
"""simple docstring"""
__lowerCAmelCase = CamembertTokenizer(_A )
tokenizer.save_pretrained(self.tmpdirname )
__lowerCAmelCase = CamembertTokenizerFast.from_pretrained(self.tmpdirname )
__lowerCAmelCase = "I was born in 92000, and this is falsé."
__lowerCAmelCase = tokenizer.encode(_A )
__lowerCAmelCase = rust_tokenizer.encode(_A )
self.assertListEqual(_A , _A )
__lowerCAmelCase = tokenizer.encode(_A , add_special_tokens=_A )
__lowerCAmelCase = rust_tokenizer.encode(_A , add_special_tokens=_A )
self.assertListEqual(_A , _A )
        # <unk> tokens are not the same for `rust` as for `slow`.
        # Because spm gives back the raw token instead of `unk` in EncodeAsPieces
# tokens = tokenizer.tokenize(sequence)
__lowerCAmelCase = tokenizer.convert_ids_to_tokens(_A )
__lowerCAmelCase = rust_tokenizer.tokenize(_A )
self.assertListEqual(_A , _A )
def __SCREAMING_SNAKE_CASE( self ):
"""simple docstring"""
if not self.test_rust_tokenizer:
return
__lowerCAmelCase = self.get_tokenizer()
__lowerCAmelCase = self.get_rust_tokenizer()
__lowerCAmelCase = "I was born in 92000, and this is falsé."
__lowerCAmelCase = tokenizer.tokenize(_A )
__lowerCAmelCase = rust_tokenizer.tokenize(_A )
self.assertListEqual(_A , _A )
__lowerCAmelCase = tokenizer.encode(_A , add_special_tokens=_A )
__lowerCAmelCase = rust_tokenizer.encode(_A , add_special_tokens=_A )
self.assertListEqual(_A , _A )
__lowerCAmelCase = self.get_rust_tokenizer()
__lowerCAmelCase = tokenizer.encode(_A )
__lowerCAmelCase = rust_tokenizer.encode(_A )
self.assertListEqual(_A , _A )
@slow
def __SCREAMING_SNAKE_CASE( self ):
"""simple docstring"""
__lowerCAmelCase = {"input_ids": [[5, 5_4, 7_1_9_6, 2_9_7, 3_0, 2_3, 7_7_6, 1_8, 1_1, 3_2_1_5, 3_7_0_5, 8_2_5_2, 2_2, 3_1_6_4, 1_1_8_1, 2_1_1_6, 2_9, 1_6, 8_1_3, 2_5, 7_9_1, 3_3_1_4, 2_0, 3_4_4_6, 3_8, 2_7_5_7_5, 1_2_0, 6, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [5, 4_6_8, 1_7, 1_1, 9_0_8_8, 2_0, 1_5_1_7, 8, 2_2_8_0_4, 1_8_8_1_8, 1_0, 3_8, 6_2_9, 6_0_7, 6_0_7, 1_4_2, 1_9, 7_1_9_6, 8_6_7, 5_6, 1_0_3_2_6, 2_4, 2_2_6_7, 2_0, 4_1_6, 5_0_7_2, 1_5_6_1_2, 2_3_3, 7_3_4, 7, 2_3_9_9, 2_7, 1_6, 3_0_1_5, 1_6_4_9, 7, 2_4, 2_0, 4_3_3_8, 2_3_9_9, 2_7, 1_3, 3_4_0_0, 1_4, 1_3, 6_1_8_9, 8, 9_3_0, 9, 6]], "attention_mask": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]]} # noqa: E501
# fmt: on
        # camembert is a French model, so we also use French texts.
__lowerCAmelCase = [
"Le transformeur est un modèle d'apprentissage profond introduit en 2017, "
"utilisé principalement dans le domaine du traitement automatique des langues (TAL).",
"À l'instar des réseaux de neurones récurrents (RNN), les transformeurs sont conçus "
"pour gérer des données séquentielles, telles que le langage naturel, pour des tâches "
"telles que la traduction et la synthèse de texte.",
]
self.tokenizer_integration_test_util(
expected_encoding=_A , model_name="camembert-base" , revision="3a0641d9a1aeb7e848a74299e7e4c4bca216b4cf" , sequences=_A , )
| 92 | 0 |
"""simple docstring"""
import copy
import os
from typing import Union
from ...configuration_utils import PretrainedConfig
from ...utils import logging
SCREAMING_SNAKE_CASE__ = logging.get_logger(__name__)
SCREAMING_SNAKE_CASE__ = {
"BridgeTower/bridgetower-base": "https://huggingface.co/BridgeTower/bridgetower-base/blob/main/config.json",
"BridgeTower/bridgetower-base-itm-mlm": (
"https://huggingface.co/BridgeTower/bridgetower-base-itm-mlm/blob/main/config.json"
),
}
class lowerCAmelCase_ ( lowerCAmelCase ):
"""simple docstring"""
_lowerCAmelCase : Optional[Any] = """bridgetower_vision_model"""
def __init__( self , lowerCAmelCase=7_68 , lowerCAmelCase=12 , lowerCAmelCase=3 , lowerCAmelCase=16 , lowerCAmelCase=2_88 , lowerCAmelCase=1 , lowerCAmelCase=1E-05 , lowerCAmelCase=False , lowerCAmelCase=True , lowerCAmelCase=False , **lowerCAmelCase , ):
"""simple docstring"""
super().__init__(**lowerCAmelCase )
snake_case = hidden_size
snake_case = num_hidden_layers
snake_case = num_channels
snake_case = patch_size
snake_case = image_size
snake_case = initializer_factor
snake_case = layer_norm_eps
snake_case = stop_gradient
snake_case = share_layernorm
snake_case = remove_last_layer
@classmethod
def snake_case ( cls , lowerCAmelCase , **lowerCAmelCase ):
"""simple docstring"""
snake_case ,snake_case = cls.get_config_dict(lowerCAmelCase , **lowerCAmelCase )
if config_dict.get('model_type' ) == "bridgetower":
            snake_case = config_dict['vision_config']
if "model_type" in config_dict and hasattr(cls , 'model_type' ) and config_dict["model_type"] != cls.model_type:
logger.warning(
F"""You are using a model of type {config_dict["model_type"]} to instantiate a model of type """
F"""{cls.model_type}. This is not supported for all configurations of models and can yield errors.""" )
return cls.from_dict(lowerCAmelCase , **lowerCAmelCase )
class lowerCAmelCase_ ( lowerCAmelCase ):
"""simple docstring"""
_lowerCAmelCase : Union[str, Any] = """bridgetower_text_model"""
def __init__( self , lowerCAmelCase=5_02_65 , lowerCAmelCase=7_68 , lowerCAmelCase=12 , lowerCAmelCase=12 , lowerCAmelCase=1 , lowerCAmelCase=30_72 , lowerCAmelCase="gelu" , lowerCAmelCase=0.1 , lowerCAmelCase=0.1 , lowerCAmelCase=5_14 , lowerCAmelCase=1 , lowerCAmelCase=1E-05 , lowerCAmelCase=1 , lowerCAmelCase=0 , lowerCAmelCase=2 , lowerCAmelCase="absolute" , lowerCAmelCase=True , **lowerCAmelCase , ):
"""simple docstring"""
super().__init__(**lowerCAmelCase )
snake_case = vocab_size
snake_case = hidden_size
snake_case = num_hidden_layers
snake_case = num_attention_heads
snake_case = hidden_act
snake_case = initializer_factor
snake_case = intermediate_size
snake_case = hidden_dropout_prob
snake_case = attention_probs_dropout_prob
snake_case = max_position_embeddings
snake_case = type_vocab_size
snake_case = layer_norm_eps
snake_case = position_embedding_type
snake_case = use_cache
snake_case = pad_token_id
snake_case = bos_token_id
snake_case = eos_token_id
@classmethod
def snake_case ( cls , lowerCAmelCase , **lowerCAmelCase ):
"""simple docstring"""
snake_case ,snake_case = cls.get_config_dict(lowerCAmelCase , **lowerCAmelCase )
if config_dict.get('model_type' ) == "bridgetower":
snake_case = config_dict['text_config']
if "model_type" in config_dict and hasattr(cls , 'model_type' ) and config_dict["model_type"] != cls.model_type:
logger.warning(
F"""You are using a model of type {config_dict["model_type"]} to instantiate a model of type """
F"""{cls.model_type}. This is not supported for all configurations of models and can yield errors.""" )
return cls.from_dict(lowerCAmelCase , **lowerCAmelCase )
class lowerCAmelCase_ ( lowerCAmelCase ):
"""simple docstring"""
_lowerCAmelCase : str = """bridgetower"""
def __init__( self , lowerCAmelCase=True , lowerCAmelCase="gelu" , lowerCAmelCase=7_68 , lowerCAmelCase=1 , lowerCAmelCase=1E-05 , lowerCAmelCase=False , lowerCAmelCase="add" , lowerCAmelCase=12 , lowerCAmelCase=6 , lowerCAmelCase=False , lowerCAmelCase=False , lowerCAmelCase=None , lowerCAmelCase=None , **lowerCAmelCase , ):
"""simple docstring"""
snake_case = kwargs.pop('text_config_dict' , lowerCAmelCase )
snake_case = kwargs.pop('vision_config_dict' , lowerCAmelCase )
super().__init__(**lowerCAmelCase )
snake_case = share_cross_modal_transformer_layers
snake_case = hidden_act
snake_case = hidden_size
snake_case = initializer_factor
snake_case = layer_norm_eps
snake_case = share_link_tower_layers
snake_case = link_tower_type
snake_case = num_attention_heads
snake_case = num_hidden_layers
snake_case = tie_word_embeddings
snake_case = init_layernorm_from_vision_encoder
if text_config is None:
snake_case = {}
logger.info('`text_config` is `None`. Initializing the `BridgeTowerTextConfig` with default values.' )
if vision_config is None:
snake_case = {}
logger.info('`vision_config` is `None`. Initializing the `BridgeTowerVisionConfig` with default values.' )
snake_case = BridgeTowerTextConfig(**lowerCAmelCase )
snake_case = BridgeTowerVisionConfig(**lowerCAmelCase )
@classmethod
def snake_case ( cls , lowerCAmelCase , lowerCAmelCase , **lowerCAmelCase ):
"""simple docstring"""
return cls(text_config=text_config.to_dict() , vision_config=vision_config.to_dict() , **lowerCAmelCase )
def snake_case ( self ):
"""simple docstring"""
snake_case = copy.deepcopy(self.__dict__ )
snake_case = self.text_config.to_dict()
snake_case = self.vision_config.to_dict()
snake_case = self.__class__.model_type
return output
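
# Construction sketch (keyword names inferred from the __init__ body above; the values
# are illustrative defaults, not a tested configuration):
#   text_cfg = BridgeTowerTextConfig(vocab_size=50265, hidden_size=768)
#   vision_cfg = BridgeTowerVisionConfig(hidden_size=768, image_size=288)
#   cfg = BridgeTowerConfig(text_config=text_cfg.to_dict(), vision_config=vision_cfg.to_dict())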
| 361 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available
SCREAMING_SNAKE_CASE__ = {
"configuration_data2vec_audio": ["DATA2VEC_AUDIO_PRETRAINED_CONFIG_ARCHIVE_MAP", "Data2VecAudioConfig"],
"configuration_data2vec_text": [
"DATA2VEC_TEXT_PRETRAINED_CONFIG_ARCHIVE_MAP",
"Data2VecTextConfig",
"Data2VecTextOnnxConfig",
],
"configuration_data2vec_vision": [
"DATA2VEC_VISION_PRETRAINED_CONFIG_ARCHIVE_MAP",
"Data2VecVisionConfig",
"Data2VecVisionOnnxConfig",
],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
SCREAMING_SNAKE_CASE__ = [
"DATA2VEC_AUDIO_PRETRAINED_MODEL_ARCHIVE_LIST",
"Data2VecAudioForAudioFrameClassification",
"Data2VecAudioForCTC",
"Data2VecAudioForSequenceClassification",
"Data2VecAudioForXVector",
"Data2VecAudioModel",
"Data2VecAudioPreTrainedModel",
]
SCREAMING_SNAKE_CASE__ = [
"DATA2VEC_TEXT_PRETRAINED_MODEL_ARCHIVE_LIST",
"Data2VecTextForCausalLM",
"Data2VecTextForMaskedLM",
"Data2VecTextForMultipleChoice",
"Data2VecTextForQuestionAnswering",
"Data2VecTextForSequenceClassification",
"Data2VecTextForTokenClassification",
"Data2VecTextModel",
"Data2VecTextPreTrainedModel",
]
SCREAMING_SNAKE_CASE__ = [
"DATA2VEC_VISION_PRETRAINED_MODEL_ARCHIVE_LIST",
"Data2VecVisionForImageClassification",
"Data2VecVisionForMaskedImageModeling",
"Data2VecVisionForSemanticSegmentation",
"Data2VecVisionModel",
"Data2VecVisionPreTrainedModel",
]
if is_tf_available():
SCREAMING_SNAKE_CASE__ = [
"TFData2VecVisionForImageClassification",
"TFData2VecVisionForSemanticSegmentation",
"TFData2VecVisionModel",
"TFData2VecVisionPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_dataavec_audio import DATA2VEC_AUDIO_PRETRAINED_CONFIG_ARCHIVE_MAP, DataaVecAudioConfig
from .configuration_dataavec_text import (
DATA2VEC_TEXT_PRETRAINED_CONFIG_ARCHIVE_MAP,
DataaVecTextConfig,
DataaVecTextOnnxConfig,
)
from .configuration_dataavec_vision import (
DATA2VEC_VISION_PRETRAINED_CONFIG_ARCHIVE_MAP,
DataaVecVisionConfig,
DataaVecVisionOnnxConfig,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_dataavec_audio import (
DATA2VEC_AUDIO_PRETRAINED_MODEL_ARCHIVE_LIST,
DataaVecAudioForAudioFrameClassification,
DataaVecAudioForCTC,
DataaVecAudioForSequenceClassification,
DataaVecAudioForXVector,
DataaVecAudioModel,
DataaVecAudioPreTrainedModel,
)
from .modeling_dataavec_text import (
DATA2VEC_TEXT_PRETRAINED_MODEL_ARCHIVE_LIST,
DataaVecTextForCausalLM,
DataaVecTextForMaskedLM,
DataaVecTextForMultipleChoice,
DataaVecTextForQuestionAnswering,
DataaVecTextForSequenceClassification,
DataaVecTextForTokenClassification,
DataaVecTextModel,
DataaVecTextPreTrainedModel,
)
from .modeling_dataavec_vision import (
DATA2VEC_VISION_PRETRAINED_MODEL_ARCHIVE_LIST,
DataaVecVisionForImageClassification,
DataaVecVisionForMaskedImageModeling,
DataaVecVisionForSemanticSegmentation,
DataaVecVisionModel,
DataaVecVisionPreTrainedModel,
)
if is_tf_available():
from .modeling_tf_dataavec_vision import (
TFDataaVecVisionForImageClassification,
TFDataaVecVisionForSemanticSegmentation,
TFDataaVecVisionModel,
TFDataaVecVisionPreTrainedModel,
)
else:
import sys
SCREAMING_SNAKE_CASE__ = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
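
# Effect of the _LazyModule indirection above (a sketch of transformers' lazy-init
# behavior, not measured here): importing this package stays cheap because the torch/tf
# modeling files listed in _import_structure are only loaded when an attribute such as
# "Data2VecTextModel" is first accessed.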
| 149 | 0 |
'''simple docstring'''
import time
from dataclasses import dataclass
from multiprocessing import Pool
from unittest import TestCase
from unittest.mock import patch
import multiprocess
import numpy as np
import pytest
from datasets.utils.py_utils import (
NestedDataStructure,
asdict,
iflatmap_unordered,
map_nested,
temp_seed,
temporary_assignment,
zip_dict,
)
from .utils import require_tf, require_torch
def _a( UpperCamelCase__ : Tuple ): # picklable for multiprocessing
'''simple docstring'''
return x.sum()
def _a( UpperCamelCase__ : int ): # picklable for multiprocessing
'''simple docstring'''
return i + 1
@dataclass
class __SCREAMING_SNAKE_CASE :
snake_case_ = 42
snake_case_ = 42
class __SCREAMING_SNAKE_CASE ( lowerCamelCase ):
def __magic_name__ ( self : List[str] ) -> str:
SCREAMING_SNAKE_CASE__ : Optional[Any] ={}
SCREAMING_SNAKE_CASE__ : List[Any] =[]
SCREAMING_SNAKE_CASE__ : int =1
SCREAMING_SNAKE_CASE__ : List[Any] =[1, 2]
SCREAMING_SNAKE_CASE__ : int ={'''a''': 1, '''b''': 2}
SCREAMING_SNAKE_CASE__ : int ={'''a''': [1, 2], '''b''': [3, 4]}
SCREAMING_SNAKE_CASE__ : List[str] ={'''a''': {'''1''': 1}, '''b''': 2}
SCREAMING_SNAKE_CASE__ : Optional[Any] ={'''a''': 1, '''b''': 2, '''c''': 3, '''d''': 4}
SCREAMING_SNAKE_CASE__ : Any ={}
SCREAMING_SNAKE_CASE__ : Optional[Any] =[]
SCREAMING_SNAKE_CASE__ : Tuple =2
SCREAMING_SNAKE_CASE__ : List[Any] =[2, 3]
SCREAMING_SNAKE_CASE__ : Union[str, Any] ={'''a''': 2, '''b''': 3}
SCREAMING_SNAKE_CASE__ : Optional[int] ={'''a''': [2, 3], '''b''': [4, 5]}
SCREAMING_SNAKE_CASE__ : List[Any] ={'''a''': {'''1''': 2}, '''b''': 3}
SCREAMING_SNAKE_CASE__ : str ={'''a''': 2, '''b''': 3, '''c''': 4, '''d''': 5}
self.assertEqual(map_nested(__lowercase , __lowercase ) , __lowercase )
self.assertEqual(map_nested(__lowercase , __lowercase ) , __lowercase )
self.assertEqual(map_nested(__lowercase , __lowercase ) , __lowercase )
self.assertEqual(map_nested(__lowercase , __lowercase ) , __lowercase )
self.assertEqual(map_nested(__lowercase , __lowercase ) , __lowercase )
self.assertEqual(map_nested(__lowercase , __lowercase ) , __lowercase )
self.assertEqual(map_nested(__lowercase , __lowercase ) , __lowercase )
self.assertEqual(map_nested(__lowercase , __lowercase ) , __lowercase )
SCREAMING_SNAKE_CASE__ : Dict =2
self.assertEqual(map_nested(__lowercase , __lowercase , num_proc=__lowercase ) , __lowercase )
self.assertEqual(map_nested(__lowercase , __lowercase , num_proc=__lowercase ) , __lowercase )
self.assertEqual(map_nested(__lowercase , __lowercase , num_proc=__lowercase ) , __lowercase )
self.assertEqual(map_nested(__lowercase , __lowercase , num_proc=__lowercase ) , __lowercase )
self.assertEqual(map_nested(__lowercase , __lowercase , num_proc=__lowercase ) , __lowercase )
self.assertEqual(map_nested(__lowercase , __lowercase , num_proc=__lowercase ) , __lowercase )
self.assertEqual(map_nested(__lowercase , __lowercase , num_proc=__lowercase ) , __lowercase )
self.assertEqual(map_nested(__lowercase , __lowercase , num_proc=__lowercase ) , __lowercase )
SCREAMING_SNAKE_CASE__ : Dict ={'''a''': np.eye(2 ), '''b''': np.zeros(3 ), '''c''': np.ones(2 )}
SCREAMING_SNAKE_CASE__ : List[Any] ={'''a''': 2, '''b''': 0, '''c''': 2}
SCREAMING_SNAKE_CASE__ : str ={
'''a''': np.eye(2 ).astype(__lowercase ),
'''b''': np.zeros(3 ).astype(__lowercase ),
'''c''': np.ones(2 ).astype(__lowercase ),
}
self.assertEqual(map_nested(__lowercase , __lowercase , map_numpy=__lowercase ) , __lowercase )
self.assertEqual(
{k: v.tolist() for k, v in map_nested(__lowercase , __lowercase , map_numpy=__lowercase ).items()} , {k: v.tolist() for k, v in expected_map_nested_sna_int.items()} , )
self.assertEqual(map_nested(__lowercase , __lowercase , map_numpy=__lowercase , num_proc=__lowercase ) , __lowercase )
self.assertEqual(
{k: v.tolist() for k, v in map_nested(__lowercase , __lowercase , map_numpy=__lowercase , num_proc=__lowercase ).items()} , {k: v.tolist() for k, v in expected_map_nested_sna_int.items()} , )
with self.assertRaises(__lowercase ): # can't pickle a local lambda
map_nested(lambda __lowercase : x + 1 , __lowercase , num_proc=__lowercase )
def __magic_name__ ( self : Optional[Any] ) -> List[Any]:
SCREAMING_SNAKE_CASE__ : List[str] ={'''a''': 1, '''b''': 2}
SCREAMING_SNAKE_CASE__ : Union[str, Any] ={'''a''': 3, '''b''': 4}
SCREAMING_SNAKE_CASE__ : List[Any] ={'''a''': 5, '''b''': 6}
SCREAMING_SNAKE_CASE__ : Dict =sorted([('''a''', (1, 3, 5)), ('''b''', (2, 4, 6))] )
self.assertEqual(sorted(zip_dict(__lowercase , __lowercase , __lowercase ) ) , __lowercase )
def __magic_name__ ( self : Optional[int] ) -> Tuple:
class __SCREAMING_SNAKE_CASE :
snake_case_ = """bar"""
SCREAMING_SNAKE_CASE__ : Dict =Foo()
self.assertEqual(foo.my_attr , '''bar''' )
with temporary_assignment(__lowercase , '''my_attr''' , '''BAR''' ):
self.assertEqual(foo.my_attr , '''BAR''' )
self.assertEqual(foo.my_attr , '''bar''' )
@pytest.mark.parametrize(
'''iterable_length, num_proc, expected_num_proc''', [
(1, None, 1),
(1, 1, 1),
(2, None, 1),
(2, 1, 1),
(2, 2, 1),
(2, 3, 1),
(3, 2, 1),
(1_6, 1_6, 1_6),
(1_6, 1_7, 1_6),
(1_7, 1_6, 1_6),
], )
def _a( UpperCamelCase__ : int, UpperCamelCase__ : int, UpperCamelCase__ : Dict ):
'''simple docstring'''
with patch('''datasets.utils.py_utils._single_map_nested''' ) as mock_single_map_nested, patch(
'''datasets.parallel.parallel.Pool''' ) as mock_multiprocessing_pool:
SCREAMING_SNAKE_CASE__ : Tuple ={f"{i}": i for i in range(UpperCamelCase__ )}
SCREAMING_SNAKE_CASE__ : List[Any] =map_nested(lambda UpperCamelCase__ : x + 1_0, UpperCamelCase__, num_proc=UpperCamelCase__, parallel_min_length=1_6 )
if expected_num_proc == 1:
assert mock_single_map_nested.called
assert not mock_multiprocessing_pool.called
else:
assert not mock_single_map_nested.called
assert mock_multiprocessing_pool.called
assert mock_multiprocessing_pool.call_args[0][0] == expected_num_proc
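# Reading of the parametrization above: map_nested stays single-process until the
# iterable has at least parallel_min_length (16) items, and num_proc is capped at the
# number of items -- hence every row with fewer than 16 items expects 1, and (16, 17)
# as well as (17, 16) both collapse to 16.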
class __SCREAMING_SNAKE_CASE ( lowerCamelCase ):
@require_tf
def __magic_name__ ( self : List[str] ) -> Optional[Any]:
import tensorflow as tf
from tensorflow.keras import layers
SCREAMING_SNAKE_CASE__ : Union[str, Any] =layers.Dense(2 )
def gen_random_output():
SCREAMING_SNAKE_CASE__ : Optional[Any] =tf.random.uniform((1, 3) )
return model(__lowercase ).numpy()
with temp_seed(42 , set_tensorflow=__lowercase ):
SCREAMING_SNAKE_CASE__ : Any =gen_random_output()
with temp_seed(42 , set_tensorflow=__lowercase ):
SCREAMING_SNAKE_CASE__ : List[str] =gen_random_output()
SCREAMING_SNAKE_CASE__ : Tuple =gen_random_output()
np.testing.assert_equal(__lowercase , __lowercase )
self.assertGreater(np.abs(outa - outa ).sum() , 0 )
@require_torch
def __magic_name__ ( self : Optional[int] ) -> Optional[Any]:
import torch
def gen_random_output():
SCREAMING_SNAKE_CASE__ : Union[str, Any] =torch.nn.Linear(3 , 2 )
SCREAMING_SNAKE_CASE__ : Optional[Any] =torch.rand(1 , 3 )
return model(__lowercase ).detach().numpy()
with temp_seed(42 , set_pytorch=__lowercase ):
SCREAMING_SNAKE_CASE__ : Tuple =gen_random_output()
with temp_seed(42 , set_pytorch=__lowercase ):
SCREAMING_SNAKE_CASE__ : Union[str, Any] =gen_random_output()
SCREAMING_SNAKE_CASE__ : Union[str, Any] =gen_random_output()
np.testing.assert_equal(__lowercase , __lowercase )
self.assertGreater(np.abs(outa - outa ).sum() , 0 )
def __magic_name__ ( self : Any ) -> str:
def gen_random_output():
return np.random.rand(1 , 3 )
with temp_seed(42 ):
SCREAMING_SNAKE_CASE__ : List[Any] =gen_random_output()
with temp_seed(42 ):
SCREAMING_SNAKE_CASE__ : Union[str, Any] =gen_random_output()
SCREAMING_SNAKE_CASE__ : str =gen_random_output()
np.testing.assert_equal(__lowercase , __lowercase )
self.assertGreater(np.abs(outa - outa ).sum() , 0 )
@pytest.mark.parametrize('''input_data''', [{}] )
def _a( UpperCamelCase__ : Dict ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ : List[Any] =NestedDataStructure(UpperCamelCase__ ).data
assert output_data == input_data
@pytest.mark.parametrize(
'''data, expected_output''', [
({}, []),
([], []),
('''foo''', ['''foo''']),
(['''foo''', '''bar'''], ['''foo''', '''bar''']),
([['''foo''', '''bar''']], ['''foo''', '''bar''']),
([[['''foo'''], ['''bar''']]], ['''foo''', '''bar''']),
([[['''foo'''], '''bar''']], ['''foo''', '''bar''']),
({'''a''': 1, '''b''': 2}, [1, 2]),
({'''a''': [1, 2], '''b''': [3, 4]}, [1, 2, 3, 4]),
({'''a''': [[1, 2]], '''b''': [[3, 4]]}, [1, 2, 3, 4]),
({'''a''': [[1, 2]], '''b''': [3, 4]}, [1, 2, 3, 4]),
({'''a''': [[[1], [2]]], '''b''': [[[3], [4]]]}, [1, 2, 3, 4]),
({'''a''': [[[1], [2]]], '''b''': [[3, 4]]}, [1, 2, 3, 4]),
({'''a''': [[[1], [2]]], '''b''': [3, 4]}, [1, 2, 3, 4]),
({'''a''': [[[1], [2]]], '''b''': [3, [4]]}, [1, 2, 3, 4]),
({'''a''': {'''1''': 1}, '''b''': 2}, [1, 2]),
({'''a''': {'''1''': [1]}, '''b''': 2}, [1, 2]),
({'''a''': {'''1''': [1]}, '''b''': [2]}, [1, 2]),
], )
def _a( UpperCamelCase__ : Any, UpperCamelCase__ : int ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ : Union[str, Any] =NestedDataStructure(UpperCamelCase__ ).flatten()
assert output == expected_output
def _a( ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ : List[str] =A(x=1, y='''foobar''' )
SCREAMING_SNAKE_CASE__ : Any ={'''x''': 1, '''y''': '''foobar'''}
assert asdict(UpperCamelCase__ ) == expected_output
SCREAMING_SNAKE_CASE__ : List[Any] ={'''a''': {'''b''': A(x=1_0, y='''foo''' )}, '''c''': [A(x=2_0, y='''bar''' )]}
SCREAMING_SNAKE_CASE__ : Optional[int] ={'''a''': {'''b''': {'''x''': 1_0, '''y''': '''foo'''}}, '''c''': [{'''x''': 2_0, '''y''': '''bar'''}]}
assert asdict(UpperCamelCase__ ) == expected_output
with pytest.raises(UpperCamelCase__ ):
asdict([1, A(x=1_0, y='''foo''' )] )
def _a( UpperCamelCase__ : str ):
'''simple docstring'''
return text.split()
def _a( UpperCamelCase__ : Any ):
'''simple docstring'''
yield (time.time(), content)
time.sleep(2 )
yield (time.time(), content)
def _a( ):
'''simple docstring'''
with Pool(2 ) as pool:
SCREAMING_SNAKE_CASE__ : int =list(iflatmap_unordered(UpperCamelCase__, _split_text, kwargs_iterable=[{'''text''': '''hello there'''}] * 1_0 ) )
assert out.count('''hello''' ) == 1_0
assert out.count('''there''' ) == 1_0
assert len(UpperCamelCase__ ) == 2_0
# check multiprocess from pathos (uses dill for pickling)
with multiprocess.Pool(2 ) as pool:
SCREAMING_SNAKE_CASE__ : List[str] =list(iflatmap_unordered(UpperCamelCase__, _split_text, kwargs_iterable=[{'''text''': '''hello there'''}] * 1_0 ) )
assert out.count('''hello''' ) == 1_0
assert out.count('''there''' ) == 1_0
assert len(UpperCamelCase__ ) == 2_0
# check that we get items as fast as possible
with Pool(2 ) as pool:
SCREAMING_SNAKE_CASE__ : str =[]
for yield_time, content in iflatmap_unordered(
UpperCamelCase__, _aseconds_generator_of_aitems_with_timing, kwargs_iterable=[{'''content''': '''a'''}, {'''content''': '''b'''}] ):
            assert yield_time < time.time() + 0.1, "we should get each item directly after it was yielded"
out.append(UpperCamelCase__ )
assert out.count('''a''' ) == 2
assert out.count('''b''' ) == 2
assert len(UpperCamelCase__ ) == 4 | 152 |
'''simple docstring'''
import socket
def _a( ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ : Tuple =socket.socket(socket.AF_INET, socket.SOCK_STREAM )
SCREAMING_SNAKE_CASE__ : str =socket.gethostname()
SCREAMING_SNAKE_CASE__ : List[Any] =1_2_3_1_2
sock.connect((host, port) )
sock.send(B'''Hello server!''' )
with open('''Received_file''', '''wb''' ) as out_file:
print('''File opened''' )
print('''Receiving data...''' )
while True:
SCREAMING_SNAKE_CASE__ : List[str] =sock.recv(1_0_2_4 )
if not data:
break
out_file.write(UpperCamelCase__ )
print('''Successfully received the file''' )
sock.close()
print('''Connection closed''' )
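
# A matching server sketch (assumed counterpart, not part of this file; port 12312
# mirrors the client above):
#   server = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
#   server.bind((socket.gethostname(), 12312))
#   server.listen(1)
#   conn, _ = server.accept()
#   conn.recv(1024)                      # the b"Hello server!" greeting
#   with open("file_to_send", "rb") as f:
#       conn.sendfile(f)                 # stream the file back in chunks
#   conn.close()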
if __name__ == "__main__":
main() | 152 | 1 |
import os
import sys
import unittest
A : int = os.path.abspath(os.path.dirname(os.path.dirname(os.path.dirname(__file__))))
sys.path.append(os.path.join(git_repo_path, "utils"))
import check_dummies # noqa: E402
from check_dummies import create_dummy_files, create_dummy_object, find_backend, read_init # noqa: E402
# Align TRANSFORMERS_PATH in check_dummies with the current path
A : Tuple = os.path.join(git_repo_path, "src", "diffusers")
class lowerCamelCase (unittest.TestCase ):
"""simple docstring"""
def __A ( self : str ) -> Union[str, Any]:
SCREAMING_SNAKE_CASE_ = find_backend(" if not is_torch_available():" )
self.assertEqual(lowercase_ , "torch" )
# backend_with_underscore = find_backend(" if not is_tensorflow_text_available():")
# self.assertEqual(backend_with_underscore, "tensorflow_text")
SCREAMING_SNAKE_CASE_ = find_backend(" if not (is_torch_available() and is_transformers_available()):" )
self.assertEqual(lowercase_ , "torch_and_transformers" )
# double_backend_with_underscore = find_backend(
# " if not (is_sentencepiece_available() and is_tensorflow_text_available()):"
# )
# self.assertEqual(double_backend_with_underscore, "sentencepiece_and_tensorflow_text")
SCREAMING_SNAKE_CASE_ = find_backend(
" if not (is_torch_available() and is_transformers_available() and is_onnx_available()):" )
self.assertEqual(lowercase_ , "torch_and_transformers_and_onnx" )
def __A ( self : str ) -> Dict:
SCREAMING_SNAKE_CASE_ = read_init()
        # We don't assert on the exact list of keys to allow for smooth growth of backend-specific objects
self.assertIn("torch" , lowercase_ )
self.assertIn("torch_and_transformers" , lowercase_ )
self.assertIn("flax_and_transformers" , lowercase_ )
self.assertIn("torch_and_transformers_and_onnx" , lowercase_ )
# Likewise, we can't assert on the exact content of a key
self.assertIn("UNet2DModel" , objects["torch"] )
self.assertIn("FlaxUNet2DConditionModel" , objects["flax"] )
self.assertIn("StableDiffusionPipeline" , objects["torch_and_transformers"] )
self.assertIn("FlaxStableDiffusionPipeline" , objects["flax_and_transformers"] )
self.assertIn("LMSDiscreteScheduler" , objects["torch_and_scipy"] )
self.assertIn("OnnxStableDiffusionPipeline" , objects["torch_and_transformers_and_onnx"] )
def __A ( self : int ) -> int:
SCREAMING_SNAKE_CASE_ = create_dummy_object("CONSTANT" , "\'torch\'" )
self.assertEqual(lowercase_ , "\nCONSTANT = None\n" )
SCREAMING_SNAKE_CASE_ = create_dummy_object("function" , "\'torch\'" )
self.assertEqual(
lowercase_ , "\ndef function(*args, **kwargs):\n requires_backends(function, \'torch\')\n" )
SCREAMING_SNAKE_CASE_ = '''
class FakeClass(metaclass=DummyObject):
_backends = \'torch\'
def __init__(self, *args, **kwargs):
requires_backends(self, \'torch\')
@classmethod
def from_config(cls, *args, **kwargs):
requires_backends(cls, \'torch\')
@classmethod
def from_pretrained(cls, *args, **kwargs):
requires_backends(cls, \'torch\')
'''
SCREAMING_SNAKE_CASE_ = create_dummy_object("FakeClass" , "\'torch\'" )
self.assertEqual(lowercase_ , lowercase_ )
def __A ( self : Optional[Any] ) -> Dict:
SCREAMING_SNAKE_CASE_ = '''# This file is autogenerated by the command `make fix-copies`, do not edit.
from ..utils import DummyObject, requires_backends
CONSTANT = None
def function(*args, **kwargs):
requires_backends(function, ["torch"])
class FakeClass(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
@classmethod
def from_config(cls, *args, **kwargs):
requires_backends(cls, ["torch"])
@classmethod
def from_pretrained(cls, *args, **kwargs):
requires_backends(cls, ["torch"])
'''
SCREAMING_SNAKE_CASE_ = create_dummy_files({"torch": ["CONSTANT", "function", "FakeClass"]} )
self.assertEqual(dummy_files["torch"] , lowercase_ )
| 358 |
import shutil
import tempfile
import unittest
import numpy as np
import pytest
from transformers.testing_utils import require_vision
from transformers.utils import is_vision_available
if is_vision_available():
    from PIL import Image

    from transformers import AutoProcessor, Blip2Processor, BlipImageProcessor, GPT2Tokenizer, PreTrainedTokenizerFast
@require_vision
class Blip2ProcessorTest(unittest.TestCase):
    def setUp(self):
        self.tmpdirname = tempfile.mkdtemp()

        image_processor = BlipImageProcessor()
        tokenizer = GPT2Tokenizer.from_pretrained("hf-internal-testing/tiny-random-GPT2Model")

        processor = Blip2Processor(image_processor, tokenizer)

        processor.save_pretrained(self.tmpdirname)

    def get_tokenizer(self, **kwargs):
        return AutoProcessor.from_pretrained(self.tmpdirname, **kwargs).tokenizer

    def get_image_processor(self, **kwargs):
        return AutoProcessor.from_pretrained(self.tmpdirname, **kwargs).image_processor

    def tearDown(self):
        shutil.rmtree(self.tmpdirname)

    def prepare_image_inputs(self):
        image_inputs = [np.random.randint(255, size=(3, 30, 400), dtype=np.uint8)]
        image_inputs = [Image.fromarray(np.moveaxis(x, 0, -1)) for x in image_inputs]
        return image_inputs

    def test_save_load_pretrained_additional_features(self):
        processor = Blip2Processor(tokenizer=self.get_tokenizer(), image_processor=self.get_image_processor())
        processor.save_pretrained(self.tmpdirname)

        tokenizer_add_kwargs = self.get_tokenizer(bos_token="(BOS)", eos_token="(EOS)")
        image_processor_add_kwargs = self.get_image_processor(do_normalize=False, padding_value=1.0)

        processor = Blip2Processor.from_pretrained(
            self.tmpdirname, bos_token="(BOS)", eos_token="(EOS)", do_normalize=False, padding_value=1.0
        )

        self.assertEqual(processor.tokenizer.get_vocab(), tokenizer_add_kwargs.get_vocab())
        self.assertIsInstance(processor.tokenizer, PreTrainedTokenizerFast)

        self.assertEqual(processor.image_processor.to_json_string(), image_processor_add_kwargs.to_json_string())
        self.assertIsInstance(processor.image_processor, BlipImageProcessor)

    def test_image_processor(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()

        processor = Blip2Processor(tokenizer=tokenizer, image_processor=image_processor)

        image_input = self.prepare_image_inputs()

        input_feat_extract = image_processor(image_input, return_tensors="np")
        input_processor = processor(images=image_input, return_tensors="np")

        for key in input_feat_extract.keys():
            self.assertAlmostEqual(input_feat_extract[key].sum(), input_processor[key].sum(), delta=1e-2)

    def test_tokenizer(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()

        processor = Blip2Processor(tokenizer=tokenizer, image_processor=image_processor)

        input_str = "lower newer"

        encoded_processor = processor(text=input_str)
        encoded_tok = tokenizer(input_str, return_token_type_ids=False)

        for key in encoded_tok.keys():
            self.assertListEqual(encoded_tok[key], encoded_processor[key])

    def test_processor(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()

        processor = Blip2Processor(tokenizer=tokenizer, image_processor=image_processor)

        input_str = "lower newer"
        image_input = self.prepare_image_inputs()

        inputs = processor(text=input_str, images=image_input)

        self.assertListEqual(list(inputs.keys()), ["pixel_values", "input_ids", "attention_mask"])

        # test if it raises when no input is passed
        with pytest.raises(ValueError):
            processor()

    def test_tokenizer_decode(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()

        processor = Blip2Processor(tokenizer=tokenizer, image_processor=image_processor)

        predicted_ids = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]

        decoded_processor = processor.batch_decode(predicted_ids)
        decoded_tok = tokenizer.batch_decode(predicted_ids)

        self.assertListEqual(decoded_tok, decoded_processor)

    def test_model_input_names(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()

        processor = Blip2Processor(tokenizer=tokenizer, image_processor=image_processor)

        input_str = "lower newer"
        image_input = self.prepare_image_inputs()

        inputs = processor(text=input_str, images=image_input)

        # For now the processor supports only ['pixel_values', 'input_ids', 'attention_mask']
        self.assertListEqual(list(inputs.keys()), ["pixel_values", "input_ids", "attention_mask"])
| 305 | 0 |
'''simple docstring'''
import unittest
from datasets import load_dataset
from transformers.pipelines import pipeline
from transformers.testing_utils import is_pipeline_test, nested_simplify, require_torch, slow
@is_pipeline_test
@require_torch
class ZeroShotAudioClassificationPipelineTests(unittest.TestCase):
@require_torch
    def test_small_model_pt(self):
        audio_classifier = pipeline(
            task="zero-shot-audio-classification", model="hf-internal-testing/tiny-clap-htsat-unfused"
        )
        dataset = load_dataset("ashraq/esc50")
        audio = dataset["train"]["audio"][-1]["array"]
        output = audio_classifier(audio, candidate_labels=["Sound of a dog", "Sound of vaccum cleaner"])
        self.assertEqual(
            nested_simplify(output),
            [{"score": 0.501, "label": "Sound of a dog"}, {"score": 0.499, "label": "Sound of vaccum cleaner"}],
        )
    @unittest.skip("No models are available in TF")
    def test_small_model_tf(self):
        pass
    @slow
    @require_torch
    def test_large_model_pt(self):
        audio_classifier = pipeline(
            task="zero-shot-audio-classification",
            model="laion/clap-htsat-unfused",
        )
        # This is an audio of a dog
        dataset = load_dataset("ashraq/esc50")
        audio = dataset["train"]["audio"][-1]["array"]
        output = audio_classifier(audio, candidate_labels=["Sound of a dog", "Sound of vaccum cleaner"])

        self.assertEqual(
            nested_simplify(output),
            [
                {"score": 0.999, "label": "Sound of a dog"},
                {"score": 0.001, "label": "Sound of vaccum cleaner"},
            ],
        )

        output = audio_classifier([audio] * 5, candidate_labels=["Sound of a dog", "Sound of vaccum cleaner"])
        self.assertEqual(
            nested_simplify(output),
            [
                [
                    {"score": 0.999, "label": "Sound of a dog"},
                    {"score": 0.001, "label": "Sound of vaccum cleaner"},
                ],
            ]
            * 5,
        )
        output = audio_classifier(
            [audio] * 5, candidate_labels=["Sound of a dog", "Sound of vaccum cleaner"], batch_size=5
        )
        self.assertEqual(
            nested_simplify(output),
            [
                [
                    {"score": 0.999, "label": "Sound of a dog"},
                    {"score": 0.001, "label": "Sound of vaccum cleaner"},
                ],
            ]
            * 5,
        )
    @unittest.skip("No models are available in TF")
    def test_large_model_tf(self):
        pass
| 4 |
'''simple docstring'''
import json
from typing import TYPE_CHECKING, List, Optional, Tuple
from tokenizers import pre_tokenizers, processors
from ...tokenization_utils_base import AddedToken, BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_blenderbot import BlenderbotTokenizer
if TYPE_CHECKING:
from transformers.pipelines.conversational import Conversation
logger = logging.get_logger(__name__)


VOCAB_FILES_NAMES = {
    "vocab_file": "vocab.json",
    "merges_file": "merges.txt",
    "tokenizer_config_file": "tokenizer_config.json",
}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {"facebook/blenderbot-3B": "https://huggingface.co/facebook/blenderbot-3B/resolve/main/vocab.json"},
    "merges_file": {"facebook/blenderbot-3B": "https://huggingface.co/facebook/blenderbot-3B/resolve/main/merges.txt"},
    "tokenizer_config_file": {
        "facebook/blenderbot-3B": "https://huggingface.co/facebook/blenderbot-3B/resolve/main/tokenizer_config.json"
    },
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {"facebook/blenderbot-3B": 128}
class BlenderbotTokenizerFast(PreTrainedTokenizerFast):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]
    slow_tokenizer_class = BlenderbotTokenizer

    def __init__(
        self,
        vocab_file=None,
        merges_file=None,
        tokenizer_file=None,
        errors="replace",
        bos_token="<s>",
        eos_token="</s>",
        sep_token="</s>",
        cls_token="<s>",
        unk_token="<unk>",
        pad_token="<pad>",
        mask_token="<mask>",
        add_prefix_space=False,
        trim_offsets=True,
        **kwargs,
    ):
        super().__init__(
            vocab_file,
            merges_file,
            tokenizer_file=tokenizer_file,
            errors=errors,
            bos_token=bos_token,
            eos_token=eos_token,
            sep_token=sep_token,
            cls_token=cls_token,
            unk_token=unk_token,
            pad_token=pad_token,
            mask_token=mask_token,
            add_prefix_space=add_prefix_space,
            trim_offsets=trim_offsets,
            **kwargs,
        )

        pre_tok_state = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__())
        if pre_tok_state.get("add_prefix_space", add_prefix_space) != add_prefix_space:
            pre_tok_class = getattr(pre_tokenizers, pre_tok_state.pop("type"))
            pre_tok_state["add_prefix_space"] = add_prefix_space
            self.backend_tokenizer.pre_tokenizer = pre_tok_class(**pre_tok_state)

        self.add_prefix_space = add_prefix_space

        tokenizer_component = "post_processor"
        tokenizer_component_instance = getattr(self.backend_tokenizer, tokenizer_component, None)
        if tokenizer_component_instance:
            state = json.loads(tokenizer_component_instance.__getstate__())

            # The lists 'sep' and 'cls' must be cased in tuples for the object `post_processor_class`
            if "sep" in state:
                state["sep"] = tuple(state["sep"])
            if "cls" in state:
                state["cls"] = tuple(state["cls"])

            changes_to_apply = False

            if state.get("add_prefix_space", add_prefix_space) != add_prefix_space:
                state["add_prefix_space"] = add_prefix_space
                changes_to_apply = True

            if state.get("trim_offsets", trim_offsets) != trim_offsets:
                state["trim_offsets"] = trim_offsets
                changes_to_apply = True

            if changes_to_apply:
                component_class = getattr(processors, state.pop("type"))
                new_value = component_class(**state)
                setattr(self.backend_tokenizer, tokenizer_component, new_value)

    @property
    # Copied from transformers.models.roberta.tokenization_roberta_fast.RobertaTokenizerFast.mask_token with Roberta->Blenderbot, RoBERTa->Blenderbot
    def mask_token(self) -> str:
        if self._mask_token is None:
            if self.verbose:
                logger.error("Using mask_token, but it is not set yet.")
            return None
        return str(self._mask_token)

    @mask_token.setter
    def mask_token(self, value):
        # Mask token behaves like a normal word, i.e. include the space before it
        value = AddedToken(value, lstrip=True, rstrip=False) if isinstance(value, str) else value
        self._mask_token = value

    def _batch_encode_plus(self, *args, **kwargs) -> BatchEncoding:
        is_split_into_words = kwargs.get("is_split_into_words", False)
        assert self.add_prefix_space or not is_split_into_words, (
            f"You need to instantiate {self.__class__.__name__} with add_prefix_space=True "
            "to use it with pretokenized inputs."
        )
        return super()._batch_encode_plus(*args, **kwargs)

    def _encode_plus(self, *args, **kwargs) -> BatchEncoding:
        is_split_into_words = kwargs.get("is_split_into_words", False)
        assert self.add_prefix_space or not is_split_into_words, (
            f"You need to instantiate {self.__class__.__name__} with add_prefix_space=True "
            "to use it with pretokenized inputs."
        )
        return super()._encode_plus(*args, **kwargs)

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        files = self._tokenizer.model.save(save_directory, name=filename_prefix)
        return tuple(files)

    def create_token_type_ids_from_sequences(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]

        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep) * [0]

    def build_inputs_with_special_tokens(self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None):
        return token_ids_0 + [self.eos_token_id]

    def _build_conversation_input_ids(self, conversation: "Conversation") -> List[int]:
        inputs = []
        for is_user, text in conversation.iter_texts():
            if is_user:
                # We need to space prefix as it's being done within blenderbot
                inputs.append(" " + text)
            else:
                # Generated responses should contain them already.
                inputs.append(text)

        full_string = " ".join(inputs)
        input_ids = self.encode(full_string)
        if len(input_ids) > self.model_max_length:
            input_ids = input_ids[-self.model_max_length :]
            logger.warning(f"Trimmed input from conversation as it was longer than {self.model_max_length} tokens.")
        return input_ids
| 4 | 1 |
import unittest
from dataclasses import dataclass
import pytest
from accelerate.commands.config.config_args import SageMakerConfig
from accelerate.utils import ComputeEnvironment
from accelerate.utils.launch import _convert_nargs_to_dict
@dataclass
class MockLaunchConfig(SageMakerConfig):
    compute_environment = ComputeEnvironment.AMAZON_SAGEMAKER
    fp16 = True
    ec2_instance_type = "ml.p3.2xlarge"
    iam_role_name = "accelerate_sagemaker_execution_role"
    profile = "hf-sm"
    region = "us-east-1"
    num_machines = 1
    base_job_name = "accelerate-sagemaker-1"
    pytorch_version = "1.6"
    transformers_version = "4.4"
    training_script = "train.py"
    success_training_script_args = [
        "--model_name_or_path",
        "bert",
        "--do_train",
        "False",
        "--epochs",
        "3",
        "--learning_rate",
        "5e-5",
        "--max_steps",
        "50.5",
    ]
    fail_training_script_args = [
        "--model_name_or_path",
        "bert",
        "--do_train",
        "--do_test",
        "False",
        "--do_predict",
        "--epochs",
        "3",
        "--learning_rate",
        "5e-5",
        "--max_steps",
        "50.5",
    ]
class SageMakerLaunch(unittest.TestCase):
    def test_args_convert(self):
        # If no defaults are changed, `to_kwargs` returns an empty dict.
        converted_args = _convert_nargs_to_dict(MockLaunchConfig.success_training_script_args)
        assert isinstance(converted_args["model_name_or_path"], str)
        assert isinstance(converted_args["do_train"], bool)
        assert isinstance(converted_args["epochs"], int)
        assert isinstance(converted_args["learning_rate"], float)
        assert isinstance(converted_args["max_steps"], float)

        with pytest.raises(ValueError):
            _convert_nargs_to_dict(MockLaunchConfig.fail_training_script_args)
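# Illustration only (not part of the original test): based on the assertions above,
# `_convert_nargs_to_dict` is expected to map the flag list to typed values roughly like
#
#     _convert_nargs_to_dict(MockLaunchConfig.success_training_script_args)
#     # -> {"model_name_or_path": "bert", "do_train": False, "epochs": 3,
#     #     "learning_rate": 5e-05, "max_steps": 50.5}
#
# The exact dict is an assumption inferred from the isinstance checks. The failing list mixes
# bare flags ("--do_train", "--do_test", "False") that cannot be paired, hence the ValueError.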
| 363 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
    "configuration_time_series_transformer": [
        "TIME_SERIES_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP",
        "TimeSeriesTransformerConfig",
    ],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_time_series_transformer"] = [
        "TIME_SERIES_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST",
        "TimeSeriesTransformerForPrediction",
        "TimeSeriesTransformerModel",
        "TimeSeriesTransformerPreTrainedModel",
    ]
if TYPE_CHECKING:
from .configuration_time_series_transformer import (
TIME_SERIES_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP,
TimeSeriesTransformerConfig,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_time_series_transformer import (
TIME_SERIES_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
TimeSeriesTransformerForPrediction,
TimeSeriesTransformerModel,
TimeSeriesTransformerPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
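# Quick sketch (not part of the original module) of what the `_LazyModule` pattern buys you:
# importing the package is cheap because heavy submodules load only on first attribute access.
#
#     import transformers.models.time_series_transformer as tst  # fast, nothing heavy imported yet
#     cfg = tst.TimeSeriesTransformerConfig()  # triggers the real import of the configuration module
#
# The module path above is an assumption based on this file's contents; adjust it to wherever
# this __init__.py actually lives.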
| 286 | 0 |
import inspect
import unittest
from transformers import ViTMSNConfig
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import ViTMSNForImageClassification, ViTMSNModel
from transformers.models.vit_msn.modeling_vit_msn import VIT_MSN_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import ViTImageProcessor
class ViTMSNModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        image_size=30,
        patch_size=2,
        num_channels=3,
        is_training=True,
        use_labels=True,
        hidden_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        type_sequence_label_size=10,
        initializer_range=0.02,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.scope = scope

        # in ViT MSN, the seq length equals the number of patches + 1 (we add 1 for the [CLS] token)
        num_patches = (image_size // patch_size) ** 2
        self.seq_length = num_patches + 1

    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])

        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.type_sequence_label_size)

        config = self.get_config()

        return config, pixel_values, labels

    def get_config(self):
        return ViTMSNConfig(
            image_size=self.image_size,
            patch_size=self.patch_size,
            num_channels=self.num_channels,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            initializer_range=self.initializer_range,
        )

    def create_and_check_model(self, config, pixel_values, labels):
        model = ViTMSNModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_for_image_classification(self, config, pixel_values, labels):
        config.num_labels = self.type_sequence_label_size
        model = ViTMSNForImageClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values, labels=labels)
        print(f"Pixel and labels shape: {pixel_values.shape}, {labels.shape}")
        print(f"Labels: {labels}")
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.type_sequence_label_size))

        # test greyscale images
        config.num_channels = 1
        model = ViTMSNForImageClassification(config)
        model.to(torch_device)
        model.eval()
        pixel_values = floats_tensor([self.batch_size, 1, self.image_size, self.image_size])
        result = model(pixel_values)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.type_sequence_label_size))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict
@require_torch
class ViTMSNModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (ViTMSNModel, ViTMSNForImageClassification) if is_torch_available() else ()
    pipeline_model_mapping = (
        {"feature-extraction": ViTMSNModel, "image-classification": ViTMSNForImageClassification}
        if is_torch_available()
        else {}
    )

    test_pruning = False
    test_torchscript = False
    test_resize_embeddings = False
    test_head_masking = False

    def setUp(self):
        self.model_tester = ViTMSNModelTester(self)
        self.config_tester = ConfigTester(self, config_class=ViTMSNConfig, has_text_modality=False, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    @unittest.skip(reason="ViTMSN does not use inputs_embeds")
    def test_inputs_embeds(self):
        pass

    def test_model_common_attributes(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            self.assertIsInstance(model.get_input_embeddings(), (nn.Module))
            x = model.get_output_embeddings()
            self.assertTrue(x is None or isinstance(x, nn.Linear))

    def test_forward_signature(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]

            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_for_image_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in VIT_MSN_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = ViTMSNModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
# We will verify our results on an image of cute cats
def prepare_img():
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image


@require_torch
@require_vision
class ViTMSNModelIntegrationTest(unittest.TestCase):
    @cached_property
    def default_image_processor(self):
        return ViTImageProcessor.from_pretrained("facebook/vit-msn-small") if is_vision_available() else None

    @slow
    def test_inference_image_classification_head(self):
        torch.manual_seed(2)
        model = ViTMSNForImageClassification.from_pretrained("facebook/vit-msn-small").to(torch_device)
        image_processor = self.default_image_processor

        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="pt").to(torch_device)

        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)

        # verify the logits
        expected_shape = torch.Size((1, 1000))
        self.assertEqual(outputs.logits.shape, expected_shape)

        expected_slice = torch.tensor([-0.0803, -0.4454, -0.2375]).to(torch_device)

        self.assertTrue(torch.allclose(outputs.logits[0, :3], expected_slice, atol=1e-4))
| 5 |
import itertools
import string
from collections.abc import Generator, Iterable
def chunker(seq: Iterable[str], size: int) -> Generator[tuple[str, ...], None, None]:
    it = iter(seq)
    while True:
        chunk = tuple(itertools.islice(it, size))
        if not chunk:
            return
        yield chunk
def prepare_input(dirty: str) -> str:
    # Prepare the plaintext by up-casing it and separating repeated letters with X's
    dirty = "".join([c.upper() for c in dirty if c in string.ascii_letters])
    clean = ""

    if len(dirty) < 2:
        return dirty

    for i in range(len(dirty) - 1):
        clean += dirty[i]

        if dirty[i] == dirty[i + 1]:
            clean += "X"

    clean += dirty[-1]

    if len(clean) & 1:
        clean += "X"

    return clean
def generate_table(key: str) -> list[str]:
    # I and J are used interchangeably to allow
    # us to use a 5x5 table (25 letters)
    alphabet = "ABCDEFGHIKLMNOPQRSTUVWXYZ"
    # we're using a list instead of a '2d' array because it makes the math
    # for setting up the table and doing the actual encoding/decoding simpler
    table = []

    # copy key chars into the table if they are in `alphabet` ignoring duplicates
    for char in key.upper():
        if char not in table and char in alphabet:
            table.append(char)

    # fill the rest of the table in with the remaining alphabet chars
    for char in alphabet:
        if char not in table:
            table.append(char)

    return table
def encode(plaintext: str, key: str) -> str:
    table = generate_table(key)
    plaintext = prepare_input(plaintext)
    ciphertext = ""

    # https://en.wikipedia.org/wiki/Playfair_cipher#Description
    for char1, char2 in chunker(plaintext, 2):
        row1, col1 = divmod(table.index(char1), 5)
        row2, col2 = divmod(table.index(char2), 5)

        if row1 == row2:
            ciphertext += table[row1 * 5 + (col1 + 1) % 5]
            ciphertext += table[row2 * 5 + (col2 + 1) % 5]
        elif col1 == col2:
            ciphertext += table[((row1 + 1) % 5) * 5 + col1]
            ciphertext += table[((row2 + 1) % 5) * 5 + col2]
        else:  # rectangle
            ciphertext += table[row1 * 5 + col2]
            ciphertext += table[row2 * 5 + col1]

    return ciphertext
def decode(ciphertext: str, key: str) -> str:
    table = generate_table(key)
    plaintext = ""

    # https://en.wikipedia.org/wiki/Playfair_cipher#Description
    for char1, char2 in chunker(ciphertext, 2):
        row1, col1 = divmod(table.index(char1), 5)
        row2, col2 = divmod(table.index(char2), 5)

        if row1 == row2:
            plaintext += table[row1 * 5 + (col1 - 1) % 5]
            plaintext += table[row2 * 5 + (col2 - 1) % 5]
        elif col1 == col2:
            plaintext += table[((row1 - 1) % 5) * 5 + col1]
            plaintext += table[((row2 - 1) % 5) * 5 + col2]
        else:  # rectangle
            plaintext += table[row1 * 5 + col2]
            plaintext += table[row2 * 5 + col1]

    return plaintext
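# Usage sketch (added for illustration; not part of the original module), using the classic
# Wikipedia key/plaintext pair. Note that decode() undoes the digraph substitution but keeps
# the inserted X's and the upper-casing done by prepare_input().
if __name__ == "__main__":
    key = "playfair example"
    secret = encode("Hide the gold in the tree stump", key)
    print(secret)
    print(decode(secret, key))  # prints "HIDETHEGOLDINTHETREXESTUMP"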
| 182 | 0 |
def base16_encode(data: bytes) -> str:
    # Turn the data into a list of integers (where each integer is a byte),
    # then get the hex digits for each byte and combine them.
    return "".join([hex(byte)[2:].zfill(2).upper() for byte in list(data)])


def base16_decode(data: str) -> bytes:
    # Check data validity, following RFC3548
    # https://www.ietf.org/rfc/rfc3548.txt
    if (len(data) % 2) != 0:
        raise ValueError(
            "Base16 encoded data is invalid:\nData does not have an even number of hex digits."
        )
    # Check the character set - the standard base16 alphabet
    # is uppercase according to RFC3548 section 6
    if not set(data) <= set("0123456789ABCDEF"):
        raise ValueError(
            "Base16 encoded data is invalid:\nData is not uppercase hex or it contains invalid characters."
        )
    # For every two hexadecimal digits (= a byte), turn it into an integer.
    # Then, string the result together into bytes, and return it.
    return bytes(int(data[i] + data[i + 1], 16) for i in range(0, len(data), 2))
if __name__ == "__main__":
import doctest
doctest.testmod()
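    # Round-trip sketch (added for illustration, not in the original module):
    example = base16_encode(b"Hello World!")
    print(example)  # 48656C6C6F20576F726C6421
    assert base16_decode(example) == b"Hello World!"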
| 70 |
from typing import Optional
from torch import nn
from .transformer_ad import TransformeraDModel, TransformeraDModelOutput
class DualTransformer2DModel(nn.Module):
    """simple docstring"""

    def __init__(
        self,
        num_attention_heads: int = 16,
        attention_head_dim: int = 88,
        in_channels: Optional[int] = None,
        num_layers: int = 1,
        dropout: float = 0.0,
        norm_num_groups: int = 32,
        cross_attention_dim: Optional[int] = None,
        attention_bias: bool = False,
        sample_size: Optional[int] = None,
        num_vector_embeds: Optional[int] = None,
        activation_fn: str = "geglu",
        num_embeds_ada_norm: Optional[int] = None,
    ):
        super().__init__()
        self.transformers = nn.ModuleList(
            [
                TransformeraDModel(
                    num_attention_heads=num_attention_heads,
                    attention_head_dim=attention_head_dim,
                    in_channels=in_channels,
                    num_layers=num_layers,
                    dropout=dropout,
                    norm_num_groups=norm_num_groups,
                    cross_attention_dim=cross_attention_dim,
                    attention_bias=attention_bias,
                    sample_size=sample_size,
                    num_vector_embeds=num_vector_embeds,
                    activation_fn=activation_fn,
                    num_embeds_ada_norm=num_embeds_ada_norm,
                )
                for _ in range(2)
            ]
        )

        # Variables that can be set by a pipeline:

        # The ratio of transformer1 to transformer2's output states to be combined during inference
        self.mix_ratio = 0.5

        # The shape of `encoder_hidden_states` is expected to be
        # `(batch_size, condition_lengths[0]+condition_lengths[1], num_features)`
        self.condition_lengths = [77, 257]

        # Which transformer to use to encode which condition.
        # E.g. `(1, 0)` means that we'll use `transformers[1](conditions[0])` and `transformers[0](conditions[1])`
        self.transformer_index_for_condition = [1, 0]

    def forward(
        self,
        hidden_states,
        encoder_hidden_states,
        timestep=None,
        attention_mask=None,
        cross_attention_kwargs=None,
        return_dict: bool = True,
    ):
        input_states = hidden_states

        encoded_states = []
        tokens_start = 0
        # attention_mask is not used yet
        for i in range(2):
            # for each of the two transformers, pass the corresponding condition tokens
            condition_state = encoder_hidden_states[:, tokens_start : tokens_start + self.condition_lengths[i]]
            transformer_index = self.transformer_index_for_condition[i]
            encoded_state = self.transformers[transformer_index](
                input_states,
                encoder_hidden_states=condition_state,
                timestep=timestep,
                cross_attention_kwargs=cross_attention_kwargs,
                return_dict=False,
            )[0]
            encoded_states.append(encoded_state - input_states)
            tokens_start += self.condition_lengths[i]

        output_states = encoded_states[0] * self.mix_ratio + encoded_states[1] * (1 - self.mix_ratio)
        output_states = output_states + input_states

        if not return_dict:
            return (output_states,)

        return TransformeraDModelOutput(sample=output_states)
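# Illustration only (not from the original file): the forward pass above mixes the two
# transformers' *residuals* (encoded_state - input_states) as a convex combination and adds
# the input back. The arithmetic, with made-up 1-D stand-ins:
#
#     import torch
#     input_states = torch.full((4,), 2.0)
#     residual_text, residual_image = torch.ones(4), torch.zeros(4)
#     mix_ratio = 0.5
#     output = residual_text * mix_ratio + residual_image * (1 - mix_ratio) + input_states
#
# mix_ratio=1.0 keeps only the first condition's contribution; 0.0 keeps only the second.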
| 70 | 1 |
'''simple docstring'''
from __future__ import annotations
from math import pi
from typing import Protocol
import matplotlib.pyplot as plt
import numpy as np
class FilterType(Protocol):
    def process(self, sample: float) -> float:
        # Calculate y[n]
        return 0.0
def get_bounds(fft_results: np.ndarray, samplerate: int) -> tuple[int | float, int | float]:
    lowest = min([-20, np.min(fft_results[1 : samplerate // 2 - 1])])
    highest = max([20, np.max(fft_results[1 : samplerate // 2 - 1])])
    return lowest, highest
def show_frequency_response(filter_type: FilterType, samplerate: int) -> None:
    size = 512
    inputs = [1] + [0] * (size - 1)
    outputs = [filter_type.process(item) for item in inputs]

    filler = [0] * (samplerate - size)  # zero-padding
    outputs += filler
    fft_out = np.abs(np.fft.fft(outputs))
    fft_db = 20 * np.log10(fft_out)

    # Frequencies on log scale from 24 to nyquist frequency
    plt.xlim(24, samplerate / 2 - 1)
    plt.xlabel("Frequency (Hz)")
    plt.xscale("log")

    # Display within reasonable bounds
    bounds = get_bounds(fft_db, samplerate)
    plt.ylim(max([-80, bounds[0]]), min([80, bounds[1]]))
    plt.ylabel("Gain (dB)")

    plt.plot(fft_db)
    plt.show()
def show_phase_response(filter_type: FilterType, samplerate: int) -> None:
    size = 512
    inputs = [1] + [0] * (size - 1)
    outputs = [filter_type.process(item) for item in inputs]

    filler = [0] * (samplerate - size)  # zero-padding
    outputs += filler
    phase = np.angle(np.fft.fft(outputs))

    # Frequencies on log scale from 24 to nyquist frequency
    plt.xlim(24, samplerate / 2 - 1)
    plt.xlabel("Frequency (Hz)")
    plt.xscale("log")

    plt.ylim(-2 * pi, 2 * pi)
    plt.ylabel("Phase shift (Radians)")
    plt.plot(np.unwrap(phase, -2 * pi))
    plt.show()
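# A minimal concrete filter (added for illustration; the original module only defines the
# protocol). Any object with a `process(sample) -> float` method works, e.g. a one-pole
# low-pass IIR: y[n] = a * x[n] + (1 - a) * y[n - 1].
class OnePoleLowpass:
    def __init__(self, alpha: float = 0.1) -> None:
        self.alpha = alpha
        self.prev_output = 0.0

    def process(self, sample: float) -> float:
        self.prev_output = self.alpha * sample + (1 - self.alpha) * self.prev_output
        return self.prev_output


if __name__ == "__main__":
    # Feed the filter's impulse response through the plots defined above.
    show_frequency_response(OnePoleLowpass(0.1), 48000)
    show_phase_response(OnePoleLowpass(0.1), 48000)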
| 31 |
import os
from datetime import datetime as dt
from github import Github
A__: int = [
'''good first issue''',
'''good second issue''',
'''good difficult issue''',
'''enhancement''',
'''new pipeline/model''',
'''new scheduler''',
'''wip''',
]
def main():
    g = Github(os.environ["GITHUB_TOKEN"])
    repo = g.get_repo("huggingface/diffusers")
    open_issues = repo.get_issues(state="open")

    for issue in open_issues:
        comments = sorted(issue.get_comments(), key=lambda i: i.created_at, reverse=True)
        last_comment = comments[0] if len(comments) > 0 else None
if (
last_comment is not None
and last_comment.user.login == "github-actions[bot]"
and (dt.utcnow() - issue.updated_at).days > 7
and (dt.utcnow() - issue.created_at).days >= 30
and not any(label.name.lower() in LABELS_TO_EXEMPT for label in issue.get_labels())
):
# Closes the issue after 7 days of inactivity since the Stalebot notification.
issue.edit(state="closed")
elif (
"stale" in issue.get_labels()
and last_comment is not None
and last_comment.user.login != "github-actions[bot]"
):
# Opens the issue if someone other than Stalebot commented.
issue.edit(state="open")
issue.remove_from_labels("stale")
elif (
(dt.utcnow() - issue.updated_at).days > 23
and (dt.utcnow() - issue.created_at).days >= 30
and not any(label.name.lower() in LABELS_TO_EXEMPT for label in issue.get_labels())
):
# Post a Stalebot notification after 23 days of inactivity.
issue.create_comment(
"This issue has been automatically marked as stale because it has not had "
"recent activity. If you think this still needs to be addressed "
"please comment on this thread.\n\nPlease note that issues that do not follow the "
"[contributing guidelines](https://github.com/huggingface/diffusers/blob/main/CONTRIBUTING.md) "
"are likely to be ignored.")
issue.add_to_labels("stale")
if __name__ == "__main__":
main()
| 149 | 0 |
'''simple docstring'''
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "sentencepiece.bpe.model"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "moussaKam/mbarthez": "https://huggingface.co/moussaKam/mbarthez/resolve/main/sentencepiece.bpe.model",
        "moussaKam/barthez": "https://huggingface.co/moussaKam/barthez/resolve/main/sentencepiece.bpe.model",
        "moussaKam/barthez-orangesum-title": (
            "https://huggingface.co/moussaKam/barthez-orangesum-title/resolve/main/sentencepiece.bpe.model"
        ),
    },
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "moussaKam/mbarthez": 1024,
    "moussaKam/barthez": 1024,
    "moussaKam/barthez-orangesum-title": 1024,
}

SPIECE_UNDERLINE = "▁"
class BarthezTokenizer(PreTrainedTokenizer):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]

    def __init__(
        self,
        vocab_file,
        bos_token="<s>",
        eos_token="</s>",
        sep_token="</s>",
        cls_token="<s>",
        unk_token="<unk>",
        pad_token="<pad>",
        mask_token="<mask>",
        sp_model_kwargs: Optional[Dict[str, Any]] = None,
        **kwargs,
    ) -> None:
        # Mask token behave like a normal word, i.e. include the space before it
        mask_token = AddedToken(mask_token, lstrip=True, rstrip=False) if isinstance(mask_token, str) else mask_token

        self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs

        super().__init__(
            bos_token=bos_token,
            eos_token=eos_token,
            unk_token=unk_token,
            sep_token=sep_token,
            cls_token=cls_token,
            pad_token=pad_token,
            mask_token=mask_token,
            sp_model_kwargs=self.sp_model_kwargs,
            **kwargs,
        )

        self.vocab_file = vocab_file
        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(str(vocab_file))

        self.fairseq_tokens_to_ids = {"<s>": 0, "<pad>": 1, "</s>": 2, "<unk>": 3}

        self.fairseq_tokens_to_ids["<mask>"] = len(self.sp_model) - 1
        self.fairseq_ids_to_tokens = {v: k for k, v in self.fairseq_tokens_to_ids.items()}

    def build_inputs_with_special_tokens(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        if token_ids_1 is None:
            return [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
        cls = [self.cls_token_id]
        sep = [self.sep_token_id]
        return cls + token_ids_0 + sep + sep + token_ids_1 + sep

    def get_special_tokens_mask(
        self,
        token_ids_0: List[int],
        token_ids_1: Optional[List[int]] = None,
        already_has_special_tokens: bool = False,
    ) -> List[int]:
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True
            )

        if token_ids_1 is None:
            return [1] + ([0] * len(token_ids_0)) + [1]
        return [1] + ([0] * len(token_ids_0)) + [1, 1] + ([0] * len(token_ids_1)) + [1]

    def create_token_type_ids_from_sequences(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]

        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep) * [0]

    @property
    def vocab_size(self):
        return len(self.sp_model)

    def get_vocab(self):
        vocab = {self.convert_ids_to_tokens(i): i for i in range(self.vocab_size)}
        vocab.update(self.added_tokens_encoder)
        return vocab

    def _tokenize(self, text: str) -> List[str]:
        return self.sp_model.encode(text, out_type=str)

    def _convert_token_to_id(self, token):
        if token in self.fairseq_tokens_to_ids:
            return self.fairseq_tokens_to_ids[token]
        spm_id = self.sp_model.PieceToId(token)

        return spm_id if spm_id else self.unk_token_id

    def _convert_id_to_token(self, index):
        if index in self.fairseq_ids_to_tokens:
            return self.fairseq_ids_to_tokens[index]
        return self.sp_model.IdToPiece(index)

    def convert_tokens_to_string(self, tokens):
        current_sub_tokens = []
        out_string = ""
        prev_is_special = False
        for token in tokens:
            # make sure that special tokens are not decoded using sentencepiece model
            if token in self.all_special_tokens:
                if not prev_is_special:
                    out_string += " "
                out_string += self.sp_model.decode(current_sub_tokens) + token
                prev_is_special = True
                current_sub_tokens = []
            else:
                current_sub_tokens.append(token)
                prev_is_special = False
        out_string += self.sp_model.decode(current_sub_tokens)
        return out_string.strip()

    def __getstate__(self):
        state = self.__dict__.copy()
        state["sp_model"] = None
        return state

    def __setstate__(self, d):
        self.__dict__ = d

        # for backward compatibility
        if not hasattr(self, "sp_model_kwargs"):
            self.sp_model_kwargs = {}

        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(self.vocab_file)

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )

        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file) and os.path.isfile(self.vocab_file):
            copyfile(self.vocab_file, out_vocab_file)
        elif not os.path.isfile(self.vocab_file):
            with open(out_vocab_file, "wb") as fi:
                content_spiece_model = self.sp_model.serialized_model_proto()
                fi.write(content_spiece_model)

        return (out_vocab_file,)
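# Usage sketch (not part of the tokenizer module itself); the checkpoint name comes from the
# pretrained map above. Requires the `sentencepiece` package and network access on first call.
#
#     tokenizer = BarthezTokenizer.from_pretrained("moussaKam/barthez")
#     encoded = tokenizer("Le camembert est délicieux.")
#     print(encoded["input_ids"])  # ids with <s> ... </s> added
#     print(tokenizer.convert_ids_to_tokens(encoded["input_ids"]))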
| 350 |
'''simple docstring'''
import importlib.util
import os
import platform
from argparse import ArgumentParser
import huggingface_hub
from .. import __version__ as version
from ..utils import (
is_accelerate_available,
is_flax_available,
is_safetensors_available,
is_tf_available,
is_torch_available,
)
from . import BaseTransformersCLICommand
def info_command_factory(_):
    return EnvironmentCommand()


def download_command_factory(args):
    return EnvironmentCommand(args.accelerate_config_file)


class EnvironmentCommand(BaseTransformersCLICommand):
    @staticmethod
    def register_subcommand(parser: ArgumentParser):
        download_parser = parser.add_parser("env")
        download_parser.set_defaults(func=info_command_factory)
        download_parser.add_argument(
            "--accelerate-config_file",
            default=None,
            help="The accelerate config file to use for the default values in the launching script.",
        )
        download_parser.set_defaults(func=download_command_factory)

    def __init__(self, accelerate_config_file=None, *args) -> None:
        self._accelerate_config_file = accelerate_config_file
    def run(self):
        safetensors_version = "not installed"
        if is_safetensors_available():
            import safetensors

            safetensors_version = safetensors.__version__
        elif importlib.util.find_spec("safetensors") is not None:
            import safetensors

            safetensors_version = f"{safetensors.__version__} but is ignored because of PyTorch version too old."

        accelerate_version = "not installed"
        accelerate_config = accelerate_config_str = "not found"
        if is_accelerate_available():
            import accelerate
            from accelerate.commands.config import default_config_file, load_config_from_file

            accelerate_version = accelerate.__version__
            # Get the default from the config file.
            if self._accelerate_config_file is not None or os.path.isfile(default_config_file):
                accelerate_config = load_config_from_file(self._accelerate_config_file).to_dict()

            accelerate_config_str = (
                "\n".join([f"\t- {prop}: {val}" for prop, val in accelerate_config.items()])
                if isinstance(accelerate_config, dict)
                else f"\t{accelerate_config}"
            )

        pt_version = "not installed"
        pt_cuda_available = "NA"
        if is_torch_available():
            import torch

            pt_version = torch.__version__
            pt_cuda_available = torch.cuda.is_available()

        tf_version = "not installed"
        tf_cuda_available = "NA"
        if is_tf_available():
            import tensorflow as tf

            tf_version = tf.__version__
            try:
                # deprecated in v2.1
                tf_cuda_available = tf.test.is_gpu_available()
            except AttributeError:
                # returns list of devices, convert to bool
                tf_cuda_available = bool(tf.config.list_physical_devices("GPU"))

        flax_version = "not installed"
        jax_version = "not installed"
        jaxlib_version = "not installed"
        jax_backend = "NA"
        if is_flax_available():
            import flax
            import jax
            import jaxlib

            flax_version = flax.__version__
            jax_version = jax.__version__
            jaxlib_version = jaxlib.__version__
            jax_backend = jax.lib.xla_bridge.get_backend().platform

        info = {
            "`transformers` version": version,
            "Platform": platform.platform(),
            "Python version": platform.python_version(),
            "Huggingface_hub version": huggingface_hub.__version__,
            "Safetensors version": f"{safetensors_version}",
            "Accelerate version": f"{accelerate_version}",
            "Accelerate config": f"{accelerate_config_str}",
            "PyTorch version (GPU?)": f"{pt_version} ({pt_cuda_available})",
            "Tensorflow version (GPU?)": f"{tf_version} ({tf_cuda_available})",
            "Flax version (CPU?/GPU?/TPU?)": f"{flax_version} ({jax_backend})",
            "Jax version": f"{jax_version}",
            "JaxLib version": f"{jaxlib_version}",
            "Using GPU in script?": "<fill in>",
            "Using distributed or parallel set-up in script?": "<fill in>",
        }

        print("\nCopy-and-paste the text below in your GitHub issue and FILL OUT the two last points.\n")
        print(self.format_dict(info))

        return info

    @staticmethod
    def format_dict(d):
        return "\n".join([f"- {prop}: {val}" for prop, val in d.items()]) + "\n"
| 170 | 0 |
'''simple docstring'''
from collections import Counter
from timeit import timeit
def __lowerCAmelCase ( UpperCamelCase__ = "" , ) -> bool:
return sum(c % 2 for c in Counter(input_str.replace(''' ''' , '''''' ).lower() ).values() ) < 2
def __lowerCAmelCase ( UpperCamelCase__ = "" ) -> bool:
if len(UpperCamelCase__ ) == 0:
return True
__lowerCamelCase = input_str.replace(''' ''' , '''''' ).lower()
# character_freq_dict: Stores the frequency of every character in the input string
__lowerCamelCase = {}
for character in lower_case_input_str:
__lowerCamelCase = character_freq_dict.get(UpperCamelCase__ , 0 ) + 1
__lowerCamelCase = 0
for character_count in character_freq_dict.values():
if character_count % 2:
odd_char += 1
if odd_char > 1:
return False
return True
def __lowerCAmelCase ( UpperCamelCase__ = "" ) -> None:
print('''\nFor string = ''' , UpperCamelCase__ , ''':''' )
print(
'''> can_string_be_rearranged_as_palindrome_counter()''' , '''\tans =''' , can_string_be_rearranged_as_palindrome_counter(UpperCamelCase__ ) , '''\ttime =''' , timeit(
'''z.can_string_be_rearranged_as_palindrome_counter(z.check_str)''' , setup='''import __main__ as z''' , ) , '''seconds''' , )
print(
'''> can_string_be_rearranged_as_palindrome()''' , '''\tans =''' , can_string_be_rearranged_as_palindrome(UpperCamelCase__ ) , '''\ttime =''' , timeit(
'''z.can_string_be_rearranged_as_palindrome(z.check_str)''' , setup='''import __main__ as z''' , ) , '''seconds''' , )
if __name__ == "__main__":
__UpperCAmelCase =input(
"Enter string to determine if it can be rearranged as a palindrome or not: "
).strip()
benchmark(check_str)
__UpperCAmelCase =can_string_be_rearranged_as_palindrome_counter(check_str)
print(f'{check_str} can {"" if status else "not "}be rearranged as a palindrome')
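    # Worked micro-example (added for illustration): "racecar" has character counts
    # {r: 2, a: 2, c: 2, e: 1} -> exactly one odd count, so it qualifies; "abc" has three
    # odd counts, so it does not.
    assert can_string_be_rearranged_as_palindrome_counter("racecar") is True
    assert can_string_be_rearranged_as_palindrome("abc") is False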
| 67 |
def harmonic_series(n_term: str) -> list:
    if n_term == "":
        return []
    series: list = []
    for temp in range(int(n_term)):
        series.append(f"1/{temp + 1}" if series else "1")
    return series


if __name__ == "__main__":
    nth_term = input("Enter the last number (nth term) of the Harmonic Series")
    print("Formula of Harmonic Series => 1+1/2+1/3 ..... 1/n")
    print(harmonic_series(nth_term))
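    # Quick check (added for illustration): terms are returned as strings, e.g.
    #     harmonic_series("5") -> ["1", "1/2", "1/3", "1/4", "1/5"]
    assert harmonic_series("5") == ["1", "1/2", "1/3", "1/4", "1/5"]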
| 305 | 0 |
"""simple docstring"""
import pytest
DATASET_LOADING_SCRIPT_NAME = "__dummy_dataset1__"

DATASET_LOADING_SCRIPT_CODE = """
import json
import os
import datasets
REPO_URL = \"https://huggingface.co/datasets/albertvillanova/tests-raw-jsonl/resolve/main/\"
URLS = {\"train\": REPO_URL + \"wikiann-bn-train.jsonl\", \"validation\": REPO_URL + \"wikiann-bn-validation.jsonl\"}
class __DummyDataset1__(datasets.GeneratorBasedBuilder):
def _info(self):
features = datasets.Features(
{
\"tokens\": datasets.Sequence(datasets.Value(\"string\")),
\"ner_tags\": datasets.Sequence(
datasets.features.ClassLabel(
names=[
\"O\",
\"B-PER\",
\"I-PER\",
\"B-ORG\",
\"I-ORG\",
\"B-LOC\",
\"I-LOC\",
]
)
),
\"langs\": datasets.Sequence(datasets.Value(\"string\")),
\"spans\": datasets.Sequence(datasets.Value(\"string\")),
}
)
return datasets.DatasetInfo(features=features)
def _split_generators(self, dl_manager):
dl_path = dl_manager.download(URLS)
return [
datasets.SplitGenerator(datasets.Split.TRAIN, gen_kwargs={\"filepath\": dl_path[\"train\"]}),
datasets.SplitGenerator(datasets.Split.VALIDATION, gen_kwargs={\"filepath\": dl_path[\"validation\"]}),
]
def _generate_examples(self, filepath):
with open(filepath, \"r\", encoding=\"utf-8\") as f:
for i, line in enumerate(f):
yield i, json.loads(line)
"""
@pytest.fixture
def dataset_loading_script_name():
    return DATASET_LOADING_SCRIPT_NAME


@pytest.fixture
def dataset_loading_script_code():
    return DATASET_LOADING_SCRIPT_CODE


@pytest.fixture
def dataset_loading_script_dir(dataset_loading_script_name, dataset_loading_script_code, tmp_path):
    script_name = dataset_loading_script_name
    script_dir = tmp_path / "datasets" / script_name
    script_dir.mkdir(parents=True)
    script_path = script_dir / f"{script_name}.py"
    with open(script_path, "w") as f:
        f.write(dataset_loading_script_code)
    return str(script_dir)
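# Sketch of how the fixtures might be consumed in a test (illustrative; the actual tests live
# elsewhere in the repository, and the exact call signature is an assumption):
#
#     def test_load_dummy_dataset(dataset_loading_script_dir, dataset_loading_script_name):
#         import datasets
#         builder_script = f"{dataset_loading_script_dir}/{dataset_loading_script_name}.py"
#         ds = datasets.load_dataset(builder_script)  # downloads the two jsonl files listed in URLS
#         assert "train" in ds and "validation" in ds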
| 368 |
"""simple docstring"""
import warnings
from ...utils import logging
from .image_processing_dpt import DPTImageProcessor
logger = logging.get_logger(__name__)


class DPTFeatureExtractor(DPTImageProcessor):
    def __init__(self, *args, **kwargs) -> None:
        warnings.warn(
            "The class DPTFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please"
            " use DPTImageProcessor instead.",
            FutureWarning,
        )
        super().__init__(*args, **kwargs)
| 80 | 0 |
"""simple docstring"""
import argparse
import os
import re
import torch
from flax.traverse_util import flatten_dict
from t5x import checkpoints

from transformers import (
    AutoTokenizer,
    Pix2StructConfig,
    Pix2StructForConditionalGeneration,
    Pix2StructImageProcessor,
    Pix2StructProcessor,
    Pix2StructTextConfig,
    Pix2StructVisionConfig,
)


def get_flax_param(t5x_checkpoint_path):
    flax_params = checkpoints.load_t5x_checkpoint(t5x_checkpoint_path)
    flax_params = flatten_dict(flax_params)
    return flax_params
def rename_and_convert_flax_params(flax_dict):
    converted_dict = {}

    CONVERSION_MAPPING = {
        "token_embedder": "embeddings",
        "encoder_norm": "layernorm",
        "kernel": "weight",
        ".out": ".output",
        "scale": "weight",
        "embedders_0.pos_embedding": "row_embedder.weight",
        "embedders_1.pos_embedding": "column_embedder.weight",
    }

    DECODER_CONVERSION_MAPPING = {
        "query": "attention.query",
        "key": "attention.key",
        "value": "attention.value",
        "output.dense": "output",
        "encoder_decoder_attention.o": "encoder_decoder_attention.attention.o",
        "pre_self_attention_layer_norm": "self_attention.layer_norm",
        "pre_cross_attention_layer_norm": "encoder_decoder_attention.layer_norm",
        "mlp.": "mlp.DenseReluDense.",
        "pre_mlp_layer_norm": "mlp.layer_norm",
        "self_attention.o": "self_attention.attention.o",
        "decoder.embeddings.embedding": "decoder.embed_tokens.weight",
        "decoder.relpos_bias.rel_embedding": "decoder.layer.0.self_attention.attention.relative_attention_bias.weight",
        "decoder.decoder_norm.weight": "decoder.final_layer_norm.weight",
        "decoder.logits_dense.weight": "decoder.lm_head.weight",
    }

    for key in flax_dict.keys():
        if "target" in key:
            # remove the first prefix from the key
            new_key = ".".join(key[1:])

            # rename the key
            for old, new in CONVERSION_MAPPING.items():
                new_key = new_key.replace(old, new)

            if "decoder" in new_key:
                for old, new in DECODER_CONVERSION_MAPPING.items():
                    new_key = new_key.replace(old, new)

            if "layers" in new_key and "decoder" not in new_key:
                # use regex to replace the layer number
                new_key = re.sub(r"layers_(\d+)", r"layer.\1", new_key)
                new_key = new_key.replace("encoder", "encoder.encoder")

            elif "layers" in new_key and "decoder" in new_key:
                # use regex to replace the layer number
                new_key = re.sub(r"layers_(\d+)", r"layer.\1", new_key)

            converted_dict[new_key] = flax_dict[key]

    converted_torch_dict = {}
    # convert converted_dict into torch format
    for key in converted_dict.keys():
        if ("embed_tokens" not in key) and ("embedder" not in key):
            converted_torch_dict[key] = torch.from_numpy(converted_dict[key].T)
        else:
            converted_torch_dict[key] = torch.from_numpy(converted_dict[key])

    return converted_torch_dict
def convert_pix2struct_original_pytorch_checkpoint_to_hf(
    t5x_checkpoint_path, pytorch_dump_folder_path, use_large=False, is_vqa=False
):
    flax_params = get_flax_param(t5x_checkpoint_path)

    if not use_large:
        encoder_config = Pix2StructVisionConfig()
        decoder_config = Pix2StructTextConfig()
    else:
        encoder_config = Pix2StructVisionConfig(
            hidden_size=1536, d_ff=3968, num_attention_heads=24, num_hidden_layers=18
        )
        decoder_config = Pix2StructTextConfig(hidden_size=1536, d_ff=3968, num_heads=24, num_layers=18)
    config = Pix2StructConfig(
        vision_config=encoder_config.to_dict(), text_config=decoder_config.to_dict(), is_vqa=is_vqa
    )

    model = Pix2StructForConditionalGeneration(config)

    torch_params = rename_and_convert_flax_params(flax_params)
    model.load_state_dict(torch_params)

    tok = AutoTokenizer.from_pretrained("ybelkada/test-pix2struct-tokenizer")
    image_processor = Pix2StructImageProcessor()
    processor = Pix2StructProcessor(image_processor=image_processor, tokenizer=tok)

    if use_large:
        processor.image_processor.max_patches = 4096
        processor.image_processor.is_vqa = True

    # mkdir if needed
    os.makedirs(pytorch_dump_folder_path, exist_ok=True)

    model.save_pretrained(pytorch_dump_folder_path)
    processor.save_pretrained(pytorch_dump_folder_path)

    print("Model saved in {}".format(pytorch_dump_folder_path))


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--t5x_checkpoint_path", default=None, type=str, help="Path to the original T5x checkpoint.")
    parser.add_argument("--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.")
    parser.add_argument("--use_large", action="store_true", help="Use large model.")
    parser.add_argument("--is_vqa", action="store_true", help="Use large model.")

    args = parser.parse_args()
    convert_pix2struct_original_pytorch_checkpoint_to_hf(
        args.t5x_checkpoint_path, args.pytorch_dump_folder_path, args.use_large
    )
| 249 |
"""simple docstring"""
import re
import jax.numpy as jnp
from flax.traverse_util import flatten_dict, unflatten_dict
from jax.random import PRNGKey
from ..utils import logging
lowerCamelCase_ : Tuple = logging.get_logger(__name__)
def rename_key(key):
    regex = r"\w+[.]\d+"
    pats = re.findall(regex, key)
    for pat in pats:
        key = key.replace(pat, "_".join(pat.split(".")))
    return key
def rename_key_and_reshape_tensor(pt_tuple_key, pt_tensor, random_flax_state_dict):
    # conv norm or layer norm
    renamed_pt_tuple_key = pt_tuple_key[:-1] + ("scale",)
    if (
        any("norm" in str_ for str_ in pt_tuple_key)
        and (pt_tuple_key[-1] == "bias")
        and (pt_tuple_key[:-1] + ("bias",) not in random_flax_state_dict)
        and (pt_tuple_key[:-1] + ("scale",) in random_flax_state_dict)
    ):
        renamed_pt_tuple_key = pt_tuple_key[:-1] + ("scale",)
        return renamed_pt_tuple_key, pt_tensor
    elif pt_tuple_key[-1] in ["weight", "gamma"] and pt_tuple_key[:-1] + ("scale",) in random_flax_state_dict:
        renamed_pt_tuple_key = pt_tuple_key[:-1] + ("scale",)
        return renamed_pt_tuple_key, pt_tensor

    # embedding
    if pt_tuple_key[-1] == "weight" and pt_tuple_key[:-1] + ("embedding",) in random_flax_state_dict:
        renamed_pt_tuple_key = pt_tuple_key[:-1] + ("embedding",)
        return renamed_pt_tuple_key, pt_tensor

    # conv layer
    renamed_pt_tuple_key = pt_tuple_key[:-1] + ("kernel",)
    if pt_tuple_key[-1] == "weight" and pt_tensor.ndim == 4:
        pt_tensor = pt_tensor.transpose(2, 3, 1, 0)
        return renamed_pt_tuple_key, pt_tensor

    # linear layer
    renamed_pt_tuple_key = pt_tuple_key[:-1] + ("kernel",)
    if pt_tuple_key[-1] == "weight":
        pt_tensor = pt_tensor.T
        return renamed_pt_tuple_key, pt_tensor

    # old PyTorch layer norm weight
    renamed_pt_tuple_key = pt_tuple_key[:-1] + ("weight",)
    if pt_tuple_key[-1] == "gamma":
        return renamed_pt_tuple_key, pt_tensor

    # old PyTorch layer norm bias
    renamed_pt_tuple_key = pt_tuple_key[:-1] + ("bias",)
    if pt_tuple_key[-1] == "beta":
        return renamed_pt_tuple_key, pt_tensor

    return pt_tuple_key, pt_tensor
def convert_pytorch_state_dict_to_flax(pt_state_dict, flax_model, init_key=42):
    # Step 1: Convert pytorch tensors to numpy
    pt_state_dict = {k: v.numpy() for k, v in pt_state_dict.items()}

    # Step 2: Since the model is stateless, get random Flax params
    random_flax_params = flax_model.init_weights(PRNGKey(init_key))

    random_flax_state_dict = flatten_dict(random_flax_params)
    flax_state_dict = {}

    # Need to change some parameters name to match Flax names
    for pt_key, pt_tensor in pt_state_dict.items():
        renamed_pt_key = rename_key(pt_key)
        pt_tuple_key = tuple(renamed_pt_key.split("."))

        # Correctly rename weight parameters
        flax_key, flax_tensor = rename_key_and_reshape_tensor(pt_tuple_key, pt_tensor, random_flax_state_dict)

        if flax_key in random_flax_state_dict:
            if flax_tensor.shape != random_flax_state_dict[flax_key].shape:
                raise ValueError(
                    f"PyTorch checkpoint seems to be incorrect. Weight {pt_key} was expected to be of shape "
                    f"{random_flax_state_dict[flax_key].shape}, but is {flax_tensor.shape}."
                )

        # also add unexpected weight so that warning is thrown
        flax_state_dict[flax_key] = jnp.asarray(flax_tensor)

    return unflatten_dict(flax_state_dict)
| 286 | 0 |
"""simple docstring"""
import argparse
import os
import shutil
from pathlib import Path
import onnx
import torch
from packaging import version
from torch.onnx import export
from diffusers import OnnxRuntimeModel, OnnxStableDiffusionPipeline, StableDiffusionPipeline
is_torch_less_than_1_11 = version.parse(version.parse(torch.__version__).base_version) < version.parse('1.11')
def UpperCAmelCase ( a_, a_, a_, a_, a_, a_, a_, a_=False, ):
'''simple docstring'''
output_path.parent.mkdir(parents=a_, exist_ok=a_ )
# PyTorch deprecated the `enable_onnx_checker` and `use_external_data_format` arguments in v1.11,
# so we check the torch version for backwards compatibility
if is_torch_less_than_1_11:
export(
a_, a_, f=output_path.as_posix(), input_names=a_, output_names=a_, dynamic_axes=a_, do_constant_folding=a_, use_external_data_format=a_, enable_onnx_checker=a_, opset_version=a_, )
else:
export(
a_, a_, f=output_path.as_posix(), input_names=a_, output_names=a_, dynamic_axes=a_, do_constant_folding=a_, opset_version=a_, )
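# Illustrative note (added): `dynamic_axes` marks tensor dimensions that may vary at
# runtime instead of being baked into the exported graph. A minimal sketch using the
# helper above (its keyword names are taken from the calls further down this script):
#
#     onnx_export(
#         torch.nn.Linear(4, 2),
#         model_args=(torch.randn(1, 4),),
#         output_path=Path("linear/model.onnx"),
#         ordered_input_names=["x"],
#         output_names=["y"],
#         dynamic_axes={"x": {0: "batch"}, "y": {0: "batch"}},
#         opset=14,
#     )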
@torch.no_grad()
def UpperCAmelCase ( a_, a_, a_, a_ = False ):
'''simple docstring'''
lowerCamelCase : List[Any] = torch.floataa if fpaa else torch.floataa
if fpaa and torch.cuda.is_available():
lowerCamelCase : Union[str, Any] = 'cuda'
elif fpaa and not torch.cuda.is_available():
raise ValueError('`float16` model export is only supported on GPUs with CUDA' )
else:
lowerCamelCase : Union[str, Any] = 'cpu'
lowerCamelCase : Optional[Any] = StableDiffusionPipeline.from_pretrained(a_, torch_dtype=a_ ).to(a_ )
lowerCamelCase : str = Path(a_ )
# TEXT ENCODER
lowerCamelCase : int = pipeline.text_encoder.config.max_position_embeddings
lowerCamelCase : str = pipeline.text_encoder.config.hidden_size
lowerCamelCase : Union[str, Any] = pipeline.tokenizer(
'A sample prompt', padding='max_length', max_length=pipeline.tokenizer.model_max_length, truncation=a_, return_tensors='pt', )
onnx_export(
pipeline.text_encoder, model_args=(text_input.input_ids.to(device=a_, dtype=torch.intaa )), output_path=output_path / 'text_encoder' / 'model.onnx', ordered_input_names=['input_ids'], output_names=['last_hidden_state', 'pooler_output'], dynamic_axes={
'input_ids': {0: 'batch', 1: 'sequence'},
}, opset=a_, )
del pipeline.text_encoder
# UNET
lowerCamelCase : Any = pipeline.unet.config.in_channels
lowerCamelCase : int = pipeline.unet.config.sample_size
lowerCamelCase : Optional[int] = output_path / 'unet' / 'model.onnx'
onnx_export(
pipeline.unet, model_args=(
torch.randn(2, a_, a_, a_ ).to(device=a_, dtype=a_ ),
torch.randn(2 ).to(device=a_, dtype=a_ ),
torch.randn(2, a_, a_ ).to(device=a_, dtype=a_ ),
False,
), output_path=a_, ordered_input_names=['sample', 'timestep', 'encoder_hidden_states', 'return_dict'], output_names=['out_sample'], dynamic_axes={
'sample': {0: 'batch', 1: 'channels', 2: 'height', 3: 'width'},
'timestep': {0: 'batch'},
'encoder_hidden_states': {0: 'batch', 1: 'sequence'},
}, opset=a_, use_external_data_format=a_, )
lowerCamelCase : List[str] = str(unet_path.absolute().as_posix() )
lowerCamelCase : Any = os.path.dirname(a_ )
lowerCamelCase : List[str] = onnx.load(a_ )
# clean up existing tensor files
shutil.rmtree(a_ )
os.mkdir(a_ )
# collate external tensor files into one
onnx.save_model(
a_, a_, save_as_external_data=a_, all_tensors_to_one_file=a_, location='weights.pb', convert_attribute=a_, )
del pipeline.unet
# VAE ENCODER
    vae_encoder = pipeline.vae
lowerCamelCase : List[str] = vae_encoder.config.in_channels
lowerCamelCase : Optional[int] = vae_encoder.config.sample_size
# need to get the raw tensor output (sample) from the encoder
    vae_encoder.forward = lambda sample, return_dict: vae_encoder.encode(sample, return_dict )[0].sample()
onnx_export(
a_, model_args=(
torch.randn(1, a_, a_, a_ ).to(device=a_, dtype=a_ ),
False,
), output_path=output_path / 'vae_encoder' / 'model.onnx', ordered_input_names=['sample', 'return_dict'], output_names=['latent_sample'], dynamic_axes={
'sample': {0: 'batch', 1: 'channels', 2: 'height', 3: 'width'},
}, opset=a_, )
# VAE DECODER
    vae_decoder = pipeline.vae
lowerCamelCase : Tuple = vae_decoder.config.latent_channels
lowerCamelCase : Any = vae_decoder.config.out_channels
# forward only through the decoder part
    vae_decoder.forward = vae_encoder.decode
onnx_export(
a_, model_args=(
torch.randn(1, a_, a_, a_ ).to(device=a_, dtype=a_ ),
False,
), output_path=output_path / 'vae_decoder' / 'model.onnx', ordered_input_names=['latent_sample', 'return_dict'], output_names=['sample'], dynamic_axes={
'latent_sample': {0: 'batch', 1: 'channels', 2: 'height', 3: 'width'},
}, opset=a_, )
del pipeline.vae
# SAFETY CHECKER
if pipeline.safety_checker is not None:
        safety_checker = pipeline.safety_checker
lowerCamelCase : int = safety_checker.config.vision_config.num_channels
lowerCamelCase : Any = safety_checker.config.vision_config.image_size
        safety_checker.forward = safety_checker.forward_onnx
onnx_export(
pipeline.safety_checker, model_args=(
torch.randn(
1, a_, a_, a_, ).to(device=a_, dtype=a_ ),
torch.randn(1, a_, a_, a_ ).to(device=a_, dtype=a_ ),
), output_path=output_path / 'safety_checker' / 'model.onnx', ordered_input_names=['clip_input', 'images'], output_names=['out_images', 'has_nsfw_concepts'], dynamic_axes={
'clip_input': {0: 'batch', 1: 'channels', 2: 'height', 3: 'width'},
'images': {0: 'batch', 1: 'height', 2: 'width', 3: 'channels'},
}, opset=a_, )
del pipeline.safety_checker
        safety_checker = OnnxRuntimeModel.from_pretrained(output_path / 'safety_checker' )
        feature_extractor = pipeline.feature_extractor
    else:
        safety_checker = None
        feature_extractor = None
    onnx_pipeline = OnnxStableDiffusionPipeline(
        vae_encoder=OnnxRuntimeModel.from_pretrained(output_path / 'vae_encoder' ),
        vae_decoder=OnnxRuntimeModel.from_pretrained(output_path / 'vae_decoder' ),
        text_encoder=OnnxRuntimeModel.from_pretrained(output_path / 'text_encoder' ),
        tokenizer=pipeline.tokenizer,
        unet=OnnxRuntimeModel.from_pretrained(output_path / 'unet' ),
        scheduler=pipeline.scheduler,
        safety_checker=safety_checker,
        feature_extractor=feature_extractor,
        requires_safety_checker=safety_checker is not None,
    )
onnx_pipeline.save_pretrained(a_ )
print('ONNX pipeline saved to', a_ )
del pipeline
del onnx_pipeline
lowerCamelCase : Dict = OnnxStableDiffusionPipeline.from_pretrained(a_, provider='CPUExecutionProvider' )
print('ONNX pipeline is loadable' )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument(
'--model_path',
type=str,
required=True,
help='Path to the `diffusers` checkpoint to convert (either a local directory or on the Hub).',
)
parser.add_argument('--output_path', type=str, required=True, help='Path to the output model.')
parser.add_argument(
'--opset',
default=1_4,
type=int,
help='The version of the ONNX operator set to use.',
)
parser.add_argument('--fp16', action='store_true', default=False, help='Export the models in `float16` mode')
    args = parser.parse_args()
    convert_models(args.model_path, args.output_path, args.opset, args.fp16)
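    # Hedged usage sketch (added): after export the pipeline runs on CPU through
    # onnxruntime, mirroring the loadability check above; the prompt and step count
    # here are illustrative:
    #
    #     pipe = OnnxStableDiffusionPipeline.from_pretrained(args.output_path, provider='CPUExecutionProvider')
    #     image = pipe('a photo of an astronaut riding a horse', num_inference_steps=25).images[0]
    #     image.save('astronaut.png')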
| 205 |
"""simple docstring"""
import os
import sys
from contextlib import contextmanager
# Windows only
if os.name == "nt":
import ctypes
import msvcrt # noqa
    class CursorInfo(ctypes.Structure):
        # _fields_ is a specific attr expected by ctypes
        _fields_ = [("size", ctypes.c_int), ("visible", ctypes.c_byte)]


def hide_cursor():
    if os.name == "nt":
        ci = CursorInfo()
        handle = ctypes.windll.kernel32.GetStdHandle(-11)
        ctypes.windll.kernel32.GetConsoleCursorInfo(handle, ctypes.byref(ci))
        ci.visible = False
        ctypes.windll.kernel32.SetConsoleCursorInfo(handle, ctypes.byref(ci))
    elif os.name == "posix":
        sys.stdout.write("\033[?25l")
        sys.stdout.flush()


def show_cursor():
    if os.name == "nt":
        ci = CursorInfo()
        handle = ctypes.windll.kernel32.GetStdHandle(-11)
        ctypes.windll.kernel32.GetConsoleCursorInfo(handle, ctypes.byref(ci))
        ci.visible = True
        ctypes.windll.kernel32.SetConsoleCursorInfo(handle, ctypes.byref(ci))
    elif os.name == "posix":
        sys.stdout.write("\033[?25h")
        sys.stdout.flush()


@contextmanager
def hide():
    try:
        hide_cursor()
        yield
    finally:
        show_cursor()
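# Illustrative usage (added): temporarily hide the terminal cursor around a block of
# output; `hide` is the context manager defined above:
#
#     with hide():
#         for _ in range(3):
#             print("working...")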
| 205 | 1 |
'''simple docstring'''
from __future__ import annotations
from random import random
class Node:
    def __init__(self, value: int | None = None) -> None:
        self.value = value
        self.prior = random()
        self.left: Node | None = None
        self.right: Node | None = None

    def __repr__(self) -> str:
        from pprint import pformat

        if self.left is None and self.right is None:
            return f"'{self.value}: {self.prior:.5}'"
        else:
            return pformat(
                {f"{self.value}: {self.prior:.5}": (self.left, self.right)}, indent=1
            )

    def __str__(self) -> str:
        value = str(self.value) + " "
        left = str(self.left or "")
        right = str(self.right or "")
        return value + left + right


def split(root: Node | None, value: int) -> tuple[Node | None, Node | None]:
    if root is None:  # None tree is split into 2 Nones
        return None, None
    elif root.value is None:
        return None, None
    else:
        if value < root.value:
            left, root.left = split(root.left, value)
            return left, root
        else:
            root.right, right = split(root.right, value)
            return root, right


def merge(left: Node | None, right: Node | None) -> Node | None:
    if (not left) or (not right):  # If one node is None, return the other
        return left or right
    elif left.prior < right.prior:
        left.right = merge(left.right, right)
        return left
    else:
        right.left = merge(left, right.left)
        return right


def insert(root: Node | None, value: int) -> Node | None:
    node = Node(value)
    left, right = split(root, value)
    return merge(merge(left, node), right)


def erase(root: Node | None, value: int) -> Node | None:
    left, right = split(root, value - 1)
    _, right = split(right, value)
    return merge(left, right)


def inorder(root: Node | None) -> None:
    if not root:  # None
        return
    else:
        inorder(root.left)
        print(root.value, end=",")
        inorder(root.right)


def interact_treap(root: Node | None, args: str) -> Node | None:
    for arg in args.split():
        if arg[0] == "+":
            root = insert(root, int(arg[1:]))
        elif arg[0] == "-":
            root = erase(root, int(arg[1:]))
        else:
            print("Unknown command")
    return root


def main() -> None:
    root = None
    print(
        "enter numbers to create a tree, + value to add value into treap, "
        "- value to erase all nodes with value. 'q' to quit. "
    )
    args = input()
    while args != "q":
        root = interact_treap(root, args)
        print(root)
        args = input()
    print("goodbye!")


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    main()
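# Illustrative example (added): driving the treap directly with the helpers above.
#
#     root = None
#     for v in (5, 3, 8, 1):
#         root = insert(root, v)
#     inorder(root)        # prints: 1,3,5,8,
#     root = erase(root, 3)
#     inorder(root)        # prints: 1,5,8,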
| 70 |
'''simple docstring'''
import unittest
from transformers import is_torch_available
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow, torch_device
if is_torch_available():
    from transformers import AutoModelForSeq2SeqLM, AutoTokenizer
@require_torch
@require_sentencepiece
@require_tokenizers
class MT5IntegrationTest(unittest.TestCase):
    @slow
    def test_small_integration_test(self):
        model = AutoModelForSeq2SeqLM.from_pretrained("google/mt5-small", return_dict=True).to(torch_device)
        tokenizer = AutoTokenizer.from_pretrained("google/mt5-small")

        input_ids = tokenizer("Hello there", return_tensors="pt").input_ids
        labels = tokenizer("Hi I am", return_tensors="pt").input_ids

        loss = model(input_ids.to(torch_device), labels=labels.to(torch_device)).loss
        mtf_score = -(labels.shape[-1] * loss.item())

        EXPECTED_SCORE = -84.9127
        self.assertTrue(abs(mtf_score - EXPECTED_SCORE) < 1e-4)
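        # Illustrative note (added): `loss` is the mean cross-entropy per label token,
        # so scaling by the label length recovers the total negative log-likelihood of
        # the target sequence; EXPECTED_SCORE pins that value (presumably a Mesh
        # TensorFlow-style score, hence the name mtf_score).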
| 70 | 1 |
"""simple docstring"""
import gc
import random
import unittest
import numpy as np
import torch
from PIL import Image
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DDIMScheduler,
EulerAncestralDiscreteScheduler,
LMSDiscreteScheduler,
PNDMScheduler,
StableDiffusionInstructPixaPixPipeline,
UNetaDConditionModel,
)
from diffusers.image_processor import VaeImageProcessor
from diffusers.utils import floats_tensor, load_image, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..pipeline_params import (
IMAGE_TO_IMAGE_IMAGE_PARAMS,
TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS,
TEXT_GUIDED_IMAGE_VARIATION_PARAMS,
)
from ..test_pipelines_common import PipelineKarrasSchedulerTesterMixin, PipelineLatentTesterMixin, PipelineTesterMixin
enable_full_determinism()
class _UpperCAmelCase ( a ,a ,a ,unittest.TestCase ):
'''simple docstring'''
a__ =StableDiffusionInstructPixaPixPipeline
a__ =TEXT_GUIDED_IMAGE_VARIATION_PARAMS - {'''height''', '''width''', '''cross_attention_kwargs'''}
a__ =TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS
a__ =IMAGE_TO_IMAGE_IMAGE_PARAMS
a__ =IMAGE_TO_IMAGE_IMAGE_PARAMS
def __lowerCAmelCase ( self ) -> Union[str, Any]:
torch.manual_seed(0 )
_UpperCAmelCase : Union[str, Any] = UNetaDConditionModel(
block_out_channels=(3_2, 6_4) , layers_per_block=2 , sample_size=3_2 , in_channels=8 , out_channels=4 , down_block_types=('''DownBlock2D''', '''CrossAttnDownBlock2D''') , up_block_types=('''CrossAttnUpBlock2D''', '''UpBlock2D''') , cross_attention_dim=3_2 , )
_UpperCAmelCase : List[str] = PNDMScheduler(skip_prk_steps=A )
torch.manual_seed(0 )
_UpperCAmelCase : Any = AutoencoderKL(
block_out_channels=[3_2, 6_4] , in_channels=3 , out_channels=3 , down_block_types=['''DownEncoderBlock2D''', '''DownEncoderBlock2D'''] , up_block_types=['''UpDecoderBlock2D''', '''UpDecoderBlock2D'''] , latent_channels=4 , )
torch.manual_seed(0 )
_UpperCAmelCase : Any = CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=3_2 , intermediate_size=3_7 , layer_norm_eps=1E-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1_0_0_0 , )
_UpperCAmelCase : List[str] = CLIPTextModel(A )
_UpperCAmelCase : int = CLIPTokenizer.from_pretrained('''hf-internal-testing/tiny-random-clip''' )
_UpperCAmelCase : Tuple = {
'''unet''': unet,
'''scheduler''': scheduler,
'''vae''': vae,
'''text_encoder''': text_encoder,
'''tokenizer''': tokenizer,
'''safety_checker''': None,
'''feature_extractor''': None,
}
return components
def __lowerCAmelCase ( self , A , A=0 ) -> Optional[int]:
_UpperCAmelCase : Dict = floats_tensor((1, 3, 3_2, 3_2) , rng=random.Random(A ) ).to(A )
_UpperCAmelCase : Any = image.cpu().permute(0 , 2 , 3 , 1 )[0]
_UpperCAmelCase : Dict = Image.fromarray(np.uinta(A ) ).convert('''RGB''' )
if str(A ).startswith('''mps''' ):
_UpperCAmelCase : Optional[int] = torch.manual_seed(A )
else:
_UpperCAmelCase : str = torch.Generator(device=A ).manual_seed(A )
_UpperCAmelCase : Tuple = {
'''prompt''': '''A painting of a squirrel eating a burger''',
'''image''': image,
'''generator''': generator,
'''num_inference_steps''': 2,
'''guidance_scale''': 6.0,
'''image_guidance_scale''': 1,
'''output_type''': '''numpy''',
}
return inputs
def __lowerCAmelCase ( self ) -> List[str]:
_UpperCAmelCase : Union[str, Any] = '''cpu''' # ensure determinism for the device-dependent torch.Generator
_UpperCAmelCase : str = self.get_dummy_components()
_UpperCAmelCase : Tuple = StableDiffusionInstructPixaPixPipeline(**A )
_UpperCAmelCase : Any = sd_pipe.to(A )
sd_pipe.set_progress_bar_config(disable=A )
_UpperCAmelCase : List[str] = self.get_dummy_inputs(A )
_UpperCAmelCase : Union[str, Any] = sd_pipe(**A ).images
_UpperCAmelCase : int = image[0, -3:, -3:, -1]
assert image.shape == (1, 3_2, 3_2, 3)
_UpperCAmelCase : int = np.array([0.7_526, 0.3_750, 0.4_547, 0.6_117, 0.5_866, 0.5_016, 0.4_327, 0.5_642, 0.4_815] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-3
def __lowerCAmelCase ( self ) -> Optional[Any]:
_UpperCAmelCase : Optional[int] = '''cpu''' # ensure determinism for the device-dependent torch.Generator
_UpperCAmelCase : Any = self.get_dummy_components()
_UpperCAmelCase : Union[str, Any] = StableDiffusionInstructPixaPixPipeline(**A )
_UpperCAmelCase : str = sd_pipe.to(A )
sd_pipe.set_progress_bar_config(disable=A )
_UpperCAmelCase : str = self.get_dummy_inputs(A )
_UpperCAmelCase : List[str] = '''french fries'''
_UpperCAmelCase : Optional[Any] = sd_pipe(**A , negative_prompt=A )
_UpperCAmelCase : List[str] = output.images
_UpperCAmelCase : Union[str, Any] = image[0, -3:, -3:, -1]
assert image.shape == (1, 3_2, 3_2, 3)
_UpperCAmelCase : str = np.array([0.7_511, 0.3_642, 0.4_553, 0.6_236, 0.5_797, 0.5_013, 0.4_343, 0.5_611, 0.4_831] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-3
def __lowerCAmelCase ( self ) -> Tuple:
_UpperCAmelCase : Tuple = '''cpu''' # ensure determinism for the device-dependent torch.Generator
_UpperCAmelCase : Optional[Any] = self.get_dummy_components()
_UpperCAmelCase : Dict = StableDiffusionInstructPixaPixPipeline(**A )
_UpperCAmelCase : List[Any] = sd_pipe.to(A )
sd_pipe.set_progress_bar_config(disable=A )
_UpperCAmelCase : List[str] = self.get_dummy_inputs(A )
_UpperCAmelCase : Dict = [inputs['''prompt''']] * 2
_UpperCAmelCase : Dict = np.array(inputs['''image'''] ).astype(np.floataa ) / 255.0
_UpperCAmelCase : Optional[Any] = torch.from_numpy(A ).unsqueeze(0 ).to(A )
_UpperCAmelCase : str = image / 2 + 0.5
_UpperCAmelCase : List[str] = image.permute(0 , 3 , 1 , 2 )
_UpperCAmelCase : str = image.repeat(2 , 1 , 1 , 1 )
_UpperCAmelCase : Any = sd_pipe(**A ).images
_UpperCAmelCase : Optional[Any] = image[-1, -3:, -3:, -1]
assert image.shape == (2, 3_2, 3_2, 3)
_UpperCAmelCase : Optional[Any] = np.array([0.5_812, 0.5_748, 0.5_222, 0.5_908, 0.5_695, 0.7_174, 0.6_804, 0.5_523, 0.5_579] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-3
def __lowerCAmelCase ( self ) -> Dict:
_UpperCAmelCase : List[str] = '''cpu''' # ensure determinism for the device-dependent torch.Generator
_UpperCAmelCase : Any = self.get_dummy_components()
_UpperCAmelCase : Tuple = EulerAncestralDiscreteScheduler(
beta_start=0.00_085 , beta_end=0.012 , beta_schedule='''scaled_linear''' )
_UpperCAmelCase : Union[str, Any] = StableDiffusionInstructPixaPixPipeline(**A )
_UpperCAmelCase : Optional[int] = sd_pipe.to(A )
sd_pipe.set_progress_bar_config(disable=A )
_UpperCAmelCase : Tuple = self.get_dummy_inputs(A )
_UpperCAmelCase : Tuple = sd_pipe(**A ).images
_UpperCAmelCase : Any = image[0, -3:, -3:, -1]
_UpperCAmelCase : Optional[Any] = [round(A , 4 ) for x in image_slice.flatten().tolist()]
print(''','''.join([str(A ) for x in slice] ) )
assert image.shape == (1, 3_2, 3_2, 3)
_UpperCAmelCase : Dict = np.array([0.7_417, 0.3_842, 0.4_732, 0.5_776, 0.5_891, 0.5_139, 0.4_052, 0.5_673, 0.4_986] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-3
def __lowerCAmelCase ( self ) -> Optional[Any]:
super().test_inference_batch_single_identical(expected_max_diff=3E-3 )
def __lowerCAmelCase ( self ) -> Dict:
_UpperCAmelCase : Union[str, Any] = self.get_dummy_components()
_UpperCAmelCase : int = StableDiffusionInstructPixaPixPipeline(**A )
_UpperCAmelCase : List[Any] = VaeImageProcessor(do_resize=A , do_normalize=A )
_UpperCAmelCase : str = pipe.to(A )
pipe.set_progress_bar_config(disable=A )
_UpperCAmelCase : int = pipe(**self.get_dummy_inputs_by_type(A , input_image_type='''pt''' ) )[0]
_UpperCAmelCase : Dict = components['''vae''']
_UpperCAmelCase : Union[str, Any] = self.get_dummy_inputs_by_type(A , input_image_type='''pt''' )
for image_param in self.image_latents_params:
if image_param in inputs.keys():
_UpperCAmelCase : List[Any] = vae.encode(inputs[image_param] ).latent_dist.mode()
_UpperCAmelCase : Dict = pipe(**A )[0]
_UpperCAmelCase : List[str] = np.abs(out - out_latents_inputs ).max()
self.assertLess(A , 1E-4 , '''passing latents as image input generate different result from passing image''' )
@slow
@require_torch_gpu
class _UpperCAmelCase ( unittest.TestCase ):
'''simple docstring'''
def __lowerCAmelCase ( self ) -> str:
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def __lowerCAmelCase ( self , A=0 ) -> List[Any]:
_UpperCAmelCase : str = torch.manual_seed(A )
_UpperCAmelCase : List[str] = load_image(
'''https://huggingface.co/datasets/diffusers/test-arrays/resolve/main/stable_diffusion_pix2pix/example.jpg''' )
_UpperCAmelCase : List[str] = {
'''prompt''': '''turn him into a cyborg''',
'''image''': image,
'''generator''': generator,
'''num_inference_steps''': 3,
'''guidance_scale''': 7.5,
'''image_guidance_scale''': 1.0,
'''output_type''': '''numpy''',
}
return inputs
def __lowerCAmelCase ( self ) -> Any:
_UpperCAmelCase : Optional[int] = StableDiffusionInstructPixaPixPipeline.from_pretrained(
'''timbrooks/instruct-pix2pix''' , safety_checker=A )
pipe.to(A )
pipe.set_progress_bar_config(disable=A )
pipe.enable_attention_slicing()
_UpperCAmelCase : Optional[int] = self.get_inputs()
_UpperCAmelCase : Optional[int] = pipe(**A ).images
_UpperCAmelCase : str = image[0, -3:, -3:, -1].flatten()
assert image.shape == (1, 5_1_2, 5_1_2, 3)
_UpperCAmelCase : Optional[int] = np.array([0.5_902, 0.6_015, 0.6_027, 0.5_983, 0.6_092, 0.6_061, 0.5_765, 0.5_785, 0.5_555] )
assert np.abs(expected_slice - image_slice ).max() < 1E-3
def __lowerCAmelCase ( self ) -> str:
_UpperCAmelCase : List[Any] = StableDiffusionInstructPixaPixPipeline.from_pretrained(
'''timbrooks/instruct-pix2pix''' , safety_checker=A )
_UpperCAmelCase : Dict = LMSDiscreteScheduler.from_config(pipe.scheduler.config )
pipe.to(A )
pipe.set_progress_bar_config(disable=A )
pipe.enable_attention_slicing()
_UpperCAmelCase : Optional[int] = self.get_inputs()
_UpperCAmelCase : str = pipe(**A ).images
_UpperCAmelCase : int = image[0, -3:, -3:, -1].flatten()
assert image.shape == (1, 5_1_2, 5_1_2, 3)
_UpperCAmelCase : int = np.array([0.6_578, 0.6_817, 0.6_972, 0.6_761, 0.6_856, 0.6_916, 0.6_428, 0.6_516, 0.6_301] )
assert np.abs(expected_slice - image_slice ).max() < 1E-3
def __lowerCAmelCase ( self ) -> Dict:
_UpperCAmelCase : Optional[int] = StableDiffusionInstructPixaPixPipeline.from_pretrained(
'''timbrooks/instruct-pix2pix''' , safety_checker=A )
_UpperCAmelCase : Tuple = DDIMScheduler.from_config(pipe.scheduler.config )
pipe.to(A )
pipe.set_progress_bar_config(disable=A )
pipe.enable_attention_slicing()
_UpperCAmelCase : str = self.get_inputs()
_UpperCAmelCase : Dict = pipe(**A ).images
_UpperCAmelCase : str = image[0, -3:, -3:, -1].flatten()
assert image.shape == (1, 5_1_2, 5_1_2, 3)
_UpperCAmelCase : List[str] = np.array([0.3_828, 0.3_834, 0.3_818, 0.3_792, 0.3_865, 0.3_752, 0.3_792, 0.3_847, 0.3_753] )
assert np.abs(expected_slice - image_slice ).max() < 1E-3
def __lowerCAmelCase ( self ) -> List[str]:
_UpperCAmelCase : Dict = 0
def callback_fn(A , A , A ) -> None:
_UpperCAmelCase : Union[str, Any] = True
nonlocal number_of_steps
number_of_steps += 1
if step == 1:
_UpperCAmelCase : Dict = latents.detach().cpu().numpy()
assert latents.shape == (1, 4, 6_4, 6_4)
_UpperCAmelCase : List[Any] = latents[0, -3:, -3:, -1]
_UpperCAmelCase : List[str] = np.array([-0.2_463, -0.4_644, -0.9_756, 1.5_176, 1.4_414, 0.7_866, 0.9_897, 0.8_521, 0.7_983] )
assert np.abs(latents_slice.flatten() - expected_slice ).max() < 5E-2
elif step == 2:
_UpperCAmelCase : List[Any] = latents.detach().cpu().numpy()
assert latents.shape == (1, 4, 6_4, 6_4)
_UpperCAmelCase : Tuple = latents[0, -3:, -3:, -1]
_UpperCAmelCase : Union[str, Any] = np.array([-0.2_644, -0.4_626, -0.9_653, 1.5_176, 1.4_551, 0.7_686, 0.9_805, 0.8_452, 0.8_115] )
assert np.abs(latents_slice.flatten() - expected_slice ).max() < 5E-2
_UpperCAmelCase : Optional[int] = False
_UpperCAmelCase : str = StableDiffusionInstructPixaPixPipeline.from_pretrained(
'''timbrooks/instruct-pix2pix''' , safety_checker=A , torch_dtype=torch.floataa )
_UpperCAmelCase : Optional[int] = pipe.to(A )
pipe.set_progress_bar_config(disable=A )
pipe.enable_attention_slicing()
_UpperCAmelCase : Dict = self.get_inputs()
pipe(**A , callback=A , callback_steps=1 )
assert callback_fn.has_been_called
assert number_of_steps == 3
def __lowerCAmelCase ( self ) -> Optional[int]:
torch.cuda.empty_cache()
torch.cuda.reset_max_memory_allocated()
torch.cuda.reset_peak_memory_stats()
_UpperCAmelCase : str = StableDiffusionInstructPixaPixPipeline.from_pretrained(
'''timbrooks/instruct-pix2pix''' , safety_checker=A , torch_dtype=torch.floataa )
_UpperCAmelCase : Optional[Any] = pipe.to(A )
pipe.set_progress_bar_config(disable=A )
pipe.enable_attention_slicing(1 )
pipe.enable_sequential_cpu_offload()
_UpperCAmelCase : int = self.get_inputs()
_UpperCAmelCase : Dict = pipe(**A )
_UpperCAmelCase : Union[str, Any] = torch.cuda.max_memory_allocated()
# make sure that less than 2.2 GB is allocated
assert mem_bytes < 2.2 * 1_0**9
def __lowerCAmelCase ( self ) -> List[str]:
_UpperCAmelCase : Tuple = self.get_inputs()
# resize to resolution that is divisible by 8 but not 16 or 32
_UpperCAmelCase : str = inputs['''image'''].resize((5_0_4, 5_0_4) )
_UpperCAmelCase : List[Any] = '''timbrooks/instruct-pix2pix'''
_UpperCAmelCase : List[Any] = StableDiffusionInstructPixaPixPipeline.from_pretrained(
A , safety_checker=A , )
pipe.to(A )
pipe.set_progress_bar_config(disable=A )
pipe.enable_attention_slicing()
_UpperCAmelCase : int = pipe(**A )
_UpperCAmelCase : Optional[int] = output.images[0]
_UpperCAmelCase : Union[str, Any] = image[2_5_5:2_5_8, 3_8_3:3_8_6, -1]
assert image.shape == (5_0_4, 5_0_4, 3)
_UpperCAmelCase : Tuple = np.array([0.2_726, 0.2_529, 0.2_664, 0.2_655, 0.2_641, 0.2_642, 0.2_591, 0.2_649, 0.2_590] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 5E-3
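        # Hedged usage sketch (added): outside this test harness, the pipeline
        # exercised above is driven like so (prompt and scales are illustrative):
        #
        #     pipe = StableDiffusionInstructPixaPixPipeline.from_pretrained('timbrooks/instruct-pix2pix')
        #     out = pipe('turn him into a cyborg', image=init_image,
        #                guidance_scale=7.5, image_guidance_scale=1.0)
        #     out.images[0].save('cyborg.png')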
| 68 |
"""simple docstring"""
from dataclasses import dataclass, field
from typing import TYPE_CHECKING, Any, ClassVar, Dict, List, Optional, Union
import pyarrow as pa
if TYPE_CHECKING:
from .features import FeatureType
@dataclass
class _UpperCAmelCase :
'''simple docstring'''
a__ =42
a__ =None
# Automatically constructed
a__ ="dict"
a__ =None
a__ =field(default='''Translation''' ,init=a ,repr=a )
def __call__( self ) -> List[Any]:
return pa.struct({lang: pa.string() for lang in sorted(self.languages )} )
def __lowerCAmelCase ( self ) -> Union["FeatureType", Dict[str, "FeatureType"]]:
from .features import Value
return {k: Value('''string''' ) for k in sorted(self.languages )}
@dataclass
class _UpperCAmelCase :
'''simple docstring'''
a__ =None
a__ =None
a__ =None
# Automatically constructed
a__ ="dict"
a__ =None
a__ =field(default='''TranslationVariableLanguages''' ,init=a ,repr=a )
def __lowerCAmelCase ( self ) -> Dict:
_UpperCAmelCase : int = sorted(set(self.languages ) ) if self.languages else None
_UpperCAmelCase : List[str] = len(self.languages ) if self.languages else None
def __call__( self ) -> str:
return pa.struct({'''language''': pa.list_(pa.string() ), '''translation''': pa.list_(pa.string() )} )
def __lowerCAmelCase ( self , A ) -> List[Any]:
_UpperCAmelCase : List[str] = set(self.languages )
if self.languages and set(A ) - lang_set:
raise ValueError(
f'Some languages in example ({", ".join(sorted(set(A ) - lang_set ) )}) are not in valid set ({", ".join(A )}).' )
# Convert dictionary into tuples, splitting out cases where there are
# multiple translations for a single language.
_UpperCAmelCase : Dict = []
for lang, text in translation_dict.items():
if isinstance(A , A ):
translation_tuples.append((lang, text) )
else:
translation_tuples.extend([(lang, el) for el in text] )
# Ensure translations are in ascending order by language code.
_UpperCAmelCase , _UpperCAmelCase : Union[str, Any] = zip(*sorted(A ) )
return {"language": languages, "translation": translations}
def __lowerCAmelCase ( self ) -> Union["FeatureType", Dict[str, "FeatureType"]]:
from .features import Sequence, Value
return {
"language": Sequence(Value('''string''' ) ),
"translation": Sequence(Value('''string''' ) ),
}
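# Illustrative example (added): in the upstream `datasets` library these feature types
# are `Translation` and `TranslationVariableLanguages`; a fixed-language field is
# declared and populated like this:
#
#     from datasets import Features, Translation
#     features = Features({"translation": Translation(languages=["en", "fr"])})
#     example = {"translation": {"en": "the cat", "fr": "le chat"}}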
| 68 | 1 |
'''simple docstring'''
import warnings
from ...utils import logging
from .image_processing_glpn import GLPNImageProcessor
_UpperCamelCase : Union[str, Any] = logging.get_logger(__name__)
class GLPNFeatureExtractor(GLPNImageProcessor):
    def __init__(self, *args, **kwargs) -> None:
        warnings.warn(
            "The class GLPNFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please"
            " use GLPNImageProcessor instead.",
            FutureWarning,
        )
        super().__init__(*args, **kwargs)
| 304 |
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import cached_download, hf_hub_url
from PIL import Image
from transformers import DPTConfig, DPTForDepthEstimation, DPTForSemanticSegmentation, DPTImageProcessor
from transformers.utils import logging
logging.set_verbosity_info()
_lowercase : Union[str, Any] =logging.get_logger(__name__)
def lowerCAmelCase_ ( _lowercase : List[Any]) -> Optional[int]:
"""simple docstring"""
a__ : int = DPTConfig(embedding_type="""hybrid""")
if "large" in checkpoint_url:
a__ : Tuple = 1024
a__ : int = 4096
a__ : str = 24
a__ : List[str] = 16
a__ : Optional[Any] = [5, 11, 17, 23]
a__ : Union[str, Any] = [256, 512, 1024, 1024]
a__ : str = (1, 384, 384)
if "nyu" or "midas" in checkpoint_url:
a__ : Dict = 768
a__ : Dict = [1, 1, 1, 0.5]
a__ : Dict = [256, 512, 768, 768]
a__ : Union[str, Any] = 150
a__ : List[Any] = 16
a__ : List[Any] = (1, 384, 384)
a__ : Optional[Any] = False
a__ : Tuple = """project"""
if "ade" in checkpoint_url:
a__ : int = True
a__ : Any = 768
a__ : Tuple = [1, 1, 1, 0.5]
a__ : str = 150
a__ : Optional[int] = 16
a__ : Optional[Any] = """huggingface/label-files"""
a__ : Any = """ade20k-id2label.json"""
a__ : List[Any] = json.load(open(cached_download(hf_hub_url(_lowercase , _lowercase , repo_type="""dataset""")) , """r"""))
a__ : Union[str, Any] = {int(_lowercase): v for k, v in idalabel.items()}
a__ : List[Any] = idalabel
a__ : List[Any] = {v: k for k, v in idalabel.items()}
a__ : List[str] = [1, 150, 480, 480]
return config, expected_shape
def lowerCAmelCase_ ( _lowercase : Optional[int]) -> List[str]:
"""simple docstring"""
a__ : List[str] = ["""pretrained.model.head.weight""", """pretrained.model.head.bias"""]
for k in ignore_keys:
state_dict.pop(_lowercase , _lowercase)
def lowerCAmelCase_ ( _lowercase : Dict) -> Optional[int]:
"""simple docstring"""
if (
"pretrained.model" in name
and "cls_token" not in name
and "pos_embed" not in name
and "patch_embed" not in name
):
a__ : int = name.replace("""pretrained.model""" , """dpt.encoder""")
if "pretrained.model" in name:
a__ : Optional[Any] = name.replace("""pretrained.model""" , """dpt.embeddings""")
if "patch_embed" in name:
a__ : Any = name.replace("""patch_embed""" , """""")
if "pos_embed" in name:
a__ : Optional[Any] = name.replace("""pos_embed""" , """position_embeddings""")
if "attn.proj" in name:
a__ : Union[str, Any] = name.replace("""attn.proj""" , """attention.output.dense""")
if "proj" in name and "project" not in name:
a__ : List[Any] = name.replace("""proj""" , """projection""")
if "blocks" in name:
a__ : int = name.replace("""blocks""" , """layer""")
if "mlp.fc1" in name:
a__ : Optional[Any] = name.replace("""mlp.fc1""" , """intermediate.dense""")
if "mlp.fc2" in name:
a__ : Tuple = name.replace("""mlp.fc2""" , """output.dense""")
if "norm1" in name and "backbone" not in name:
a__ : List[str] = name.replace("""norm1""" , """layernorm_before""")
if "norm2" in name and "backbone" not in name:
a__ : List[str] = name.replace("""norm2""" , """layernorm_after""")
if "scratch.output_conv" in name:
a__ : int = name.replace("""scratch.output_conv""" , """head""")
if "scratch" in name:
a__ : List[Any] = name.replace("""scratch""" , """neck""")
if "layer1_rn" in name:
a__ : Optional[Any] = name.replace("""layer1_rn""" , """convs.0""")
if "layer2_rn" in name:
a__ : List[Any] = name.replace("""layer2_rn""" , """convs.1""")
if "layer3_rn" in name:
a__ : Dict = name.replace("""layer3_rn""" , """convs.2""")
if "layer4_rn" in name:
a__ : Optional[int] = name.replace("""layer4_rn""" , """convs.3""")
if "refinenet" in name:
a__ : int = int(name[len("""neck.refinenet""") : len("""neck.refinenet""") + 1])
# tricky here: we need to map 4 to 0, 3 to 1, 2 to 2 and 1 to 3
a__ : int = name.replace(F'''refinenet{layer_idx}''' , F'''fusion_stage.layers.{abs(layer_idx-4)}''')
if "out_conv" in name:
a__ : Optional[Any] = name.replace("""out_conv""" , """projection""")
if "resConfUnit1" in name:
a__ : int = name.replace("""resConfUnit1""" , """residual_layer1""")
if "resConfUnit2" in name:
a__ : Union[str, Any] = name.replace("""resConfUnit2""" , """residual_layer2""")
if "conv1" in name:
a__ : Dict = name.replace("""conv1""" , """convolution1""")
if "conv2" in name:
a__ : Any = name.replace("""conv2""" , """convolution2""")
# readout blocks
if "pretrained.act_postprocess1.0.project.0" in name:
a__ : List[str] = name.replace("""pretrained.act_postprocess1.0.project.0""" , """neck.reassemble_stage.readout_projects.0.0""")
if "pretrained.act_postprocess2.0.project.0" in name:
a__ : Optional[int] = name.replace("""pretrained.act_postprocess2.0.project.0""" , """neck.reassemble_stage.readout_projects.1.0""")
if "pretrained.act_postprocess3.0.project.0" in name:
a__ : Any = name.replace("""pretrained.act_postprocess3.0.project.0""" , """neck.reassemble_stage.readout_projects.2.0""")
if "pretrained.act_postprocess4.0.project.0" in name:
a__ : Optional[int] = name.replace("""pretrained.act_postprocess4.0.project.0""" , """neck.reassemble_stage.readout_projects.3.0""")
# resize blocks
if "pretrained.act_postprocess1.3" in name:
a__ : int = name.replace("""pretrained.act_postprocess1.3""" , """neck.reassemble_stage.layers.0.projection""")
if "pretrained.act_postprocess1.4" in name:
a__ : Optional[int] = name.replace("""pretrained.act_postprocess1.4""" , """neck.reassemble_stage.layers.0.resize""")
if "pretrained.act_postprocess2.3" in name:
a__ : List[Any] = name.replace("""pretrained.act_postprocess2.3""" , """neck.reassemble_stage.layers.1.projection""")
if "pretrained.act_postprocess2.4" in name:
a__ : Dict = name.replace("""pretrained.act_postprocess2.4""" , """neck.reassemble_stage.layers.1.resize""")
if "pretrained.act_postprocess3.3" in name:
a__ : Union[str, Any] = name.replace("""pretrained.act_postprocess3.3""" , """neck.reassemble_stage.layers.2.projection""")
if "pretrained.act_postprocess4.3" in name:
a__ : int = name.replace("""pretrained.act_postprocess4.3""" , """neck.reassemble_stage.layers.3.projection""")
if "pretrained.act_postprocess4.4" in name:
a__ : Optional[Any] = name.replace("""pretrained.act_postprocess4.4""" , """neck.reassemble_stage.layers.3.resize""")
if "pretrained" in name:
a__ : List[str] = name.replace("""pretrained""" , """dpt""")
if "bn" in name:
a__ : int = name.replace("""bn""" , """batch_norm""")
if "head" in name:
a__ : Optional[Any] = name.replace("""head""" , """head.head""")
if "encoder.norm" in name:
a__ : Optional[int] = name.replace("""encoder.norm""" , """layernorm""")
if "auxlayer" in name:
a__ : Optional[Any] = name.replace("""auxlayer""" , """auxiliary_head.head""")
if "backbone" in name:
a__ : int = name.replace("""backbone""" , """backbone.bit.encoder""")
if ".." in name:
a__ : str = name.replace("""..""" , """.""")
if "stem.conv" in name:
a__ : Optional[int] = name.replace("""stem.conv""" , """bit.embedder.convolution""")
if "blocks" in name:
a__ : Optional[int] = name.replace("""blocks""" , """layers""")
if "convolution" in name and "backbone" in name:
a__ : Dict = name.replace("""convolution""" , """conv""")
if "layer" in name and "backbone" in name:
a__ : Tuple = name.replace("""layer""" , """layers""")
if "backbone.bit.encoder.bit" in name:
a__ : Optional[Any] = name.replace("""backbone.bit.encoder.bit""" , """backbone.bit""")
if "embedder.conv" in name:
a__ : int = name.replace("""embedder.conv""" , """embedder.convolution""")
if "backbone.bit.encoder.stem.norm" in name:
a__ : Union[str, Any] = name.replace("""backbone.bit.encoder.stem.norm""" , """backbone.bit.embedder.norm""")
return name
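# Illustrative example (added): tracing the renaming helper above (called rename_key
# in the upstream conversion script) on a typical backbone key:
#
#     'pretrained.model.blocks.0.attn.proj.weight'
#       -> 'dpt.encoder.layer.0.attention.output.dense.weight'
#
# via 'pretrained.model' -> 'dpt.encoder', 'attn.proj' -> 'attention.output.dense'
# and 'blocks' -> 'layer', applied in sequence.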
def lowerCAmelCase_ ( _lowercase : Optional[int] , _lowercase : Union[str, Any]) -> int:
"""simple docstring"""
for i in range(config.num_hidden_layers):
# read in weights + bias of input projection layer (in timm, this is a single matrix + bias)
a__ : Any = state_dict.pop(F'''dpt.encoder.layer.{i}.attn.qkv.weight''')
a__ : int = state_dict.pop(F'''dpt.encoder.layer.{i}.attn.qkv.bias''')
# next, add query, keys and values (in that order) to the state dict
a__ : Any = in_proj_weight[: config.hidden_size, :]
a__ : Dict = in_proj_bias[: config.hidden_size]
a__ : List[Any] = in_proj_weight[
config.hidden_size : config.hidden_size * 2, :
]
a__ : Optional[Any] = in_proj_bias[
config.hidden_size : config.hidden_size * 2
]
a__ : Optional[Any] = in_proj_weight[
-config.hidden_size :, :
]
a__ : Optional[Any] = in_proj_bias[-config.hidden_size :]
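# Illustrative note (added): timm checkpoints fuse query/key/value into one
# (3 * hidden_size, hidden_size) matrix; the slices above carve it into the separate
# query, key and value weights (and biases), in that order, for the HF layout.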
def lowerCAmelCase_ ( ) -> Tuple:
"""simple docstring"""
a__ : Optional[Any] = """http://images.cocodataset.org/val2017/000000039769.jpg"""
a__ : Union[str, Any] = Image.open(requests.get(_lowercase , stream=_lowercase).raw)
return im
@torch.no_grad()
def lowerCAmelCase_ ( _lowercase : List[str] , _lowercase : Optional[int] , _lowercase : List[Any] , _lowercase : Dict , _lowercase : Optional[Any]) -> int:
"""simple docstring"""
a__ , a__ : int = get_dpt_config(_lowercase)
# load original state_dict from URL
# state_dict = torch.hub.load_state_dict_from_url(checkpoint_url, map_location="cpu")
a__ : Union[str, Any] = torch.load(_lowercase , map_location="""cpu""")
# remove certain keys
remove_ignore_keys_(_lowercase)
# rename keys
for key in state_dict.copy().keys():
a__ : int = state_dict.pop(_lowercase)
a__ : str = val
# read in qkv matrices
read_in_q_k_v(_lowercase , _lowercase)
# load HuggingFace model
a__ : List[Any] = DPTForSemanticSegmentation(_lowercase) if """ade""" in checkpoint_url else DPTForDepthEstimation(_lowercase)
model.load_state_dict(_lowercase)
model.eval()
# Check outputs on an image
a__ : List[Any] = 480 if """ade""" in checkpoint_url else 384
a__ : str = DPTImageProcessor(size=_lowercase)
a__ : Tuple = prepare_img()
a__ : List[str] = image_processor(_lowercase , return_tensors="""pt""")
# forward pass
a__ : Any = model(**_lowercase).logits if """ade""" in checkpoint_url else model(**_lowercase).predicted_depth
if show_prediction:
a__ : Dict = (
torch.nn.functional.interpolate(
outputs.unsqueeze(1) , size=(image.size[1], image.size[0]) , mode="""bicubic""" , align_corners=_lowercase , )
.squeeze()
.cpu()
.numpy()
)
Image.fromarray((prediction / prediction.max()) * 255).show()
if pytorch_dump_folder_path is not None:
Path(_lowercase).mkdir(exist_ok=_lowercase)
print(F'''Saving model to {pytorch_dump_folder_path}''')
model.save_pretrained(_lowercase)
print(F'''Saving image processor to {pytorch_dump_folder_path}''')
image_processor.save_pretrained(_lowercase)
if push_to_hub:
model.push_to_hub("""ybelkada/dpt-hybrid-midas""")
image_processor.push_to_hub("""ybelkada/dpt-hybrid-midas""")
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--checkpoint_url",
default="https://github.com/intel-isl/DPT/releases/download/1_0/dpt_large-midas-2f21e586.pt",
type=str,
help="URL of the original DPT checkpoint you'd like to convert.",
)
parser.add_argument(
"--pytorch_dump_folder_path",
default=None,
type=str,
required=False,
help="Path to the output PyTorch model directory.",
)
parser.add_argument(
"--push_to_hub",
action="store_true",
)
parser.add_argument(
"--model_name",
default="dpt-large",
type=str,
help="Name of the model, in case you're pushing to the hub.",
)
parser.add_argument(
"--show_prediction",
action="store_true",
)
    args = parser.parse_args()
convert_dpt_checkpoint(
args.checkpoint_url, args.pytorch_dump_folder_path, args.push_to_hub, args.model_name, args.show_prediction
)
| 170 | 0 |
"""simple docstring"""
def euclidean_distance_sqr(point1, point2):
    return (point1[0] - point2[0]) ** 2 + (point1[1] - point2[1]) ** 2


def column_based_sort(array, column=0):
    return sorted(array, key=lambda x: x[column])


def dis_between_closest_pair(points, points_counts, min_dis=float("inf")):
    # brute force over all pairs
    for i in range(points_counts - 1):
        for j in range(i + 1, points_counts):
            current_dis = euclidean_distance_sqr(points[i], points[j])
            if current_dis < min_dis:
                min_dis = current_dis
    return min_dis


def dis_between_closest_in_strip(points, points_counts, min_dis=float("inf")):
    # each point only needs to be checked against a bounded number of neighbours
    for i in range(min(6, points_counts - 1), points_counts):
        for j in range(max(0, i - 6), i):
            current_dis = euclidean_distance_sqr(points[i], points[j])
            if current_dis < min_dis:
                min_dis = current_dis
    return min_dis


def closest_pair_of_points_sqr(points_sorted_on_x, points_sorted_on_y, points_counts):
    # base case: brute force for 3 or fewer points
    if points_counts <= 3:
        return dis_between_closest_pair(points_sorted_on_x, points_counts)

    # recursion
    mid = points_counts // 2
    closest_in_left = closest_pair_of_points_sqr(
        points_sorted_on_x, points_sorted_on_y[:mid], mid
    )
    closest_in_right = closest_pair_of_points_sqr(
        points_sorted_on_x, points_sorted_on_y[mid:], points_counts - mid
    )
    closest_pair_dis = min(closest_in_left, closest_in_right)

    cross_strip = []
    for point in points_sorted_on_x:
        if abs(point[0] - points_sorted_on_x[mid][0]) < closest_pair_dis:
            cross_strip.append(point)
    closest_in_strip = dis_between_closest_in_strip(
        cross_strip, len(cross_strip), closest_pair_dis
    )
    return min(closest_pair_dis, closest_in_strip)


def closest_pair_of_points(points, points_counts):
    points_sorted_on_x = column_based_sort(points, column=0)
    points_sorted_on_y = column_based_sort(points, column=1)
    return (
        closest_pair_of_points_sqr(points_sorted_on_x, points_sorted_on_y, points_counts)
    ) ** 0.5


if __name__ == "__main__":
    points = [(2, 3), (12, 30), (40, 50), (5, 1), (12, 10), (3, 4)]
    print("Distance:", closest_pair_of_points(points, len(points)))
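# Illustrative note (added): the recurrence T(n) = 2 T(n/2) + O(n) gives O(n log n)
# overall, assuming the strip scan stays linear; inside the strip each point is only
# compared with a bounded number of neighbours (at most 6 predecessors in the loop
# above), which is what keeps the combine step linear.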
| 144 |
"""simple docstring"""
from typing import Dict, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
IMAGENET_STANDARD_MEAN,
IMAGENET_STANDARD_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, logging
_a = logging.get_logger(__name__)
class _lowerCAmelCase ( lowercase ):
"""simple docstring"""
__UpperCAmelCase : Optional[Any] = ["pixel_values"]
def __init__( self : int, UpperCAmelCase__ : bool = True, UpperCAmelCase__ : Optional[Dict[str, int]] = None, UpperCAmelCase__ : PILImageResampling = PILImageResampling.BILINEAR, UpperCAmelCase__ : bool = True, UpperCAmelCase__ : Dict[str, int] = None, UpperCAmelCase__ : bool = True, UpperCAmelCase__ : Union[int, float] = 1 / 2_5_5, UpperCAmelCase__ : bool = True, UpperCAmelCase__ : Optional[Union[float, List[float]]] = None, UpperCAmelCase__ : Optional[Union[float, List[float]]] = None, **UpperCAmelCase__ : str, ):
super().__init__(**UpperCAmelCase__ )
__lowercase = size if size is not None else {"shortest_edge": 2_5_6}
__lowercase = get_size_dict(UpperCAmelCase__, default_to_square=UpperCAmelCase__ )
__lowercase = crop_size if crop_size is not None else {"height": 2_2_4, "width": 2_2_4}
__lowercase = get_size_dict(UpperCAmelCase__ )
__lowercase = do_resize
__lowercase = size
__lowercase = resample
__lowercase = do_center_crop
__lowercase = crop_size
__lowercase = do_rescale
__lowercase = rescale_factor
__lowercase = do_normalize
__lowercase = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN
__lowercase = image_std if image_std is not None else IMAGENET_STANDARD_STD
def _lowercase ( self : int, UpperCAmelCase__ : np.ndarray, UpperCAmelCase__ : Dict[str, int], UpperCAmelCase__ : PILImageResampling = PILImageResampling.BICUBIC, UpperCAmelCase__ : Optional[Union[str, ChannelDimension]] = None, **UpperCAmelCase__ : Dict, ):
__lowercase = get_size_dict(UpperCAmelCase__, default_to_square=UpperCAmelCase__ )
if "shortest_edge" not in size:
raise ValueError(F"""The `size` parameter must contain the key `shortest_edge`. Got {size.keys()}""" )
__lowercase = get_resize_output_image_size(UpperCAmelCase__, size=size["shortest_edge"], default_to_square=UpperCAmelCase__ )
return resize(UpperCAmelCase__, size=UpperCAmelCase__, resample=UpperCAmelCase__, data_format=UpperCAmelCase__, **UpperCAmelCase__ )
def _lowercase ( self : Dict, UpperCAmelCase__ : np.ndarray, UpperCAmelCase__ : Dict[str, int], UpperCAmelCase__ : Optional[Union[str, ChannelDimension]] = None, **UpperCAmelCase__ : Dict, ):
__lowercase = get_size_dict(UpperCAmelCase__ )
return center_crop(UpperCAmelCase__, size=(size["height"], size["width"]), data_format=UpperCAmelCase__, **UpperCAmelCase__ )
def _lowercase ( self : Optional[int], UpperCAmelCase__ : np.ndarray, UpperCAmelCase__ : float, UpperCAmelCase__ : Optional[Union[str, ChannelDimension]] = None, **UpperCAmelCase__ : Union[str, Any] ):
return rescale(UpperCAmelCase__, scale=UpperCAmelCase__, data_format=UpperCAmelCase__, **UpperCAmelCase__ )
def _lowercase ( self : List[Any], UpperCAmelCase__ : np.ndarray, UpperCAmelCase__ : Union[float, List[float]], UpperCAmelCase__ : Union[float, List[float]], UpperCAmelCase__ : Optional[Union[str, ChannelDimension]] = None, **UpperCAmelCase__ : int, ):
return normalize(UpperCAmelCase__, mean=UpperCAmelCase__, std=UpperCAmelCase__, data_format=UpperCAmelCase__, **UpperCAmelCase__ )
def _lowercase ( self : Any, UpperCAmelCase__ : ImageInput, UpperCAmelCase__ : Optional[bool] = None, UpperCAmelCase__ : Dict[str, int] = None, UpperCAmelCase__ : PILImageResampling = None, UpperCAmelCase__ : bool = None, UpperCAmelCase__ : Dict[str, int] = None, UpperCAmelCase__ : Optional[bool] = None, UpperCAmelCase__ : Optional[float] = None, UpperCAmelCase__ : Optional[bool] = None, UpperCAmelCase__ : Optional[Union[float, List[float]]] = None, UpperCAmelCase__ : Optional[Union[float, List[float]]] = None, UpperCAmelCase__ : Optional[Union[str, TensorType]] = None, UpperCAmelCase__ : Union[str, ChannelDimension] = ChannelDimension.FIRST, **UpperCAmelCase__ : Optional[int], ):
__lowercase = do_resize if do_resize is not None else self.do_resize
__lowercase = size if size is not None else self.size
__lowercase = get_size_dict(UpperCAmelCase__, default_to_square=UpperCAmelCase__ )
__lowercase = resample if resample is not None else self.resample
__lowercase = do_center_crop if do_center_crop is not None else self.do_center_crop
__lowercase = crop_size if crop_size is not None else self.crop_size
__lowercase = get_size_dict(UpperCAmelCase__ )
__lowercase = do_rescale if do_rescale is not None else self.do_rescale
__lowercase = rescale_factor if rescale_factor is not None else self.rescale_factor
__lowercase = do_normalize if do_normalize is not None else self.do_normalize
__lowercase = image_mean if image_mean is not None else self.image_mean
__lowercase = image_std if image_std is not None else self.image_std
__lowercase = make_list_of_images(UpperCAmelCase__ )
if not valid_images(UpperCAmelCase__ ):
raise ValueError(
"Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
"torch.Tensor, tf.Tensor or jax.ndarray." )
if do_resize and size is None:
raise ValueError("Size must be specified if do_resize is True." )
if do_center_crop and crop_size is None:
raise ValueError("Crop size must be specified if do_center_crop is True." )
if do_rescale and rescale_factor is None:
raise ValueError("Rescale factor must be specified if do_rescale is True." )
if do_normalize and (image_mean is None or image_std is None):
raise ValueError("Image mean and std must be specified if do_normalize is True." )
# All transformations expect numpy arrays.
__lowercase = [to_numpy_array(UpperCAmelCase__ ) for image in images]
if do_resize:
__lowercase = [self.resize(image=UpperCAmelCase__, size=UpperCAmelCase__, resample=UpperCAmelCase__ ) for image in images]
if do_center_crop:
__lowercase = [self.center_crop(image=UpperCAmelCase__, size=UpperCAmelCase__ ) for image in images]
if do_rescale:
__lowercase = [self.rescale(image=UpperCAmelCase__, scale=UpperCAmelCase__ ) for image in images]
if do_normalize:
__lowercase = [self.normalize(image=UpperCAmelCase__, mean=UpperCAmelCase__, std=UpperCAmelCase__ ) for image in images]
__lowercase = [to_channel_dimension_format(UpperCAmelCase__, UpperCAmelCase__ ) for image in images]
__lowercase = {"pixel_values": images}
return BatchFeature(data=UpperCAmelCase__, tensor_type=UpperCAmelCase__ )
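# Hedged usage sketch (added): any concrete subclass exposing this interface is driven
# the same way; the class name below is illustrative, not from this file:
#
#     processor = SomeImageProcessor(size={"shortest_edge": 256})
#     batch = processor(images=pil_image, return_tensors="pt")
#     pixel_values = batch["pixel_values"]   # (1, 3, 224, 224) after resize + center crop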
| 144 | 1 |
"""simple docstring"""
from math import factorial
DIGIT_FACTORIAL = {str(digit): factorial(digit) for digit in range(10)}


def digit_factorial_sum(number: int) -> int:
    if not isinstance(number, int):
        raise TypeError('Parameter number must be int')
    if number < 0:
        raise ValueError('Parameter number must be greater than or equal to 0')
    # Converts number in string to iterate on its digits and adds its factorial.
    return sum(DIGIT_FACTORIAL[digit] for digit in str(number))


def solution(chain_length: int = 60, number_limit: int = 1000000) -> int:
    if not isinstance(chain_length, int) or not isinstance(number_limit, int):
        raise TypeError('Parameters chain_length and number_limit must be int')
    if chain_length <= 0 or number_limit <= 0:
        raise ValueError(
            'Parameters chain_length and number_limit must be greater than 0')
    # the counter for the chains with the exact desired length
    chains_counter = 0
    # the cached sizes of the previous chains
    chain_sets_lengths = {}

    for start_chain_element in range(1, number_limit):
        # The temporary set will contain the elements of the chain
        chain_set = set()
        chain_set_length = 0

        # Stop computing the chain when you find a cached size, a repeating item or the
        # length is greater then the desired one.
        chain_element = start_chain_element
        while (
            chain_element not in chain_sets_lengths
            and chain_element not in chain_set
            and chain_set_length <= chain_length
        ):
            chain_set.add(chain_element)
            chain_set_length += 1
            chain_element = digit_factorial_sum(chain_element)

        if chain_element in chain_sets_lengths:
            chain_set_length += chain_sets_lengths[chain_element]

        chain_sets_lengths[start_chain_element] = chain_set_length

        # If chain contains the exact amount of elements increase the counter
        if chain_set_length == chain_length:
            chains_counter += 1
    return chains_counter


if __name__ == "__main__":
    import doctest
    doctest.testmod()
    print(f"{solution()}")
| 290 |
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
a__ : Any = logging.get_logger(__name__)
a__ : str = {
'SCUT-DLVCLab/lilt-roberta-en-base': (
'https://huggingface.co/SCUT-DLVCLab/lilt-roberta-en-base/resolve/main/config.json'
),
}
class lowercase_ ( a__ ):
__UpperCAmelCase = 'lilt'
def __init__( self , a=3_05_22 , a=7_68 , a=12 , a=12 , a=30_72 , a="gelu" , a=0.1 , a=0.1 , a=5_12 , a=2 , a=0.02 , a=1e-12 , a=0 , a="absolute" , a=None , a=4 , a=10_24 , **a , ):
super().__init__(pad_token_id=a , **a )
UpperCamelCase__ = vocab_size
UpperCamelCase__ = hidden_size
UpperCamelCase__ = num_hidden_layers
UpperCamelCase__ = num_attention_heads
UpperCamelCase__ = hidden_act
UpperCamelCase__ = intermediate_size
UpperCamelCase__ = hidden_dropout_prob
UpperCamelCase__ = attention_probs_dropout_prob
UpperCamelCase__ = max_position_embeddings
UpperCamelCase__ = type_vocab_size
UpperCamelCase__ = initializer_range
UpperCamelCase__ = layer_norm_eps
UpperCamelCase__ = position_embedding_type
UpperCamelCase__ = classifier_dropout
UpperCamelCase__ = channel_shrink_ratio
UpperCamelCase__ = max_ad_position_embeddings
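# Illustrative example (added): in the upstream `transformers` API this configuration
# class is `LiltConfig`; a model is built from it in the usual way:
#
#     from transformers import LiltConfig, LiltModel
#     config = LiltConfig(channel_shrink_ratio=4, max_2d_position_embeddings=1024)
#     model = LiltModel(config)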
| 80 | 0 |
'''simple docstring'''
def sum_of_proper_divisors(input_num: int) -> int:
    if not isinstance(input_num, int):
        raise ValueError("Input must be an integer")
    if input_num <= 0:
        raise ValueError("Input must be positive")
    return sum(
        divisor for divisor in range(1, input_num // 2 + 1) if input_num % divisor == 0
    )
if __name__ == "__main__":
import doctest
doctest.testmod()
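# Illustrative example (added): 28 is a perfect number, so its proper-divisor sum
# equals the number itself:
#
#     sum_of_proper_divisors(28)   # 1 + 2 + 4 + 7 + 14 == 28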
| 67 |
'''simple docstring'''
import argparse
import os
import evaluate
import torch
from datasets import load_dataset
from torch.optim import AdamW
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed
from accelerate import Accelerator, DistributedType
########################################################################
# This is a fully working simple example to use Accelerate,
# specifically showcasing the experiment tracking capability,
# and builds off the `nlp_example.py` script.
#
# This example trains a Bert base model on GLUE MRPC
# in any of the following settings (with the same script):
# - single CPU or single GPU
# - multi GPUS (using PyTorch distributed mode)
# - (multi) TPUs
# - fp16 (mixed-precision) or fp32 (normal precision)
#
# To help focus on the differences in the code, building `DataLoaders`
# was refactored into its own function.
# New additions from the base script can be found quickly by
# looking for the # New Code # tags
#
# To run it in each of these various modes, follow the instructions
# in the readme for examples:
# https://github.com/huggingface/accelerate/tree/main/examples
#
########################################################################
MAX_GPU_BATCH_SIZE = 16
EVAL_BATCH_SIZE = 32


def get_dataloaders(accelerator: Accelerator, batch_size: int = 16):
    tokenizer = AutoTokenizer.from_pretrained("bert-base-cased")
    datasets = load_dataset("glue", "mrpc")

    def tokenize_function(examples):
        # max_length=None => use the model max length (it's actually the default)
        outputs = tokenizer(examples["sentence1"], examples["sentence2"], truncation=True, max_length=None)
        return outputs

    # Apply the method we just defined to all the examples in all the splits of the dataset
    # starting with the main process first:
    with accelerator.main_process_first():
        tokenized_datasets = datasets.map(
            tokenize_function, batched=True, remove_columns=["idx", "sentence1", "sentence2"]
        )

    # We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the
    # transformers library
    tokenized_datasets = tokenized_datasets.rename_column("label", "labels")

    def collate_fn(examples):
        # On TPU it's best to pad everything to the same length or training will be very slow.
        max_length = 128 if accelerator.distributed_type == DistributedType.TPU else None
        # When using mixed precision we want round multiples of 8/16
        if accelerator.mixed_precision == "fp8":
            pad_to_multiple_of = 16
        elif accelerator.mixed_precision != "no":
            pad_to_multiple_of = 8
        else:
            pad_to_multiple_of = None

        return tokenizer.pad(
            examples, padding="longest", max_length=max_length, pad_to_multiple_of=pad_to_multiple_of, return_tensors="pt"
        )

    # Instantiate dataloaders.
    train_dataloader = DataLoader(
        tokenized_datasets["train"], shuffle=True, collate_fn=collate_fn, batch_size=batch_size
    )
    eval_dataloader = DataLoader(
        tokenized_datasets["validation"], shuffle=False, collate_fn=collate_fn, batch_size=EVAL_BATCH_SIZE
    )

    return train_dataloader, eval_dataloader
# For testing only
if os.environ.get("""TESTING_MOCKED_DATALOADERS""", None) == "1":
from accelerate.test_utils.training import mocked_dataloaders
_lowerCamelCase = mocked_dataloaders # noqa: F811
def training_function(config, args):
    # For testing only
    if os.environ.get("TESTING_MOCKED_DATALOADERS", None) == "1":
        config["num_epochs"] = 2
    # Initialize Accelerator

    # New Code #
    # We pass in "all" to `log_with` to grab all available trackers in the environment
    # Note: If using a custom `Tracker` class, should be passed in here such as:
    # >>> log_with = ["all", MyCustomTrackerClassInstance()]
    if args.with_tracking:
        accelerator = Accelerator(
            cpu=args.cpu, mixed_precision=args.mixed_precision, log_with="all", project_dir=args.project_dir
        )
    else:
        accelerator = Accelerator(cpu=args.cpu, mixed_precision=args.mixed_precision)
    # Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
    lr = config["lr"]
    num_epochs = int(config["num_epochs"])
    seed = int(config["seed"])
    batch_size = int(config["batch_size"])
    set_seed(seed)

    train_dataloader, eval_dataloader = get_dataloaders(accelerator, batch_size)
    metric = evaluate.load("glue", "mrpc")

    # If the batch size is too big we use gradient accumulation
    gradient_accumulation_steps = 1
    if batch_size > MAX_GPU_BATCH_SIZE and accelerator.distributed_type != DistributedType.TPU:
        gradient_accumulation_steps = batch_size // MAX_GPU_BATCH_SIZE
        batch_size = MAX_GPU_BATCH_SIZE

    # Instantiate the model (we build the model here so that the seed also controls new weights initialization)
    model = AutoModelForSequenceClassification.from_pretrained("bert-base-cased", return_dict=True)

    # We could avoid this line since the accelerator is set with `device_placement=True` (default value).
    # Note that if you are placing tensors on devices manually, this line absolutely needs to be before the optimizer
    # creation otherwise training will not work on TPU (`accelerate` will kindly throw an error to make us aware of that).
    model = model.to(accelerator.device)

    # Instantiate optimizer
    optimizer = AdamW(params=model.parameters(), lr=lr)

    # Instantiate scheduler
    lr_scheduler = get_linear_schedule_with_warmup(
        optimizer=optimizer,
        num_warmup_steps=100,
        num_training_steps=(len(train_dataloader) * num_epochs) // gradient_accumulation_steps,
    )

    # Prepare everything
    # There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
    # prepare method.
    model, optimizer, train_dataloader, eval_dataloader, lr_scheduler = accelerator.prepare(
        model, optimizer, train_dataloader, eval_dataloader, lr_scheduler
    )

    # New Code #
    # We need to initialize the trackers we use. Overall configurations can also be stored
    if args.with_tracking:
        run = os.path.split(__file__)[-1].split(".")[0]
        accelerator.init_trackers(run, config)

    # Now we train the model
    for epoch in range(num_epochs):
        model.train()
        # New Code #
        # For our tracking example, we will log the total loss of each epoch
        if args.with_tracking:
            total_loss = 0
        for step, batch in enumerate(train_dataloader):
            # We could avoid this line since we set the accelerator with `device_placement=True`.
            batch.to(accelerator.device)
            outputs = model(**batch)
            loss = outputs.loss
            # New Code #
            if args.with_tracking:
                total_loss += loss.detach().float()
            loss = loss / gradient_accumulation_steps
            accelerator.backward(loss)
            if step % gradient_accumulation_steps == 0:
                optimizer.step()
                lr_scheduler.step()
                optimizer.zero_grad()

        model.eval()
        for step, batch in enumerate(eval_dataloader):
            # We could avoid this line since we set the accelerator with `device_placement=True` (the default).
            batch.to(accelerator.device)
            with torch.no_grad():
                outputs = model(**batch)
            predictions = outputs.logits.argmax(dim=-1)
            predictions, references = accelerator.gather_for_metrics((predictions, batch["labels"]))
            metric.add_batch(
                predictions=predictions,
                references=references,
            )

        eval_metric = metric.compute()
        # Use accelerator.print to print only on the main process.
        accelerator.print(f"epoch {epoch}:", eval_metric)

        # New Code #
        # To actually log, we call `Accelerator.log`
        # The values passed can be of `str`, `int`, `float` or `dict` of `str` to `float`/`int`
        if args.with_tracking:
            accelerator.log(
                {
                    "accuracy": eval_metric["accuracy"],
                    "f1": eval_metric["f1"],
                    "train_loss": total_loss.item() / len(train_dataloader),
                    "epoch": epoch,
                },
                step=epoch,
            )

    # New Code #
    # When a run is finished, you should call `accelerator.end_training()`
    # to close all of the open trackers
    if args.with_tracking:
        accelerator.end_training()
def main():
    parser = argparse.ArgumentParser(description="Simple example of training script.")
    parser.add_argument(
        "--mixed_precision",
        type=str,
        default=None,
        choices=["no", "fp16", "bf16", "fp8"],
        help="Whether to use mixed precision. Choose"
        "between fp16 and bf16 (bfloat16). Bf16 requires PyTorch >= 1.10."
        "and an Nvidia Ampere GPU.",
    )
    parser.add_argument("--cpu", action="store_true", help="If passed, will train on the CPU.")
    parser.add_argument(
        "--with_tracking",
        action="store_true",
        help="Whether to load in all available experiment trackers from the environment and use them for logging.",
    )
    parser.add_argument(
        "--project_dir",
        type=str,
        default="logs",
        help="Location on where to store experiment tracking logs and relevant project information",
    )
    args = parser.parse_args()
    config = {"lr": 2e-5, "num_epochs": 3, "seed": 42, "batch_size": 16}
    training_function(config, args)
if __name__ == "__main__":
    main()
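# Editor's note: a minimal, self-contained sketch of the tracking API exercised
# above, kept in its own function so it does not run on import. The project
# name and logged values here are illustrative assumptions, not part of the
# original script.
def _tracking_api_sketch():
    accelerator = Accelerator(log_with="all", project_dir="logs")
    accelerator.init_trackers("my_project", config={"lr": 2e-5})
    accelerator.log({"train_loss": 0.42}, step=0)
    accelerator.end_training()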
| 67 | 1 |
def apply_table(inp: str, table: list[int]) -> str:
    """Permute/select bits of `inp` according to the 1-based indices in `table`."""
    res = ""
    for i in table:
        res += inp[i - 1]
    return res


def left_shift(data: str) -> str:
    """Rotate a bit-string one position to the left."""
    return data[1:] + data[0]


def xor(a: str, b: str) -> str:
    """Bitwise XOR of two equal-length bit-strings."""
    res = ""
    for i in range(len(a)):
        if a[i] == b[i]:
            res += "0"
        else:
            res += "1"
    return res


def apply_sbox(s: list[list[int]], data: str) -> str:
    """Look up a 4-bit block in S-box `s`: the outer bits select the row, the inner bits the column."""
    row = int("0b" + data[0] + data[-1], 2)
    col = int("0b" + data[1:3], 2)
    return bin(s[row][col])[2:]


def function(expansion: list[int], s0: list[list[int]], s1: list[list[int]], key: str, message: str) -> str:
    """One Feistel round of simplified DES (uses the global `p4_table` defined below)."""
    left = message[:4]
    right = message[4:]
    temp = apply_table(right, expansion)
    temp = xor(temp, key)
    l = apply_sbox(s0, temp[:4])  # noqa: E741
    r = apply_sbox(s1, temp[4:])
    l = "0" * (2 - len(l)) + l  # noqa: E741
    r = "0" * (2 - len(r)) + r
    temp = apply_table(l + r, p4_table)
    temp = xor(left, temp)
    return temp + right
if __name__ == "__main__":
lowercase_ = input('Enter 10 bit key: ')
lowercase_ = input('Enter 8 bit message: ')
lowercase_ = [6, 3, 7, 4, 8, 5, 1_0, 9]
lowercase_ = [3, 5, 2, 7, 4, 1_0, 1, 9, 8, 6]
lowercase_ = [2, 4, 3, 1]
lowercase_ = [2, 6, 3, 1, 4, 8, 5, 7]
lowercase_ = [4, 1, 3, 5, 7, 2, 8, 6]
lowercase_ = [4, 1, 2, 3, 2, 3, 4, 1]
lowercase_ = [[1, 0, 3, 2], [3, 2, 1, 0], [0, 2, 1, 3], [3, 1, 3, 2]]
lowercase_ = [[0, 1, 2, 3], [2, 0, 1, 3], [3, 0, 1, 0], [2, 1, 0, 3]]
# key generation
lowercase_ = apply_table(key, paa_table)
lowercase_ = temp[:5]
lowercase_ = temp[5:]
lowercase_ = left_shift(left)
lowercase_ = left_shift(right)
lowercase_ = apply_table(left + right, pa_table)
lowercase_ = left_shift(left)
lowercase_ = left_shift(right)
lowercase_ = left_shift(left)
lowercase_ = left_shift(right)
lowercase_ = apply_table(left + right, pa_table)
# encryption
lowercase_ = apply_table(message, IP)
lowercase_ = function(expansion, sa, sa, keya, temp)
lowercase_ = temp[4:] + temp[:4]
lowercase_ = function(expansion, sa, sa, keya, temp)
lowercase_ = apply_table(temp, IP_inv)
print('Cipher text is:', CT)
# decryption
lowercase_ = apply_table(CT, IP)
lowercase_ = function(expansion, sa, sa, keya, temp)
lowercase_ = temp[4:] + temp[:4]
lowercase_ = function(expansion, sa, sa, keya, temp)
lowercase_ = apply_table(temp, IP_inv)
print('Plain text after decypting is:', PT)
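# Editor's note: a hand-checked illustration of the primitives above, wrapped
# in a function so it does not run on import. apply_table selects bits by
# 1-based index, left_shift rotates one position, and xor compares bit-strings
# position by position.
def _sdes_primitives_demo():
    assert apply_table("10110", [2, 4, 3, 1]) == "0111"
    assert left_shift("10110") == "01101"
    assert xor("1010", "0110") == "1100"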
| 205 |
def solution(num: int = 1000000) -> int:
    """Return the starting number below `num` whose Collatz chain is longest,
    memoising chain lengths in `counters` so each number is walked only once."""
    largest_number = 1
    pre_counter = 1
    counters = {1: 1}

    for input1 in range(2, num):
        counter = 0
        number = input1

        while True:
            if number in counters:
                counter += counters[number]
                break
            if number % 2 == 0:
                number //= 2
                counter += 1
            else:
                number = (3 * number) + 1
                counter += 1

        if input1 not in counters:
            counters[input1] = counter

        if counter > pre_counter:
            largest_number = input1
            pre_counter = counter

    return largest_number
if __name__ == "__main__":
print(solution(int(input().strip())))
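# Editor's note: the same memoisation idea as `solution`, shown as a tiny
# standalone recursive helper; this is an illustration, not part of the
# original solution.
from functools import lru_cache


@lru_cache(maxsize=None)
def chain_length(n: int) -> int:
    """Number of terms in the Collatz chain starting at n, e.g. chain_length(13) == 10."""
    if n == 1:
        return 1
    return 1 + chain_length(n // 2 if n % 2 == 0 else 3 * n + 1)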
| 205 | 1 |
speed_chart: dict[str, float] = {
    "km/h": 1.0,
    "m/s": 3.6,
    "mph": 1.609_344,
    "knot": 1.852,
}

speed_chart_inverse: dict[str, float] = {
    "km/h": 1.0,
    "m/s": 0.277_777_778,
    "mph": 0.621_371_192,
    "knot": 0.539_956_803,
}


def convert_speed(speed: float, unit_from: str, unit_to: str) -> float:
    """Convert `speed` from `unit_from` to `unit_to` using the charts above, rounded to 3 decimals."""
    if unit_to not in speed_chart or unit_from not in speed_chart_inverse:
        msg = (
            f"Incorrect 'from_type' or 'to_type' value: {unit_from!r}, {unit_to!r}\n"
            f"Valid values are: {', '.join(speed_chart_inverse)}"
        )
        raise ValueError(msg)
    return round(speed * speed_chart[unit_from] * speed_chart_inverse[unit_to], 3)
if __name__ == "__main__":
import doctest
doctest.testmod()
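# Editor's note: example conversions, hand-checked against the charts above
# (convert_speed rounds to three decimals):
#   convert_speed(100, "km/h", "m/s")  -> 27.778
#   convert_speed(100, "km/h", "mph")  -> 62.137
#   convert_speed(10, "knot", "km/h")  -> 18.52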
| 231 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_speech_available,
is_torch_available,
)
_import_structure = {
    "configuration_trocr": ["TROCR_PRETRAINED_CONFIG_ARCHIVE_MAP", "TrOCRConfig"],
    "processing_trocr": ["TrOCRProcessor"],
}
try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_trocr"] = [
        "TROCR_PRETRAINED_MODEL_ARCHIVE_LIST",
        "TrOCRForCausalLM",
        "TrOCRPreTrainedModel",
    ]
if TYPE_CHECKING:
    from .configuration_trocr import TROCR_PRETRAINED_CONFIG_ARCHIVE_MAP, TrOCRConfig
    from .processing_trocr import TrOCRProcessor

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_trocr import TROCR_PRETRAINED_MODEL_ARCHIVE_LIST, TrOCRForCausalLM, TrOCRPreTrainedModel

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
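# Editor's note: a minimal sketch of the same lazy-loading idea using PEP 562
# module-level __getattr__, independent of transformers' _LazyModule helper;
# the mapping below is a hypothetical illustration, shown commented out so it
# does not interfere with the _LazyModule installed above.
#
#   import importlib
#
#   _lazy_map = {"TrOCRConfig": ".configuration_trocr"}
#
#   def __getattr__(name):
#       if name in _lazy_map:
#           module = importlib.import_module(_lazy_map[name], __name__)
#           return getattr(module, name)
#       raise AttributeError(f"module {__name__!r} has no attribute {name!r}")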
| 231 | 1 |
import unittest
from transformers import SPIECE_UNDERLINE
from transformers.models.speecht5 import SpeechT5Tokenizer
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow
from transformers.tokenization_utils import AddedToken
from ...test_tokenization_common import TokenizerTesterMixin
SAMPLE_VOCAB = get_tests_dir("fixtures/test_sentencepiece_bpe_char.model")
@require_sentencepiece
@require_tokenizers
class SpeechT5TokenizerTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = SpeechT5Tokenizer
    test_rust_tokenizer = False
    test_sentencepiece = True

    def setUp(self):
        super().setUp()

        # We have a SentencePiece fixture for testing
        tokenizer = SpeechT5Tokenizer(SAMPLE_VOCAB)

        mask_token = AddedToken("<mask>", lstrip=True, rstrip=False)
        tokenizer.mask_token = mask_token
        tokenizer.add_special_tokens({"mask_token": mask_token})
        tokenizer.add_tokens(["<ctc_blank>"])

        tokenizer.save_pretrained(self.tmpdirname)

    def get_input_output_texts(self, tokenizer):
        input_text = "this is a test"
        output_text = "this is a test"
        return input_text, output_text

    def get_clean_sequence(self, tokenizer, with_prefix_space=False, max_length=20, min_length=5):
        input_text, output_text = self.get_input_output_texts(tokenizer)
        ids = tokenizer.encode(output_text, add_special_tokens=False)
        text = tokenizer.decode(ids, clean_up_tokenization_spaces=False)
        return text, ids

    def test_convert_token_and_id(self):
        """Test ``_convert_token_to_id`` and ``_convert_id_to_token``."""
        token = "<pad>"
        token_id = 1

        self.assertEqual(self.get_tokenizer()._convert_token_to_id(token), token_id)
        self.assertEqual(self.get_tokenizer()._convert_id_to_token(token_id), token)

    def test_get_vocab(self):
        vocab_keys = list(self.get_tokenizer().get_vocab().keys())

        self.assertEqual(vocab_keys[0], "<s>")
        self.assertEqual(vocab_keys[1], "<pad>")
        self.assertEqual(vocab_keys[-4], "œ")
        self.assertEqual(vocab_keys[-2], "<mask>")
        self.assertEqual(vocab_keys[-1], "<ctc_blank>")
        self.assertEqual(len(vocab_keys), 81)

    def test_vocab_size(self):
        self.assertEqual(self.get_tokenizer().vocab_size, 79)

    def test_add_tokens_tokenizer(self):
        tokenizers = self.get_tokenizers(do_lower_case=False)
        for tokenizer in tokenizers:
            with self.subTest(f"{tokenizer.__class__.__name__}"):
                vocab_size = tokenizer.vocab_size
                all_size = len(tokenizer)

                self.assertNotEqual(vocab_size, 0)

                # We usually have added tokens from the start in tests because our vocab fixtures are
                # smaller than the original vocabs - let's not assert this
                # self.assertEqual(vocab_size, all_size)

                new_toks = ["aaaaa bbbbbb", "cccccccccdddddddd"]
                added_toks = tokenizer.add_tokens(new_toks)
                vocab_size_2 = tokenizer.vocab_size
                all_size_2 = len(tokenizer)

                self.assertNotEqual(vocab_size_2, 0)
                self.assertEqual(vocab_size, vocab_size_2)
                self.assertEqual(added_toks, len(new_toks))
                self.assertEqual(all_size_2, all_size + len(new_toks))

                tokens = tokenizer.encode("aaaaa bbbbbb low cccccccccdddddddd l", add_special_tokens=False)

                self.assertGreaterEqual(len(tokens), 4)
                self.assertGreater(tokens[0], tokenizer.vocab_size - 1)
                self.assertGreater(tokens[-3], tokenizer.vocab_size - 1)

                new_toks_2 = {"eos_token": ">>>>|||<||<<|<<", "pad_token": "<<<<<|||>|>>>>|>"}
                added_toks_2 = tokenizer.add_special_tokens(new_toks_2)
                vocab_size_3 = tokenizer.vocab_size
                all_size_3 = len(tokenizer)

                self.assertNotEqual(vocab_size_3, 0)
                self.assertEqual(vocab_size, vocab_size_3)
                self.assertEqual(added_toks_2, len(new_toks_2))
                self.assertEqual(all_size_3, all_size_2 + len(new_toks_2))

                tokens = tokenizer.encode(
                    ">>>>|||<||<<|<< aaaaabbbbbb low cccccccccdddddddd <<<<<|||>|>>>>|> l", add_special_tokens=False
                )

                self.assertGreaterEqual(len(tokens), 6)
                self.assertGreater(tokens[0], tokenizer.vocab_size - 1)
                self.assertGreater(tokens[0], tokens[1])
                self.assertGreater(tokens[-3], tokenizer.vocab_size - 1)
                self.assertGreater(tokens[-3], tokens[-4])
                self.assertEqual(tokens[0], tokenizer.eos_token_id)
                self.assertEqual(tokens[-3], tokenizer.pad_token_id)

    def test_pickle_subword_regularization_tokenizer(self):
        pass

    def test_subword_regularization_tokenizer(self):
        pass

    def test_full_tokenizer(self):
        tokenizer = self.get_tokenizer()

        tokens = tokenizer.tokenize("This is a test")
        # fmt: off
        self.assertListEqual(tokens, [SPIECE_UNDERLINE, "T", "h", "i", "s", SPIECE_UNDERLINE, "i", "s", SPIECE_UNDERLINE, "a", SPIECE_UNDERLINE, "t", "e", "s", "t"])
        # fmt: on

        self.assertListEqual(
            tokenizer.convert_tokens_to_ids(tokens),
            [4, 32, 11, 10, 12, 4, 10, 12, 4, 7, 4, 6, 5, 12, 6],
        )

        tokens = tokenizer.tokenize("I was born in 92000, and this is falsé.")
        self.assertListEqual(
            tokens,
            [SPIECE_UNDERLINE, "I", SPIECE_UNDERLINE, "w", "a", "s", SPIECE_UNDERLINE, "b", "o", "r", "n", SPIECE_UNDERLINE, "i", "n", SPIECE_UNDERLINE, "92000", ",", SPIECE_UNDERLINE, "a", "n", "d", SPIECE_UNDERLINE, "t", "h", "i", "s", SPIECE_UNDERLINE, "i", "s", SPIECE_UNDERLINE, "f", "a", "l", "s", "é", "."],
        )
        ids = tokenizer.convert_tokens_to_ids(tokens)
        # fmt: off
        self.assertListEqual(ids, [4, 30, 4, 20, 7, 12, 4, 25, 8, 13, 9, 4, 10, 9, 4, 3, 23, 4, 7, 9, 14, 4, 6, 11, 10, 12, 4, 10, 12, 4, 19, 7, 15, 12, 73, 26])
        # fmt: on

        back_tokens = tokenizer.convert_ids_to_tokens(ids)
        self.assertListEqual(
            back_tokens,
            [SPIECE_UNDERLINE, "I", SPIECE_UNDERLINE, "w", "a", "s", SPIECE_UNDERLINE, "b", "o", "r", "n", SPIECE_UNDERLINE, "i", "n", SPIECE_UNDERLINE, "<unk>", ",", SPIECE_UNDERLINE, "a", "n", "d", SPIECE_UNDERLINE, "t", "h", "i", "s", SPIECE_UNDERLINE, "i", "s", SPIECE_UNDERLINE, "f", "a", "l", "s", "é", "."],
        )

    @slow
    def test_tokenizer_integration(self):
        sequences = [
"Transformers (formerly known as pytorch-transformers and pytorch-pretrained-bert) provides "
"general-purpose architectures (BERT, GPT, RoBERTa, XLM, DistilBert, XLNet...) for Natural "
"Language Understanding (NLU) and Natural Language Generation (NLG) with over thirty-two pretrained "
"models in one hundred plus languages and deep interoperability between Jax, PyTorch and TensorFlow.",
"BERT is designed to pre-train deep bidirectional representations from unlabeled text by jointly "
"conditioning on both left and right context in all layers.",
"The quick brown fox jumps over the lazy dog.",
]
# fmt: off
        expected_encoding = {
"input_ids": [
[4, 32, 13, 7, 9, 12, 19, 8, 13, 18, 5, 13, 12, 4, 64, 19, 8, 13, 18, 5, 13, 15, 22, 4, 28, 9, 8, 20, 9, 4, 7, 12, 4, 24, 22, 6, 8, 13, 17, 11, 39, 6, 13, 7, 9, 12, 19, 8, 13, 18, 5, 13, 12, 4, 7, 9, 14, 4, 24, 22, 6, 8, 13, 17, 11, 39, 24, 13, 5, 6, 13, 7, 10, 9, 5, 14, 39, 25, 5, 13, 6, 63, 4, 24, 13, 8, 27, 10, 14, 5, 12, 4, 21, 5, 9, 5, 13, 7, 15, 39, 24, 16, 13, 24, 8, 12, 5, 4, 7, 13, 17, 11, 10, 6, 5, 17, 6, 16, 13, 5, 12, 4, 64, 40, 47, 54, 32, 23, 4, 53, 49, 32, 23, 4, 54, 8, 40, 47, 54, 32, 7, 23, 4, 69, 52, 43, 23, 4, 51, 10, 12, 6, 10, 15, 40, 5, 13, 6, 23, 4, 69, 52, 48, 5, 6, 26, 26, 26, 63, 4, 19, 8, 13, 4, 48, 7, 6, 16, 13, 7, 15, 4, 52, 7, 9, 21, 16, 7, 21, 5, 4, 61, 9, 14, 5, 13, 12, 6, 7, 9, 14, 10, 9, 21, 4, 64, 48, 52, 61, 63, 4, 7, 9, 14, 4, 48, 7, 6, 16, 13, 7, 15, 4, 52, 7, 9, 21, 16, 7, 21, 5, 4, 53, 5, 9, 5, 13, 7, 6, 10, 8, 9, 4, 64, 48, 52, 53, 63, 4, 20, 10, 6, 11, 4, 8, 27, 5, 13, 4, 6, 11, 10, 13, 6, 22, 39, 6, 20, 8, 4, 24, 13, 5, 6, 13, 7, 10, 9, 5, 14, 4, 18, 8, 14, 5, 15, 12, 4, 10, 9, 4, 8, 9, 5, 4, 11, 16, 9, 14, 13, 5, 14, 4, 24, 15, 16, 12, 4, 15, 7, 9, 21, 16, 7, 21, 5, 12, 4, 7, 9, 14, 4, 14, 5, 5, 24, 4, 10, 9, 6, 5, 13, 8, 24, 5, 13, 7, 25, 10, 15, 10, 6, 22, 4, 25, 5, 6, 20, 5, 5, 9, 4, 58, 7, 37, 23, 4, 49, 22, 32, 8, 13, 17, 11, 4, 7, 9, 14, 4, 32, 5, 9, 12, 8, 13, 55, 15, 8, 20, 26, 2],
[4, 40, 47, 54, 32, 4, 10, 12, 4, 14, 5, 12, 10, 21, 9, 5, 14, 4, 6, 8, 4, 24, 13, 5, 39, 6, 13, 7, 10, 9, 4, 14, 5, 5, 24, 4, 25, 10, 14, 10, 13, 5, 17, 6, 10, 8, 9, 7, 15, 4, 13, 5, 24, 13, 5, 12, 5, 9, 6, 7, 6, 10, 8, 9, 12, 4, 19, 13, 8, 18, 4, 16, 9, 15, 7, 25, 5, 15, 5, 14, 4, 6, 5, 37, 6, 4, 25, 22, 4, 46, 8, 10, 9, 6, 15, 22, 4, 17, 8, 9, 14, 10, 6, 10, 8, 9, 10, 9, 21, 4, 8, 9, 4, 25, 8, 6, 11, 4, 15, 5, 19, 6, 4, 7, 9, 14, 4, 13, 10, 21, 11, 6, 4, 17, 8, 9, 6, 5, 37, 6, 4, 10, 9, 4, 7, 15, 15, 4, 15, 7, 22, 5, 13, 12, 26, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1],
[4, 32, 11, 5, 4, 45, 16, 10, 17, 28, 4, 25, 13, 8, 20, 9, 4, 19, 8, 37, 4, 46, 16, 18, 24, 12, 4, 8, 27, 5, 13, 4, 6, 11, 5, 4, 15, 7, 57, 22, 4, 14, 8, 21, 26, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1],
],
"attention_mask": [
[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1],
[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
]
}
# fmt: on
        self.tokenizer_integration_test_util(
            expected_encoding=expected_encoding,
            model_name="microsoft/speecht5_asr",
            revision="c5ef64c71905caeccde0e4462ef3f9077224c524",
            sequences=sequences,
        )
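# Editor's note: SPIECE_UNDERLINE is the "▁" (U+2581) word-boundary marker used
# by SentencePiece, so this character-level tokenizer maps "this is a test" to
# ["▁", "t", "h", "i", "s", "▁", "i", "s", "▁", "a", "▁", "t", "e", "s", "t"],
# as asserted in test_full_tokenizer above. To run only this file (standard
# pytest invocation; the path assumes the usual transformers layout):
#   pytest tests/models/speecht5/test_tokenization_speecht5.py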
| 68 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

GPT_NEOX_JAPANESE_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "abeja/gpt-neox-japanese-2.7b": "https://huggingface.co/abeja/gpt-neox-japanese-2.7b/resolve/main/config.json",
}
class GPTNeoXJapaneseConfig(PretrainedConfig):
    model_type = "gpt_neox_japanese"

    def __init__(
        self,
        vocab_size=32000,
        hidden_size=2560,
        num_hidden_layers=32,
        num_attention_heads=32,
        intermediate_multiple_size=4,
        hidden_act="gelu",
        rotary_pct=1.00,
        rotary_emb_base=10000,
        max_position_embeddings=2048,
        initializer_range=0.02,
        layer_norm_eps=1e-5,
        use_cache=True,
        bos_token_id=31996,
        eos_token_id=31999,
        attention_dropout=0.1,
        hidden_dropout=0.0,
        **kwargs,
    ):
        super().__init__(bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_multiple_size = intermediate_multiple_size
        self.hidden_act = hidden_act
        self.rotary_pct = rotary_pct
        self.rotary_emb_base = rotary_emb_base
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.use_cache = use_cache
        self.attention_dropout = attention_dropout
        self.hidden_dropout = hidden_dropout
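# Editor's note: a usage sketch, kept in a function so it does not run on
# import; the override value is illustrative.
def _config_usage_sketch():
    config = GPTNeoXJapaneseConfig(hidden_dropout=0.1)
    assert config.hidden_size == 2560  # default defined above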
| 68 | 1 |
import doctest
import logging
import os
import unittest
from pathlib import Path
from typing import List, Union
import transformers
from transformers.testing_utils import require_tf, require_torch, slow
logger = logging.getLogger()
@unittest.skip("Temporarily disable the doc tests." )
@require_torch
@require_tf
@slow
class TestCodeExamples(unittest.TestCase):
    def analyze_directory(
        self,
        directory: Path,
        identifier: Union[str, None] = None,
        ignore_files: Union[List[str], None] = None,
        n_identifier: Union[str, List[str], None] = None,
        only_modules: bool = True,
    ):
        """Run the doctests of every matching file in `directory`, filtered by `identifier`/`n_identifier`."""
        files = [file for file in os.listdir(directory) if os.path.isfile(os.path.join(directory, file))]

        if identifier is not None:
            files = [file for file in files if identifier in file]

        if n_identifier is not None:
            if isinstance(n_identifier, list):
                for n_ in n_identifier:
                    files = [file for file in files if n_ not in file]
            else:
                files = [file for file in files if n_identifier not in file]

        ignore_files = ignore_files or []
        ignore_files.append("__init__.py")
        files = [file for file in files if file not in ignore_files]

        for file in files:
            # Open all files
            print("Testing", file)

            if only_modules:
                module_identifier = file.split(".")[0]
                try:
                    module_identifier = getattr(transformers, module_identifier)
                    suite = doctest.DocTestSuite(module_identifier)
                    result = unittest.TextTestRunner().run(suite)
                    self.assertIs(len(result.failures), 0)
                except AttributeError:
                    logger.info(f"{module_identifier} is not a module.")
            else:
                result = doctest.testfile(str(".." / directory / file), optionflags=doctest.ELLIPSIS)
                self.assertIs(result.failed, 0)

    def test_modeling_examples(self):
        transformers_directory = Path("src/transformers")
        files = "modeling"
        ignore_files = [
            "modeling_ctrl.py",
            "modeling_tf_ctrl.py",
        ]
        self.analyze_directory(transformers_directory, identifier=files, ignore_files=ignore_files)

    def test_tokenization_examples(self):
        transformers_directory = Path("src/transformers")
        files = "tokenization"
        self.analyze_directory(transformers_directory, identifier=files)

    def test_configuration_examples(self):
        transformers_directory = Path("src/transformers")
        files = "configuration"
        self.analyze_directory(transformers_directory, identifier=files)

    def test_no_fail_cases(self):
        transformers_directory = Path("src/transformers")
        files = ["configuration", "modeling", "tokenization"]
        self.analyze_directory(transformers_directory, n_identifier=files)

    def test_documentation_examples(self):
        doc_source_directory = Path("docs/source")
        ignore_files = ["favicon.ico"]
        self.analyze_directory(doc_source_directory, ignore_files=ignore_files, only_modules=False)
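# Editor's note: a self-contained illustration of the doctest machinery used
# above; `add` is a made-up example, and run_docstring_examples executes its
# ">>>" example against the supplied globals.
def _doctest_sketch():
    def add(a, b):
        """
        >>> add(2, 3)
        5
        """
        return a + b

    doctest.run_docstring_examples(add, {"add": add}, verbose=False)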
| 247 |
import collections
import json
import math
import os
import re
import time
from fnmatch import fnmatch
from typing import Dict
import requests
from slack_sdk import WebClient
client = WebClient(token=os.environ["CI_SLACK_BOT_TOKEN"])
def handle_test_results(test_results):
    expressions = test_results.split(" ")

    failed = 0
    success = 0

    # When the output is short enough, the output is surrounded by = signs: "== OUTPUT =="
    # When it is too long, those signs are not present.
    time_spent = expressions[-2] if "=" in expressions[-1] else expressions[-1]

    for i, expression in enumerate(expressions):
        if "failed" in expression:
            failed += int(expressions[i - 1])
        if "passed" in expression:
            success += int(expressions[i - 1])

    return failed, success, time_spent
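# Editor's note: a hand-traced example of the parser above, on an illustrative
# pytest summary string:
#   handle_test_results("2 failed, 98 passed in (0:01:30)")
#   -> (2, 98, "(0:01:30)")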
def extract_first_line_failure(failures_short_lines):
    failures = {}
    file = None
    in_error = False
    for line in failures_short_lines.split("\n"):
        if re.search(r"_ \[doctest\]", line):
            in_error = True
            file = line.split(" ")[2]
        elif in_error and not line.split(" ")[0].isdigit():
            failures[file] = line
            in_error = False

    return failures
class Message:
    def __init__(self, title: str, doc_test_results: Dict):
        self.title = title

        self._time_spent = doc_test_results["time_spent"].split(",")[0]
        self.n_success = doc_test_results["success"]
        self.n_failures = doc_test_results["failures"]
        self.n_tests = self.n_success + self.n_failures

        # Failures and success of the modeling tests
        self.doc_test_results = doc_test_results
        self.thread_ts = None  # set by post(), checked by post_reply()

    @property
    def time(self) -> str:
        time_spent = [self._time_spent]
        total_secs = 0

        for time in time_spent:
            time_parts = time.split(":")

            # Time can be formatted as xx:xx:xx, as .xx, or as x.xx if the time spent was less than a minute.
            if len(time_parts) == 1:
                time_parts = [0, 0, time_parts[0]]

            hours, minutes, seconds = int(time_parts[0]), int(time_parts[1]), float(time_parts[2])
            total_secs += hours * 3600 + minutes * 60 + seconds

        hours, minutes, seconds = total_secs // 3600, (total_secs % 3600) // 60, total_secs % 60
        return f"{int(hours)}h{int(minutes)}m{int(seconds)}s"
@property
    def header(self) -> Dict:
        return {"type": "header", "text": {"type": "plain_text", "text": self.title}}
@property
    def no_failures(self) -> Dict:
return {
"type": "section",
"text": {
"type": "plain_text",
"text": f"🌞 There were no failures: all {self.n_tests} tests passed. The suite ran in {self.time}.",
"emoji": True,
},
"accessory": {
"type": "button",
"text": {"type": "plain_text", "text": "Check Action results", "emoji": True},
"url": f"https://github.com/huggingface/transformers/actions/runs/{os.environ['GITHUB_RUN_ID']}",
},
}
@property
    def failures(self) -> Dict:
return {
"type": "section",
"text": {
"type": "plain_text",
"text": (
f"There were {self.n_failures} failures, out of {self.n_tests} tests.\nThe suite ran in"
f" {self.time}."
),
"emoji": True,
},
"accessory": {
"type": "button",
"text": {"type": "plain_text", "text": "Check Action results", "emoji": True},
"url": f"https://github.com/huggingface/transformers/actions/runs/{os.environ['GITHUB_RUN_ID']}",
},
}
@property
def UpperCamelCase__ ( self ) -> Dict:
"""simple docstring"""
UpperCamelCase__ : List[Any] = 40
UpperCamelCase__ : Tuple = {k: v['''failed'''] for k, v in doc_test_results.items() if isinstance(__magic_name__, __magic_name__ )}
UpperCamelCase__ : List[str] = ''''''
for category, failures in category_failures.items():
if len(__magic_name__ ) == 0:
continue
if report != "":
report += "\n\n"
report += f"*{category} failures*:".ljust(line_length // 2 ).rjust(line_length // 2 ) + "\n"
report += "`"
report += "`\n`".join(__magic_name__ )
report += "`"
return {
"type": "section",
"text": {
"type": "mrkdwn",
"text": f"The following examples had failures:\n\n\n{report}\n",
},
}
@property
    def payload(self) -> str:
        blocks = [self.header]

        if self.n_failures > 0:
            blocks.append(self.failures)

        if self.n_failures > 0:
            blocks.extend([self.category_failures])

        if self.n_failures == 0:
            blocks.append(self.no_failures)

        return json.dumps(blocks)
@staticmethod
    def error_out():
        payload = [
{
'''type''': '''section''',
'''text''': {
'''type''': '''plain_text''',
'''text''': '''There was an issue running the tests.''',
},
'''accessory''': {
'''type''': '''button''',
'''text''': {'''type''': '''plain_text''', '''text''': '''Check Action results''', '''emoji''': True},
'''url''': f"https://github.com/huggingface/transformers/actions/runs/{os.environ['GITHUB_RUN_ID']}",
},
}
]
        print("Sending the following payload")
        # `payload` is already a list of blocks here, so it is dumped directly.
        print(json.dumps({"blocks": payload}))

        client.chat_postMessage(
            channel=os.environ["CI_SLACK_CHANNEL_ID_DAILY"],
            text="There was an issue running the tests.",
            blocks=payload,
        )
    def post(self):
        print("Sending the following payload")
        print(json.dumps({"blocks": json.loads(self.payload)}))

        text = f"{self.n_failures} failures out of {self.n_tests} tests," if self.n_failures else "All tests passed."

        self.thread_ts = client.chat_postMessage(
            channel=os.environ["CI_SLACK_CHANNEL_ID_DAILY"],
            blocks=self.payload,
            text=text,
        )
    def get_reply_blocks(self, job_name, job_link, failures, text):
        failures_text = ""
        for key, value in failures.items():
            value = value[:200] + " [Truncated]" if len(value) > 250 else value
            failures_text += f"*{key}*\n_{value}_\n\n"

        title = job_name
        content = {"type": "section", "text": {"type": "mrkdwn", "text": text}}

        if job_link is not None:
            content["accessory"] = {
                "type": "button",
                "text": {"type": "plain_text", "text": "GitHub Action job", "emoji": True},
                "url": job_link,
            }
return [
{"type": "header", "text": {"type": "plain_text", "text": title.upper(), "emoji": True}},
content,
{"type": "section", "text": {"type": "mrkdwn", "text": failures_text}},
]
    def post_reply(self):
        if self.thread_ts is None:
            raise ValueError("Can only post reply if a post has been made.")

        job_link = self.doc_test_results.pop("job_link")
        self.doc_test_results.pop("failures")
        self.doc_test_results.pop("success")
        self.doc_test_results.pop("time_spent")

        sorted_dict = sorted(self.doc_test_results.items(), key=lambda t: t[0])
        for job, job_result in sorted_dict:
            if len(job_result["failures"]):
                text = f"*Num failures* :{len(job_result['failed'])} \n"
                failures = job_result["failures"]
                blocks = self.get_reply_blocks(job, job_link, failures, text=text)

                print("Sending the following reply")
                print(json.dumps({"blocks": blocks}))

                client.chat_postMessage(
                    channel=os.environ["CI_SLACK_CHANNEL_ID_DAILY"],
                    text=f"Results for {job}",
                    blocks=blocks,
                    thread_ts=self.thread_ts["ts"],
                )

                time.sleep(1)
def get_job_links():
    run_id = os.environ["GITHUB_RUN_ID"]
    url = f"https://api.github.com/repos/huggingface/transformers/actions/runs/{run_id}/jobs?per_page=100"
    result = requests.get(url).json()
    jobs = {}

    try:
        jobs.update({job["name"]: job["html_url"] for job in result["jobs"]})
        pages_to_iterate_over = math.ceil((result["total_count"] - 100) / 100)

        for i in range(pages_to_iterate_over):
            result = requests.get(url + f"&page={i + 2}").json()
            jobs.update({job["name"]: job["html_url"] for job in result["jobs"]})

        return jobs
    except Exception as e:
        print("Unknown error, could not fetch links.", e)

    return {}
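# Editor's note (illustrative arithmetic): with total_count == 250 jobs at 100
# per page, math.ceil((250 - 100) / 100) == 2 extra requests are made, for
# &page=2 and &page=3 on top of the first page fetched above.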
def retrieve_artifact(name: str):
    _artifact = {}

    if os.path.exists(name):
        files = os.listdir(name)
        for file in files:
            try:
                with open(os.path.join(name, file), encoding="utf-8") as f:
                    _artifact[file.split(".")[0]] = f.read()
            except UnicodeDecodeError as e:
                raise ValueError(f"Could not open {os.path.join(name, file)}.") from e

    return _artifact
def retrieve_available_artifacts():
    class Artifact:
        def __init__(self, name: str):
            self.name = name
            self.paths = []

        def __str__(self):
            return self.name

        def add_path(self, path: str):
            self.paths.append({"name": self.name, "path": path})

    _available_artifacts: Dict[str, Artifact] = {}

    directories = filter(os.path.isdir, os.listdir())
    for directory in directories:
        artifact_name = directory
        if artifact_name not in _available_artifacts:
            _available_artifacts[artifact_name] = Artifact(artifact_name)

        _available_artifacts[artifact_name].add_path(directory)

    return _available_artifacts
if __name__ == "__main__":
    github_actions_job_links = get_job_links()
    available_artifacts = retrieve_available_artifacts()

    docs = collections.OrderedDict(
        [
            ("*.py", "API Examples"),
            ("*.md", "MD Examples"),
        ]
    )

    # This dict will contain all the information relative to each doc test category:
    # - failed: list of failed tests
    # - failures: dict in the format 'test': 'error_message'
    doc_test_results = {
        v: {
            "failed": [],
            "failures": {},
        }
        for v in docs.values()
    }

    # Link to the GitHub Action job
    doc_test_results["job_link"] = github_actions_job_links.get("run_doctests")

    artifact_path = available_artifacts["doc_tests_gpu_test_reports"].paths[0]
    artifact = retrieve_artifact(artifact_path["name"])
    if "stats" in artifact:
        failed, success, time_spent = handle_test_results(artifact["stats"])
        doc_test_results["failures"] = failed
        doc_test_results["success"] = success
        doc_test_results["time_spent"] = time_spent[1:-1] + ", "

        all_failures = extract_first_line_failure(artifact["failures_short"])
        for line in artifact["summary_short"].split("\n"):
            if re.search("FAILED", line):
                line = line.replace("FAILED ", "")
                line = line.split()[0].replace("\n", "")

                if "::" in line:
                    file_path, test = line.split("::")
                else:
                    file_path, test = line, line

                for file_regex in docs.keys():
                    if fnmatch(file_path, file_regex):
                        category = docs[file_regex]
                        doc_test_results[category]["failed"].append(test)

                        failure = all_failures[test] if test in all_failures else "N/A"
                        doc_test_results[category]["failures"][test] = failure
                        break

    message = Message("🤗 Results of the doc tests.", doc_test_results)
    message.post()
    message.post_reply()
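# Editor's note: running this script requires the CI environment variables used
# above (CI_SLACK_BOT_TOKEN, CI_SLACK_CHANNEL_ID_DAILY, GITHUB_RUN_ID) plus a
# doc_tests_gpu_test_reports artifact directory in the working directory, e.g.
# (file name assumed):
#   GITHUB_RUN_ID=12345 python notification_service_doc_tests.py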
| 247 | 1 |