# Conversion script: port fairseq SpeechT5 checkpoints (s2t / t2s / s2s) to the Hugging Face format.
import argparse

import torch

from transformers import (
    SpeechT5Config,
    SpeechT5FeatureExtractor,
    SpeechT5ForSpeechToSpeech,
    SpeechT5ForSpeechToText,
    SpeechT5ForTextToSpeech,
    SpeechT5Processor,
    SpeechT5Tokenizer,
    logging,
)
from transformers.tokenization_utils import AddedToken


logging.set_verbosity_info()
logger = logging.get_logger("transformers.models.speecht5")

# Mappings from fairseq parameter names to their Hugging Face counterparts.
MAPPING_SPEECH_ENCODER_PRENET = {
    "speech_encoder_prenet.layer_norm": "speecht5.encoder.prenet.feature_projection.layer_norm",
    "speech_encoder_prenet.post_extract_proj": "speecht5.encoder.prenet.feature_projection.projection",
    "speech_encoder_prenet.pos_conv.0": "speecht5.encoder.prenet.pos_conv_embed.conv",
    "speech_encoder_prenet.mask_emb": "speecht5.encoder.prenet.masked_spec_embed",
}
MAPPING_TEXT_ENCODER_PRENET = {
    "text_encoder_prenet.encoder_prenet.0": "speecht5.encoder.prenet.embed_tokens",
    "text_encoder_prenet.encoder_prenet.1.alpha": "speecht5.encoder.prenet.encode_positions.alpha",
}
MAPPING_SPEECH_DECODER_PRENET = {
    "speech_decoder_prenet.decoder_prenet.0.0.prenet.0.0": "speecht5.decoder.prenet.layers.0",
    "speech_decoder_prenet.decoder_prenet.0.0.prenet.1.0": "speecht5.decoder.prenet.layers.1",
    "speech_decoder_prenet.decoder_prenet.0.1": "speecht5.decoder.prenet.final_layer",
    "speech_decoder_prenet.decoder_prenet.1.alpha": "speecht5.decoder.prenet.encode_positions.alpha",
    "speech_decoder_prenet.spkembs_layer.0": "speecht5.decoder.prenet.speaker_embeds_layer",
}
MAPPING_SPEECH_DECODER_POSTNET = {
    "speech_decoder_postnet.feat_out": "speech_decoder_postnet.feat_out",
    "speech_decoder_postnet.prob_out": "speech_decoder_postnet.prob_out",
    "speech_decoder_postnet.postnet.postnet.0.0": "speech_decoder_postnet.layers.0.conv",
    "speech_decoder_postnet.postnet.postnet.0.1": "speech_decoder_postnet.layers.0.batch_norm",
    "speech_decoder_postnet.postnet.postnet.1.0": "speech_decoder_postnet.layers.1.conv",
    "speech_decoder_postnet.postnet.postnet.1.1": "speech_decoder_postnet.layers.1.batch_norm",
    "speech_decoder_postnet.postnet.postnet.2.0": "speech_decoder_postnet.layers.2.conv",
    "speech_decoder_postnet.postnet.postnet.2.1": "speech_decoder_postnet.layers.2.batch_norm",
    "speech_decoder_postnet.postnet.postnet.3.0": "speech_decoder_postnet.layers.3.conv",
    "speech_decoder_postnet.postnet.postnet.3.1": "speech_decoder_postnet.layers.3.batch_norm",
    "speech_decoder_postnet.postnet.postnet.4.0": "speech_decoder_postnet.layers.4.conv",
    "speech_decoder_postnet.postnet.postnet.4.1": "speech_decoder_postnet.layers.4.batch_norm",
}
MAPPING_TEXT_DECODER_PRENET = {
    "text_decoder_prenet.embed_tokens": "speecht5.decoder.prenet.embed_tokens",
}
MAPPING_TEXT_DECODER_POSTNET = {
    "text_decoder_postnet.output_projection": "text_decoder_postnet.lm_head",
}
MAPPING_ENCODER = {
    "encoder.layers.*.self_attn.k_proj": "speecht5.encoder.wrapped_encoder.layers.*.attention.k_proj",
    "encoder.layers.*.self_attn.v_proj": "speecht5.encoder.wrapped_encoder.layers.*.attention.v_proj",
    "encoder.layers.*.self_attn.q_proj": "speecht5.encoder.wrapped_encoder.layers.*.attention.q_proj",
    "encoder.layers.*.self_attn.out_proj": "speecht5.encoder.wrapped_encoder.layers.*.attention.out_proj",
    "encoder.layers.*.self_attn_layer_norm": "speecht5.encoder.wrapped_encoder.layers.*.layer_norm",
    "encoder.layers.*.fc1": "speecht5.encoder.wrapped_encoder.layers.*.feed_forward.intermediate_dense",
    "encoder.layers.*.fc2": "speecht5.encoder.wrapped_encoder.layers.*.feed_forward.output_dense",
    "encoder.layers.*.final_layer_norm": "speecht5.encoder.wrapped_encoder.layers.*.final_layer_norm",
    "encoder.layer_norm": "speecht5.encoder.wrapped_encoder.layer_norm",
    "encoder.pos_emb.pe_k": "speecht5.encoder.wrapped_encoder.embed_positions.pe_k",
}
MAPPING_DECODER = {
    "decoder.layers.*.self_attn.k_proj": "speecht5.decoder.wrapped_decoder.layers.*.self_attn.k_proj",
    "decoder.layers.*.self_attn.v_proj": "speecht5.decoder.wrapped_decoder.layers.*.self_attn.v_proj",
    "decoder.layers.*.self_attn.q_proj": "speecht5.decoder.wrapped_decoder.layers.*.self_attn.q_proj",
    "decoder.layers.*.self_attn.out_proj": "speecht5.decoder.wrapped_decoder.layers.*.self_attn.out_proj",
    "decoder.layers.*.self_attn_layer_norm": "speecht5.decoder.wrapped_decoder.layers.*.self_attn_layer_norm",
    "decoder.layers.*.encoder_attn.k_proj": "speecht5.decoder.wrapped_decoder.layers.*.encoder_attn.k_proj",
    "decoder.layers.*.encoder_attn.v_proj": "speecht5.decoder.wrapped_decoder.layers.*.encoder_attn.v_proj",
    "decoder.layers.*.encoder_attn.q_proj": "speecht5.decoder.wrapped_decoder.layers.*.encoder_attn.q_proj",
    "decoder.layers.*.encoder_attn.out_proj": "speecht5.decoder.wrapped_decoder.layers.*.encoder_attn.out_proj",
    "decoder.layers.*.encoder_attn_layer_norm": "speecht5.decoder.wrapped_decoder.layers.*.encoder_attn_layer_norm",
    "decoder.layers.*.fc1": "speecht5.decoder.wrapped_decoder.layers.*.feed_forward.intermediate_dense",
    "decoder.layers.*.fc2": "speecht5.decoder.wrapped_decoder.layers.*.feed_forward.output_dense",
    "decoder.layers.*.final_layer_norm": "speecht5.decoder.wrapped_decoder.layers.*.final_layer_norm",
}
MAPPING_S2T = {
    **MAPPING_SPEECH_ENCODER_PRENET,
    **MAPPING_ENCODER,
    **MAPPING_DECODER,
    **MAPPING_TEXT_DECODER_PRENET,
    **MAPPING_TEXT_DECODER_POSTNET,
}
MAPPING_T2S = {
    **MAPPING_TEXT_ENCODER_PRENET,
    **MAPPING_ENCODER,
    **MAPPING_DECODER,
    **MAPPING_SPEECH_DECODER_PRENET,
    **MAPPING_SPEECH_DECODER_POSTNET,
}
MAPPING_S2S = {
    **MAPPING_SPEECH_ENCODER_PRENET,
    **MAPPING_ENCODER,
    **MAPPING_DECODER,
    **MAPPING_SPEECH_DECODER_PRENET,
    **MAPPING_SPEECH_DECODER_POSTNET,
}
TOP_LEVEL_KEYS = []
IGNORE_KEYS = [
    "encoder.version",
    "encoder.layers.*.norm_k.weight",
    "encoder.layers.*.norm_k.bias",
    "decoder.version",
    "decoder.layers.*.norm_k.weight",
    "decoder.layers.*.norm_k.bias",
    "decoder.pos_emb.pe_k",
    "speech_encoder_prenet.embed_positions._float_tensor",
    "text_decoder_prenet.embed_positions._float_tensor",
]
IGNORE_KEYS_S2T = IGNORE_KEYS + [
    "encoder.proj",
    "text_encoder_prenet.*",
    "speech_decoder_prenet.*",
    "speech_decoder_postnet.*",
]
IGNORE_KEYS_T2S = IGNORE_KEYS + [
    "encoder.proj",
    "speech_encoder_prenet.*",
    "text_decoder_prenet.*",
    "text_decoder_postnet.*",
]
IGNORE_KEYS_S2S = IGNORE_KEYS + [
    "encoder.proj",
    "text_encoder_prenet.*",
    "text_decoder_prenet.*",
    "text_decoder_postnet.*",
]


def set_recursively(hf_pointer, key, value, full_name, weight_type):
    # Walk the attribute path (e.g. "speecht5.encoder.prenet.embed_tokens") down to the target module.
    for attribute in key.split("."):
        hf_pointer = getattr(hf_pointer, attribute)

    if weight_type is not None:
        hf_shape = getattr(hf_pointer, weight_type).shape
    else:
        hf_shape = hf_pointer.shape

    if hf_shape != value.shape:
        raise ValueError(
            f'Shape of hf {key + "." + weight_type if weight_type is not None else ""} is {hf_shape}, but should be'
            f" {value.shape} for {full_name}"
        )

    if weight_type == "weight":
        hf_pointer.weight.data = value
    elif weight_type == "weight_g":
        hf_pointer.weight_g.data = value
    elif weight_type == "weight_v":
        hf_pointer.weight_v.data = value
    elif weight_type == "bias":
        hf_pointer.bias.data = value
    elif weight_type == "running_mean":
        hf_pointer.running_mean.data = value
    elif weight_type == "running_var":
        hf_pointer.running_var.data = value
    elif weight_type == "num_batches_tracked":
        hf_pointer.num_batches_tracked.data = value
    else:
        hf_pointer.data = value

    logger.info(f"{key + ('.' + weight_type if weight_type is not None else '')} was initialized from {full_name}.")


def should_ignore(name, ignore_keys):
    for key in ignore_keys:
        if key.endswith(".*"):
            if name.startswith(key[:-1]):
                return True
        elif ".*." in key:
            prefix, suffix = key.split(".*.")
            if prefix in name and suffix in name:
                return True
        elif key in name:
            return True
    return False


def recursively_load_weights(fairseq_dict, hf_model, task):
    unused_weights = []

    if task == "s2t":
        feature_encoder = hf_model.speecht5.encoder.prenet.feature_encoder
        MAPPING = MAPPING_S2T
        IGNORE_KEYS = IGNORE_KEYS_S2T
    elif task == "t2s":
        feature_encoder = None
        MAPPING = MAPPING_T2S
        IGNORE_KEYS = IGNORE_KEYS_T2S
    elif task == "s2s":
        feature_encoder = hf_model.speecht5.encoder.prenet.feature_encoder
        MAPPING = MAPPING_S2S
        IGNORE_KEYS = IGNORE_KEYS_S2S
    else:
        raise ValueError(f"Unsupported task: {task}")

    for name, value in fairseq_dict.items():
        if should_ignore(name, IGNORE_KEYS):
            logger.info(f"{name} was ignored")
            continue

        is_used = False
        if "conv_layers" in name:
            load_conv_layer(
                name,
                value,
                feature_encoder,
                unused_weights,
                hf_model.config.feat_extract_norm == "group",
            )
            is_used = True
        else:
            for key, mapped_key in MAPPING.items():
                # mapped_key = "speecht5." + mapped_key if mapped_key not in TOP_LEVEL_KEYS else mapped_key
                if "*" in key:
                    prefix, suffix = key.split(".*.")
                    if prefix in name and suffix in name:
                        key = suffix

                # if key in name or key.split("w2v_model.")[-1] == name.split(".")[0]:
                if key in name:
                    is_used = True
                    if "*" in mapped_key:
                        layer_index = name.split(key)[0].split(".")[-2]
                        mapped_key = mapped_key.replace("*", layer_index)
                    if "weight_g" in name:
                        weight_type = "weight_g"
                    elif "weight_v" in name:
                        weight_type = "weight_v"
                    elif "bias" in name:
                        weight_type = "bias"
                    elif "weight" in name:
                        weight_type = "weight"
                    elif "running_mean" in name:
                        weight_type = "running_mean"
                    elif "running_var" in name:
                        weight_type = "running_var"
                    elif "num_batches_tracked" in name:
                        weight_type = "num_batches_tracked"
                    else:
                        weight_type = None
                    set_recursively(hf_model, mapped_key, value, name, weight_type)
                continue
        if not is_used:
            unused_weights.append(name)

    logger.warning(f"Unused weights: {unused_weights}")


def load_conv_layer(full_name, value, feature_extractor, unused_weights, use_group_norm):
    name = full_name.split("conv_layers.")[-1]
    items = name.split(".")
    layer_id = int(items[0])
    type_id = int(items[1])

    if type_id == 0:
        if "bias" in name:
            if value.shape != feature_extractor.conv_layers[layer_id].conv.bias.data.shape:
                raise ValueError(
                    f"{full_name} has size {value.shape}, but"
                    f" {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found."
                )
            feature_extractor.conv_layers[layer_id].conv.bias.data = value
            logger.info(f"Feat extract conv layer {layer_id} was initialized from {full_name}.")
        elif "weight" in name:
            if value.shape != feature_extractor.conv_layers[layer_id].conv.weight.data.shape:
                raise ValueError(
                    f"{full_name} has size {value.shape}, but"
                    f" {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found."
                )
            feature_extractor.conv_layers[layer_id].conv.weight.data = value
            logger.info(f"Feat extract conv layer {layer_id} was initialized from {full_name}.")
    elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
        if "bias" in name:
            if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape:
                raise ValueError(
                    f"{full_name} has size {value.shape}, but"
                    f" {feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape} was found."
                )
            feature_extractor.conv_layers[layer_id].layer_norm.bias.data = value
            logger.info(f"Feat extract layer norm bias of layer {layer_id} was initialized from {full_name}.")
        elif "weight" in name:
            if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape:
                raise ValueError(
                    f"{full_name} has size {value.shape}, but"
                    f" {feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape} was found."
                )
            feature_extractor.conv_layers[layer_id].layer_norm.weight.data = value
            logger.info(f"Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.")
    else:
        unused_weights.append(full_name)


@torch.no_grad()
def convert_speecht5_checkpoint(
    task,
    checkpoint_path,
    pytorch_dump_folder_path,
    config_path=None,
    vocab_path=None,
    repo_id=None,
):
    if config_path is not None:
        config = SpeechT5Config.from_pretrained(config_path)
    else:
        config = SpeechT5Config()

    if task == "s2t":
        config.max_length = config.max_text_positions
        model = SpeechT5ForSpeechToText(config)
    elif task == "t2s":
        config.max_speech_positions = 1876
        config.max_text_positions = 600
        config.max_length = config.max_speech_positions
        model = SpeechT5ForTextToSpeech(config)
    elif task == "s2s":
        config.max_speech_positions = 1876
        config.max_length = config.max_speech_positions
        model = SpeechT5ForSpeechToSpeech(config)
    else:
        raise ValueError(f"Unknown task name: {task}")

    if vocab_path:
        tokenizer = SpeechT5Tokenizer(vocab_path, model_max_length=config.max_text_positions)

        # Mask token behaves like a normal word, i.e. include the space before it
        mask_token = AddedToken("<mask>", lstrip=True, rstrip=False)
        tokenizer.mask_token = mask_token
        tokenizer.add_special_tokens({"mask_token": mask_token})
        tokenizer.add_tokens(["<ctc_blank>"])

    feature_extractor = SpeechT5FeatureExtractor()
    processor = SpeechT5Processor(tokenizer=tokenizer, feature_extractor=feature_extractor)
    processor.save_pretrained(pytorch_dump_folder_path)

    fairseq_checkpoint = torch.load(checkpoint_path)
    recursively_load_weights(fairseq_checkpoint["model"], model, task)

    model.save_pretrained(pytorch_dump_folder_path)

    if repo_id:
        print("Pushing to the hub...")
        processor.push_to_hub(repo_id)
        model.push_to_hub(repo_id)


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument(
        "--task",
        default="s2t",
        type=str,
        help="Type of the SpeechT5 model you'd like to convert. Should be one of 's2t', 't2s', 's2s'.",
    )
    parser.add_argument("--checkpoint_path", required=True, default=None, type=str, help="Path to fairseq checkpoint")
    parser.add_argument("--vocab_path", default=None, type=str, help="Path to SentencePiece model")
    parser.add_argument("--config_path", default=None, type=str, help="Path to hf config.json of model to convert")
    parser.add_argument(
        "--pytorch_dump_folder_path", required=True, default=None, type=str, help="Path to the output PyTorch model."
    )
    parser.add_argument(
        "--push_to_hub", default=None, type=str, help="Where to upload the converted model on the 🤗 hub."
    )

    args = parser.parse_args()
    convert_speecht5_checkpoint(
        args.task,
        args.checkpoint_path,
        args.pytorch_dump_folder_path,
        args.config_path,
        args.vocab_path,
        args.push_to_hub,
    )
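Once a text-to-speech checkpoint has been converted, the dump folder can be loaded back through the regular `from_pretrained` API. The snippet below is a minimal smoke test, not part of the conversion script; the dump path is hypothetical and the zero speaker embedding is a placeholder (real x-vectors are needed for intelligible speech).

```python
import torch
from transformers import SpeechT5ForTextToSpeech, SpeechT5Processor

processor = SpeechT5Processor.from_pretrained("./speecht5_tts")  # assumed dump folder from a t2s conversion
model = SpeechT5ForTextToSpeech.from_pretrained("./speecht5_tts")

inputs = processor(text="Hello, world!", return_tensors="pt")
speaker_embeddings = torch.zeros((1, 512))  # placeholder x-vector, illustration only
spectrogram = model.generate_speech(inputs["input_ids"], speaker_embeddings)
print(spectrogram.shape)  # (num_frames, num_mel_bins)
```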
# PyTorch Lightning callbacks for the seq2seq / RAG training examples.
import logging
from pathlib import Path

import numpy as np
import pytorch_lightning as pl
import torch
from pytorch_lightning.callbacks import EarlyStopping, ModelCheckpoint
from pytorch_lightning.utilities import rank_zero_only

from utils_rag import save_json


def count_trainable_parameters(model):
    model_parameters = filter(lambda p: p.requires_grad, model.parameters())
    params = sum([np.prod(p.size()) for p in model_parameters])
    return params


logger = logging.getLogger(__name__)


def get_checkpoint_callback(output_dir, metric):
    """Saves the best model by the given validation metric."""
    if metric == "rouge2":
        exp = "{val_avg_rouge2:.4f}-{step_count}"
    elif metric == "bleu":
        exp = "{val_avg_bleu:.4f}-{step_count}"
    elif metric == "em":
        exp = "{val_avg_em:.4f}-{step_count}"
    elif metric == "loss":
        exp = "{val_avg_loss:.4f}-{step_count}"
    else:
        raise NotImplementedError(
            f"seq2seq callbacks only support rouge2, bleu, em and loss, got {metric}. You can make your own by"
            " adding to this function."
        )

    checkpoint_callback = ModelCheckpoint(
        dirpath=output_dir,
        filename=exp,
        monitor=f"val_{metric}",
        mode="max",
        save_top_k=1,
        every_n_epochs=1,
    )
    return checkpoint_callback


def get_early_stopping_callback(metric, patience):
    return EarlyStopping(
        monitor=f"val_{metric}",
        mode="min" if "loss" in metric else "max",
        patience=patience,
        verbose=True,
    )


class Seq2SeqLoggingCallback(pl.Callback):
    def on_batch_end(self, trainer, pl_module):
        lrs = {f"lr_group_{i}": param["lr"] for i, param in enumerate(pl_module.trainer.optimizers[0].param_groups)}
        pl_module.logger.log_metrics(lrs)

    @rank_zero_only
    def _write_logs(
        self, trainer: pl.Trainer, pl_module: pl.LightningModule, type_path: str, save_generations=True
    ) -> None:
        logger.info(f"***** {type_path} results at step {trainer.global_step:05d} *****")
        metrics = trainer.callback_metrics
        trainer.logger.log_metrics({k: v for k, v in metrics.items() if k not in ["log", "progress_bar", "preds"]})
        # Log results
        od = Path(pl_module.hparams.output_dir)
        if type_path == "test":
            results_file = od / "test_results.txt"
            generations_file = od / "test_generations.txt"
        else:
            # this never gets hit. I prefer not to save intermediate generations, and results are in metrics.json
            # If people want this it will be easy enough to add back.
            results_file = od / f"{type_path}_results/{trainer.global_step:05d}.txt"
            generations_file = od / f"{type_path}_generations/{trainer.global_step:05d}.txt"
            results_file.parent.mkdir(exist_ok=True)
            generations_file.parent.mkdir(exist_ok=True)

        with open(results_file, "a+") as writer:
            for key in sorted(metrics):
                if key in ["log", "progress_bar", "preds"]:
                    continue
                val = metrics[key]
                if isinstance(val, torch.Tensor):
                    val = val.item()
                msg = f"{key}: {val:.6f}\n"
                writer.write(msg)

        if not save_generations:
            return

        if "preds" in metrics:
            content = "\n".join(metrics["preds"])
            generations_file.open("w+").write(content)

    @rank_zero_only
    def on_train_start(self, trainer, pl_module):
        try:
            npars = pl_module.model.model.num_parameters()
        except AttributeError:
            npars = pl_module.model.num_parameters()

        n_trainable_pars = count_trainable_parameters(pl_module)
        # mp stands for million parameters
        trainer.logger.log_metrics({"n_params": npars, "mp": npars / 1e6, "grad_mp": n_trainable_pars / 1e6})

    @rank_zero_only
    def on_test_end(self, trainer: pl.Trainer, pl_module: pl.LightningModule):
        save_json(pl_module.metrics, pl_module.metrics_save_path)
        return self._write_logs(trainer, pl_module, "test")

    @rank_zero_only
    def on_validation_end(self, trainer: pl.Trainer, pl_module):
        save_json(pl_module.metrics, pl_module.metrics_save_path)
        # Uncommenting this will save val generations
        # return self._write_logs(trainer, pl_module, "valid")
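For context, a rough sketch of how these callbacks are meant to be wired into a trainer (the `output_dir` and model/datamodule objects below are assumptions, not part of this module):

```python
import pytorch_lightning as pl

checkpoint = get_checkpoint_callback(output_dir="outputs", metric="rouge2")
early_stopping = get_early_stopping_callback(metric="rouge2", patience=3)

trainer = pl.Trainer(
    max_epochs=10,
    callbacks=[Seq2SeqLoggingCallback(), checkpoint, early_stopping],
)
# trainer.fit(model, datamodule=dm)  # model/dm come from the surrounding training script
```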
# Lazy-guarded imports for the Versatile Diffusion pipelines (diffusers).
from ...utils import (
    OptionalDependencyNotAvailable,
    is_torch_available,
    is_transformers_available,
    is_transformers_version,
)


try:
    if not (is_transformers_available() and is_torch_available() and is_transformers_version(">=", "4.25.0")):
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    # Fall back to dummy objects that raise a helpful error when the requirements are not met.
    from ...utils.dummy_torch_and_transformers_objects import (
        VersatileDiffusionDualGuidedPipeline,
        VersatileDiffusionImageVariationPipeline,
        VersatileDiffusionPipeline,
        VersatileDiffusionTextToImagePipeline,
    )
else:
    from .modeling_text_unet import UNetFlatConditionModel
    from .pipeline_versatile_diffusion import VersatileDiffusionPipeline
    from .pipeline_versatile_diffusion_dual_guided import VersatileDiffusionDualGuidedPipeline
    from .pipeline_versatile_diffusion_image_variation import VersatileDiffusionImageVariationPipeline
    from .pipeline_versatile_diffusion_text_to_image import VersatileDiffusionTextToImagePipeline
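A typical entry point for these pipelines looks like the sketch below; the checkpoint id and the `text_to_image` call follow the usual diffusers conventions, but treat both as assumptions rather than documentation for this exact version.

```python
import torch
from diffusers import VersatileDiffusionPipeline

# "shi-labs/versatile-diffusion" is the commonly used checkpoint id; verify before relying on it.
pipe = VersatileDiffusionPipeline.from_pretrained("shi-labs/versatile-diffusion", torch_dtype=torch.float16)
pipe = pipe.to("cuda")

image = pipe.text_to_image("a red panda reading a book", num_inference_steps=25).images[0]
image.save("red_panda.png")
```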
# Tests for AutoImageProcessor resolution, registration, and remote-code loading.
import json
import sys
import tempfile
import unittest
from pathlib import Path

import transformers
from transformers import (
    CONFIG_MAPPING,
    IMAGE_PROCESSOR_MAPPING,
    AutoConfig,
    AutoImageProcessor,
    CLIPConfig,
    CLIPImageProcessor,
)
from transformers.testing_utils import DUMMY_UNKNOWN_IDENTIFIER


sys.path.append(str(Path(__file__).parent.parent.parent.parent / "utils"))

from test_module.custom_configuration import CustomConfig  # noqa E402
from test_module.custom_image_processing import CustomImageProcessor  # noqa E402


class AutoImageProcessorTest(unittest.TestCase):
    def setUp(self):
        # Do not wait for user confirmation when loading remote code in tests.
        transformers.dynamic_module_utils.TIME_OUT_REMOTE_CODE = 0

    def test_image_processor_from_model_shortcut(self):
        config = AutoImageProcessor.from_pretrained("openai/clip-vit-base-patch32")
        self.assertIsInstance(config, CLIPImageProcessor)

    def test_image_processor_from_local_directory_from_key(self):
        with tempfile.TemporaryDirectory() as tmpdirname:
            processor_tmpfile = Path(tmpdirname) / "preprocessor_config.json"
            config_tmpfile = Path(tmpdirname) / "config.json"
            json.dump(
                {"image_processor_type": "CLIPImageProcessor", "processor_class": "CLIPProcessor"},
                open(processor_tmpfile, "w"),
            )
            json.dump({"model_type": "clip"}, open(config_tmpfile, "w"))

            config = AutoImageProcessor.from_pretrained(tmpdirname)
            self.assertIsInstance(config, CLIPImageProcessor)

    def test_image_processor_from_local_directory_from_feature_extractor_key(self):
        with tempfile.TemporaryDirectory() as tmpdirname:
            processor_tmpfile = Path(tmpdirname) / "preprocessor_config.json"
            config_tmpfile = Path(tmpdirname) / "config.json"
            json.dump(
                {"feature_extractor_type": "CLIPFeatureExtractor", "processor_class": "CLIPProcessor"},
                open(processor_tmpfile, "w"),
            )
            json.dump({"model_type": "clip"}, open(config_tmpfile, "w"))

            config = AutoImageProcessor.from_pretrained(tmpdirname)
            self.assertIsInstance(config, CLIPImageProcessor)

    def test_image_processor_from_local_directory_from_config(self):
        with tempfile.TemporaryDirectory() as tmpdirname:
            model_config = CLIPConfig()

            # Create a dummy config file with image_processor_type
            processor_tmpfile = Path(tmpdirname) / "preprocessor_config.json"
            config_tmpfile = Path(tmpdirname) / "config.json"
            json.dump(
                {"image_processor_type": "CLIPImageProcessor", "processor_class": "CLIPProcessor"},
                open(processor_tmpfile, "w"),
            )
            json.dump({"model_type": "clip"}, open(config_tmpfile, "w"))

            # remove image_processor_type to make sure config.json alone is enough to load image processor locally
            config_dict = AutoImageProcessor.from_pretrained(tmpdirname).to_dict()

            config_dict.pop("image_processor_type")
            config = CLIPImageProcessor(**config_dict)

            # save in new folder
            model_config.save_pretrained(tmpdirname)
            config.save_pretrained(tmpdirname)

            config = AutoImageProcessor.from_pretrained(tmpdirname)

            # make sure private variable is not incorrectly saved
            dict_as_saved = json.loads(config.to_json_string())
            self.assertTrue("_processor_class" not in dict_as_saved)

            self.assertIsInstance(config, CLIPImageProcessor)

    def test_image_processor_from_local_file(self):
        with tempfile.TemporaryDirectory() as tmpdirname:
            processor_tmpfile = Path(tmpdirname) / "preprocessor_config.json"
            json.dump(
                {"image_processor_type": "CLIPImageProcessor", "processor_class": "CLIPProcessor"},
                open(processor_tmpfile, "w"),
            )

            config = AutoImageProcessor.from_pretrained(processor_tmpfile)
            self.assertIsInstance(config, CLIPImageProcessor)

    def test_repo_not_found(self):
        with self.assertRaisesRegex(
            EnvironmentError, "clip-base is not a local folder and is not a valid model identifier"
        ):
            _ = AutoImageProcessor.from_pretrained("clip-base")

    def test_revision_not_found(self):
        with self.assertRaisesRegex(
            EnvironmentError, r"aaaaaa is not a valid git identifier \(branch name, tag name or commit id\)"
        ):
            _ = AutoImageProcessor.from_pretrained(DUMMY_UNKNOWN_IDENTIFIER, revision="aaaaaa")

    def test_image_processor_not_found(self):
        with self.assertRaisesRegex(
            EnvironmentError,
            "hf-internal-testing/config-no-model does not appear to have a file named preprocessor_config.json.",
        ):
            _ = AutoImageProcessor.from_pretrained("hf-internal-testing/config-no-model")

    def test_from_pretrained_dynamic_image_processor(self):
        with self.assertRaises(ValueError):
            _ = AutoImageProcessor.from_pretrained("hf-internal-testing/test_dynamic_image_processor")
        # If remote code is disabled, we can't load this config.
        with self.assertRaises(ValueError):
            _ = AutoImageProcessor.from_pretrained(
                "hf-internal-testing/test_dynamic_image_processor", trust_remote_code=False
            )

        image_processor = AutoImageProcessor.from_pretrained(
            "hf-internal-testing/test_dynamic_image_processor", trust_remote_code=True
        )
        self.assertEqual(image_processor.__class__.__name__, "NewImageProcessor")

        # Test image processor can be reloaded.
        with tempfile.TemporaryDirectory() as tmp_dir:
            image_processor.save_pretrained(tmp_dir)
            reloaded_image_processor = AutoImageProcessor.from_pretrained(tmp_dir, trust_remote_code=True)
        self.assertEqual(reloaded_image_processor.__class__.__name__, "NewImageProcessor")

    def test_new_image_processor_registration(self):
        try:
            AutoConfig.register("custom", CustomConfig)
            AutoImageProcessor.register(CustomConfig, CustomImageProcessor)
            # Trying to register something existing in the Transformers library will raise an error
            with self.assertRaises(ValueError):
                AutoImageProcessor.register(CLIPConfig, CLIPImageProcessor)

            with tempfile.TemporaryDirectory() as tmpdirname:
                processor_tmpfile = Path(tmpdirname) / "preprocessor_config.json"
                config_tmpfile = Path(tmpdirname) / "config.json"
                json.dump(
                    {"feature_extractor_type": "CLIPFeatureExtractor", "processor_class": "CLIPProcessor"},
                    open(processor_tmpfile, "w"),
                )
                json.dump({"model_type": "clip"}, open(config_tmpfile, "w"))

                image_processor = CustomImageProcessor.from_pretrained(tmpdirname)

                # Now that the config is registered, it can be used as any other config with the auto-API
                with tempfile.TemporaryDirectory() as tmp_dir:
                    image_processor.save_pretrained(tmp_dir)
                    new_image_processor = AutoImageProcessor.from_pretrained(tmp_dir)
                    self.assertIsInstance(new_image_processor, CustomImageProcessor)
        finally:
            if "custom" in CONFIG_MAPPING._extra_content:
                del CONFIG_MAPPING._extra_content["custom"]
            if CustomConfig in IMAGE_PROCESSOR_MAPPING._extra_content:
                del IMAGE_PROCESSOR_MAPPING._extra_content[CustomConfig]

    def test_from_pretrained_dynamic_image_processor_conflict(self):
        class NewImageProcessor(CLIPImageProcessor):
            is_local = True

        try:
            AutoConfig.register("custom", CustomConfig)
            AutoImageProcessor.register(CustomConfig, NewImageProcessor)
            # If remote code is not set, the default is to use local
            image_processor = AutoImageProcessor.from_pretrained("hf-internal-testing/test_dynamic_image_processor")
            self.assertEqual(image_processor.__class__.__name__, "NewImageProcessor")
            self.assertTrue(image_processor.is_local)

            # If remote code is disabled, we load the local one.
            image_processor = AutoImageProcessor.from_pretrained(
                "hf-internal-testing/test_dynamic_image_processor", trust_remote_code=False
            )
            self.assertEqual(image_processor.__class__.__name__, "NewImageProcessor")
            self.assertTrue(image_processor.is_local)

            # If remote is enabled, we load from the Hub
            image_processor = AutoImageProcessor.from_pretrained(
                "hf-internal-testing/test_dynamic_image_processor", trust_remote_code=True
            )
            self.assertEqual(image_processor.__class__.__name__, "NewImageProcessor")
            self.assertTrue(not hasattr(image_processor, "is_local"))
        finally:
            if "custom" in CONFIG_MAPPING._extra_content:
                del CONFIG_MAPPING._extra_content["custom"]
            if CustomConfig in IMAGE_PROCESSOR_MAPPING._extra_content:
                del IMAGE_PROCESSOR_MAPPING._extra_content[CustomConfig]
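The registration pattern exercised by these tests is also how user code plugs a custom image processor into the auto classes. A minimal sketch, assuming `CustomConfig` and `CustomImageProcessor` are your own subclasses (and the checkpoint path is hypothetical):

```python
from transformers import AutoConfig, AutoImageProcessor

# Assumed user-defined classes: CustomConfig(PretrainedConfig) with model_type "custom",
# and CustomImageProcessor(BaseImageProcessor).
AutoConfig.register("custom", CustomConfig)
AutoImageProcessor.register(CustomConfig, CustomImageProcessor)

# From here on, the auto-API resolves "custom" checkpoints to the registered classes.
image_processor = AutoImageProcessor.from_pretrained("path/to/custom-checkpoint")
```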
# Usage:
# ./gen-card-facebook-wmt19.py

import os
from pathlib import Path


def write_model_card(model_card_dir, src_lang, tgt_lang):
    texts = {
        "en": "Machine learning is great, isn't it?",
        "ru": "Машинное обучение - это здорово, не так ли?",
        "de": "Maschinelles Lernen ist großartig, oder?",
    }

    # BLEU scores as follows:
    # "pair": [fairseq, transformers]
    scores = {
        "ru-en": ["[41.3](http://matrix.statmt.org/matrix/output/1907?run_id=6937)", "39.20"],
        "en-ru": ["[36.4](http://matrix.statmt.org/matrix/output/1914?run_id=6724)", "33.47"],
        "en-de": ["[43.1](http://matrix.statmt.org/matrix/output/1909?run_id=6862)", "42.83"],
        "de-en": ["[42.3](http://matrix.statmt.org/matrix/output/1902?run_id=6750)", "41.35"],
    }
    pair = f"{src_lang}-{tgt_lang}"

    readme = f"""
---
language:
- {src_lang}
- {tgt_lang}
thumbnail:
tags:
- translation
- wmt19
- facebook
license: apache-2.0
datasets:
- wmt19
metrics:
- bleu
---

# FSMT

## Model description

This is a ported version of [fairseq wmt19 transformer](https://github.com/pytorch/fairseq/blob/master/examples/wmt19/README.md) for {src_lang}-{tgt_lang}.

For more details, please see [Facebook FAIR's WMT19 News Translation Task Submission](https://arxiv.org/abs/1907.06616).

The abbreviation FSMT stands for FairSeqMachineTranslation.

All four models are available:

* [wmt19-en-ru](https://huggingface.co/facebook/wmt19-en-ru)
* [wmt19-ru-en](https://huggingface.co/facebook/wmt19-ru-en)
* [wmt19-en-de](https://huggingface.co/facebook/wmt19-en-de)
* [wmt19-de-en](https://huggingface.co/facebook/wmt19-de-en)

## Intended uses & limitations

#### How to use

```python
from transformers import FSMTForConditionalGeneration, FSMTTokenizer
mname = \"facebook/wmt19-{src_lang}-{tgt_lang}\"
tokenizer = FSMTTokenizer.from_pretrained(mname)
model = FSMTForConditionalGeneration.from_pretrained(mname)

input = \"{texts[src_lang]}\"
input_ids = tokenizer.encode(input, return_tensors=\"pt\")
outputs = model.generate(input_ids)
decoded = tokenizer.decode(outputs[0], skip_special_tokens=True)
print(decoded)  # {texts[tgt_lang]}
```

#### Limitations and bias

- The original (and this ported model) doesn't seem to handle well inputs with repeated sub-phrases; [content gets truncated](https://discuss.huggingface.co/t/issues-with-translating-inputs-containing-repeated-phrases/981)

## Training data

Pretrained weights were left identical to the original model released by fairseq. For more details, please see the [paper](https://arxiv.org/abs/1907.06616).

## Eval results

pair   | fairseq | transformers
-------|---------|----------
{pair} | {scores[pair][0]} | {scores[pair][1]}

The score is slightly below the score reported by `fairseq`, since `transformers` currently doesn't support:
- model ensemble, therefore the best performing checkpoint was ported (``model4.pt``).
- re-ranking

The score was calculated using this code:

```bash
git clone https://github.com/huggingface/transformers
cd transformers
export PAIR={pair}
export DATA_DIR=data/$PAIR
export SAVE_DIR=data/$PAIR
export BS=8
export NUM_BEAMS=15
mkdir -p $DATA_DIR
sacrebleu -t wmt19 -l $PAIR --echo src > $DATA_DIR/val.source
sacrebleu -t wmt19 -l $PAIR --echo ref > $DATA_DIR/val.target
echo $PAIR
PYTHONPATH=\"src:examples/seq2seq\" python examples/seq2seq/run_eval.py facebook/wmt19-$PAIR $DATA_DIR/val.source $SAVE_DIR/test_translations.txt --reference_path $DATA_DIR/val.target --score_path $SAVE_DIR/test_bleu.json --bs $BS --task translation --num_beams $NUM_BEAMS
```

note: fairseq reports using a beam of 50, so you should get a slightly higher score if re-run with `--num_beams 50`.

## Data Sources

- [training, etc.](http://www.statmt.org/wmt19/)
- [test set](http://matrix.statmt.org/test_sets/newstest2019.tgz?1556572561)

### BibTeX entry and citation info

```bibtex
@inproceedings{{...,
  year={{2020}},
  title={{Facebook FAIR's WMT19 News Translation Task Submission}},
  author={{Ng, Nathan and Yee, Kyra and Baevski, Alexei and Ott, Myle and Auli, Michael and Edunov, Sergey}},
  booktitle={{Proc. of WMT}},
}}
```

## TODO

- port model ensemble (fairseq uses 4 model checkpoints)

"""
    os.makedirs(model_card_dir, exist_ok=True)
    path = os.path.join(model_card_dir, "README.md")
    print(f"Generating {path}")
    with open(path, "w", encoding="utf-8") as f:
        f.write(readme)


# make sure we are under the root of the project
repo_dir = Path(__file__).resolve().parent.parent.parent
model_cards_dir = repo_dir / "model_cards"

for model_name in ["wmt19-ru-en", "wmt19-en-ru", "wmt19-en-de", "wmt19-de-en"]:
    base, src_lang, tgt_lang = model_name.split("-")
    model_card_dir = model_cards_dir / "facebook" / model_name
    write_model_card(model_card_dir, src_lang=src_lang, tgt_lang=tgt_lang)
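The loop above writes one README per language pair; for a single pair the call and the resulting eval table look like this (paths are illustrative):

```python
from pathlib import Path

write_model_card(Path("model_cards/facebook/wmt19-en-de"), src_lang="en", tgt_lang="de")
# The "Eval results" table in the emitted README resolves to:
#   pair  | fairseq                                                          | transformers
#   en-de | [43.1](http://matrix.statmt.org/matrix/output/1909?run_id=6862)  | 42.83
```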
# Tests for the zero-shot object detection pipeline.
import unittest

from transformers import MODEL_FOR_ZERO_SHOT_OBJECT_DETECTION_MAPPING, is_vision_available, pipeline
from transformers.testing_utils import (
    is_pipeline_test,
    nested_simplify,
    require_tf,
    require_torch,
    require_vision,
    slow,
)

from .test_pipelines_common import ANY


if is_vision_available():
    from PIL import Image
else:

    class Image:
        @staticmethod
        def open(*args, **kwargs):
            pass


@is_pipeline_test
@require_vision
@require_torch
class ZeroShotObjectDetectionPipelineTests(unittest.TestCase):
    model_mapping = MODEL_FOR_ZERO_SHOT_OBJECT_DETECTION_MAPPING

    def get_test_pipeline(self, model, tokenizer, processor):
        object_detector = pipeline(
            "zero-shot-object-detection", model="hf-internal-testing/tiny-random-owlvit-object-detection"
        )

        examples = [
            {
                "image": "./tests/fixtures/tests_samples/COCO/000000039769.png",
                "candidate_labels": ["cat", "remote", "couch"],
            }
        ]
        return object_detector, examples

    def run_pipeline_test(self, object_detector, examples):
        outputs = object_detector(examples[0], threshold=0.0)

        n = len(outputs)
        self.assertGreater(n, 0)
        self.assertEqual(
            outputs,
            [
                {
                    "score": ANY(float),
                    "label": ANY(str),
                    "box": {"xmin": ANY(int), "ymin": ANY(int), "xmax": ANY(int), "ymax": ANY(int)},
                }
                for i in range(n)
            ],
        )

    @require_tf
    @unittest.skip("Zero Shot Object Detection not implemented in TF")
    def test_small_model_tf(self):
        pass

    @require_torch
    def test_small_model_pt(self):
        object_detector = pipeline(
            "zero-shot-object-detection", model="hf-internal-testing/tiny-random-owlvit-object-detection"
        )

        outputs = object_detector(
            "./tests/fixtures/tests_samples/COCO/000000039769.png",
            candidate_labels=["cat", "remote", "couch"],
            threshold=0.64,
        )

        self.assertEqual(
            nested_simplify(outputs, decimals=4),
            [
                {"score": 0.7235, "label": "cat", "box": {"xmin": 204, "ymin": 167, "xmax": 232, "ymax": 190}},
                {"score": 0.7218, "label": "remote", "box": {"xmin": 204, "ymin": 167, "xmax": 232, "ymax": 190}},
                {"score": 0.7184, "label": "couch", "box": {"xmin": 204, "ymin": 167, "xmax": 232, "ymax": 190}},
                {"score": 0.6748, "label": "remote", "box": {"xmin": 571, "ymin": 83, "xmax": 598, "ymax": 103}},
                {"score": 0.6656, "label": "cat", "box": {"xmin": 571, "ymin": 83, "xmax": 598, "ymax": 103}},
                {"score": 0.6614, "label": "couch", "box": {"xmin": 571, "ymin": 83, "xmax": 598, "ymax": 103}},
                {"score": 0.6456, "label": "remote", "box": {"xmin": 494, "ymin": 105, "xmax": 521, "ymax": 127}},
                {"score": 0.642, "label": "remote", "box": {"xmin": 67, "ymin": 274, "xmax": 93, "ymax": 297}},
                {"score": 0.6419, "label": "cat", "box": {"xmin": 494, "ymin": 105, "xmax": 521, "ymax": 127}},
            ],
        )

        outputs = object_detector(
            [
                {
                    "image": "./tests/fixtures/tests_samples/COCO/000000039769.png",
                    "candidate_labels": ["cat", "remote", "couch"],
                }
            ],
            threshold=0.64,
        )

        self.assertEqual(
            nested_simplify(outputs, decimals=4),
            [
                [
                    {"score": 0.7235, "label": "cat", "box": {"xmin": 204, "ymin": 167, "xmax": 232, "ymax": 190}},
                    {"score": 0.7218, "label": "remote", "box": {"xmin": 204, "ymin": 167, "xmax": 232, "ymax": 190}},
                    {"score": 0.7184, "label": "couch", "box": {"xmin": 204, "ymin": 167, "xmax": 232, "ymax": 190}},
                    {"score": 0.6748, "label": "remote", "box": {"xmin": 571, "ymin": 83, "xmax": 598, "ymax": 103}},
                    {"score": 0.6656, "label": "cat", "box": {"xmin": 571, "ymin": 83, "xmax": 598, "ymax": 103}},
                    {"score": 0.6614, "label": "couch", "box": {"xmin": 571, "ymin": 83, "xmax": 598, "ymax": 103}},
                    {"score": 0.6456, "label": "remote", "box": {"xmin": 494, "ymin": 105, "xmax": 521, "ymax": 127}},
                    {"score": 0.642, "label": "remote", "box": {"xmin": 67, "ymin": 274, "xmax": 93, "ymax": 297}},
                    {"score": 0.6419, "label": "cat", "box": {"xmin": 494, "ymin": 105, "xmax": 521, "ymax": 127}},
                ]
            ],
        )

    @require_torch
    @slow
    def test_large_model_pt(self):
        object_detector = pipeline("zero-shot-object-detection")

        outputs = object_detector(
            "http://images.cocodataset.org/val2017/000000039769.jpg",
            candidate_labels=["cat", "remote", "couch"],
        )
        self.assertEqual(
            nested_simplify(outputs, decimals=4),
            [
                {"score": 0.2868, "label": "cat", "box": {"xmin": 324, "ymin": 20, "xmax": 640, "ymax": 373}},
                {"score": 0.277, "label": "remote", "box": {"xmin": 40, "ymin": 72, "xmax": 177, "ymax": 115}},
                {"score": 0.2537, "label": "cat", "box": {"xmin": 1, "ymin": 55, "xmax": 315, "ymax": 472}},
                {"score": 0.1474, "label": "remote", "box": {"xmin": 335, "ymin": 74, "xmax": 371, "ymax": 187}},
                {"score": 0.1208, "label": "couch", "box": {"xmin": 4, "ymin": 0, "xmax": 642, "ymax": 476}},
            ],
        )

        outputs = object_detector(
            [
                {
                    "image": "http://images.cocodataset.org/val2017/000000039769.jpg",
                    "candidate_labels": ["cat", "remote", "couch"],
                },
                {
                    "image": "http://images.cocodataset.org/val2017/000000039769.jpg",
                    "candidate_labels": ["cat", "remote", "couch"],
                },
            ],
        )
        self.assertEqual(
            nested_simplify(outputs, decimals=4),
            [
                [
                    {"score": 0.2868, "label": "cat", "box": {"xmin": 324, "ymin": 20, "xmax": 640, "ymax": 373}},
                    {"score": 0.277, "label": "remote", "box": {"xmin": 40, "ymin": 72, "xmax": 177, "ymax": 115}},
                    {"score": 0.2537, "label": "cat", "box": {"xmin": 1, "ymin": 55, "xmax": 315, "ymax": 472}},
                    {"score": 0.1474, "label": "remote", "box": {"xmin": 335, "ymin": 74, "xmax": 371, "ymax": 187}},
                    {"score": 0.1208, "label": "couch", "box": {"xmin": 4, "ymin": 0, "xmax": 642, "ymax": 476}},
                ],
                [
                    {"score": 0.2868, "label": "cat", "box": {"xmin": 324, "ymin": 20, "xmax": 640, "ymax": 373}},
                    {"score": 0.277, "label": "remote", "box": {"xmin": 40, "ymin": 72, "xmax": 177, "ymax": 115}},
                    {"score": 0.2537, "label": "cat", "box": {"xmin": 1, "ymin": 55, "xmax": 315, "ymax": 472}},
                    {"score": 0.1474, "label": "remote", "box": {"xmin": 335, "ymin": 74, "xmax": 371, "ymax": 187}},
                    {"score": 0.1208, "label": "couch", "box": {"xmin": 4, "ymin": 0, "xmax": 642, "ymax": 476}},
                ],
            ],
        )

    @require_tf
    @unittest.skip("Zero Shot Object Detection not implemented in TF")
    def test_large_model_tf(self):
        pass

    @require_torch
    @slow
    def test_threshold(self):
        threshold = 0.2
        object_detector = pipeline("zero-shot-object-detection")

        outputs = object_detector(
            "http://images.cocodataset.org/val2017/000000039769.jpg",
            candidate_labels=["cat", "remote", "couch"],
            threshold=threshold,
        )
        self.assertEqual(
            nested_simplify(outputs, decimals=4),
            [
                {"score": 0.2868, "label": "cat", "box": {"xmin": 324, "ymin": 20, "xmax": 640, "ymax": 373}},
                {"score": 0.277, "label": "remote", "box": {"xmin": 40, "ymin": 72, "xmax": 177, "ymax": 115}},
                {"score": 0.2537, "label": "cat", "box": {"xmin": 1, "ymin": 55, "xmax": 315, "ymax": 472}},
            ],
        )

    @require_torch
    @slow
    def test_top_k(self):
        top_k = 2
        object_detector = pipeline("zero-shot-object-detection")

        outputs = object_detector(
            "http://images.cocodataset.org/val2017/000000039769.jpg",
            candidate_labels=["cat", "remote", "couch"],
            top_k=top_k,
        )
        self.assertEqual(
            nested_simplify(outputs, decimals=4),
            [
                {"score": 0.2868, "label": "cat", "box": {"xmin": 324, "ymin": 20, "xmax": 640, "ymax": 373}},
                {"score": 0.277, "label": "remote", "box": {"xmin": 40, "ymin": 72, "xmax": 177, "ymax": 115}},
            ],
        )
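Outside the test harness, the same pipeline can be used directly; the call below mirrors the slow tests above (the default checkpoint is downloaded on first use):

```python
from transformers import pipeline

detector = pipeline("zero-shot-object-detection")
predictions = detector(
    "http://images.cocodataset.org/val2017/000000039769.jpg",
    candidate_labels=["cat", "remote", "couch"],
    threshold=0.2,
)
for p in predictions:
    print(f"{p['label']}: {p['score']:.3f} at {p['box']}")
```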
# Fast (tokenizers-backed) tokenizer for CamemBERT.
import os
from shutil import copyfile
from typing import List, Optional, Tuple

from ...tokenization_utils import AddedToken
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging


if is_sentencepiece_available():
    from .tokenization_camembert import CamembertTokenizer
else:
    CamembertTokenizer = None

logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "sentencepiece.bpe.model", "tokenizer_file": "tokenizer.json"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "camembert-base": "https://huggingface.co/camembert-base/resolve/main/sentencepiece.bpe.model",
    },
    "tokenizer_file": {
        "camembert-base": "https://huggingface.co/camembert-base/resolve/main/tokenizer.json",
    },
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "camembert-base": 512,
}

SPIECE_UNDERLINE = "▁"


class CamembertTokenizerFast(PreTrainedTokenizerFast):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]
    slow_tokenizer_class = CamembertTokenizer

    def __init__(
        self,
        vocab_file=None,
        tokenizer_file=None,
        bos_token="<s>",
        eos_token="</s>",
        sep_token="</s>",
        cls_token="<s>",
        unk_token="<unk>",
        pad_token="<pad>",
        mask_token="<mask>",
        additional_special_tokens=["<s>NOTUSED", "</s>NOTUSED"],
        **kwargs,
    ):
        # Mask token behaves like a normal word, i.e. include the space before it
        mask_token = AddedToken(mask_token, lstrip=True, rstrip=False) if isinstance(mask_token, str) else mask_token

        super().__init__(
            vocab_file,
            tokenizer_file=tokenizer_file,
            bos_token=bos_token,
            eos_token=eos_token,
            sep_token=sep_token,
            cls_token=cls_token,
            unk_token=unk_token,
            pad_token=pad_token,
            mask_token=mask_token,
            additional_special_tokens=additional_special_tokens,
            **kwargs,
        )

        self.vocab_file = vocab_file
        self.can_save_slow_tokenizer = False if not self.vocab_file else True

    def build_inputs_with_special_tokens(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        if token_ids_1 is None:
            return [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
        cls = [self.cls_token_id]
        sep = [self.sep_token_id]
        return cls + token_ids_0 + sep + sep + token_ids_1 + sep

    def create_token_type_ids_from_sequences(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]

        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep) * [0]

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        if not self.can_save_slow_tokenizer:
            raise ValueError(
                "Your fast tokenizer does not have the necessary information to save the vocabulary for a slow "
                "tokenizer."
            )

        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )

        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file):
            copyfile(self.vocab_file, out_vocab_file)

        return (out_vocab_file,)
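To make the special-token layout above concrete, here is a short sketch using the public API (the French sentences are arbitrary sample inputs):

```python
from transformers import CamembertTokenizerFast

tokenizer = CamembertTokenizerFast.from_pretrained("camembert-base")

ids_a = tokenizer.convert_tokens_to_ids(tokenizer.tokenize("J'aime le camembert"))
ids_b = tokenizer.convert_tokens_to_ids(tokenizer.tokenize("Le fromage est bon"))

# Single sequence: <s> A </s>
single = tokenizer.build_inputs_with_special_tokens(ids_a)
# Pair of sequences: <s> A </s></s> B </s>  (note the doubled separator)
pair = tokenizer.build_inputs_with_special_tokens(ids_a, ids_b)

# CamemBERT does not use token type ids: the mask is all zeros.
assert set(tokenizer.create_token_type_ids_from_sequences(ids_a, ids_b)) == {0}
```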
# Plot benchmark results (time or memory vs. batch size / sequence length) from a CSV file.
import csv
from collections import defaultdict
from dataclasses import dataclass, field
from typing import List, Optional

import matplotlib.pyplot as plt
import numpy as np
from matplotlib.ticker import ScalarFormatter

from transformers import HfArgumentParser


def list_field(default=None, metadata=None):
    return field(default_factory=lambda: default, metadata=metadata)


@dataclass
class PlotArguments:
    csv_file: str = field(
        metadata={"help": "The csv file to plot."},
    )
    plot_along_batch: bool = field(
        default=False,
        metadata={"help": "Whether to plot along batch size or sequence length. Defaults to sequence length."},
    )
    is_time: bool = field(
        default=False,
        metadata={"help": "Whether the csv file has time results or memory results. Defaults to memory results."},
    )
    no_log_scale: bool = field(
        default=False,
        metadata={"help": "Disable logarithmic scale when plotting"},
    )
    is_train: bool = field(
        default=False,
        metadata={
            "help": "Whether the csv file has training results or inference results. Defaults to inference results."
        },
    )
    figure_png_file: Optional[str] = field(
        default=None,
        metadata={"help": "Filename under which the plot will be saved. If unused no plot is saved."},
    )
    short_model_names: Optional[List[str]] = list_field(
        default=None, metadata={"help": "List of model names that are used instead of the ones in the csv file."}
    )


def can_convert_to_int(string):
    try:
        int(string)
        return True
    except ValueError:
        return False


def can_convert_to_float(string):
    try:
        float(string)
        return True
    except ValueError:
        return False


class Plot:
    def __init__(self, args):
        self.args = args
        self.result_dict = defaultdict(lambda: {"bsz": [], "seq_len": [], "result": {}})

        with open(self.args.csv_file, newline="") as csv_file:
            reader = csv.DictReader(csv_file)
            for row in reader:
                model_name = row["model"]
                self.result_dict[model_name]["bsz"].append(int(row["batch_size"]))
                self.result_dict[model_name]["seq_len"].append(int(row["sequence_length"]))
                if can_convert_to_int(row["result"]):
                    # value is not None
                    self.result_dict[model_name]["result"][
                        (int(row["batch_size"]), int(row["sequence_length"]))
                    ] = int(row["result"])
                elif can_convert_to_float(row["result"]):
                    # value is not None
                    self.result_dict[model_name]["result"][
                        (int(row["batch_size"]), int(row["sequence_length"]))
                    ] = float(row["result"])

    def plot(self):
        fig, ax = plt.subplots()
        title_str = "Time usage" if self.args.is_time else "Memory usage"
        title_str = title_str + " for training" if self.args.is_train else title_str + " for inference"

        if not self.args.no_log_scale:
            # set logarithm scales
            ax.set_xscale("log")
            ax.set_yscale("log")

        for axis in [ax.xaxis, ax.yaxis]:
            axis.set_major_formatter(ScalarFormatter())

        for model_name_idx, model_name in enumerate(self.result_dict.keys()):
            batch_sizes = sorted(set(self.result_dict[model_name]["bsz"]))
            sequence_lengths = sorted(set(self.result_dict[model_name]["seq_len"]))
            results = self.result_dict[model_name]["result"]

            (x_axis_array, inner_loop_array) = (
                (batch_sizes, sequence_lengths) if self.args.plot_along_batch else (sequence_lengths, batch_sizes)
            )

            label_model_name = (
                model_name if self.args.short_model_names is None else self.args.short_model_names[model_name_idx]
            )

            for inner_loop_value in inner_loop_array:
                if self.args.plot_along_batch:
                    y_axis_array = np.asarray(
                        [results[(x, inner_loop_value)] for x in x_axis_array if (x, inner_loop_value) in results],
                        dtype=int,
                    )
                else:
                    y_axis_array = np.asarray(
                        [results[(inner_loop_value, x)] for x in x_axis_array if (inner_loop_value, x) in results],
                        dtype=np.float32,
                    )

                (x_axis_label, inner_loop_label) = (
                    ("batch_size", "len") if self.args.plot_along_batch else ("in #tokens", "bsz")
                )

                x_axis_array = np.asarray(x_axis_array, int)[: len(y_axis_array)]
                plt.scatter(
                    x_axis_array, y_axis_array, label=f"{label_model_name} - {inner_loop_label}: {inner_loop_value}"
                )
                plt.plot(x_axis_array, y_axis_array, "--")

            title_str += f" {label_model_name} vs."

        title_str = title_str[:-4]
        y_axis_label = "Time in s" if self.args.is_time else "Memory in MB"

        # plot
        plt.title(title_str)
        plt.xlabel(x_axis_label)
        plt.ylabel(y_axis_label)
        plt.legend()

        if self.args.figure_png_file is not None:
            plt.savefig(self.args.figure_png_file)
        else:
            plt.show()


def main():
    parser = HfArgumentParser(PlotArguments)
    plot_args = parser.parse_args_into_dataclasses()[0]
    plot = Plot(args=plot_args)
    plot.plot()


if __name__ == "__main__":
    main()
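The CSV this script expects has one row per (model, batch size, sequence length) measurement, matching the columns read in `Plot.__init__`. A minimal synthetic input (the numbers are made up purely for illustration, and the script filename below is assumed):

```python
import csv

rows = [
    {"model": "bert-base-uncased", "batch_size": 8, "sequence_length": 128, "result": 1421},
    {"model": "bert-base-uncased", "batch_size": 8, "sequence_length": 512, "result": 2837},
]
with open("results.csv", "w", newline="") as f:
    writer = csv.DictWriter(f, fieldnames=["model", "batch_size", "sequence_length", "result"])
    writer.writeheader()
    writer.writerows(rows)

# then: python plot_csv_file.py --csv_file results.csv --figure_png_file plot.png
```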
# Lazy-loading __init__ for the DeBERTa model family.
from typing import TYPE_CHECKING

from ...utils import (
    OptionalDependencyNotAvailable,
    _LazyModule,
    is_tf_available,
    is_tokenizers_available,
    is_torch_available,
)


_import_structure = {
    "configuration_deberta": ["DEBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP", "DebertaConfig", "DebertaOnnxConfig"],
    "tokenization_deberta": ["DebertaTokenizer"],
}

try:
    if not is_tokenizers_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["tokenization_deberta_fast"] = ["DebertaTokenizerFast"]

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_deberta"] = [
        "DEBERTA_PRETRAINED_MODEL_ARCHIVE_LIST",
        "DebertaForMaskedLM",
        "DebertaForQuestionAnswering",
        "DebertaForSequenceClassification",
        "DebertaForTokenClassification",
        "DebertaModel",
        "DebertaPreTrainedModel",
    ]

try:
    if not is_tf_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_tf_deberta"] = [
        "TF_DEBERTA_PRETRAINED_MODEL_ARCHIVE_LIST",
        "TFDebertaForMaskedLM",
        "TFDebertaForQuestionAnswering",
        "TFDebertaForSequenceClassification",
        "TFDebertaForTokenClassification",
        "TFDebertaModel",
        "TFDebertaPreTrainedModel",
    ]


if TYPE_CHECKING:
    from .configuration_deberta import DEBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP, DebertaConfig, DebertaOnnxConfig
    from .tokenization_deberta import DebertaTokenizer

    try:
        if not is_tokenizers_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_deberta_fast import DebertaTokenizerFast

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_deberta import (
            DEBERTA_PRETRAINED_MODEL_ARCHIVE_LIST,
            DebertaForMaskedLM,
            DebertaForQuestionAnswering,
            DebertaForSequenceClassification,
            DebertaForTokenClassification,
            DebertaModel,
            DebertaPreTrainedModel,
        )

    try:
        if not is_tf_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tf_deberta import (
            TF_DEBERTA_PRETRAINED_MODEL_ARCHIVE_LIST,
            TFDebertaForMaskedLM,
            TFDebertaForQuestionAnswering,
            TFDebertaForSequenceClassification,
            TFDebertaForTokenClassification,
            TFDebertaModel,
            TFDebertaPreTrainedModel,
        )

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
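Both this module and the XGLM one below rely on `_LazyModule`: heavy submodules are imported only when an attribute is first accessed. A sketch of the observable behavior, assuming a `transformers` installation with PyTorch available:

```python
import transformers

# No modeling code has been imported yet; attribute access triggers the lazy import.
model_cls = transformers.DebertaForSequenceClassification
print(model_cls.__module__)  # transformers.models.deberta.modeling_deberta
```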
# Lazy-loading __init__ for the XGLM model family.
from typing import TYPE_CHECKING

from ...utils import (
    OptionalDependencyNotAvailable,
    _LazyModule,
    is_flax_available,
    is_sentencepiece_available,
    is_tf_available,
    is_tokenizers_available,
    is_torch_available,
)


_import_structure = {"configuration_xglm": ["XGLM_PRETRAINED_CONFIG_ARCHIVE_MAP", "XGLMConfig"]}

try:
    if not is_sentencepiece_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["tokenization_xglm"] = ["XGLMTokenizer"]

try:
    if not is_tokenizers_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["tokenization_xglm_fast"] = ["XGLMTokenizerFast"]

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_xglm"] = [
        "XGLM_PRETRAINED_MODEL_ARCHIVE_LIST",
        "XGLMForCausalLM",
        "XGLMModel",
        "XGLMPreTrainedModel",
    ]

try:
    if not is_flax_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_flax_xglm"] = [
        "FlaxXGLMForCausalLM",
        "FlaxXGLMModel",
        "FlaxXGLMPreTrainedModel",
    ]

try:
    if not is_tf_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_tf_xglm"] = [
        "TF_XGLM_PRETRAINED_MODEL_ARCHIVE_LIST",
        "TFXGLMForCausalLM",
        "TFXGLMModel",
        "TFXGLMPreTrainedModel",
    ]


if TYPE_CHECKING:
    from .configuration_xglm import XGLM_PRETRAINED_CONFIG_ARCHIVE_MAP, XGLMConfig

    try:
        if not is_sentencepiece_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_xglm import XGLMTokenizer

    try:
        if not is_tokenizers_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_xglm_fast import XGLMTokenizerFast

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_xglm import XGLM_PRETRAINED_MODEL_ARCHIVE_LIST, XGLMForCausalLM, XGLMModel, XGLMPreTrainedModel

    try:
        if not is_flax_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_flax_xglm import FlaxXGLMForCausalLM, FlaxXGLMModel, FlaxXGLMPreTrainedModel

    try:
        if not is_tf_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tf_xglm import (
            TF_XGLM_PRETRAINED_MODEL_ARCHIVE_LIST,
            TFXGLMForCausalLM,
            TFXGLMModel,
            TFXGLMPreTrainedModel,
        )

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure)
"""simple docstring"""
import argparse
import requests
import torch
from PIL import Image
from transformers import ViTMAEConfig, ViTMAEForPreTraining, ViTMAEImageProcessor
def rename_key(name):
    """Map an original ViT MAE checkpoint key to its Hugging Face equivalent."""
    if "cls_token" in name:
        name = name.replace("cls_token", "vit.embeddings.cls_token")
    if "mask_token" in name:
        name = name.replace("mask_token", "decoder.mask_token")
    if "decoder_pos_embed" in name:
        name = name.replace("decoder_pos_embed", "decoder.decoder_pos_embed")
    if "pos_embed" in name and "decoder" not in name:
        name = name.replace("pos_embed", "vit.embeddings.position_embeddings")
    if "patch_embed.proj" in name:
        name = name.replace("patch_embed.proj", "vit.embeddings.patch_embeddings.projection")
    if "patch_embed.norm" in name:
        name = name.replace("patch_embed.norm", "vit.embeddings.norm")
    if "decoder_blocks" in name:
        name = name.replace("decoder_blocks", "decoder.decoder_layers")
    if "blocks" in name:
        name = name.replace("blocks", "vit.encoder.layer")
    if "attn.proj" in name:
        name = name.replace("attn.proj", "attention.output.dense")
    if "attn" in name:
        name = name.replace("attn", "attention.self")
    if "norm1" in name:
        name = name.replace("norm1", "layernorm_before")
    if "norm2" in name:
        name = name.replace("norm2", "layernorm_after")
    if "mlp.fc1" in name:
        name = name.replace("mlp.fc1", "intermediate.dense")
    if "mlp.fc2" in name:
        name = name.replace("mlp.fc2", "output.dense")
    if "decoder_embed" in name:
        name = name.replace("decoder_embed", "decoder.decoder_embed")
    if "decoder_norm" in name:
        name = name.replace("decoder_norm", "decoder.decoder_norm")
    if "decoder_pred" in name:
        name = name.replace("decoder_pred", "decoder.decoder_pred")
    if "norm.weight" in name and "decoder" not in name:
        name = name.replace("norm.weight", "vit.layernorm.weight")
    if "norm.bias" in name and "decoder" not in name:
        name = name.replace("norm.bias", "vit.layernorm.bias")
    return name
def convert_state_dict(orig_state_dict, config):
    """Rename all keys and split the fused qkv projections into separate q/k/v tensors."""
    for key in orig_state_dict.copy().keys():
        val = orig_state_dict.pop(key)
        if "qkv" in key:
            key_split = key.split(".")
            layer_num = int(key_split[1])
            if "decoder_blocks" in key:
                dim = config.decoder_hidden_size
                prefix = "decoder.decoder_layers."
            else:
                dim = config.hidden_size
                prefix = "vit.encoder.layer."
            if "weight" in key:
                orig_state_dict[f"{prefix}{layer_num}.attention.attention.query.weight"] = val[:dim, :]
                orig_state_dict[f"{prefix}{layer_num}.attention.attention.key.weight"] = val[dim : dim * 2, :]
                orig_state_dict[f"{prefix}{layer_num}.attention.attention.value.weight"] = val[-dim:, :]
            elif "bias" in key:
                orig_state_dict[f"{prefix}{layer_num}.attention.attention.query.bias"] = val[:dim]
                orig_state_dict[f"{prefix}{layer_num}.attention.attention.key.bias"] = val[dim : dim * 2]
                orig_state_dict[f"{prefix}{layer_num}.attention.attention.value.bias"] = val[-dim:]
        else:
            orig_state_dict[rename_key(key)] = val

    return orig_state_dict
def convert_vit_mae_checkpoint(checkpoint_url, pytorch_dump_folder_path):
    """Download an original ViT MAE checkpoint, convert it and verify the logits."""
    config = ViTMAEConfig()
    if "large" in checkpoint_url:
        config.hidden_size = 1024
        config.intermediate_size = 4096
        config.num_hidden_layers = 24
        config.num_attention_heads = 16
    elif "huge" in checkpoint_url:
        config.patch_size = 14
        config.hidden_size = 1280
        config.intermediate_size = 5120
        config.num_hidden_layers = 32
        config.num_attention_heads = 16

    model = ViTMAEForPreTraining(config)

    state_dict = torch.hub.load_state_dict_from_url(checkpoint_url, map_location="cpu")["model"]
    image_processor = ViTMAEImageProcessor(size=config.image_size)
    new_state_dict = convert_state_dict(state_dict, config)

    model.load_state_dict(new_state_dict)
    model.eval()

    url = "https://user-images.githubusercontent.com/11435359/147738734-196fd92f-9260-48d5-ba7e-bf103d29364d.jpg"
    image = Image.open(requests.get(url, stream=True).raw)
    inputs = image_processor(images=image, return_tensors="pt")

    # forward pass
    torch.manual_seed(2)
    outputs = model(**inputs)
    logits = outputs.logits

    if "large" in checkpoint_url:
        expected_slice = torch.tensor(
            [[-0.7309, -0.7128, -1.0169], [-1.0161, -0.9058, -1.1878], [-1.0478, -0.9411, -1.1911]]
        )
    elif "huge" in checkpoint_url:
        expected_slice = torch.tensor(
            [[-1.1599, -0.9199, -1.2221], [-1.1952, -0.9269, -1.2307], [-1.2143, -0.9337, -1.2262]]
        )
    else:
        expected_slice = torch.tensor(
            [[-0.9192, -0.8481, -1.1259], [-1.1349, -1.0034, -1.2599], [-1.1757, -1.0429, -1.2726]]
        )

    # verify logits
    assert torch.allclose(logits[0, :3, :3], expected_slice, atol=1e-4)

    print(f"Saving model to {pytorch_dump_folder_path}")
    model.save_pretrained(pytorch_dump_folder_path)
    print(f"Saving image processor to {pytorch_dump_folder_path}")
    image_processor.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--checkpoint_url",
default="https://dl.fbaipublicfiles.com/mae/visualize/mae_visualize_vit_base.pth",
type=str,
help="URL of the checkpoint you'd like to convert.",
)
parser.add_argument(
"--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model directory."
)
    args = parser.parse_args()
convert_vit_mae_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path)
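
# Example invocation (illustrative; the default URL points at the base MAE
# visualization checkpoint, the output folder and script name are hypothetical):
#
#   python convert_vit_mae_to_pytorch.py \
#       --checkpoint_url https://dl.fbaipublicfiles.com/mae/visualize/mae_visualize_vit_base.pth \
#       --pytorch_dump_folder_path ./vit-mae-base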
| 365 |
"""simple docstring"""
import argparse
import json
from typing import List
from ltp import LTP
from transformers import BertTokenizer
def _is_chinese_char(cp):
    """Check whether the code point `cp` falls in a CJK Unicode block."""
if (
(cp >= 0x4e00 and cp <= 0x9fff)
or (cp >= 0x3400 and cp <= 0x4dbf) #
or (cp >= 0x20000 and cp <= 0x2a6df) #
or (cp >= 0x2a700 and cp <= 0x2b73f) #
or (cp >= 0x2b740 and cp <= 0x2b81f) #
or (cp >= 0x2b820 and cp <= 0x2ceaf) #
or (cp >= 0xf900 and cp <= 0xfaff)
or (cp >= 0x2f800 and cp <= 0x2fa1f) #
): #
return True
return False
def is_chinese(word: str):
    """Return 1 if every character of `word` is a CJK character, else 0."""
    for char in word:
        cp = ord(char)
        if not _is_chinese_char(cp):
            return 0
    return 1
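
# Quick sanity checks (added for illustration): a word counts as Chinese only if
# every one of its characters falls in a CJK block.
assert is_chinese("你好") == 1
assert is_chinese("hello") == 0
assert is_chinese("你好hello") == 0  # mixed input is rejected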
def get_chinese_word(tokens: list):
    """Collect the multi-character, fully-Chinese words found in `tokens`."""
    word_set = set()
    for token in tokens:
        chinese_word = len(token) > 1 and is_chinese(token)
        if chinese_word:
            word_set.add(token)
    word_list = list(word_set)
    return word_list
def add_sub_symbol(bert_tokens: list, chinese_word_set: set):
    """Prefix BERT sub-tokens with '##' when they continue a whole Chinese word."""
    if not chinese_word_set:
        return bert_tokens
    max_word_len = max([len(w) for w in chinese_word_set])

    bert_word = bert_tokens
    start, end = 0, len(bert_word)
    while start < end:
        single_word = True
        if is_chinese(bert_word[start]):
            max_match_len = min(end - start, max_word_len)
            for i in range(max_match_len, 1, -1):
                whole_word = "".join(bert_word[start : start + i])
                if whole_word in chinese_word_set:
                    for j in range(start + 1, start + i):
                        bert_word[j] = "##" + bert_word[j]
                    start = start + i
                    single_word = False
                    break
        if single_word:
            start += 1
    return bert_word
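
# Worked example (added): if LTP segmented "中国" as one word, the BERT tokens
# ["中", "国", "人"] become ["中", "##国", "人"] -- the format whole-word masking expects.
#
#   add_sub_symbol(["中", "国", "人"], {"中国"})  # -> ["中", "##国", "人"]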
def prepare_ref(lines: list, ltp_tokenizer, bert_tokenizer):
    """Compute, per line, the positions of sub-tokens that continue a whole Chinese word."""
    ltp_res = []
    for i in range(0, len(lines), 100):
        res = ltp_tokenizer.seg(lines[i : i + 100])[0]
        res = [get_chinese_word(r) for r in res]
        ltp_res.extend(res)
    assert len(ltp_res) == len(lines)

    bert_res = []
    for i in range(0, len(lines), 100):
        res = bert_tokenizer(lines[i : i + 100], add_special_tokens=True, truncation=True, max_length=512)
        bert_res.extend(res["input_ids"])
    assert len(bert_res) == len(lines)

    ref_ids = []
    for input_ids, chinese_word in zip(bert_res, ltp_res):
        input_tokens = []
        for id in input_ids:
            token = bert_tokenizer._convert_id_to_token(id)
            input_tokens.append(token)
        input_tokens = add_sub_symbol(input_tokens, chinese_word)
        ref_id = []
        # We only save the positions of Chinese subwords that start with ##,
        # which means they are part of a whole word.
        for i, token in enumerate(input_tokens):
            if token[:2] == "##":
                clean_token = token[2:]
                # save chinese tokens' pos
                if len(clean_token) == 1 and _is_chinese_char(ord(clean_token)):
                    ref_id.append(i)
        ref_ids.append(ref_id)

    assert len(ref_ids) == len(bert_res)
    return ref_ids
def main(args):
    with open(args.file_name, "r", encoding="utf-8") as f:
        data = f.readlines()
    data = [line.strip() for line in data if len(line) > 0 and not line.isspace()]  # avoid delimiter like '\u2029'
    ltp_tokenizer = LTP(args.ltp)  # faster in GPU device
    bert_tokenizer = BertTokenizer.from_pretrained(args.bert)

    ref_ids = prepare_ref(data, ltp_tokenizer, bert_tokenizer)

    with open(args.save_path, "w", encoding="utf-8") as f:
        data = [json.dumps(ref) + "\n" for ref in ref_ids]
        f.writelines(data)
if __name__ == "__main__":
    parser = argparse.ArgumentParser(description="prepare_chinese_ref")
parser.add_argument(
"--file_name",
type=str,
default="./resources/chinese-demo.txt",
help="file need process, same as training data in lm",
)
parser.add_argument(
"--ltp", type=str, default="./resources/ltp", help="resources for LTP tokenizer, usually a path"
)
parser.add_argument("--bert", type=str, default="./resources/robert", help="resources for Bert tokenizer")
parser.add_argument("--save_path", type=str, default="./resources/ref.txt", help="path to save res")
    args = parser.parse_args()
main(args)
| 313 | 0 |
import os
import sys
from contextlib import contextmanager
# Windows only
if os.name == "nt":
import ctypes
import msvcrt # noqa
class CursorInfo(ctypes.Structure):
    # _fields_ is a specific attr expected by ctypes
    _fields_ = [("size", ctypes.c_int), ("visible", ctypes.c_byte)]
def hide_cursor():
    """Hide the terminal cursor (Windows via the console API, POSIX via an ANSI escape)."""
    if os.name == "nt":
        ci = CursorInfo()
        handle = ctypes.windll.kernel32.GetStdHandle(-11)
        ctypes.windll.kernel32.GetConsoleCursorInfo(handle, ctypes.byref(ci))
        ci.visible = False
        ctypes.windll.kernel32.SetConsoleCursorInfo(handle, ctypes.byref(ci))
    elif os.name == "posix":
        sys.stdout.write("\033[?25l")
        sys.stdout.flush()
def show_cursor():
    """Show the terminal cursor again."""
    if os.name == "nt":
        ci = CursorInfo()
        handle = ctypes.windll.kernel32.GetStdHandle(-11)
        ctypes.windll.kernel32.GetConsoleCursorInfo(handle, ctypes.byref(ci))
        ci.visible = True
        ctypes.windll.kernel32.SetConsoleCursorInfo(handle, ctypes.byref(ci))
    elif os.name == "posix":
        sys.stdout.write("\033[?25h")
        sys.stdout.flush()
@contextmanager
def hide():
    """Context manager that hides the terminal cursor and restores it afterwards."""
try:
hide_cursor()
yield
finally:
show_cursor()
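
if __name__ == "__main__":
    # Usage sketch (added): the context manager guarantees the cursor comes back
    # even if the wrapped block raises.
    import time

    with hide():
        time.sleep(1)  # cursor is hidden here, restored afterwards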
| 147 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available
_import_structure = {
'configuration_transfo_xl': ['TRANSFO_XL_PRETRAINED_CONFIG_ARCHIVE_MAP', 'TransfoXLConfig'],
'tokenization_transfo_xl': ['TransfoXLCorpus', 'TransfoXLTokenizer'],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_transfo_xl"] = [
'TRANSFO_XL_PRETRAINED_MODEL_ARCHIVE_LIST',
'AdaptiveEmbedding',
'TransfoXLForSequenceClassification',
'TransfoXLLMHeadModel',
'TransfoXLModel',
'TransfoXLPreTrainedModel',
'load_tf_weights_in_transfo_xl',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_transfo_xl"] = [
'TF_TRANSFO_XL_PRETRAINED_MODEL_ARCHIVE_LIST',
'TFAdaptiveEmbedding',
'TFTransfoXLForSequenceClassification',
'TFTransfoXLLMHeadModel',
'TFTransfoXLMainLayer',
'TFTransfoXLModel',
'TFTransfoXLPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_transfo_xl import TRANSFO_XL_PRETRAINED_CONFIG_ARCHIVE_MAP, TransfoXLConfig
from .tokenization_transfo_xl import TransfoXLCorpus, TransfoXLTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_transfo_xl import (
TRANSFO_XL_PRETRAINED_MODEL_ARCHIVE_LIST,
AdaptiveEmbedding,
TransfoXLForSequenceClassification,
TransfoXLLMHeadModel,
TransfoXLModel,
TransfoXLPreTrainedModel,
load_tf_weights_in_transfo_xl,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_transfo_xl import (
TF_TRANSFO_XL_PRETRAINED_MODEL_ARCHIVE_LIST,
TFAdaptiveEmbedding,
TFTransfoXLForSequenceClassification,
TFTransfoXLLMHeadModel,
TFTransfoXLMainLayer,
TFTransfoXLModel,
TFTransfoXLPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 147 | 1 |
"""simple docstring"""
from typing import Optional, Tuple, Union
import tensorflow as tf
from ...activations_tf import ACT2FN
from ...file_utils import add_code_sample_docstrings, add_start_docstrings, add_start_docstrings_to_model_forward
from ...modeling_tf_outputs import (
TFBaseModelOutputWithNoAttention,
TFBaseModelOutputWithPoolingAndNoAttention,
TFSequenceClassifierOutput,
)
from ...modeling_tf_utils import TFPreTrainedModel, TFSequenceClassificationLoss, keras_serializable, unpack_inputs
from ...tf_utils import shape_list
from ...utils import logging
from .configuration_regnet import RegNetConfig
logger = logging.get_logger(__name__)

# General docstring
_CONFIG_FOR_DOC = "RegNetConfig"

# Base docstring
_CHECKPOINT_FOR_DOC = "facebook/regnet-y-040"
_EXPECTED_OUTPUT_SHAPE = [1, 1088, 7, 7]

# Image classification docstring
_IMAGE_CLASS_CHECKPOINT = "facebook/regnet-y-040"
_IMAGE_CLASS_EXPECTED_OUTPUT = "tabby, tabby cat"

TF_REGNET_PRETRAINED_MODEL_ARCHIVE_LIST = [
    "facebook/regnet-y-040",
    # See all regnet models at https://huggingface.co/models?filter=regnet
]
class TFRegNetConvLayer(tf.keras.layers.Layer):
    def __init__(self, out_channels, kernel_size=3, stride=1, groups=1, activation="relu", **kwargs):
        super().__init__(**kwargs)
        # The padding and conv has been verified in
        # https://colab.research.google.com/gist/sayakpaul/854bc10eeaf21c9ee2119e0b9f3841a7/scratchpad.ipynb
        self.padding = tf.keras.layers.ZeroPadding2D(padding=kernel_size // 2)
        self.convolution = tf.keras.layers.Conv2D(
            filters=out_channels,
            kernel_size=kernel_size,
            strides=stride,
            padding="VALID",
            groups=groups,
            use_bias=False,
            name="convolution",
        )
        self.normalization = tf.keras.layers.BatchNormalization(epsilon=1e-5, momentum=0.9, name="normalization")
        self.activation = ACT2FN[activation] if activation is not None else tf.identity

    def call(self, hidden_state):
        hidden_state = self.convolution(self.padding(hidden_state))
        hidden_state = self.normalization(hidden_state)
        hidden_state = self.activation(hidden_state)
        return hidden_state
class TFRegNetEmbeddings(tf.keras.layers.Layer):
    def __init__(self, config: RegNetConfig, **kwargs):
        super().__init__(**kwargs)
        self.num_channels = config.num_channels
        self.embedder = TFRegNetConvLayer(
            out_channels=config.embedding_size,
            kernel_size=3,
            stride=2,
            activation=config.hidden_act,
            name="embedder",
        )

    def call(self, pixel_values):
        num_channels = shape_list(pixel_values)[1]
        if tf.executing_eagerly() and num_channels != self.num_channels:
            raise ValueError(
                "Make sure that the channel dimension of the pixel values match with the one set in the configuration."
            )

        # When running on CPU, `tf.keras.layers.Conv2D` doesn't support `NCHW` format.
        # So change the input format from `NCHW` to `NHWC`.
        # shape = (batch_size, in_height, in_width, in_channels=num_channels)
        pixel_values = tf.transpose(pixel_values, perm=(0, 2, 3, 1))
        hidden_state = self.embedder(pixel_values)
        return hidden_state
class TFRegNetShortCut(tf.keras.layers.Layer):
    def __init__(self, out_channels, stride=2, **kwargs):
        super().__init__(**kwargs)
        self.convolution = tf.keras.layers.Conv2D(
            filters=out_channels, kernel_size=1, strides=stride, use_bias=False, name="convolution"
        )
        self.normalization = tf.keras.layers.BatchNormalization(epsilon=1e-5, momentum=0.9, name="normalization")

    def call(self, inputs, training=False):
        return self.normalization(self.convolution(inputs), training=training)
class TFRegNetSELayer(tf.keras.layers.Layer):
    def __init__(self, in_channels, reduced_channels, **kwargs):
        super().__init__(**kwargs)
        self.pooler = tf.keras.layers.GlobalAveragePooling2D(keepdims=True, name="pooler")
        self.attention = [
            tf.keras.layers.Conv2D(filters=reduced_channels, kernel_size=1, activation="relu", name="attention.0"),
            tf.keras.layers.Conv2D(filters=in_channels, kernel_size=1, activation="sigmoid", name="attention.2"),
        ]

    def call(self, hidden_state):
        # [batch_size, h, w, num_channels] -> [batch_size, 1, 1, num_channels]
        pooled = self.pooler(hidden_state)
        for layer_module in self.attention:
            pooled = layer_module(pooled)
        hidden_state = hidden_state * pooled
        return hidden_state
class TFRegNetXLayer(tf.keras.layers.Layer):
    def __init__(self, config: RegNetConfig, in_channels, out_channels, stride=1, **kwargs):
        super().__init__(**kwargs)
        should_apply_shortcut = in_channels != out_channels or stride != 1
        groups = max(1, out_channels // config.groups_width)
        self.shortcut = (
            TFRegNetShortCut(out_channels, stride=stride, name="shortcut")
            if should_apply_shortcut
            else tf.keras.layers.Activation("linear", name="shortcut")
        )
        # `self.layers` instead of `self.layer` because that is a reserved argument.
        self.layers = [
            TFRegNetConvLayer(out_channels, kernel_size=1, activation=config.hidden_act, name="layer.0"),
            TFRegNetConvLayer(
                out_channels, stride=stride, groups=groups, activation=config.hidden_act, name="layer.1"
            ),
            TFRegNetConvLayer(out_channels, kernel_size=1, activation=None, name="layer.2"),
        ]
        self.activation = ACT2FN[config.hidden_act]

    def call(self, hidden_state):
        residual = hidden_state
        for layer_module in self.layers:
            hidden_state = layer_module(hidden_state)
        residual = self.shortcut(residual)
        hidden_state += residual
        hidden_state = self.activation(hidden_state)
        return hidden_state
class TFRegNetYLayer(tf.keras.layers.Layer):
    def __init__(self, config: RegNetConfig, in_channels, out_channels, stride=1, **kwargs):
        super().__init__(**kwargs)
        should_apply_shortcut = in_channels != out_channels or stride != 1
        groups = max(1, out_channels // config.groups_width)
        self.shortcut = (
            TFRegNetShortCut(out_channels, stride=stride, name="shortcut")
            if should_apply_shortcut
            else tf.keras.layers.Activation("linear", name="shortcut")
        )
        self.layers = [
            TFRegNetConvLayer(out_channels, kernel_size=1, activation=config.hidden_act, name="layer.0"),
            TFRegNetConvLayer(
                out_channels, stride=stride, groups=groups, activation=config.hidden_act, name="layer.1"
            ),
            TFRegNetSELayer(out_channels, reduced_channels=int(round(in_channels / 4)), name="layer.2"),
            TFRegNetConvLayer(out_channels, kernel_size=1, activation=None, name="layer.3"),
        ]
        self.activation = ACT2FN[config.hidden_act]

    def call(self, hidden_state):
        residual = hidden_state
        for layer_module in self.layers:
            hidden_state = layer_module(hidden_state)
        residual = self.shortcut(residual)
        hidden_state += residual
        hidden_state = self.activation(hidden_state)
        return hidden_state
class TFRegNetStage(tf.keras.layers.Layer):
    def __init__(self, config: RegNetConfig, in_channels, out_channels, stride=2, depth=2, **kwargs):
        super().__init__(**kwargs)
        layer = TFRegNetXLayer if config.layer_type == "x" else TFRegNetYLayer
        self.layers = [
            # downsampling is done in the first layer with stride of 2
            layer(config, in_channels, out_channels, stride=stride, name="layers.0"),
            *[layer(config, out_channels, out_channels, name=f"layers.{i+1}") for i in range(depth - 1)],
        ]

    def call(self, hidden_state):
        for layer_module in self.layers:
            hidden_state = layer_module(hidden_state)
        return hidden_state
class TFRegNetEncoder(tf.keras.layers.Layer):
    def __init__(self, config: RegNetConfig, **kwargs):
        super().__init__(**kwargs)
        self.stages = []
        # based on `downsample_in_first_stage`, the first layer of the first stage may or may not downsample the input
        self.stages.append(
            TFRegNetStage(
                config,
                config.embedding_size,
                config.hidden_sizes[0],
                stride=2 if config.downsample_in_first_stage else 1,
                depth=config.depths[0],
                name="stages.0",
            )
        )
        in_out_channels = zip(config.hidden_sizes, config.hidden_sizes[1:])
        for i, ((in_channels, out_channels), depth) in enumerate(zip(in_out_channels, config.depths[1:])):
            self.stages.append(TFRegNetStage(config, in_channels, out_channels, depth=depth, name=f"stages.{i+1}"))

    def call(self, hidden_state, output_hidden_states=False, return_dict=True):
        hidden_states = () if output_hidden_states else None

        for stage_module in self.stages:
            if output_hidden_states:
                hidden_states = hidden_states + (hidden_state,)
            hidden_state = stage_module(hidden_state)

        if output_hidden_states:
            hidden_states = hidden_states + (hidden_state,)

        if not return_dict:
            return tuple(v for v in [hidden_state, hidden_states] if v is not None)
        return TFBaseModelOutputWithNoAttention(last_hidden_state=hidden_state, hidden_states=hidden_states)
@keras_serializable
class TFRegNetMainLayer(tf.keras.layers.Layer):
    config_class = RegNetConfig

    def __init__(self, config, **kwargs):
        super().__init__(**kwargs)
        self.config = config
        self.embedder = TFRegNetEmbeddings(config, name="embedder")
        self.encoder = TFRegNetEncoder(config, name="encoder")
        self.pooler = tf.keras.layers.GlobalAveragePooling2D(keepdims=True, name="pooler")

    @unpack_inputs
    def call(self, pixel_values, output_hidden_states=None, return_dict=None, training=False):
        output_hidden_states = (
            output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        )
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict

        embedding_output = self.embedder(pixel_values, training=training)
        encoder_outputs = self.encoder(
            embedding_output, output_hidden_states=output_hidden_states, return_dict=return_dict, training=training
        )
        last_hidden_state = encoder_outputs[0]
        pooled_output = self.pooler(last_hidden_state)

        # Change to NCHW output format have uniformity in the modules
        pooled_output = tf.transpose(pooled_output, perm=(0, 3, 1, 2))
        last_hidden_state = tf.transpose(last_hidden_state, perm=(0, 3, 1, 2))

        # Change the other hidden state outputs to NCHW as well
        if output_hidden_states:
            hidden_states = tuple([tf.transpose(h, perm=(0, 3, 1, 2)) for h in encoder_outputs[1]])

        if not return_dict:
            return (last_hidden_state, pooled_output) + encoder_outputs[1:]

        return TFBaseModelOutputWithPoolingAndNoAttention(
            last_hidden_state=last_hidden_state,
            pooler_output=pooled_output,
            hidden_states=hidden_states if output_hidden_states else encoder_outputs.hidden_states,
        )
class TFRegNetPreTrainedModel(TFPreTrainedModel):
    config_class = RegNetConfig
    base_model_prefix = "regnet"
    main_input_name = "pixel_values"

    @property
    def input_signature(self):
        return {"pixel_values": tf.TensorSpec(shape=(None, self.config.num_channels, 224, 224), dtype=tf.float32)}
lowercase__ = R"""
Parameters:
This model is a Tensorflow
[tf.keras.layers.Layer](https://www.tensorflow.org/api_docs/python/tf/keras/layers/Layer) sub-class. Use it as a
regular Tensorflow Module and refer to the Tensorflow documentation for all matter related to general usage and
behavior.
config ([`RegNetConfig`]): Model configuration class with all the parameters of the model.
Initializing with a config file does not load the weights associated with the model, only the
configuration. Check out the [`~TFPreTrainedModel.from_pretrained`] method to load the model weights.
"""
lowercase__ = R"""
Args:
pixel_values (`tf.Tensor` of shape `(batch_size, num_channels, height, width)`):
Pixel values. Pixel values can be obtained using [`AutoImageProcessor`]. See
[`ConveNextImageProcessor.__call__`] for details.
output_hidden_states (`bool`, *optional*):
Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for
more detail.
return_dict (`bool`, *optional*):
Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
"""
@add_start_docstrings(
    "The bare RegNet model outputting raw features without any specific head on top.",
    REGNET_START_DOCSTRING,
)
class TFRegNetModel(TFRegNetPreTrainedModel):
    def __init__(self, config: RegNetConfig, *inputs, **kwargs):
        super().__init__(config, *inputs, **kwargs)
        self.regnet = TFRegNetMainLayer(config, name="regnet")

    @unpack_inputs
    @add_start_docstrings_to_model_forward(REGNET_INPUTS_DOCSTRING)
    @add_code_sample_docstrings(
        checkpoint=_CHECKPOINT_FOR_DOC,
        output_type=TFBaseModelOutputWithPoolingAndNoAttention,
        config_class=_CONFIG_FOR_DOC,
        modality="vision",
        expected_output=_EXPECTED_OUTPUT_SHAPE,
    )
    def call(self, pixel_values, output_hidden_states=None, return_dict=None, training=False):
        output_hidden_states = (
            output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        )
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict

        outputs = self.regnet(
            pixel_values=pixel_values,
            output_hidden_states=output_hidden_states,
            return_dict=return_dict,
            training=training,
        )
        if not return_dict:
            return (outputs[0],) + outputs[1:]

        return TFBaseModelOutputWithPoolingAndNoAttention(
            last_hidden_state=outputs.last_hidden_state,
            pooler_output=outputs.pooler_output,
            hidden_states=outputs.hidden_states,
        )
@add_start_docstrings(
    """
    RegNet Model with an image classification head on top (a linear layer on top of the pooled features), e.g. for
    ImageNet.
    """,
    REGNET_START_DOCSTRING,
)
class TFRegNetForImageClassification(TFRegNetPreTrainedModel, TFSequenceClassificationLoss):
    def __init__(self, config: RegNetConfig, *inputs, **kwargs):
        super().__init__(config, *inputs, **kwargs)
        self.num_labels = config.num_labels
        self.regnet = TFRegNetMainLayer(config, name="regnet")
        # classification head
        self.classifier = [
            tf.keras.layers.Flatten(),
            tf.keras.layers.Dense(config.num_labels, name="classifier.1") if config.num_labels > 0 else tf.identity,
        ]

    @unpack_inputs
    @add_start_docstrings_to_model_forward(REGNET_INPUTS_DOCSTRING)
    @add_code_sample_docstrings(
        checkpoint=_IMAGE_CLASS_CHECKPOINT,
        output_type=TFSequenceClassifierOutput,
        config_class=_CONFIG_FOR_DOC,
        expected_output=_IMAGE_CLASS_EXPECTED_OUTPUT,
    )
    def call(self, pixel_values=None, labels=None, output_hidden_states=None, return_dict=None, training=False):
        output_hidden_states = (
            output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        )
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict

        outputs = self.regnet(
            pixel_values, output_hidden_states=output_hidden_states, return_dict=return_dict, training=training
        )
        pooled_output = outputs.pooler_output if return_dict else outputs[1]

        flattened_output = self.classifier[0](pooled_output)
        logits = self.classifier[1](flattened_output)

        loss = None if labels is None else self.hf_compute_loss(labels=labels, logits=logits)

        if not return_dict:
            output = (logits,) + outputs[2:]
            return ((loss,) + output) if loss is not None else output

        return TFSequenceClassifierOutput(loss=loss, logits=logits, hidden_states=outputs.hidden_states)
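
# Minimal usage sketch (added; assumes TF weights for this checkpoint are
# published on the Hub):
#
#   from transformers import AutoImageProcessor, TFRegNetForImageClassification
#
#   processor = AutoImageProcessor.from_pretrained("facebook/regnet-y-040")
#   model = TFRegNetForImageClassification.from_pretrained("facebook/regnet-y-040")
#   inputs = processor(images=image, return_tensors="tf")  # `image` is any PIL image
#   predicted_label = int(tf.math.argmax(model(**inputs).logits, axis=-1))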
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_torch_available,
)
_import_structure = {
"""configuration_mega""": ["""MEGA_PRETRAINED_CONFIG_ARCHIVE_MAP""", """MegaConfig""", """MegaOnnxConfig"""],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_mega"] = [
"""MEGA_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""MegaForCausalLM""",
"""MegaForMaskedLM""",
"""MegaForMultipleChoice""",
"""MegaForQuestionAnswering""",
"""MegaForSequenceClassification""",
"""MegaForTokenClassification""",
"""MegaModel""",
"""MegaPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_mega import MEGA_PRETRAINED_CONFIG_ARCHIVE_MAP, MegaConfig, MegaOnnxConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_mega import (
MEGA_PRETRAINED_MODEL_ARCHIVE_LIST,
MegaForCausalLM,
MegaForMaskedLM,
MegaForMultipleChoice,
MegaForQuestionAnswering,
MegaForSequenceClassification,
MegaForTokenClassification,
MegaModel,
MegaPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
'''simple docstring'''
from collections.abc import Sequence
from queue import Queue
class SegmentTreeNode:
    def __init__(self, start, end, val, left=None, right=None):
        self.start = start
        self.end = end
        self.val = val
        self.mid = (start + end) // 2
        self.left = left
        self.right = right
def __repr__( self ):
"""simple docstring"""
return F'''SegmentTreeNode(start={self.start}, end={self.end}, val={self.val})'''
class SegmentTree:
    def __init__(self, collection: Sequence, function):
        self.collection = collection
        self.fn = function
        self.root = self._build_tree(0, len(collection) - 1) if self.collection else None
    def update(self, i, val):
        """Set the element at position `i` to `val` in O(log N) time."""
        self._update_tree(self.root, i, val)

    def query_range(self, i, j):
        """Apply the tree's function over the inclusive range [i, j] in O(log N) time."""
        return self._query_range(self.root, i, j)
    def _build_tree(self, start, end):
        if start == end:
            return SegmentTreeNode(start, end, self.collection[start])
        mid = (start + end) // 2
        left = self._build_tree(start, mid)
        right = self._build_tree(mid + 1, end)
        return SegmentTreeNode(start, end, self.fn(left.val, right.val), left, right)
    def _update_tree(self, node, i, val):
        if node.start == i and node.end == i:
            node.val = val
            return
        if i <= node.mid:
            self._update_tree(node.left, i, val)
        else:
            self._update_tree(node.right, i, val)
        node.val = self.fn(node.left.val, node.right.val)
    def _query_range(self, node, i, j):
        if node.start == i and node.end == j:
            return node.val
        if i <= node.mid:
            if j <= node.mid:
                # range in left child tree
                return self._query_range(node.left, i, j)
            else:
                # range in left child tree and right child tree
                return self.fn(
                    self._query_range(node.left, i, node.mid),
                    self._query_range(node.right, node.mid + 1, j),
                )
        else:
            # range in right child tree
            return self._query_range(node.right, i, j)
    def traverse(self):
        if self.root is not None:
            queue = Queue()
            queue.put(self.root)
            while not queue.empty():
                node = queue.get()
                yield node
                if node.left is not None:
                    queue.put(node.left)
                if node.right is not None:
                    queue.put(node.right)
if __name__ == "__main__":
import operator
for fn in [operator.add, max, min]:
print("*" * 5_0)
        arr = SegmentTree([2, 1, 5, 3, 4], fn)
for node in arr.traverse():
print(node)
print()
arr.update(1, 5)
for node in arr.traverse():
print(node)
print()
print(arr.query_range(3, 4)) # 7
print(arr.query_range(2, 2)) # 5
print(arr.query_range(1, 3)) # 13
print()
'''simple docstring'''
import json
import os
from pathlib import Path
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple, Union
import sentencepiece
from ...tokenization_utils import BatchEncoding, PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)
SPIECE_UNDERLINE = "▁"
VOCAB_FILES_NAMES = {
"vocab_file": "vocab.json",
"spm_file": "sentencepiece.bpe.model",
"tokenizer_config_file": "tokenizer_config.json",
}
PRETRAINED_VOCAB_FILES_MAP = {
"vocab_file": {
"facebook/m2m100_418M": "https://huggingface.co/facebook/m2m100_418M/resolve/main/vocab.json",
"facebook/m2m100_1.2B": "https://huggingface.co/facebook/m2m100_1.2B/resolve/main/vocab.json",
},
"spm_file": {
"facebook/m2m100_418M": "https://huggingface.co/facebook/m2m100_418M/resolve/main/sentencepiece.bpe.model",
"facebook/m2m100_1.2B": "https://huggingface.co/facebook/m2m100_1.2B/resolve/main/sentencepiece.bpe.model",
},
"tokenizer_config_file": {
"facebook/m2m100_418M": "https://huggingface.co/facebook/m2m100_418M/resolve/main/tokenizer_config.json",
"facebook/m2m100_1.2B": "https://huggingface.co/facebook/m2m100_1.2B/resolve/main/tokenizer_config.json",
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "facebook/m2m100_418M": 1024,
}
# fmt: off
FAIRSEQ_LANGUAGE_CODES = {
"m2m100": ["af", "am", "ar", "ast", "az", "ba", "be", "bg", "bn", "br", "bs", "ca", "ceb", "cs", "cy", "da", "de", "el", "en", "es", "et", "fa", "ff", "fi", "fr", "fy", "ga", "gd", "gl", "gu", "ha", "he", "hi", "hr", "ht", "hu", "hy", "id", "ig", "ilo", "is", "it", "ja", "jv", "ka", "kk", "km", "kn", "ko", "lb", "lg", "ln", "lo", "lt", "lv", "mg", "mk", "ml", "mn", "mr", "ms", "my", "ne", "nl", "no", "ns", "oc", "or", "pa", "pl", "ps", "pt", "ro", "ru", "sd", "si", "sk", "sl", "so", "sq", "sr", "ss", "su", "sv", "sw", "ta", "th", "tl", "tn", "tr", "uk", "ur", "uz", "vi", "wo", "xh", "yi", "yo", "zh", "zu"],
"wmt21": ["en", "ha", "is", "ja", "cs", "ru", "zh", "de"]
}
class M2M100Tokenizer(PreTrainedTokenizer):
    vocab_files_names = VOCAB_FILES_NAMES
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    model_input_names = ["input_ids", "attention_mask"]

    prefix_tokens: List[int] = []
    suffix_tokens: List[int] = []
    def __init__(
        self,
        vocab_file,
        spm_file,
        src_lang=None,
        tgt_lang=None,
        bos_token="<s>",
        eos_token="</s>",
        sep_token="</s>",
        pad_token="<pad>",
        unk_token="<unk>",
        language_codes="m2m100",
        sp_model_kwargs: Optional[Dict[str, Any]] = None,
        num_madeup_words=8,
        **kwargs,
    ) -> None:
        self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs

        self.language_codes = language_codes
        fairseq_language_code = FAIRSEQ_LANGUAGE_CODES[language_codes]
        self.lang_code_to_token = {lang_code: f"__{lang_code}__" for lang_code in fairseq_language_code}

        kwargs["additional_special_tokens"] = kwargs.get("additional_special_tokens", [])
        kwargs["additional_special_tokens"] += [
            self.get_lang_token(lang_code)
            for lang_code in fairseq_language_code
            if self.get_lang_token(lang_code) not in kwargs["additional_special_tokens"]
        ]

        super().__init__(
            src_lang=src_lang,
            tgt_lang=tgt_lang,
            bos_token=bos_token,
            eos_token=eos_token,
            sep_token=sep_token,
            unk_token=unk_token,
            pad_token=pad_token,
            language_codes=language_codes,
            sp_model_kwargs=self.sp_model_kwargs,
            num_madeup_words=num_madeup_words,
            **kwargs,
        )

        self.vocab_file = vocab_file
        self.encoder = load_json(vocab_file)
        self.decoder = {v: k for k, v in self.encoder.items()}
        self.spm_file = spm_file
        self.sp_model = load_spm(spm_file, self.sp_model_kwargs)

        self.encoder_size = len(self.encoder)

        self.lang_token_to_id = {
            self.get_lang_token(lang_code): self.encoder_size + i for i, lang_code in enumerate(fairseq_language_code)
        }
        self.lang_code_to_id = {lang_code: self.encoder_size + i for i, lang_code in enumerate(fairseq_language_code)}
        self.id_to_lang_token = {v: k for k, v in self.lang_token_to_id.items()}

        self._src_lang = src_lang if src_lang is not None else "en"
        self.tgt_lang = tgt_lang
        self.cur_lang_id = self.get_lang_id(self._src_lang)
        self.set_src_lang_special_tokens(self._src_lang)

        self.num_madeup_words = num_madeup_words
    @property
    def vocab_size(self) -> int:
        return len(self.encoder) + len(self.lang_token_to_id)

    @property
    def src_lang(self) -> str:
        return self._src_lang

    @src_lang.setter
    def src_lang(self, new_src_lang: str) -> None:
        self._src_lang = new_src_lang
        self.set_src_lang_special_tokens(self._src_lang)
    def _tokenize(self, text: str) -> List[str]:
        return self.sp_model.encode(text, out_type=str)

    def _convert_token_to_id(self, token):
        if token in self.lang_token_to_id:
            return self.lang_token_to_id[token]
        return self.encoder.get(token, self.encoder[self.unk_token])

    def _convert_id_to_token(self, index: int) -> str:
        """Converts an index (integer) into a token (str) using the decoder."""
        if index in self.id_to_lang_token:
            return self.id_to_lang_token[index]
        return self.decoder.get(index, self.unk_token)

    def convert_tokens_to_string(self, tokens: List[str]) -> str:
        """Converts a sequence of sub-word tokens into a single string."""
        current_sub_tokens = []
        out_string = ""
        for token in tokens:
            # make sure that special tokens are not decoded using sentencepiece model
            if token in self.all_special_tokens:
                out_string += self.sp_model.decode(current_sub_tokens) + token
                current_sub_tokens = []
            else:
                current_sub_tokens.append(token)
        out_string += self.sp_model.decode(current_sub_tokens)
        return out_string.strip()
    def get_special_tokens_mask(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None, already_has_special_tokens: bool = False
    ) -> List[int]:
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True
            )

        prefix_ones = [1] * len(self.prefix_tokens)
        suffix_ones = [1] * len(self.suffix_tokens)
        if token_ids_1 is None:
            return prefix_ones + ([0] * len(token_ids_0)) + suffix_ones
        return prefix_ones + ([0] * len(token_ids_0)) + ([0] * len(token_ids_1)) + suffix_ones

    def build_inputs_with_special_tokens(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        if token_ids_1 is None:
            return self.prefix_tokens + token_ids_0 + self.suffix_tokens
        # We don't expect to process pairs, but leave the pair logic for API consistency
        return self.prefix_tokens + token_ids_0 + token_ids_1 + self.suffix_tokens
    def get_vocab(self) -> Dict:
        vocab = {self.convert_ids_to_tokens(i): i for i in range(self.vocab_size)}
        vocab.update(self.added_tokens_encoder)
        return vocab

    def __getstate__(self) -> Dict:
        state = self.__dict__.copy()
        state["sp_model"] = None
        return state

    def __setstate__(self, d: Dict) -> None:
        self.__dict__ = d

        # for backward compatibility
        if not hasattr(self, "sp_model_kwargs"):
            self.sp_model_kwargs = {}

        self.sp_model = load_spm(self.spm_file, self.sp_model_kwargs)
    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        save_dir = Path(save_directory)
        if not save_dir.is_dir():
            raise OSError(f"{save_directory} should be a directory")
        vocab_save_path = save_dir / (
            (filename_prefix + "-" if filename_prefix else "") + self.vocab_files_names["vocab_file"]
        )
        spm_save_path = save_dir / (
            (filename_prefix + "-" if filename_prefix else "") + self.vocab_files_names["spm_file"]
        )

        save_json(self.encoder, vocab_save_path)

        if os.path.abspath(self.spm_file) != os.path.abspath(spm_save_path) and os.path.isfile(self.spm_file):
            copyfile(self.spm_file, spm_save_path)
        elif not os.path.isfile(self.spm_file):
            with open(spm_save_path, "wb") as fi:
                content_spiece_model = self.sp_model.serialized_model_proto()
                fi.write(content_spiece_model)

        return (str(vocab_save_path), str(spm_save_path))
def _snake_case ( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = "en" , __SCREAMING_SNAKE_CASE = None , __SCREAMING_SNAKE_CASE = "ro" , **__SCREAMING_SNAKE_CASE , ):
"""simple docstring"""
lowercase_ : Optional[Any] = src_lang
lowercase_ : List[str] = tgt_lang
self.set_src_lang_special_tokens(self.src_lang )
return super().prepare_seqaseq_batch(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE )
def _snake_case ( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE ):
"""simple docstring"""
if src_lang is None or tgt_lang is None:
raise ValueError('''Translation requires a `src_lang` and a `tgt_lang` for this model''' )
lowercase_ : Tuple = src_lang
lowercase_ : Any = self(__SCREAMING_SNAKE_CASE , add_special_tokens=__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE )
lowercase_ : List[Any] = self.get_lang_id(__SCREAMING_SNAKE_CASE )
lowercase_ : Union[str, Any] = tgt_lang_id
return inputs
    def _switch_to_input_mode(self):
        self.set_src_lang_special_tokens(self.src_lang)

    def _switch_to_target_mode(self):
        self.set_tgt_lang_special_tokens(self.tgt_lang)

    def set_src_lang_special_tokens(self, src_lang: str) -> None:
        """Reset the special tokens to the source language setting."""
        lang_token = self.get_lang_token(src_lang)
        self.cur_lang_id = self.lang_token_to_id[lang_token]
        self.prefix_tokens = [self.cur_lang_id]
        self.suffix_tokens = [self.eos_token_id]

    def set_tgt_lang_special_tokens(self, tgt_lang: str) -> None:
        """Reset the special tokens to the target language setting."""
        lang_token = self.get_lang_token(tgt_lang)
        self.cur_lang_id = self.lang_token_to_id[lang_token]
        self.prefix_tokens = [self.cur_lang_id]
        self.suffix_tokens = [self.eos_token_id]

    def get_lang_token(self, lang: str) -> str:
        return self.lang_code_to_token[lang]

    def get_lang_id(self, lang: str) -> int:
        lang_token = self.get_lang_token(lang)
        return self.lang_token_to_id[lang_token]
def load_spm(path: str, sp_model_kwargs: Dict[str, Any]) -> sentencepiece.SentencePieceProcessor:
    spm = sentencepiece.SentencePieceProcessor(**sp_model_kwargs)
    spm.Load(str(path))
    return spm


def load_json(path: str) -> Union[Dict, List]:
    with open(path, "r") as f:
        return json.load(f)


def save_json(data, path: str) -> None:
    with open(path, "w") as f:
        json.dump(data, f, indent=2)
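
# Usage sketch (added; downloads checkpoint files from the Hub):
#
#   tokenizer = M2M100Tokenizer.from_pretrained("facebook/m2m100_418M", src_lang="en", tgt_lang="fr")
#   model_inputs = tokenizer("Hello world", return_tensors="pt")
#   # generation should then force the target language token:
#   #   forced_bos_token_id=tokenizer.get_lang_id("fr")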
'''simple docstring'''
import argparse
import requests
import torch
# pip3 install salesforce-lavis
# I'm actually installing a slightly modified version: pip3 install git+https://github.com/nielsrogge/LAVIS.git@fix_lavis_float32 (there's also the fix_lavis branch)
# also note: to convert Vicuna checkpoints, we had to include /home/niels/python_projects/checkpoints/FastChat/vicuna-7b in lavis/configs/models/blip2/blip2_instruct_vicuna7b.yaml
# same for Vicuna-13b
from lavis.models import load_model_and_preprocess
from PIL import Image
from transformers import (
AutoTokenizer,
BlipImageProcessor,
InstructBlipConfig,
InstructBlipForConditionalGeneration,
InstructBlipProcessor,
InstructBlipQFormerConfig,
InstructBlipVisionConfig,
LlamaConfig,
LlamaTokenizerFast,
    T5Config,
    T5TokenizerFast,
)
from transformers.utils.constants import OPENAI_CLIP_MEAN, OPENAI_CLIP_STD
def load_demo_image():
    url = "https://raw.githubusercontent.com/salesforce/LAVIS/main/docs/_static/Confusing-Pictures.jpg"
    image = Image.open(requests.get(url, stream=True).raw).convert("RGB")
    return image
def create_rename_keys(config):
    rename_keys = []
# fmt: off
# vision encoder
rename_keys.append(("visual_encoder.cls_token", "vision_model.embeddings.class_embedding") )
rename_keys.append(("visual_encoder.pos_embed", "vision_model.embeddings.position_embedding") )
rename_keys.append(("visual_encoder.patch_embed.proj.weight", "vision_model.embeddings.patch_embedding.weight") )
rename_keys.append(("visual_encoder.patch_embed.proj.bias", "vision_model.embeddings.patch_embedding.bias") )
rename_keys.append(("ln_vision.weight", "vision_model.post_layernorm.weight") )
rename_keys.append(("ln_vision.bias", "vision_model.post_layernorm.bias") )
for i in range(config.vision_config.num_hidden_layers ):
rename_keys.append((F"visual_encoder.blocks.{i}.norm1.weight", F"vision_model.encoder.layers.{i}.layer_norm1.weight") )
rename_keys.append((F"visual_encoder.blocks.{i}.norm1.bias", F"vision_model.encoder.layers.{i}.layer_norm1.bias") )
rename_keys.append((F"visual_encoder.blocks.{i}.norm2.weight", F"vision_model.encoder.layers.{i}.layer_norm2.weight") )
rename_keys.append((F"visual_encoder.blocks.{i}.norm2.bias", F"vision_model.encoder.layers.{i}.layer_norm2.bias") )
rename_keys.append((F"visual_encoder.blocks.{i}.attn.qkv.weight", F"vision_model.encoder.layers.{i}.self_attn.qkv.weight") )
rename_keys.append((F"visual_encoder.blocks.{i}.attn.proj.weight", F"vision_model.encoder.layers.{i}.self_attn.projection.weight",) )
rename_keys.append((F"visual_encoder.blocks.{i}.attn.proj.bias", F"vision_model.encoder.layers.{i}.self_attn.projection.bias") )
rename_keys.append((F"visual_encoder.blocks.{i}.mlp.fc1.weight", F"vision_model.encoder.layers.{i}.mlp.fc1.weight") )
rename_keys.append((F"visual_encoder.blocks.{i}.mlp.fc1.bias", F"vision_model.encoder.layers.{i}.mlp.fc1.bias") )
rename_keys.append((F"visual_encoder.blocks.{i}.mlp.fc2.weight", F"vision_model.encoder.layers.{i}.mlp.fc2.weight") )
rename_keys.append((F"visual_encoder.blocks.{i}.mlp.fc2.bias", F"vision_model.encoder.layers.{i}.mlp.fc2.bias") )
# QFormer
rename_keys.append(("Qformer.bert.embeddings.LayerNorm.weight", "qformer.embeddings.layernorm.weight") )
rename_keys.append(("Qformer.bert.embeddings.LayerNorm.bias", "qformer.embeddings.layernorm.bias") )
# fmt: on
return rename_keys
def rename_key(dct, old, new):
    val = dct.pop(old)
    dct[new] = val
def read_in_q_v_bias(state_dict, config):
    for i in range(config.vision_config.num_hidden_layers):
        # read in original q and v biases
        q_bias = state_dict.pop(f"visual_encoder.blocks.{i}.attn.q_bias")
        v_bias = state_dict.pop(f"visual_encoder.blocks.{i}.attn.v_bias")

        # next, set bias in the state dict
        qkv_bias = torch.cat((q_bias, torch.zeros_like(v_bias, requires_grad=False), v_bias))
        state_dict[f"visual_encoder.blocks.{i}.attn.qkv.bias"] = qkv_bias
def get_blip2_config(model_name):
    image_size = 364 if "coco" in model_name else 224
    vision_config = InstructBlipVisionConfig(image_size=image_size).to_dict()

    # make sure the models have proper bos_token_id and eos_token_id set (important for generation)
    # seems like flan-T5 models don't have bos_token_id properly set?
    if "t5-xl" in model_name:
        text_config = T5Config.from_pretrained("google/flan-t5-xl", dense_act_fn="gelu", bos_token_id=1).to_dict()
    elif "t5-xxl" in model_name:
        text_config = T5Config.from_pretrained("google/flan-t5-xxl", dense_act_fn="gelu", bos_token_id=1).to_dict()
    elif "vicuna-7b" in model_name:
        text_config = LlamaConfig.from_pretrained("decapoda-research/llama-7b-hf", vocab_size=32001).to_dict()
    elif "vicuna-13b" in model_name:
        text_config = LlamaConfig.from_pretrained("decapoda-research/llama-13b-hf", vocab_size=32001).to_dict()
    else:
        raise ValueError("Model name not supported")

    # the authors add one special "[DEC]" token to the vocab of Q-Former, hence vocab size = 30522 + 1
    qformer_config = InstructBlipQFormerConfig(vocab_size=30523).to_dict()
    config = InstructBlipConfig(vision_config=vision_config, text_config=text_config, qformer_config=qformer_config)

    return config, image_size
@torch.no_grad()
def convert_blip2_checkpoint(model_name, pytorch_dump_folder_path=None, push_to_hub=False):
    """Copy/paste/tweak the original model's weights into the Transformers design."""
    qformer_tokenizer = AutoTokenizer.from_pretrained("bert-base-uncased", truncation_side="left")
    qformer_tokenizer.add_special_tokens({"bos_token": "[DEC]"})

    if "t5" in model_name:
        tokenizer = T5TokenizerFast.from_pretrained("google/flan-t5-xl", truncation_side="left")
    elif "vicuna" in model_name:
        # the following was used in the original implementation:
        # tokenizer = LlamaTokenizer.from_pretrained("huggyllama/llama-7b", use_fast=False, truncation_side="left")
        # tokenizer.add_special_tokens({"pad_token": "[PAD]"})
        # tokenizer.add_special_tokens({"bos_token": "</s>"})
        # tokenizer.add_special_tokens({"eos_token": "</s>"})
        # tokenizer.add_special_tokens({"unk_token": "</s>"})
        tokenizer = LlamaTokenizerFast.from_pretrained(
            "huggyllama/llama-7b", truncation_side="left", bos_token="</s>", unk_token="</s>"
        )
        tokenizer.add_special_tokens({"pad_token": "[PAD]"})

    config, image_size = get_blip2_config(model_name)
    hf_model = InstructBlipForConditionalGeneration(config).eval()

    model_name_to_original = {
        "instructblip-vicuna-7b": ("blip2_vicuna_instruct", "vicuna7b"),
        "instructblip-vicuna-13b": ("blip2_vicuna_instruct", "vicuna13b"),
        "instructblip-flan-t5-xl": ("blip2_t5_instruct", "flant5xl"),
        "instructblip-flan-t5-xxl": ("blip2_t5_instruct", "flant5xxl"),
    }
    name, model_type = model_name_to_original[model_name]

    # load original model
    print("Loading original model...")
    hf_model_device = "cuda:1" if torch.cuda.is_available() else "cpu"
    lavis_device = "cuda:2" if torch.cuda.is_available() else "cpu"
    original_model, vis_processors, _ = load_model_and_preprocess(
        name=name, model_type=model_type, is_eval=True, device=lavis_device
    )
    original_model.eval()
    print("Done!")

    # update state dict keys
    state_dict = original_model.state_dict()
    rename_keys = create_rename_keys(config)
    for src, dest in rename_keys:
        rename_key(state_dict, src, dest)

    # some keys can be renamed efficiently
    for key, val in state_dict.copy().items():
        val = state_dict.pop(key)
        if key.startswith("Qformer.bert"):
            key = key.replace("Qformer.bert", "qformer")
        if "attention.self" in key:
            key = key.replace("self", "attention")
        if "llm_proj" in key:
            key = key.replace("llm_proj", "language_projection")
        if "t5_proj" in key:
            key = key.replace("t5_proj", "language_projection")
        if key.startswith("llm_model"):
            key = key.replace("llm_model", "language_model")
        if key.startswith("t5"):
            key = key.replace("t5", "language")
        state_dict[key] = val

    # read in qv biases
    read_in_q_v_bias(state_dict, config)

    # note: weights get loaded in torch.float32 by default
    hf_model.load_state_dict(state_dict, strict=True)

    image = load_demo_image()
    prompt = "What is unusual about this image?"

    # create processor
    image_processor = BlipImageProcessor(
        size={"height": image_size, "width": image_size}, image_mean=OPENAI_CLIP_MEAN, image_std=OPENAI_CLIP_STD
    )
    processor = InstructBlipProcessor(
        image_processor=image_processor,
        tokenizer=tokenizer,
        qformer_tokenizer=qformer_tokenizer,
    )
    inputs = processor(images=image, text=prompt, return_tensors="pt").to(hf_model_device)

    # make sure processor creates exact same pixel values
    original_pixel_values = vis_processors["eval"](image).unsqueeze(0).to(lavis_device)
    pixel_values = inputs.pixel_values
    assert torch.allclose(original_pixel_values.to(pixel_values.device), pixel_values)

    original_model.to(lavis_device)
    hf_model.to(hf_model_device)
    with torch.no_grad():
        if "vicuna" in model_name:
            original_logits = original_model({"image": original_pixel_values, "text_input": [prompt]}).logits
            logits = hf_model(**inputs).logits
        else:
            original_logits = original_model(
                {"image": original_pixel_values, "text_input": [prompt], "text_output": ["\n"]}
            ).logits
            label_input_ids = tokenizer("\n", return_tensors="pt").input_ids.to(hf_model_device)
            labels = label_input_ids.masked_fill(label_input_ids == tokenizer.pad_token_id, -100)
            logits = hf_model(**inputs, labels=labels).logits

    print("First values of original logits:", original_logits[0, :3, :3])
    print("First values of HF logits:", logits[0, :3, :3])

    # assert values
    assert original_logits.shape == logits.shape
    atol = 1e-4 if "vicuna" in model_name else 1e-5
    assert torch.allclose(original_logits.to(logits.device), logits, atol=atol)
    print("Looks ok!")

    print("Generating with original model...")
    original_outputs = original_model.generate({"image": original_pixel_values, "prompt": prompt}, num_beams=5)

    # important: we need to cast the weights of the HF model to the appropriate type
    print("Generating with HF model...")
    outputs = hf_model.generate(
        **inputs,
        do_sample=False,
        num_beams=5,
        max_length=256,
        min_length=1,
        top_p=0.9,
        repetition_penalty=1.5,
        length_penalty=1.0,
        temperature=1,
    )
    if "vicuna" in model_name:
        # convert output id 0 to 2 (eos_token_id)
        # TODO add this in the generate method?
        outputs[outputs == 0] = 2
    print("Original generation:", original_outputs)
    output_text = processor.batch_decode(outputs, skip_special_tokens=True)
    output_text = [text.strip() for text in output_text]
    print("HF generation:", output_text)

    if pytorch_dump_folder_path is not None:
        processor.save_pretrained(pytorch_dump_folder_path)
        hf_model.save_pretrained(pytorch_dump_folder_path)

    if push_to_hub:
        processor.push_to_hub(f"Salesforce/{model_name}")
        hf_model.push_to_hub(f"Salesforce/{model_name}")
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    choices = [
"instructblip-vicuna-7b",
"instructblip-vicuna-13b",
"instructblip-flan-t5-xl",
"instructblip-flan-t5-xxl",
]
parser.add_argument(
"--model_name",
default="instructblip-flan-t5-xl",
choices=choices,
type=str,
help="Path to hf config.json of model to convert",
)
parser.add_argument("--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.")
parser.add_argument(
"--push_to_hub",
action="store_true",
help="Whether to push the model and processor to the hub after converting",
)
    args = parser.parse_args()
    convert_blip2_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
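
# After conversion, the dump folder loads like any Hub checkpoint. Sketch (added;
# the local path is illustrative):
#
#   processor = InstructBlipProcessor.from_pretrained("./instructblip-flan-t5-xl")
#   model = InstructBlipForConditionalGeneration.from_pretrained("./instructblip-flan-t5-xl")
#   inputs = processor(images=load_demo_image(), text="Describe the picture.", return_tensors="pt")
#   generated_ids = model.generate(**inputs, max_new_tokens=64)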
'''simple docstring'''
# Copyright 2022 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import os
import platform
import numpy as np
import psutil
import torch
from accelerate import __version__ as version
from accelerate.commands.config import default_config_file, load_config_from_file
from ..utils import is_npu_available, is_xpu_available
def env_command_parser(subparsers=None):
    if subparsers is not None:
        parser = subparsers.add_parser("env")
    else:
        parser = argparse.ArgumentParser("Accelerate env command")

    parser.add_argument(
        "--config_file", default=None, help="The config file to use for the default values in the launching script."
    )

    if subparsers is not None:
        parser.set_defaults(func=env_command)
    return parser


def env_command(args):
    pt_version = torch.__version__
    pt_cuda_available = torch.cuda.is_available()
    pt_xpu_available = is_xpu_available()
    pt_npu_available = is_npu_available()

    accelerate_config = "Not found"
    # Get the default from the config file.
    if args.config_file is not None or os.path.isfile(default_config_file):
        accelerate_config = load_config_from_file(args.config_file).to_dict()

    info = {
        "`Accelerate` version": version,
        "Platform": platform.platform(),
        "Python version": platform.python_version(),
        "Numpy version": np.__version__,
        "PyTorch version (GPU?)": f"{pt_version} ({pt_cuda_available})",
        "PyTorch XPU available": str(pt_xpu_available),
        "PyTorch NPU available": str(pt_npu_available),
        "System RAM": f"{psutil.virtual_memory().total / 1024 ** 3:.2f} GB",
    }
    if pt_cuda_available:
        info["GPU type"] = torch.cuda.get_device_name()

    print("\nCopy-and-paste the text below in your GitHub issue\n")
    print("\n".join([f"- {prop}: {val}" for prop, val in info.items()]))

    print("- `Accelerate` default config:" if args.config_file is None else "- `Accelerate` config passed:")
    accelerate_config_str = (
        "\n".join([f"\t- {prop}: {val}" for prop, val in accelerate_config.items()])
        if isinstance(accelerate_config, dict)
        else f"\t{accelerate_config}"
    )
    print(accelerate_config_str)

    info["`Accelerate` configs"] = accelerate_config

    return info


def main():
    parser = env_command_parser()
    args = parser.parse_args()
    env_command(args)
    return 0
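
# Usage note (not part of the original module): this module backs the `accelerate env`
# CLI command, but the same report can also be produced programmatically, e.g.:
#
#   parser = env_command_parser()
#   info = env_command(parser.parse_args([]))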
if __name__ == "__main__":
raise SystemExit(main())
from __future__ import annotations
__author__ = "Muhammad Umer Farooq"
__license__ = "MIT"
__version__ = "1.0.0"
__maintainer__ = "Muhammad Umer Farooq"
__email__ = "contact@muhammadumerfarooq.me"
__status__ = "Alpha"

import re
from html.parser import HTMLParser
from urllib import parse

import requests


class Parser(HTMLParser):
    def __init__(self, domain: str) -> None:
        super().__init__()
        self.urls: list[str] = []
        self.domain = domain

    def handle_starttag(self, tag: str, attrs: list[tuple[str, str | None]]) -> None:
        """Collect the URL of every anchor tag encountered while parsing."""
        # Only parse the 'anchor' tag.
        if tag == "a":
            # Check the list of defined attributes.
            for name, value in attrs:
                # If href is defined, and not empty nor # print it.
                if name == "href" and value != "#" and value != "":
                    # If not already in urls.
                    if value not in self.urls:
                        url = parse.urljoin(self.domain, value)
                        self.urls.append(url)


# Get main domain name (example.com)
def get_domain_name(url: str) -> str:
    """Return the main domain name of the given URL."""
    return ".".join(get_sub_domain_name(url).split(".")[-2:])


# Get sub domain name (sub.example.com)
def get_sub_domain_name(url: str) -> str:
    """Return the sub-domain (netloc) of the given URL."""
    return parse.urlparse(url).netloc


def emails_from_url(url: str = "https://github.com") -> list[str]:
    """Scrape the page at `url` and the pages it links to for email addresses."""
    # Get the base domain from the url
    domain = get_domain_name(url)

    # Initialize the parser
    parser = Parser(domain)

    try:
        # Open URL
        r = requests.get(url)

        # pass the raw HTML to the parser to get links
        parser.feed(r.text)

        # Get links and loop through
        valid_emails = set()
        for link in parser.urls:
            # open URL.
            try:
                read = requests.get(link)
                # Get the valid email.
                emails = re.findall("[a-zA-Z0-9]+@" + domain, read.text)
                # If not in list then append it.
                for email in emails:
                    valid_emails.add(email)
            except ValueError:
                pass
    except ValueError:
        raise SystemExit(1)

    # Finally return a sorted list of email addresses with no duplicates.
    return sorted(valid_emails)


if __name__ == "__main__":
    emails = emails_from_url("https://github.com")
    print(f"{len(emails)} emails found:")
    print("\n".join(sorted(emails)))
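
# Note (not in the original file): the regex above only matches a bare local part
# followed immediately by the main domain. A sketch of a slightly more permissive
# pattern (still not RFC 5322 compliant) would be:
#
#   emails = re.findall(r"[a-zA-Z0-9._%+-]+@" + re.escape(domain), read.text)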
import math
def perfect_square(num: int) -> bool:
    """
    Check if a number is a perfect square using math.sqrt.

    >>> perfect_square(9)
    True
    >>> perfect_square(10)
    False
    """
    return math.sqrt(num) * math.sqrt(num) == num


def perfect_square_binary_search(n: int) -> bool:
    """
    Check if a number is a perfect square using binary search.

    >>> perfect_square_binary_search(16)
    True
    >>> perfect_square_binary_search(15)
    False
    """
    left = 0
    right = n
    while left <= right:
        mid = (left + right) // 2
        if mid**2 == n:
            return True
        elif mid**2 > n:
            right = mid - 1
        else:
            left = mid + 1
    return False


if __name__ == "__main__":
    import doctest

    doctest.testmod()
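
# Note (not in the original file): the math.sqrt variant is subject to floating point
# rounding for large integers, while the binary search variant uses exact integer
# arithmetic, e.g.:
#
#   n = (10**8 + 1) ** 2
#   perfect_square(n)                # may return False due to float rounding
#   perfect_square_binary_search(n)  # True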
import argparse
import io
import requests
import torch
from omegaconf import OmegaConf
from diffusers import AutoencoderKL
from diffusers.pipelines.stable_diffusion.convert_from_ckpt import (
assign_to_checkpoint,
conv_attn_to_linear,
create_vae_diffusers_config,
renew_vae_attention_paths,
renew_vae_resnet_paths,
)
def custom_convert_ldm_vae_checkpoint(checkpoint, config):
    vae_state_dict = checkpoint

    new_checkpoint = {}

    new_checkpoint["encoder.conv_in.weight"] = vae_state_dict["encoder.conv_in.weight"]
    new_checkpoint["encoder.conv_in.bias"] = vae_state_dict["encoder.conv_in.bias"]
    new_checkpoint["encoder.conv_out.weight"] = vae_state_dict["encoder.conv_out.weight"]
    new_checkpoint["encoder.conv_out.bias"] = vae_state_dict["encoder.conv_out.bias"]
    new_checkpoint["encoder.conv_norm_out.weight"] = vae_state_dict["encoder.norm_out.weight"]
    new_checkpoint["encoder.conv_norm_out.bias"] = vae_state_dict["encoder.norm_out.bias"]

    new_checkpoint["decoder.conv_in.weight"] = vae_state_dict["decoder.conv_in.weight"]
    new_checkpoint["decoder.conv_in.bias"] = vae_state_dict["decoder.conv_in.bias"]
    new_checkpoint["decoder.conv_out.weight"] = vae_state_dict["decoder.conv_out.weight"]
    new_checkpoint["decoder.conv_out.bias"] = vae_state_dict["decoder.conv_out.bias"]
    new_checkpoint["decoder.conv_norm_out.weight"] = vae_state_dict["decoder.norm_out.weight"]
    new_checkpoint["decoder.conv_norm_out.bias"] = vae_state_dict["decoder.norm_out.bias"]

    new_checkpoint["quant_conv.weight"] = vae_state_dict["quant_conv.weight"]
    new_checkpoint["quant_conv.bias"] = vae_state_dict["quant_conv.bias"]
    new_checkpoint["post_quant_conv.weight"] = vae_state_dict["post_quant_conv.weight"]
    new_checkpoint["post_quant_conv.bias"] = vae_state_dict["post_quant_conv.bias"]

    # Retrieves the keys for the encoder down blocks only
    num_down_blocks = len({".".join(layer.split(".")[:3]) for layer in vae_state_dict if "encoder.down" in layer})
    down_blocks = {
        layer_id: [key for key in vae_state_dict if f"down.{layer_id}" in key] for layer_id in range(num_down_blocks)
    }

    # Retrieves the keys for the decoder up blocks only
    num_up_blocks = len({".".join(layer.split(".")[:3]) for layer in vae_state_dict if "decoder.up" in layer})
    up_blocks = {
        layer_id: [key for key in vae_state_dict if f"up.{layer_id}" in key] for layer_id in range(num_up_blocks)
    }

    for i in range(num_down_blocks):
        resnets = [key for key in down_blocks[i] if f"down.{i}" in key and f"down.{i}.downsample" not in key]

        if f"encoder.down.{i}.downsample.conv.weight" in vae_state_dict:
            new_checkpoint[f"encoder.down_blocks.{i}.downsamplers.0.conv.weight"] = vae_state_dict.pop(
                f"encoder.down.{i}.downsample.conv.weight"
            )
            new_checkpoint[f"encoder.down_blocks.{i}.downsamplers.0.conv.bias"] = vae_state_dict.pop(
                f"encoder.down.{i}.downsample.conv.bias"
            )

        paths = renew_vae_resnet_paths(resnets)
        meta_path = {"old": f"down.{i}.block", "new": f"down_blocks.{i}.resnets"}
        assign_to_checkpoint(paths, new_checkpoint, vae_state_dict, additional_replacements=[meta_path], config=config)

    mid_resnets = [key for key in vae_state_dict if "encoder.mid.block" in key]
    num_mid_res_blocks = 2
    for i in range(1, num_mid_res_blocks + 1):
        resnets = [key for key in mid_resnets if f"encoder.mid.block_{i}" in key]

        paths = renew_vae_resnet_paths(resnets)
        meta_path = {"old": f"mid.block_{i}", "new": f"mid_block.resnets.{i - 1}"}
        assign_to_checkpoint(paths, new_checkpoint, vae_state_dict, additional_replacements=[meta_path], config=config)

    mid_attentions = [key for key in vae_state_dict if "encoder.mid.attn" in key]
    paths = renew_vae_attention_paths(mid_attentions)
    meta_path = {"old": "mid.attn_1", "new": "mid_block.attentions.0"}
    assign_to_checkpoint(paths, new_checkpoint, vae_state_dict, additional_replacements=[meta_path], config=config)
    conv_attn_to_linear(new_checkpoint)

    for i in range(num_up_blocks):
        block_id = num_up_blocks - 1 - i
        resnets = [
            key for key in up_blocks[block_id] if f"up.{block_id}" in key and f"up.{block_id}.upsample" not in key
        ]

        if f"decoder.up.{block_id}.upsample.conv.weight" in vae_state_dict:
            new_checkpoint[f"decoder.up_blocks.{i}.upsamplers.0.conv.weight"] = vae_state_dict[
                f"decoder.up.{block_id}.upsample.conv.weight"
            ]
            new_checkpoint[f"decoder.up_blocks.{i}.upsamplers.0.conv.bias"] = vae_state_dict[
                f"decoder.up.{block_id}.upsample.conv.bias"
            ]

        paths = renew_vae_resnet_paths(resnets)
        meta_path = {"old": f"up.{block_id}.block", "new": f"up_blocks.{i}.resnets"}
        assign_to_checkpoint(paths, new_checkpoint, vae_state_dict, additional_replacements=[meta_path], config=config)

    mid_resnets = [key for key in vae_state_dict if "decoder.mid.block" in key]
    num_mid_res_blocks = 2
    for i in range(1, num_mid_res_blocks + 1):
        resnets = [key for key in mid_resnets if f"decoder.mid.block_{i}" in key]

        paths = renew_vae_resnet_paths(resnets)
        meta_path = {"old": f"mid.block_{i}", "new": f"mid_block.resnets.{i - 1}"}
        assign_to_checkpoint(paths, new_checkpoint, vae_state_dict, additional_replacements=[meta_path], config=config)

    mid_attentions = [key for key in vae_state_dict if "decoder.mid.attn" in key]
    paths = renew_vae_attention_paths(mid_attentions)
    meta_path = {"old": "mid.attn_1", "new": "mid_block.attentions.0"}
    assign_to_checkpoint(paths, new_checkpoint, vae_state_dict, additional_replacements=[meta_path], config=config)
    conv_attn_to_linear(new_checkpoint)

    return new_checkpoint


def vae_pt_to_vae_diffuser(checkpoint_path: str, output_path: str):
    # Only support V1
    r = requests.get(
        " https://raw.githubusercontent.com/CompVis/stable-diffusion/main/configs/stable-diffusion/v1-inference.yaml"
    )
    io_obj = io.BytesIO(r.content)

    original_config = OmegaConf.load(io_obj)
    image_size = 512
    device = "cuda" if torch.cuda.is_available() else "cpu"
    if checkpoint_path.endswith("safetensors"):
        from safetensors import safe_open

        checkpoint = {}
        with safe_open(checkpoint_path, framework="pt", device="cpu") as f:
            for key in f.keys():
                checkpoint[key] = f.get_tensor(key)
    else:
        checkpoint = torch.load(checkpoint_path, map_location=device)["state_dict"]

    # Convert the VAE model.
    vae_config = create_vae_diffusers_config(original_config, image_size=image_size)
    converted_vae_checkpoint = custom_convert_ldm_vae_checkpoint(checkpoint, vae_config)

    vae = AutoencoderKL(**vae_config)
    vae.load_state_dict(converted_vae_checkpoint)
    vae.save_pretrained(output_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()

    parser.add_argument("--vae_pt_path", default=None, type=str, required=True, help="Path to the VAE.pt to convert.")
    parser.add_argument("--dump_path", default=None, type=str, required=True, help="Path to save the converted VAE to.")

    args = parser.parse_args()
vae_pt_to_vae_diffuser(args.vae_pt_path, args.dump_path)
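
# Illustrative follow-up (not part of the original script; the model id and dump path
# are placeholders): the converted VAE can be swapped into a Stable Diffusion pipeline:
#
#   from diffusers import StableDiffusionPipeline
#
#   vae = AutoencoderKL.from_pretrained("path/to/dump_path")
#   pipe = StableDiffusionPipeline.from_pretrained("runwayml/stable-diffusion-v1-5", vae=vae)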
import itertools
import os
from collections import Counter, defaultdict
from concurrent.futures import ThreadPoolExecutor, as_completed
import numpy as np
import datasets
from .execute import check_correctness
_CITATION = '''\
@misc{chen2021evaluating,
title={Evaluating Large Language Models Trained on Code},
author={Mark Chen and Jerry Tworek and Heewoo Jun and Qiming Yuan \
and Henrique Ponde de Oliveira Pinto and Jared Kaplan and Harri Edwards \
and Yuri Burda and Nicholas Joseph and Greg Brockman and Alex Ray \
and Raul Puri and Gretchen Krueger and Michael Petrov and Heidy Khlaaf \
and Girish Sastry and Pamela Mishkin and Brooke Chan and Scott Gray \
and Nick Ryder and Mikhail Pavlov and Alethea Power and Lukasz Kaiser \
and Mohammad Bavarian and Clemens Winter and Philippe Tillet \
and Felipe Petroski Such and Dave Cummings and Matthias Plappert \
and Fotios Chantzis and Elizabeth Barnes and Ariel Herbert-Voss \
and William Hebgen Guss and Alex Nichol and Alex Paino and Nikolas Tezak \
and Jie Tang and Igor Babuschkin and Suchir Balaji and Shantanu Jain \
and William Saunders and Christopher Hesse and Andrew N. Carr \
and Jan Leike and Josh Achiam and Vedant Misra and Evan Morikawa \
and Alec Radford and Matthew Knight and Miles Brundage and Mira Murati \
and Katie Mayer and Peter Welinder and Bob McGrew and Dario Amodei \
and Sam McCandlish and Ilya Sutskever and Wojciech Zaremba},
year={2021},
eprint={2107.03374},
archivePrefix={arXiv},
primaryClass={cs.LG}
}
'''
_DESCRIPTION = '''\
This metric implements the evaluation harness for the HumanEval problem solving dataset
described in the paper "Evaluating Large Language Models Trained on Code"
(https://arxiv.org/abs/2107.03374).
'''
_KWARGS_DESCRIPTION = '''
Calculates how good are predictions given some references, using certain scores
Args:
predictions: list of candidates to evaluate. Each candidates should be a list
of strings with several code candidates to solve the problem.
references: a list with a test for each prediction. Each test should evaluate the
correctness of a code candidate.
k: number of code candidates to consider in the evaluation (Default: [1, 10, 100])
    num_workers: number of workers used to evaluate the candidate programs (Default: 4).
    timeout: maximum time in seconds allowed for a candidate program to run (Default: 3.0).
Returns:
pass_at_k: dict with pass rates for each k
results: dict with granular results of each unittest
Examples:
>>> code_eval = datasets.load_metric("code_eval")
>>> test_cases = ["assert add(2,3)==5"]
>>> candidates = [["def add(a,b): return a*b", "def add(a, b): return a+b"]]
>>> pass_at_k, results = code_eval.compute(references=test_cases, predictions=candidates, k=[1, 2])
>>> print(pass_at_k)
{\'pass@1\': 0.5, \'pass@2\': 1.0}
'''
_WARNING = '''
################################################################################
!!!WARNING!!!
################################################################################
The "code_eval" metric executes untrusted model-generated code in Python.
Although it is highly unlikely that model-generated code will do something
overtly malicious in response to this test suite, model-generated code may act
destructively due to a lack of model capability or alignment.
Users are strongly encouraged to sandbox this evaluation suite so that it
does not perform destructive actions on their host or network. For more
information on how OpenAI sandboxes its code, see the paper "Evaluating Large
Language Models Trained on Code" (https://arxiv.org/abs/2107.03374).
Once you have read this disclaimer and taken appropriate precautions,
set the environment variable HF_ALLOW_CODE_EVAL="1". Within Python you can do this
with:
>>> import os
>>> os.environ["HF_ALLOW_CODE_EVAL"] = "1"
################################################################################\
'''
_LICENSE = '''The MIT License
Copyright (c) OpenAI (https://openai.com)
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.'''
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION)
class CodeEval(datasets.Metric):
    def _info(self):
        return datasets.MetricInfo(
            # This is the description that will appear on the metrics page.
            description=_DESCRIPTION,
            citation=_CITATION,
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(
                {
                    "predictions": datasets.Sequence(datasets.Value("string")),
                    "references": datasets.Value("string"),
                }
            ),
            homepage="https://github.com/openai/human-eval",
            codebase_urls=["https://github.com/openai/human-eval"],
            reference_urls=["https://github.com/openai/human-eval"],
            license=_LICENSE,
        )
    def _compute(self, predictions, references, k=[1, 10, 100], num_workers=4, timeout=3.0):
        """Returns the scores."""

        if os.getenv("HF_ALLOW_CODE_EVAL", 0) != "1":
            raise ValueError(_WARNING)

        if os.name == "nt":
            raise NotImplementedError("This metric is currently not supported on Windows.")

        with ThreadPoolExecutor(max_workers=num_workers) as executor:
            futures = []
            completion_id = Counter()
            n_samples = 0
            results = defaultdict(list)

            for task_id, (candidates, test_case) in enumerate(zip(predictions, references)):
                for candidate in candidates:
                    test_program = candidate + "\n" + test_case
                    args = (test_program, timeout, task_id, completion_id[task_id])
                    future = executor.submit(check_correctness, *args)
                    futures.append(future)
                    completion_id[task_id] += 1
                    n_samples += 1

            for future in as_completed(futures):
                result = future.result()
                results[result["task_id"]].append((result["completion_id"], result))

        total, correct = [], []
        for result in results.values():
            result.sort()
            passed = [r[1]["passed"] for r in result]
            total.append(len(passed))
            correct.append(sum(passed))
        total = np.array(total)
        correct = np.array(correct)

        ks = k
        pass_at_k = {f"pass@{k}": estimate_pass_at_k(total, correct, k).mean() for k in ks if (total >= k).all()}

        return pass_at_k, results
def estimate_pass_at_k(num_samples, num_correct, k):
    """Estimates pass@k of each problem and returns them in an array."""

    def estimator(n: int, c: int, k: int) -> float:
        """Calculates 1 - comb(n - c, k) / comb(n, k)."""
        if n - c < k:
            return 1.0
        return 1.0 - np.prod(1.0 - k / np.arange(n - c + 1, n + 1))

    if isinstance(num_samples, int):
        num_samples_it = itertools.repeat(num_samples, len(num_correct))
    else:
        assert len(num_samples) == len(num_correct)
        num_samples_it = iter(num_samples)

    return np.array([estimator(int(n), int(c), k) for n, c in zip(num_samples_it, num_correct)])
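
# Worked example (not part of the original file): with n=20 generated samples for a
# task and c=5 of them passing, pass@k is the probability that at least one of k
# randomly drawn samples passes:
#
#   estimate_pass_at_k(np.array([20]), np.array([5]), 1)   # -> array([0.25])
#   estimate_pass_at_k(np.array([20]), np.array([5]), 10)  # -> array([0.9837...])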
import math
import numpy as np
import qiskit
from qiskit import Aer, ClassicalRegister, QuantumCircuit, QuantumRegister, execute
def quantum_fourier_transform(number_of_qubits: int = 3) -> qiskit.result.counts.Counts:
    """
    Build the quantum Fourier transform circuit on `number_of_qubits` qubits,
    run it on a simulator and return the measurement counts.
    """
    if isinstance(number_of_qubits, str):
        raise TypeError("number of qubits must be a integer.")
    if number_of_qubits <= 0:
        raise ValueError("number of qubits must be > 0.")
    if math.floor(number_of_qubits) != number_of_qubits:
        raise ValueError("number of qubits must be exact integer.")
    if number_of_qubits > 10:
        raise ValueError("number of qubits too large to simulate(>10).")

    qr = QuantumRegister(number_of_qubits, "qr")
    cr = ClassicalRegister(number_of_qubits, "cr")

    quantum_circuit = QuantumCircuit(qr, cr)

    counter = number_of_qubits

    for i in range(number_of_qubits):
        quantum_circuit.h(number_of_qubits - i - 1)
        counter -= 1
        for j in range(counter):
            quantum_circuit.cp(np.pi / 2 ** (counter - j), j, counter)

    for k in range(number_of_qubits // 2):
        quantum_circuit.swap(k, number_of_qubits - k - 1)

    # measure all the qubits
    quantum_circuit.measure(qr, cr)
    # simulate with 10000 shots
    backend = Aer.get_backend("qasm_simulator")
    job = execute(quantum_circuit, backend, shots=10000)

    return job.result().get_counts(quantum_circuit)
if __name__ == "__main__":
print(
f'''Total count for quantum fourier transform state is: \
{quantum_fourier_transform(3)}'''
)
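
# Sanity check sketch (not part of the original file): starting from |0...0>, the QFT
# produces an equal superposition, so each of the 2**n bitstrings should appear with
# roughly equal frequency among the 10000 shots:
#
#   counts = quantum_fourier_transform(3)
#   expected = 10000 / 2**3  # ~1250 per bitstring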
import numpy as np
import torch
from torch.utils.data import Dataset, IterableDataset
from ..utils.generic import ModelOutput
class PipelineDataset(Dataset):
    def __init__(self, dataset, process, params):
        self.dataset = dataset
        self.process = process
        self.params = params

    def __len__(self):
        return len(self.dataset)

    def __getitem__(self, i):
        item = self.dataset[i]
        processed = self.process(item, **self.params)
        return processed


class PipelineIterator(IterableDataset):
    def __init__(self, loader, infer, params, loader_batch_size=None):
        """
        Roughly equivalent to `[infer(item, **params) for item in loader]`, but run lazily.
        If `loader_batch_size` is set, batched outputs are unrolled into single items.
        """
        self.loader = loader
        self.infer = infer
        self.params = params
        if loader_batch_size == 1:
            # Let's spare some time by deactivating altogether
            loader_batch_size = None
        self.loader_batch_size = loader_batch_size

        # Internal bookkeeping
        self._loader_batch_index = None
        self._loader_batch_data = None

    def __len__(self):
        return len(self.loader)

    def __iter__(self):
        self.iterator = iter(self.loader)
        return self

    def loader_batch_item(self):
        """
        Return item located at `loader_batch_index` within the current `loader_batch_data`.
        """
        if isinstance(self._loader_batch_data, torch.Tensor):
            # Batch data is simple tensor, just fetch the slice
            result = self._loader_batch_data[self._loader_batch_index]
        else:
            # Batch data is assumed to be BaseModelOutput (or dict)
            loader_batched = {}
            for k, element in self._loader_batch_data.items():
                if isinstance(element, ModelOutput):
                    # Convert ModelOutput to tuple first
                    element = element.to_tuple()
                    if isinstance(element[0], torch.Tensor):
                        loader_batched[k] = tuple(el[self._loader_batch_index].unsqueeze(0) for el in element)
                    elif isinstance(element[0], np.ndarray):
                        loader_batched[k] = tuple(np.expand_dims(el[self._loader_batch_index], 0) for el in element)
                    continue
                if k in {"hidden_states", "past_key_values", "attentions"} and isinstance(element, tuple):
                    # Those are stored as lists of tensors so need specific unbatching.
                    if isinstance(element[0], torch.Tensor):
                        loader_batched[k] = tuple(el[self._loader_batch_index].unsqueeze(0) for el in element)
                    elif isinstance(element[0], np.ndarray):
                        loader_batched[k] = tuple(np.expand_dims(el[self._loader_batch_index], 0) for el in element)
                    continue
                if element is None:
                    # This can happen for optional data that get passed around
                    loader_batched[k] = None
                elif isinstance(element[self._loader_batch_index], torch.Tensor):
                    # Take correct batch data, but make it look like batch_size=1
                    # For compatibility with other methods within transformers
                    loader_batched[k] = element[self._loader_batch_index].unsqueeze(0)
                elif isinstance(element[self._loader_batch_index], np.ndarray):
                    # Take correct batch data, but make it look like batch_size=1
                    # For compatibility with other methods within transformers
                    loader_batched[k] = np.expand_dims(element[self._loader_batch_index], 0)
                else:
                    # This is typically a list, so no need to `unsqueeze`.
                    loader_batched[k] = element[self._loader_batch_index]
            # Recreate the element by reusing the original class to make it look
            # batch_size=1
            result = self._loader_batch_data.__class__(loader_batched)
        self._loader_batch_index += 1
        return result

    def __next__(self):
        if self._loader_batch_index is not None and self._loader_batch_index < self.loader_batch_size:
            # We are currently unrolling a batch so we just need to return
            # the current item within a batch
            return self.loader_batch_item()

        # We're out of items within a batch
        item = next(self.iterator)
        processed = self.infer(item, **self.params)
        # We now have a batch of "inferred things".
        if self.loader_batch_size is not None:
            # Try to infer the size of the batch
            if isinstance(processed, torch.Tensor):
                first_tensor = processed
            else:
                key = list(processed.keys())[0]
                first_tensor = processed[key]
            if isinstance(first_tensor, list):
                observed_batch_size = len(first_tensor)
            else:
                observed_batch_size = first_tensor.shape[0]
            if 0 < observed_batch_size < self.loader_batch_size:
                # could be last batch so we can't unroll as many
                # elements.
                self.loader_batch_size = observed_batch_size
            # Setting internal index to unwrap the batch
            self._loader_batch_data = processed
            self._loader_batch_index = 0
            return self.loader_batch_item()
        else:
            # We're not unrolling batches
            return processed


class PipelineChunkIterator(PipelineIterator):
    def __init__(self, loader, infer, params, loader_batch_size=None):
        super().__init__(loader, infer, params)

    def __iter__(self):
        self.iterator = iter(self.loader)
        self.subiterator = None
        return self

    def __next__(self):
        if self.subiterator is None:
            # Subiterator None means we haven't started a `preprocess` iterator, so start it.
            self.subiterator = self.infer(next(self.iterator), **self.params)
        try:
            # Try to return next item
            processed = next(self.subiterator)
        except StopIteration:
            # When a preprocess iterator ends, we can start looking at the next item
            # ChunkIterator will keep feeding until ALL elements of iterator
            # all have created their subiterator and have been iterating against.
            #
            # Another way to look at it, is we're basically flattening lists of lists
            # into a single list, but with generators
            self.subiterator = self.infer(next(self.iterator), **self.params)
            processed = next(self.subiterator)
        return processed


class PipelinePackIterator(PipelineIterator):
    def __iter__(self):
        self.iterator = iter(self.loader)
        return self

    def __next__(self):
        # This iterator accumulates items (possibly while unbatching) until it hits
        # an `is_last` marker, then passes the accumulated group on to the caller.
        is_last = False
        accumulator = []
        if self._loader_batch_index is not None and self._loader_batch_index < self.loader_batch_size:
            while self._loader_batch_index < self.loader_batch_size:
                item = self.loader_batch_item()
                is_last = item.pop("is_last")
                accumulator.append(item)
                if is_last:
                    return accumulator

        while not is_last:
            processed = self.infer(next(self.iterator), **self.params)
            if self.loader_batch_size is not None:
                if isinstance(processed, torch.Tensor):
                    first_tensor = processed
                else:
                    key = list(processed.keys())[0]
                    first_tensor = processed[key]
                if isinstance(first_tensor, list):
                    observed_batch_size = len(first_tensor)
                else:
                    observed_batch_size = first_tensor.shape[0]
                if 0 < observed_batch_size < self.loader_batch_size:
                    # could be last batch so we can't unroll as many
                    # elements.
                    self.loader_batch_size = observed_batch_size
                self._loader_batch_data = processed
                self._loader_batch_index = 0
                while self._loader_batch_index < self.loader_batch_size:
                    item = self.loader_batch_item()
                    is_last = item.pop("is_last")
                    accumulator.append(item)
                    if is_last:
                        return accumulator
            else:
                item = processed
                is_last = item.pop("is_last")
                accumulator.append(item)
        return accumulator


class KeyDataset(Dataset):
    def __init__(self, dataset: Dataset, key: str):
        self.dataset = dataset
        self.key = key

    def __len__(self):
        return len(self.dataset)

    def __getitem__(self, i):
        return self.dataset[i][self.key]


class KeyPairDataset(Dataset):
    def __init__(self, dataset: Dataset, key1: str, key2: str):
        self.dataset = dataset
        self.key1 = key1
        self.key2 = key2

    def __len__(self):
        return len(self.dataset)

    def __getitem__(self, i):
        return {"text": self.dataset[i][self.key1], "text_pair": self.dataset[i][self.key2]}
from datetime import datetime
import requests
def download_video(url: str) -> bytes:
    base_url = "https://downloadgram.net/wp-json/wppress/video-downloader/video?url="
    video_url = requests.get(base_url + url).json()[0]["urls"][0]["src"]
    return requests.get(video_url).content


if __name__ == "__main__":
    url = input("Enter Video/IGTV url: ").strip()
    file_name = f"{datetime.now():%Y-%m-%d_%H:%M:%S}.mp4"
with open(file_name, "wb") as fp:
fp.write(download_video(url))
print(f"Done. Video saved to disk as {file_name}.")
import pandas as pd
from matplotlib import pyplot as plt
from sklearn.linear_model import LinearRegression
# Splitting the dataset into the Training set and Test set
from sklearn.model_selection import train_test_split
# Fitting Polynomial Regression to the dataset
from sklearn.preprocessing import PolynomialFeatures
# Importing the dataset
dataset = pd.read_csv(
    "https://s3.us-west-2.amazonaws.com/public.gamelab.fun/dataset/"
    "position_salaries.csv"
)
X = dataset.iloc[:, 1:2].values
y = dataset.iloc[:, 2].values

X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2, random_state=0)

poly_reg = PolynomialFeatures(degree=4)
X_poly = poly_reg.fit_transform(X)
pol_reg = LinearRegression()
pol_reg.fit(X_poly, y)


# Visualizing the Polynomial Regression results
def viz_polymonial() -> None:
    plt.scatter(X, y, color="red")
    plt.plot(X, pol_reg.predict(poly_reg.fit_transform(X)), color="blue")
    plt.title("Truth or Bluff (Linear Regression)")
    plt.xlabel("Position level")
    plt.ylabel("Salary")
    plt.show()
if __name__ == "__main__":
viz_polymonial()
# Predicting a new result with Polymonial Regression
pol_reg.predict(poly_reg.fit_transform([[5.5]]))
# output should be 132148.43750003
from typing import List
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

EFFICIENTFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "snap-research/efficientformer-l1-300": (
        "https://huggingface.co/snap-research/efficientformer-l1-300/resolve/main/config.json"
    ),
}


class EfficientFormerConfig(PretrainedConfig):
    model_type = "efficientformer"

    def __init__(
        self,
        depths: List[int] = [3, 2, 6, 4],
        hidden_sizes: List[int] = [48, 96, 224, 448],
        downsamples: List[bool] = [True, True, True, True],
        dim: int = 448,
        key_dim: int = 32,
        attention_ratio: int = 4,
        resolution: int = 7,
        num_hidden_layers: int = 5,
        num_attention_heads: int = 8,
        mlp_expansion_ratio: int = 4,
        hidden_dropout_prob: float = 0.0,
        patch_size: int = 16,
        num_channels: int = 3,
        pool_size: int = 3,
        downsample_patch_size: int = 3,
        downsample_stride: int = 2,
        downsample_pad: int = 1,
        drop_path_rate: float = 0.0,
        num_meta3d_blocks: int = 1,
        distillation: bool = True,
        use_layer_scale: bool = True,
        layer_scale_init_value: float = 1e-5,
        hidden_act: str = "gelu",
        initializer_range: float = 0.02,
        layer_norm_eps: float = 1e-12,
        image_size: int = 224,
        batch_norm_eps: float = 1e-05,
        **kwargs,
    ) -> None:
        super().__init__(**kwargs)

        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.hidden_sizes = hidden_sizes
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.depths = depths
        self.mlp_expansion_ratio = mlp_expansion_ratio
        self.downsamples = downsamples
        self.dim = dim
        self.key_dim = key_dim
        self.attention_ratio = attention_ratio
        self.resolution = resolution
        self.pool_size = pool_size
        self.downsample_patch_size = downsample_patch_size
        self.downsample_stride = downsample_stride
        self.downsample_pad = downsample_pad
        self.drop_path_rate = drop_path_rate
        self.num_meta3d_blocks = num_meta3d_blocks
        self.distillation = distillation
        self.use_layer_scale = use_layer_scale
        self.layer_scale_init_value = layer_scale_init_value
        self.image_size = image_size
        self.batch_norm_eps = batch_norm_eps
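
# Minimal usage sketch (not part of the original module): the config is instantiated
# with defaults or overrides like any other PretrainedConfig:
#
#   config = EfficientFormerConfig(image_size=192)
#   print(config.hidden_sizes)  # [48, 96, 224, 448]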
import unittest
import numpy as np
from transformers import BertConfig, is_flax_available
from transformers.testing_utils import require_flax, slow
from ...test_modeling_flax_common import FlaxModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
if is_flax_available():
from transformers.models.bert.modeling_flax_bert import (
FlaxBertForMaskedLM,
FlaxBertForMultipleChoice,
FlaxBertForNextSentencePrediction,
FlaxBertForPreTraining,
FlaxBertForQuestionAnswering,
FlaxBertForSequenceClassification,
FlaxBertForTokenClassification,
FlaxBertModel,
)
class FlaxBertModelTester(unittest.TestCase):
    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_attention_mask=True,
        use_token_type_ids=True,
        use_labels=True,
        vocab_size=99,
        hidden_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        num_choices=4,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_attention_mask = use_attention_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_choices = num_choices

    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        attention_mask = None
        if self.use_attention_mask:
            attention_mask = random_attention_mask([self.batch_size, self.seq_length])

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)

        config = BertConfig(
            vocab_size=self.vocab_size,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            type_vocab_size=self.type_vocab_size,
            is_decoder=False,
            initializer_range=self.initializer_range,
        )

        return config, input_ids, token_type_ids, attention_mask

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, input_ids, token_type_ids, attention_mask = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": attention_mask}
        return config, inputs_dict

    def prepare_config_and_inputs_for_decoder(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, input_ids, token_type_ids, attention_mask = config_and_inputs
        config.is_decoder = True
        encoder_hidden_states = floats_tensor([self.batch_size, self.seq_length, self.hidden_size])
        encoder_attention_mask = ids_tensor([self.batch_size, self.seq_length], vocab_size=2)

        return (
            config,
            input_ids,
            attention_mask,
            encoder_hidden_states,
            encoder_attention_mask,
        )


@require_flax
class FlaxBertModelTest(FlaxModelTesterMixin, unittest.TestCase):
    test_head_masking = True

    all_model_classes = (
        (
            FlaxBertModel,
            FlaxBertForPreTraining,
            FlaxBertForMaskedLM,
            FlaxBertForMultipleChoice,
            FlaxBertForQuestionAnswering,
            FlaxBertForNextSentencePrediction,
            FlaxBertForSequenceClassification,
            FlaxBertForTokenClassification,
            FlaxBertForQuestionAnswering,
        )
        if is_flax_available()
        else ()
    )

    def setUp(self):
        self.model_tester = FlaxBertModelTester(self)

    @slow
    def test_model_from_pretrained(self):
        # Only check this for base model, not necessary for all model classes.
        # This will also help speed-up tests.
        model = FlaxBertModel.from_pretrained("bert-base-cased")
        outputs = model(np.ones((1, 1)))
        self.assertIsNotNone(outputs)
"""simple docstring"""
import os
from itertools import chain
from random import randrange, shuffle
import pytest
from .poker_hand import PokerHand
SORTED_HANDS = (
'''4S 3H 2C 7S 5H''',
'''9D 8H 2C 6S 7H''',
'''2D 6D 9D TH 7D''',
'''TC 8C 2S JH 6C''',
'''JH 8S TH AH QH''',
'''TS KS 5S 9S AC''',
'''KD 6S 9D TH AD''',
'''KS 8D 4D 9S 4S''', # pair
'''8C 4S KH JS 4D''', # pair
'''QH 8H KD JH 8S''', # pair
'''KC 4H KS 2H 8D''', # pair
'''KD 4S KC 3H 8S''', # pair
'''AH 8S AS KC JH''', # pair
'''3H 4C 4H 3S 2H''', # 2 pairs
'''5S 5D 2C KH KH''', # 2 pairs
'''3C KH 5D 5S KH''', # 2 pairs
'''AS 3C KH AD KH''', # 2 pairs
'''7C 7S 3S 7H 5S''', # 3 of a kind
'''7C 7S KH 2H 7H''', # 3 of a kind
'''AC KH QH AH AS''', # 3 of a kind
'''2H 4D 3C AS 5S''', # straight (low ace)
'''3C 5C 4C 2C 6H''', # straight
'''6S 8S 7S 5H 9H''', # straight
'''JS QS 9H TS KH''', # straight
'''QC KH TS JS AH''', # straight (high ace)
'''8C 9C 5C 3C TC''', # flush
'''3S 8S 9S 5S KS''', # flush
'''4C 5C 9C 8C KC''', # flush
'''JH 8H AH KH QH''', # flush
'''3D 2H 3H 2C 2D''', # full house
'''2H 2C 3S 3H 3D''', # full house
'''KH KC 3S 3H 3D''', # full house
'''JC 6H JS JD JH''', # 4 of a kind
'''JC 7H JS JD JH''', # 4 of a kind
'''JC KH JS JD JH''', # 4 of a kind
'''2S AS 4S 5S 3S''', # straight flush (low ace)
'''2D 6D 3D 4D 5D''', # straight flush
'''5C 6C 3C 7C 4C''', # straight flush
'''JH 9H TH KH QH''', # straight flush
'''JH AH TH KH QH''', # royal flush (high ace straight flush)
)
TEST_COMPARE = (
('''2H 3H 4H 5H 6H''', '''KS AS TS QS JS''', '''Loss'''),
('''2H 3H 4H 5H 6H''', '''AS AD AC AH JD''', '''Win'''),
('''AS AH 2H AD AC''', '''JS JD JC JH 3D''', '''Win'''),
('''2S AH 2H AS AC''', '''JS JD JC JH AD''', '''Loss'''),
('''2S AH 2H AS AC''', '''2H 3H 5H 6H 7H''', '''Win'''),
('''AS 3S 4S 8S 2S''', '''2H 3H 5H 6H 7H''', '''Win'''),
('''2H 3H 5H 6H 7H''', '''2S 3H 4H 5S 6C''', '''Win'''),
('''2S 3H 4H 5S 6C''', '''3D 4C 5H 6H 2S''', '''Tie'''),
('''2S 3H 4H 5S 6C''', '''AH AC 5H 6H AS''', '''Win'''),
('''2S 2H 4H 5S 4C''', '''AH AC 5H 6H AS''', '''Loss'''),
('''2S 2H 4H 5S 4C''', '''AH AC 5H 6H 7S''', '''Win'''),
('''6S AD 7H 4S AS''', '''AH AC 5H 6H 7S''', '''Loss'''),
('''2S AH 4H 5S KC''', '''AH AC 5H 6H 7S''', '''Loss'''),
('''2S 3H 6H 7S 9C''', '''7H 3C TH 6H 9S''', '''Loss'''),
('''4S 5H 6H TS AC''', '''3S 5H 6H TS AC''', '''Win'''),
('''2S AH 4H 5S 6C''', '''AD 4C 5H 6H 2C''', '''Tie'''),
('''AS AH 3H AD AC''', '''AS AH 2H AD AC''', '''Win'''),
('''AH AC 5H 5C QS''', '''AH AC 5H 5C KS''', '''Loss'''),
('''AH AC 5H 5C QS''', '''KH KC 5H 5C QS''', '''Win'''),
('''7C 7S KH 2H 7H''', '''3C 3S AH 2H 3H''', '''Win'''),
('''3C 3S AH 2H 3H''', '''7C 7S KH 2H 7H''', '''Loss'''),
('''6H 5H 4H 3H 2H''', '''5H 4H 3H 2H AH''', '''Win'''),
('''5H 4H 3H 2H AH''', '''5H 4H 3H 2H AH''', '''Tie'''),
('''5H 4H 3H 2H AH''', '''6H 5H 4H 3H 2H''', '''Loss'''),
('''AH AD KS KC AC''', '''AH KD KH AC KC''', '''Win'''),
('''2H 4D 3C AS 5S''', '''2H 4D 3C 6S 5S''', '''Loss'''),
('''2H 3S 3C 3H 2S''', '''3S 3C 2S 2H 2D''', '''Win'''),
('''4D 6D 5D 2D JH''', '''3S 8S 3H TC KH''', '''Loss'''),
('''4S 6C 8S 3S 7S''', '''AD KS 2D 7D 7C''', '''Loss'''),
('''6S 4C 7H 8C 3H''', '''5H JC AH 9D 9C''', '''Loss'''),
('''9D 9H JH TC QH''', '''3C 2S JS 5C 7H''', '''Win'''),
('''2H TC 8S AD 9S''', '''4H TS 7H 2C 5C''', '''Win'''),
('''9D 3S 2C 7S 7C''', '''JC TD 3C TC 9H''', '''Loss'''),
)
TEST_FLUSH = (
('''2H 3H 4H 5H 6H''', True),
('''AS AH 2H AD AC''', False),
('''2H 3H 5H 6H 7H''', True),
('''KS AS TS QS JS''', True),
('''8H 9H QS JS TH''', False),
('''AS 3S 4S 8S 2S''', True),
)
TEST_STRAIGHT = (
('''2H 3H 4H 5H 6H''', True),
('''AS AH 2H AD AC''', False),
('''2H 3H 5H 6H 7H''', False),
('''KS AS TS QS JS''', True),
('''8H 9H QS JS TH''', True),
)
TEST_FIVE_HIGH_STRAIGHT = (
('''2H 4D 3C AS 5S''', True, [5, 4, 3, 2, 14]),
('''2H 5D 3C AS 5S''', False, [14, 5, 5, 3, 2]),
('''JH QD KC AS TS''', False, [14, 13, 12, 11, 10]),
('''9D 3S 2C 7S 7C''', False, [9, 7, 7, 3, 2]),
)
TEST_KIND = (
('''JH AH TH KH QH''', 0),
('''JH 9H TH KH QH''', 0),
('''JC KH JS JD JH''', 7),
('''KH KC 3S 3H 3D''', 6),
('''8C 9C 5C 3C TC''', 0),
('''JS QS 9H TS KH''', 0),
('''7C 7S KH 2H 7H''', 3),
('''3C KH 5D 5S KH''', 2),
('''QH 8H KD JH 8S''', 1),
('''2D 6D 9D TH 7D''', 0),
)
TEST_TYPES = (
('''JH AH TH KH QH''', 23),
('''JH 9H TH KH QH''', 22),
('''JC KH JS JD JH''', 21),
('''KH KC 3S 3H 3D''', 20),
('''8C 9C 5C 3C TC''', 19),
('''JS QS 9H TS KH''', 18),
('''7C 7S KH 2H 7H''', 17),
('''3C KH 5D 5S KH''', 16),
('''QH 8H KD JH 8S''', 15),
('''2D 6D 9D TH 7D''', 14),
)
def generate_random_hand():
    play, oppo = randrange(len(SORTED_HANDS)), randrange(len(SORTED_HANDS))
    expected = ["Loss", "Tie", "Win"][(play >= oppo) + (play > oppo)]
    hand, other = SORTED_HANDS[play], SORTED_HANDS[oppo]
    return hand, other, expected


def generate_random_hands(number_of_hands: int = 100):
    return (generate_random_hand() for _ in range(number_of_hands))


@pytest.mark.parametrize("hand, expected", TEST_FLUSH)
def test_hand_is_flush(hand, expected):
    assert PokerHand(hand)._is_flush() == expected


@pytest.mark.parametrize("hand, expected", TEST_STRAIGHT)
def test_hand_is_straight(hand, expected):
    assert PokerHand(hand)._is_straight() == expected


@pytest.mark.parametrize("hand, expected, card_values", TEST_FIVE_HIGH_STRAIGHT)
def test_hand_is_five_high_straight(hand, expected, card_values):
    player = PokerHand(hand)
    assert player._is_five_high_straight() == expected
    assert player._card_values == card_values


@pytest.mark.parametrize("hand, expected", TEST_KIND)
def test_hand_is_same_kind(hand, expected):
    assert PokerHand(hand)._is_same_kind() == expected


@pytest.mark.parametrize("hand, expected", TEST_TYPES)
def test_hand_values(hand, expected):
    assert PokerHand(hand)._hand_type == expected


@pytest.mark.parametrize("hand, other, expected", TEST_COMPARE)
def test_hand_compare_with(hand, other, expected):
    assert PokerHand(hand).compare_with(PokerHand(other)) == expected


@pytest.mark.parametrize("hand, other, expected", generate_random_hands())
def test_hand_compare_with_random(hand, other, expected):
    assert PokerHand(hand).compare_with(PokerHand(other)) == expected


def test_hand_sorted():
    poker_hands = [PokerHand(hand) for hand in SORTED_HANDS]
    list_copy = poker_hands.copy()
    shuffle(list_copy)
    user_sorted = chain(sorted(list_copy))
    for index, hand in enumerate(user_sorted):
        assert hand == poker_hands[index]


def test_custom_sort_five_high_straight():
    # Test that five high straights are compared correctly.
    pokerhands = [PokerHand("2D AC 3H 4H 5S"), PokerHand("2S 3H 4H 5S 6C")]
    pokerhands.sort(reverse=True)
    assert pokerhands[0].__str__() == "2S 3H 4H 5S 6C"


def test_multiple_calls_five_high_straight():
    # Multiple calls to five_high_straight function should still return True
    # and shouldn't mutate the list in every call other than the first.
    pokerhand = PokerHand("2C 4S AS 3D 5C")
    expected = True
    expected_card_values = [5, 4, 3, 2, 14]
    for _ in range(10):
        assert pokerhand._is_five_high_straight() == expected
        assert pokerhand._card_values == expected_card_values


def test_euler_project():
    # Problem number 54 from Project Euler
    # Testing from poker_hands.txt file
    answer = 0
    script_dir = os.path.abspath(os.path.dirname(__file__))
    poker_hands = os.path.join(script_dir, "poker_hands.txt")
    with open(poker_hands) as file_hand:
        for line in file_hand:
            player_hand = line[:14].strip()
            opponent_hand = line[15:].strip()
            player, opponent = PokerHand(player_hand), PokerHand(opponent_hand)
            output = player.compare_with(opponent)
            if output == "Win":
                answer += 1
    assert answer == 376
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_distilbert import DistilBertTokenizer
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "vocab.txt", "tokenizer_file": "tokenizer.json"}

PRETRAINED_VOCAB_FILES_MAP = {
'''vocab_file''': {
'''distilbert-base-uncased''': '''https://huggingface.co/distilbert-base-uncased/resolve/main/vocab.txt''',
'''distilbert-base-uncased-distilled-squad''': (
'''https://huggingface.co/distilbert-base-uncased-distilled-squad/resolve/main/vocab.txt'''
),
'''distilbert-base-cased''': '''https://huggingface.co/distilbert-base-cased/resolve/main/vocab.txt''',
'''distilbert-base-cased-distilled-squad''': (
'''https://huggingface.co/distilbert-base-cased-distilled-squad/resolve/main/vocab.txt'''
),
'''distilbert-base-german-cased''': '''https://huggingface.co/distilbert-base-german-cased/resolve/main/vocab.txt''',
'''distilbert-base-multilingual-cased''': (
'''https://huggingface.co/distilbert-base-multilingual-cased/resolve/main/vocab.txt'''
),
},
'''tokenizer_file''': {
'''distilbert-base-uncased''': '''https://huggingface.co/distilbert-base-uncased/resolve/main/tokenizer.json''',
'''distilbert-base-uncased-distilled-squad''': (
'''https://huggingface.co/distilbert-base-uncased-distilled-squad/resolve/main/tokenizer.json'''
),
'''distilbert-base-cased''': '''https://huggingface.co/distilbert-base-cased/resolve/main/tokenizer.json''',
'''distilbert-base-cased-distilled-squad''': (
'''https://huggingface.co/distilbert-base-cased-distilled-squad/resolve/main/tokenizer.json'''
),
'''distilbert-base-german-cased''': (
'''https://huggingface.co/distilbert-base-german-cased/resolve/main/tokenizer.json'''
),
'''distilbert-base-multilingual-cased''': (
'''https://huggingface.co/distilbert-base-multilingual-cased/resolve/main/tokenizer.json'''
),
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
'''distilbert-base-uncased''': 512,
'''distilbert-base-uncased-distilled-squad''': 512,
'''distilbert-base-cased''': 512,
'''distilbert-base-cased-distilled-squad''': 512,
'''distilbert-base-german-cased''': 512,
'''distilbert-base-multilingual-cased''': 512,
}
PRETRAINED_INIT_CONFIGURATION = {
'''distilbert-base-uncased''': {'''do_lower_case''': True},
'''distilbert-base-uncased-distilled-squad''': {'''do_lower_case''': True},
'''distilbert-base-cased''': {'''do_lower_case''': False},
'''distilbert-base-cased-distilled-squad''': {'''do_lower_case''': False},
'''distilbert-base-german-cased''': {'''do_lower_case''': False},
'''distilbert-base-multilingual-cased''': {'''do_lower_case''': False},
}
class DistilBertTokenizerFast(PreTrainedTokenizerFast):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    pretrained_init_configuration = PRETRAINED_INIT_CONFIGURATION
    model_input_names = ["input_ids", "attention_mask"]
    slow_tokenizer_class = DistilBertTokenizer

    def __init__(
        self,
        vocab_file=None,
        tokenizer_file=None,
        do_lower_case=True,
        unk_token="[UNK]",
        sep_token="[SEP]",
        pad_token="[PAD]",
        cls_token="[CLS]",
        mask_token="[MASK]",
        tokenize_chinese_chars=True,
        strip_accents=None,
        **kwargs,
    ):
        super().__init__(
            vocab_file,
            tokenizer_file=tokenizer_file,
            do_lower_case=do_lower_case,
            unk_token=unk_token,
            sep_token=sep_token,
            pad_token=pad_token,
            cls_token=cls_token,
            mask_token=mask_token,
            tokenize_chinese_chars=tokenize_chinese_chars,
            strip_accents=strip_accents,
            **kwargs,
        )

        normalizer_state = json.loads(self.backend_tokenizer.normalizer.__getstate__())
        if (
            normalizer_state.get("lowercase", do_lower_case) != do_lower_case
            or normalizer_state.get("strip_accents", strip_accents) != strip_accents
            or normalizer_state.get("handle_chinese_chars", tokenize_chinese_chars) != tokenize_chinese_chars
        ):
            normalizer_class = getattr(normalizers, normalizer_state.pop("type"))
            normalizer_state["lowercase"] = do_lower_case
            normalizer_state["strip_accents"] = strip_accents
            normalizer_state["handle_chinese_chars"] = tokenize_chinese_chars
            self.backend_tokenizer.normalizer = normalizer_class(**normalizer_state)

        self.do_lower_case = do_lower_case

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        output = [self.cls_token_id] + token_ids_0 + [self.sep_token_id]

        if token_ids_1:
            output += token_ids_1 + [self.sep_token_id]

        return output

    def create_token_type_ids_from_sequences(self, token_ids_0, token_ids_1=None):
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1]

    def save_vocabulary(self, save_directory, filename_prefix=None):
        files = self._tokenizer.model.save(save_directory, name=filename_prefix)
        return tuple(files)
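
# Minimal usage sketch (not part of the original module): the fast tokenizer can be
# loaded from any of the hub checkpoints listed above:
#
#   tokenizer = DistilBertTokenizerFast.from_pretrained("distilbert-base-uncased")
#   encoding = tokenizer("Hello world", return_tensors="pt")
#   print(encoding.input_ids)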
from dataclasses import dataclass, field
from typing import Optional
@dataclass
class UpperCamelCase__ :
"""simple docstring"""
SCREAMING_SNAKE_CASE__ = field(
default='''codeparrot/codeparrot''' , metadata={'''help''': '''Model name or path of model to be trained.'''} )
SCREAMING_SNAKE_CASE__ = field(
default='''./''' , metadata={'''help''': '''Save dir where model repo is cloned and models updates are saved to.'''} )
SCREAMING_SNAKE_CASE__ = field(
default='''codeparrot/codeparrot-clean-train''' , metadata={'''help''': '''Name or path of training dataset.'''} )
SCREAMING_SNAKE_CASE__ = field(
default='''codeparrot/codeparrot-clean-valid''' , metadata={'''help''': '''Name or path of validation dataset.'''} )
SCREAMING_SNAKE_CASE__ = field(default=2 , metadata={'''help''': '''Batch size for training.'''} )
SCREAMING_SNAKE_CASE__ = field(default=2 , metadata={'''help''': '''Batch size for evaluation.'''} )
SCREAMING_SNAKE_CASE__ = field(default=0.1 , metadata={'''help''': '''Value of weight decay.'''} )
SCREAMING_SNAKE_CASE__ = field(
default=1_0000 , metadata={'''help''': '''Size of buffer used to shuffle streaming dataset.'''} )
SCREAMING_SNAKE_CASE__ = field(default=2e-4 , metadata={'''help''': '''Learning rate fo training.'''} )
SCREAMING_SNAKE_CASE__ = field(default='''cosine''' , metadata={'''help''': '''Learning rate.'''} )
SCREAMING_SNAKE_CASE__ = field(
default=750 , metadata={'''help''': '''Number of warmup steps in the learning rate schedule.'''} )
SCREAMING_SNAKE_CASE__ = field(
default=16 , metadata={'''help''': '''Number of gradient accumulation steps.'''} )
SCREAMING_SNAKE_CASE__ = field(
default=lowercase_ , metadata={'''help''': '''Use gradient checkpointing to reduce memory footprint.'''} )
SCREAMING_SNAKE_CASE__ = field(default=5_0000 , metadata={'''help''': '''Maximum number of training steps.'''} )
SCREAMING_SNAKE_CASE__ = field(
default=-1 , metadata={'''help''': '''Maximum number of evaluation steps. If -1 the full dataset is evaluated.'''} )
SCREAMING_SNAKE_CASE__ = field(default=1024 , metadata={'''help''': '''Sequence lengths used for training.'''} )
SCREAMING_SNAKE_CASE__ = field(default=1 , metadata={'''help''': '''Training seed.'''} )
SCREAMING_SNAKE_CASE__ = field(
default=1024 , metadata={'''help''': '''Interval to save checkpoints. Measured as number of forward passes not training steps.'''} , )
SCREAMING_SNAKE_CASE__ = field(
default=lowercase_ , metadata={'''help''': '''States path if the training should continue from a checkpoint folder.'''} )
SCREAMING_SNAKE_CASE__ = field(default=lowercase_ , metadata={'''help''': '''If True the data is pretokenized.'''} )
@dataclass
class UpperCamelCase__ :
"""simple docstring"""
SCREAMING_SNAKE_CASE__ = field(
default='''codeparrot/codeparrot''' , metadata={'''help''': '''Model name or path of model to be evaluated.'''} )
SCREAMING_SNAKE_CASE__ = field(
default='''codeparrot/codeparrot-clean-valid''' , metadata={'''help''': '''Name or path of validation dataset.'''} )
SCREAMING_SNAKE_CASE__ = field(default=2 , metadata={'''help''': '''Batch size used for evaluation.'''} )
SCREAMING_SNAKE_CASE__ = field(
default=-1 , metadata={'''help''': '''Maximum number of evaluation steps. If -1 the full dataset is evaluated.'''} )
SCREAMING_SNAKE_CASE__ = field(default=1024 , metadata={'''help''': '''Length of sequences to be evaluated.'''} )
SCREAMING_SNAKE_CASE__ = field(default=1 , metadata={'''help''': '''Random seed used for evaluation.'''} )
@dataclass
class UpperCamelCase__ :
"""simple docstring"""
SCREAMING_SNAKE_CASE__ = field(
default='''codeparrot/codeparrot''' , metadata={'''help''': '''Model name or path of model to be evaluated.'''} )
SCREAMING_SNAKE_CASE__ = field(default=lowercase_ , metadata={'''help''': '''Number of workers used for code evaluation.'''} )
SCREAMING_SNAKE_CASE__ = field(
default=lowercase_ , metadata={'''help''': '''The number of human-eval tasks to run. If not included all tasks are evaluated.'''} , )
SCREAMING_SNAKE_CASE__ = field(
default=lowercase_ , metadata={'''help''': '''Sample from the language model\'s output distribution.'''} )
SCREAMING_SNAKE_CASE__ = field(default=0.2 , metadata={'''help''': '''Sampling temperature used for generation.'''} )
SCREAMING_SNAKE_CASE__ = field(default=256 , metadata={'''help''': '''Maximum number of newly generated tokens.'''} )
SCREAMING_SNAKE_CASE__ = field(default=0 , metadata={'''help''': '''Top-k parameter used for generation.'''} )
SCREAMING_SNAKE_CASE__ = field(default=0.95 , metadata={'''help''': '''Top-p parameter used for nucleus sampling.'''} )
SCREAMING_SNAKE_CASE__ = field(default=10 , metadata={'''help''': '''Number of generations to run in parallel.'''} )
SCREAMING_SNAKE_CASE__ = field(
default=200 , metadata={'''help''': '''Number of completions to generate for each sample.'''} )
SCREAMING_SNAKE_CASE__ = field(default=1 , metadata={'''help''': '''Random seed used for evaluation.'''} )
    SCREAMING_SNAKE_CASE__ = field(
        default='''eval_results.json''' , metadata={'''help''': '''Name of the file to save evaluation results to.'''} )
SCREAMING_SNAKE_CASE__ = field(
default='''0''' , metadata={'''help''': '''Allow `code_eval` to execute Python code on machine'''} )
SCREAMING_SNAKE_CASE__ = field(
default=-1 , metadata={
'''help''': (
'''Determine which device to run the `text-generation` Pipeline on. -1 is CPU and any zero or positive'''
''' number corresponds to which GPU device id to run on.'''
)
} , )
@dataclass
class UpperCamelCase__ :
"""simple docstring"""
SCREAMING_SNAKE_CASE__ = field(
default=lowercase_ , metadata={
'''help''': '''The number of CPU cores to use for parallel preprocessing. Default uses the maximum available.'''
} , )
SCREAMING_SNAKE_CASE__ = field(
default='''transformersbook/codeparrot''' , metadata={'''help''': '''Folder or name of dataset to process.'''} )
SCREAMING_SNAKE_CASE__ = field(
        default='''codeparrot-clean''' , metadata={'''help''': '''Folder to save the processed dataset.'''} )
SCREAMING_SNAKE_CASE__ = field(
default=10_0000 , metadata={'''help''': '''Number of files to save per JSON output file.'''} )
SCREAMING_SNAKE_CASE__ = field(default='''content''' , metadata={'''help''': '''Column containing text data to process.'''} )
SCREAMING_SNAKE_CASE__ = field(
default=1000 , metadata={'''help''': '''Maximum line length in file, otherwise file is filtered.'''} )
SCREAMING_SNAKE_CASE__ = field(
default=100 , metadata={'''help''': '''Maximum mean line length in file, otherwise file is filtered.'''} )
SCREAMING_SNAKE_CASE__ = field(
default=0.25 , metadata={'''help''': '''Maximum fraction of non-alphanumeric characters, otherwise file is filtered.'''} )
SCREAMING_SNAKE_CASE__ = field(
default=1.5 , metadata={'''help''': '''Minimum character token ratio for the file, otherwise file is filtered.'''} )
SCREAMING_SNAKE_CASE__ = field(
default=0.7 , metadata={'''help''': '''Probability for filtering config, test and uncommon files.'''} )
SCREAMING_SNAKE_CASE__ = field(
default='''codeparrot/codeparrot''' , metadata={'''help''': '''Name or path to the tokenizer.'''} , )
SCREAMING_SNAKE_CASE__ = field(
default=lowercase_ , metadata={'''help''': '''If True, near-duplicate samples are removed.'''} )
SCREAMING_SNAKE_CASE__ = field(
default=0.85 , metadata={'''help''': '''Jaccard threshold for near-duplicate samples.'''} )
@dataclass
class UpperCamelCase__ :
"""simple docstring"""
SCREAMING_SNAKE_CASE__ = field(
default='''gpt2''' , metadata={'''help''': '''Base tokenizer to build new tokenizer from.'''} )
SCREAMING_SNAKE_CASE__ = field(
default='''transformersbook/codeparrot-train''' , metadata={'''help''': '''Dataset to train tokenizer on.'''} )
SCREAMING_SNAKE_CASE__ = field(default='''content''' , metadata={'''help''': '''Column containing text data to process.'''} )
    SCREAMING_SNAKE_CASE__ = field(default=20_0000 , metadata={'''help''': '''Vocabulary size of the new tokenizer.'''} )
SCREAMING_SNAKE_CASE__ = field(
default=3_2768 , metadata={'''help''': '''Number of examples to train the tokenizer on.'''} )
SCREAMING_SNAKE_CASE__ = field(default='''codeparrot''' , metadata={'''help''': '''Name of new tokenizer.'''} )
SCREAMING_SNAKE_CASE__ = field(default=lowercase_ , metadata={'''help''': '''Push saved tokenizer to the hub.'''} )
@dataclass
class UpperCamelCase__ :
"""simple docstring"""
SCREAMING_SNAKE_CASE__ = field(
default='''codeparrot/codeparrot''' , metadata={'''help''': '''Name or path to the tokenizer.'''} )
SCREAMING_SNAKE_CASE__ = field(
default='''codeparrot/codeparrot-clean-train''' , metadata={'''help''': '''Name or path to the dataset to pretokenize.'''} )
SCREAMING_SNAKE_CASE__ = field(
default='''tokenized-codeparrot-train''' , metadata={'''help''': '''Repo name of the pretokenized data.'''} )
    SCREAMING_SNAKE_CASE__ = field(default=lowercase_ , metadata={'''help''': '''Number of workers used for pretokenization.'''} )
@dataclass
class UpperCamelCase__ :
"""simple docstring"""
SCREAMING_SNAKE_CASE__ = field(
default='''gpt2-large''' , metadata={'''help''': '''Configuration to use for model initialization.'''} )
SCREAMING_SNAKE_CASE__ = field(
default='''codeparrot/codeparrot''' , metadata={'''help''': '''Tokenizer attached to model.'''} )
SCREAMING_SNAKE_CASE__ = field(default='''codeparrot''' , metadata={'''help''': '''Name of the created model.'''} )
    SCREAMING_SNAKE_CASE__ = field(default=lowercase_ , metadata={'''help''': '''Push saved model to the hub.'''} )
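# A minimal, hedged usage sketch for dataclasses like the ones above (assumes the
# relevant class is importable as `TrainingArguments`; the anonymized class names
# above would differ):
#
#     from transformers import HfArgumentParser
#
#     parser = HfArgumentParser(TrainingArguments)
#     args = parser.parse_args()  # argparse flags are generated from the fields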
| 323 |
'''simple docstring'''
import os
import shutil
import tempfile
import unittest
import numpy as np
from transformers import AutoTokenizer, BarkProcessor
from transformers.testing_utils import require_torch, slow
@require_torch
class UpperCamelCase__ ( unittest.TestCase ):
"""simple docstring"""
    def setUp( self ):
        '''simple docstring'''
        self.checkpoint = """ylacombe/bark-small"""
        self.tmpdirname = tempfile.mkdtemp()
        self.voice_preset = """en_speaker_1"""
        self.input_string = """This is a test string"""
        self.speaker_embeddings_dict_path = """speaker_embeddings_path.json"""
        self.speaker_embeddings_directory = """speaker_embeddings"""
    def get_tokenizer( self , **kwargs ):
        '''simple docstring'''
        return AutoTokenizer.from_pretrained(self.checkpoint , **kwargs )
    def tearDown( self ):
        '''simple docstring'''
        shutil.rmtree(self.tmpdirname )
    def test_save_load_pretrained_default( self ):
        '''simple docstring'''
        tokenizer = self.get_tokenizer()
        processor = BarkProcessor(tokenizer=tokenizer )
        processor.save_pretrained(self.tmpdirname )
        processor = BarkProcessor.from_pretrained(self.tmpdirname )
        self.assertEqual(processor.tokenizer.get_vocab() , tokenizer.get_vocab() )
    @slow
    def test_save_load_pretrained_additional_features( self ):
        '''simple docstring'''
        processor = BarkProcessor.from_pretrained(
            pretrained_processor_name_or_path=self.checkpoint , speaker_embeddings_dict_path=self.speaker_embeddings_dict_path , )
        processor.save_pretrained(
            self.tmpdirname , speaker_embeddings_dict_path=self.speaker_embeddings_dict_path , speaker_embeddings_directory=self.speaker_embeddings_directory , )
        tokenizer_add_kwargs = self.get_tokenizer(bos_token="""(BOS)""" , eos_token="""(EOS)""" )
        processor = BarkProcessor.from_pretrained(
            self.tmpdirname , self.speaker_embeddings_dict_path , bos_token="""(BOS)""" , eos_token="""(EOS)""" , )
        self.assertEqual(processor.tokenizer.get_vocab() , tokenizer_add_kwargs.get_vocab() )
    def test_speaker_embeddings( self ):
        '''simple docstring'''
        processor = BarkProcessor.from_pretrained(
            pretrained_processor_name_or_path=self.checkpoint , speaker_embeddings_dict_path=self.speaker_embeddings_dict_path , )
        seq_len = 35
        nb_codebooks_coarse = 2
        nb_codebooks_total = 8
        voice_preset = {
            """semantic_prompt""": np.ones(seq_len ),
            """coarse_prompt""": np.ones((nb_codebooks_coarse, seq_len) ),
            """fine_prompt""": np.ones((nb_codebooks_total, seq_len) ),
        }
        # test providing already loaded voice_preset
        inputs = processor(text=self.input_string , voice_preset=voice_preset )
        processed_voice_preset = inputs["""history_prompt"""]
        for key in voice_preset:
            self.assertListEqual(voice_preset[key].tolist() , processed_voice_preset.get(key , np.array([] ) ).tolist() )
        # test loading voice preset from npz file
        tmpfilename = os.path.join(self.tmpdirname , """file.npz""" )
        np.savez(tmpfilename , **voice_preset )
        inputs = processor(text=self.input_string , voice_preset=tmpfilename )
        processed_voice_preset = inputs["""history_prompt"""]
        for key in voice_preset:
            self.assertListEqual(voice_preset[key].tolist() , processed_voice_preset.get(key , np.array([] ) ).tolist() )
        # test loading voice preset from the hub
        inputs = processor(text=self.input_string , voice_preset=self.voice_preset )
    def test_tokenizer( self ):
        '''simple docstring'''
        tokenizer = self.get_tokenizer()
        processor = BarkProcessor(tokenizer=tokenizer )
        encoded_processor = processor(text=self.input_string )
        encoded_tok = tokenizer(
            self.input_string , padding="""max_length""" , max_length=2_56 , add_special_tokens=False , return_attention_mask=True , return_token_type_ids=False , )
        for key in encoded_tok.keys():
            self.assertListEqual(encoded_tok[key] , encoded_processor[key].squeeze().tolist() )
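# A hedged usage sketch of the processor under test (checkpoint and preset names
# taken from setUp above):
#
#     processor = BarkProcessor.from_pretrained("ylacombe/bark-small")
#     inputs = processor(text="This is a test string", voice_preset="en_speaker_1")
#     # inputs["input_ids"] feeds the Bark model; "history_prompt" holds the voice preset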
| 323 | 1 |
import argparse
import os.path as osp
import re
import torch
from safetensors.torch import load_file, save_file
# =================#
# UNet Conversion #
# =================#
__A = [
# (stable-diffusion, HF Diffusers)
('''time_embed.0.weight''', '''time_embedding.linear_1.weight'''),
('''time_embed.0.bias''', '''time_embedding.linear_1.bias'''),
('''time_embed.2.weight''', '''time_embedding.linear_2.weight'''),
('''time_embed.2.bias''', '''time_embedding.linear_2.bias'''),
('''input_blocks.0.0.weight''', '''conv_in.weight'''),
('''input_blocks.0.0.bias''', '''conv_in.bias'''),
('''out.0.weight''', '''conv_norm_out.weight'''),
('''out.0.bias''', '''conv_norm_out.bias'''),
('''out.2.weight''', '''conv_out.weight'''),
('''out.2.bias''', '''conv_out.bias'''),
]
__A = [
# (stable-diffusion, HF Diffusers)
('''in_layers.0''', '''norm1'''),
('''in_layers.2''', '''conv1'''),
('''out_layers.0''', '''norm2'''),
('''out_layers.3''', '''conv2'''),
('''emb_layers.1''', '''time_emb_proj'''),
('''skip_connection''', '''conv_shortcut'''),
]
__A = []
# hardcoded number of downblocks and resnets/attentions...
# would need smarter logic for other networks.
for i in range(4):
# loop over downblocks/upblocks
for j in range(2):
# loop over resnets/attentions for downblocks
__A = f'down_blocks.{i}.resnets.{j}.'
__A = f'input_blocks.{3*i + j + 1}.0.'
unet_conversion_map_layer.append((sd_down_res_prefix, hf_down_res_prefix))
if i < 3:
# no attention layers in down_blocks.3
__A = f'down_blocks.{i}.attentions.{j}.'
__A = f'input_blocks.{3*i + j + 1}.1.'
unet_conversion_map_layer.append((sd_down_atn_prefix, hf_down_atn_prefix))
for j in range(3):
# loop over resnets/attentions for upblocks
__A = f'up_blocks.{i}.resnets.{j}.'
__A = f'output_blocks.{3*i + j}.0.'
unet_conversion_map_layer.append((sd_up_res_prefix, hf_up_res_prefix))
if i > 0:
# no attention layers in up_blocks.0
__A = f'up_blocks.{i}.attentions.{j}.'
__A = f'output_blocks.{3*i + j}.1.'
unet_conversion_map_layer.append((sd_up_atn_prefix, hf_up_atn_prefix))
if i < 3:
# no downsample in down_blocks.3
__A = f'down_blocks.{i}.downsamplers.0.conv.'
__A = f'input_blocks.{3*(i+1)}.0.op.'
unet_conversion_map_layer.append((sd_downsample_prefix, hf_downsample_prefix))
# no upsample in up_blocks.3
__A = f'up_blocks.{i}.upsamplers.0.'
__A = f'output_blocks.{3*i + 2}.{1 if i == 0 else 2}.'
unet_conversion_map_layer.append((sd_upsample_prefix, hf_upsample_prefix))
__A = '''mid_block.attentions.0.'''
__A = '''middle_block.1.'''
unet_conversion_map_layer.append((sd_mid_atn_prefix, hf_mid_atn_prefix))
for j in range(2):
__A = f'mid_block.resnets.{j}.'
__A = f'middle_block.{2*j}.'
unet_conversion_map_layer.append((sd_mid_res_prefix, hf_mid_res_prefix))
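# Example of a pair appended above for i=0, j=0, following the
# (stable-diffusion, HF Diffusers) convention of the hardcoded maps:
#     ("input_blocks.1.0.", "down_blocks.0.resnets.0.")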
def __a ( lowerCAmelCase_ : Optional[Any] ) -> Dict:
'''simple docstring'''
UpperCAmelCase_= {k: k for k in unet_state_dict.keys()}
for sd_name, hf_name in unet_conversion_map:
UpperCAmelCase_= sd_name
for k, v in mapping.items():
if "resnets" in k:
for sd_part, hf_part in unet_conversion_map_resnet:
UpperCAmelCase_= v.replace(a__ ,a__ )
UpperCAmelCase_= v
for k, v in mapping.items():
for sd_part, hf_part in unet_conversion_map_layer:
UpperCAmelCase_= v.replace(a__ ,a__ )
UpperCAmelCase_= v
UpperCAmelCase_= {v: unet_state_dict[k] for k, v in mapping.items()}
return new_state_dict
# ================#
# VAE Conversion #
# ================#
__A = [
# (stable-diffusion, HF Diffusers)
('''nin_shortcut''', '''conv_shortcut'''),
('''norm_out''', '''conv_norm_out'''),
('''mid.attn_1.''', '''mid_block.attentions.0.'''),
]
for i in range(4):
# down_blocks have two resnets
for j in range(2):
__A = f'encoder.down_blocks.{i}.resnets.{j}.'
__A = f'encoder.down.{i}.block.{j}.'
vae_conversion_map.append((sd_down_prefix, hf_down_prefix))
if i < 3:
__A = f'down_blocks.{i}.downsamplers.0.'
__A = f'down.{i}.downsample.'
vae_conversion_map.append((sd_downsample_prefix, hf_downsample_prefix))
__A = f'up_blocks.{i}.upsamplers.0.'
__A = f'up.{3-i}.upsample.'
vae_conversion_map.append((sd_upsample_prefix, hf_upsample_prefix))
# up_blocks have three resnets
# also, up blocks in hf are numbered in reverse from sd
for j in range(3):
__A = f'decoder.up_blocks.{i}.resnets.{j}.'
__A = f'decoder.up.{3-i}.block.{j}.'
vae_conversion_map.append((sd_up_prefix, hf_up_prefix))
# this part accounts for mid blocks in both the encoder and the decoder
for i in range(2):
__A = f'mid_block.resnets.{i}.'
__A = f'mid.block_{i+1}.'
vae_conversion_map.append((sd_mid_res_prefix, hf_mid_res_prefix))
__A = [
# (stable-diffusion, HF Diffusers)
('''norm.''', '''group_norm.'''),
('''q.''', '''query.'''),
('''k.''', '''key.'''),
('''v.''', '''value.'''),
('''proj_out.''', '''proj_attn.'''),
]
def __a ( lowerCAmelCase_ : str ) -> Union[str, Any]:
'''simple docstring'''
return w.reshape(*w.shape ,1 ,1 )
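# Note: the helper above appends two singleton spatial dims, e.g. a (512, 512)
# linear weight becomes (512, 512, 1, 1). The original SD VAE implements its
# attention projections as 1x1 convolutions, while Diffusers stores them as
# linear layers, so the weights need the extra dims when converting back.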
def __a ( lowerCAmelCase_ : Optional[Any] ) -> Optional[int]:
'''simple docstring'''
UpperCAmelCase_= {k: k for k in vae_state_dict.keys()}
for k, v in mapping.items():
for sd_part, hf_part in vae_conversion_map:
UpperCAmelCase_= v.replace(a__ ,a__ )
UpperCAmelCase_= v
for k, v in mapping.items():
if "attentions" in k:
for sd_part, hf_part in vae_conversion_map_attn:
UpperCAmelCase_= v.replace(a__ ,a__ )
UpperCAmelCase_= v
UpperCAmelCase_= {v: vae_state_dict[k] for k, v in mapping.items()}
UpperCAmelCase_= ["""q""", """k""", """v""", """proj_out"""]
for k, v in new_state_dict.items():
for weight_name in weights_to_convert:
if F"""mid.attn_1.{weight_name}.weight""" in k:
print(F"""Reshaping {k} for SD format""" )
UpperCAmelCase_= reshape_weight_for_sd(a__ )
return new_state_dict
# =========================#
# Text Encoder Conversion #
# =========================#
textenc_conversion_lst = [
# (stable-diffusion, HF Diffusers)
('''resblocks.''', '''text_model.encoder.layers.'''),
('''ln_1''', '''layer_norm1'''),
('''ln_2''', '''layer_norm2'''),
('''.c_fc.''', '''.fc1.'''),
('''.c_proj.''', '''.fc2.'''),
('''.attn''', '''.self_attn'''),
('''ln_final.''', '''transformer.text_model.final_layer_norm.'''),
('''token_embedding.weight''', '''transformer.text_model.embeddings.token_embedding.weight'''),
('''positional_embedding''', '''transformer.text_model.embeddings.position_embedding.weight'''),
]
protected = {re.escape(x[1]): x[0] for x in textenc_conversion_lst}
textenc_pattern = re.compile('''|'''.join(protected.keys()))
# Ordering is from https://github.com/pytorch/pytorch/blob/master/test/cpp/api/modules.cpp
__A = {'''q''': 0, '''k''': 1, '''v''': 2}
def __a ( lowerCAmelCase_ : List[str] ) -> str:
'''simple docstring'''
UpperCAmelCase_= {}
UpperCAmelCase_= {}
UpperCAmelCase_= {}
for k, v in text_enc_dict.items():
if (
k.endswith(""".self_attn.q_proj.weight""" )
or k.endswith(""".self_attn.k_proj.weight""" )
or k.endswith(""".self_attn.v_proj.weight""" )
):
UpperCAmelCase_= k[: -len(""".q_proj.weight""" )]
UpperCAmelCase_= k[-len("""q_proj.weight""" )]
if k_pre not in capture_qkv_weight:
UpperCAmelCase_= [None, None, None]
UpperCAmelCase_= v
continue
if (
k.endswith(""".self_attn.q_proj.bias""" )
or k.endswith(""".self_attn.k_proj.bias""" )
or k.endswith(""".self_attn.v_proj.bias""" )
):
UpperCAmelCase_= k[: -len(""".q_proj.bias""" )]
UpperCAmelCase_= k[-len("""q_proj.bias""" )]
if k_pre not in capture_qkv_bias:
UpperCAmelCase_= [None, None, None]
UpperCAmelCase_= v
continue
        UpperCAmelCase_= textenc_pattern.sub(lambda m : protected[re.escape(m.group(0 ) )] ,k )
UpperCAmelCase_= v
for k_pre, tensors in capture_qkv_weight.items():
if None in tensors:
raise Exception("""CORRUPTED MODEL: one of the q-k-v values for the text encoder was missing""" )
        UpperCAmelCase_= textenc_pattern.sub(lambda m : protected[re.escape(m.group(0 ) )] ,k_pre )
        UpperCAmelCase_= torch.cat(tensors )
for k_pre, tensors in capture_qkv_bias.items():
if None in tensors:
raise Exception("""CORRUPTED MODEL: one of the q-k-v values for the text encoder was missing""" )
        UpperCAmelCase_= textenc_pattern.sub(lambda m : protected[re.escape(m.group(0 ) )] ,k_pre )
        UpperCAmelCase_= torch.cat(tensors )
return new_state_dict
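# PyTorch's MultiheadAttention stores the fused attention projections as single
# "in_proj_weight" / "in_proj_bias" tensors, which is why the separate q/k/v
# tensors collected above are concatenated, in the q=0, k=1, v=2 order defined
# earlier.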
def __a ( lowerCAmelCase_ : Union[str, Any] ) -> List[str]:
'''simple docstring'''
return text_enc_dict
if __name__ == "__main__":
__A = argparse.ArgumentParser()
parser.add_argument('''--model_path''', default=None, type=str, required=True, help='''Path to the model to convert.''')
parser.add_argument('''--checkpoint_path''', default=None, type=str, required=True, help='''Path to the output model.''')
parser.add_argument('''--half''', action='''store_true''', help='''Save weights in half precision.''')
parser.add_argument(
'''--use_safetensors''', action='''store_true''', help='''Save weights use safetensors, default is ckpt.'''
)
__A = parser.parse_args()
assert args.model_path is not None, "Must provide a model path!"
assert args.checkpoint_path is not None, "Must provide a checkpoint path!"
# Path for safetensors
__A = osp.join(args.model_path, '''unet''', '''diffusion_pytorch_model.safetensors''')
__A = osp.join(args.model_path, '''vae''', '''diffusion_pytorch_model.safetensors''')
__A = osp.join(args.model_path, '''text_encoder''', '''model.safetensors''')
    # Load each model from safetensors if the file exists, otherwise fall back to the PyTorch checkpoint
if osp.exists(unet_path):
__A = load_file(unet_path, device='''cpu''')
else:
__A = osp.join(args.model_path, '''unet''', '''diffusion_pytorch_model.bin''')
__A = torch.load(unet_path, map_location='''cpu''')
if osp.exists(vae_path):
__A = load_file(vae_path, device='''cpu''')
else:
__A = osp.join(args.model_path, '''vae''', '''diffusion_pytorch_model.bin''')
__A = torch.load(vae_path, map_location='''cpu''')
if osp.exists(text_enc_path):
__A = load_file(text_enc_path, device='''cpu''')
else:
__A = osp.join(args.model_path, '''text_encoder''', '''pytorch_model.bin''')
__A = torch.load(text_enc_path, map_location='''cpu''')
# Convert the UNet model
__A = convert_unet_state_dict(unet_state_dict)
__A = {'''model.diffusion_model.''' + k: v for k, v in unet_state_dict.items()}
# Convert the VAE model
__A = convert_vae_state_dict(vae_state_dict)
__A = {'''first_stage_model.''' + k: v for k, v in vae_state_dict.items()}
# Easiest way to identify v2.0 model seems to be that the text encoder (OpenCLIP) is deeper
__A = '''text_model.encoder.layers.22.layer_norm2.bias''' in text_enc_dict
if is_vaa_model:
# Need to add the tag 'transformer' in advance so we can knock it out from the final layer-norm
__A = {'''transformer.''' + k: v for k, v in text_enc_dict.items()}
__A = convert_text_enc_state_dict_vaa(text_enc_dict)
__A = {'''cond_stage_model.model.''' + k: v for k, v in text_enc_dict.items()}
else:
__A = convert_text_enc_state_dict(text_enc_dict)
__A = {'''cond_stage_model.transformer.''' + k: v for k, v in text_enc_dict.items()}
# Put together new checkpoint
__A = {**unet_state_dict, **vae_state_dict, **text_enc_dict}
if args.half:
__A = {k: v.half() for k, v in state_dict.items()}
if args.use_safetensors:
save_file(state_dict, args.checkpoint_path)
else:
__A = {'''state_dict''': state_dict}
torch.save(state_dict, args.checkpoint_path)
| 360 |
import argparse
import json
import os
import evaluate
import torch
from datasets import load_dataset
from torch.optim import AdamW
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed
from accelerate import Accelerator, DistributedType
from accelerate.utils.deepspeed import DummyOptim, DummyScheduler
__A = 16
__A = 32
def get_dataloaders( accelerator: Accelerator ,batch_size: int = 16 ,model_name: str = "bert-base-cased" ):
    '''simple docstring'''
    tokenizer = AutoTokenizer.from_pretrained(model_name )
    datasets = load_dataset("""glue""" ,"""mrpc""" )
    def tokenize_function(examples ):
        # max_length=None => use the model max length (it's actually the default)
        outputs = tokenizer(examples["""sentence1"""] ,examples["""sentence2"""] ,truncation=True ,max_length=None )
        return outputs
# Apply the method we just defined to all the examples in all the splits of the dataset
    tokenized_datasets = datasets.map(
        tokenize_function ,batched=True ,remove_columns=["""idx""", """sentence1""", """sentence2"""] ,load_from_cache_file=False )
# We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the
# transformers library
    tokenized_datasets = tokenized_datasets.rename_column("""label""" ,"""labels""" )
    def collate_fn(examples ):
        # On TPU it's best to pad everything to the same length or training will be very slow.
        if accelerator.distributed_type == DistributedType.TPU:
            return tokenizer.pad(examples ,padding="""max_length""" ,max_length=1_28 ,return_tensors="""pt""" )
        return tokenizer.pad(examples ,padding="""longest""" ,return_tensors="""pt""" )
# Instantiate dataloaders.
    train_dataloader = DataLoader(
        tokenized_datasets["""train"""] ,shuffle=True ,collate_fn=collate_fn ,batch_size=batch_size )
    eval_dataloader = DataLoader(
        tokenized_datasets["""validation"""] ,shuffle=False ,collate_fn=collate_fn ,batch_size=batch_size )
return train_dataloader, eval_dataloader
def __a ( lowerCAmelCase_ : str ,lowerCAmelCase_ : List[str] ) -> int:
'''simple docstring'''
UpperCAmelCase_= Accelerator()
# Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
UpperCAmelCase_= config["""lr"""]
UpperCAmelCase_= int(config["""num_epochs"""] )
UpperCAmelCase_= int(config["""seed"""] )
UpperCAmelCase_= int(config["""batch_size"""] )
UpperCAmelCase_= args.model_name_or_path
set_seed(lowerCAmelCase_ )
UpperCAmelCase_, UpperCAmelCase_= get_dataloaders(lowerCAmelCase_ ,lowerCAmelCase_ ,lowerCAmelCase_ )
# Instantiate the model (we build the model here so that the seed also control new weights initialization)
UpperCAmelCase_= AutoModelForSequenceClassification.from_pretrained(lowerCAmelCase_ ,return_dict=lowerCAmelCase_ )
# Instantiate optimizer
UpperCAmelCase_= (
AdamW
if accelerator.state.deepspeed_plugin is None
or """optimizer""" not in accelerator.state.deepspeed_plugin.deepspeed_config
else DummyOptim
)
UpperCAmelCase_= optimizer_cls(params=model.parameters() ,lr=lowerCAmelCase_ )
if accelerator.state.deepspeed_plugin is not None:
UpperCAmelCase_= accelerator.state.deepspeed_plugin.deepspeed_config[
"""gradient_accumulation_steps"""
]
else:
UpperCAmelCase_= 1
UpperCAmelCase_= (len(lowerCAmelCase_ ) * num_epochs) // gradient_accumulation_steps
# Instantiate scheduler
if (
accelerator.state.deepspeed_plugin is None
or "scheduler" not in accelerator.state.deepspeed_plugin.deepspeed_config
):
UpperCAmelCase_= get_linear_schedule_with_warmup(
optimizer=lowerCAmelCase_ ,num_warmup_steps=0 ,num_training_steps=lowerCAmelCase_ ,)
else:
UpperCAmelCase_= DummyScheduler(lowerCAmelCase_ ,total_num_steps=lowerCAmelCase_ ,warmup_num_steps=0 )
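    # Note: DummyOptim / DummyScheduler are Accelerate placeholders used when the
    # optimizer or LR scheduler is configured in the DeepSpeed config file itself;
    # accelerator.prepare(...) below swaps in the DeepSpeed-managed equivalents.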
# Prepare everything
# There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
# prepare method.
UpperCAmelCase_, UpperCAmelCase_, UpperCAmelCase_, UpperCAmelCase_, UpperCAmelCase_= accelerator.prepare(
lowerCAmelCase_ ,lowerCAmelCase_ ,lowerCAmelCase_ ,lowerCAmelCase_ ,lowerCAmelCase_ )
# We need to keep track of how many total steps we have iterated over
UpperCAmelCase_= 0
    # We also need to keep track of the starting epoch so files are named properly
UpperCAmelCase_= 0
# Now we train the model
UpperCAmelCase_= evaluate.load("""glue""" ,"""mrpc""" )
UpperCAmelCase_= 0
UpperCAmelCase_= {}
for epoch in range(lowerCAmelCase_ ,lowerCAmelCase_ ):
model.train()
for step, batch in enumerate(lowerCAmelCase_ ):
UpperCAmelCase_= model(**lowerCAmelCase_ )
UpperCAmelCase_= outputs.loss
UpperCAmelCase_= loss / gradient_accumulation_steps
accelerator.backward(lowerCAmelCase_ )
if step % gradient_accumulation_steps == 0:
optimizer.step()
lr_scheduler.step()
optimizer.zero_grad()
overall_step += 1
model.eval()
UpperCAmelCase_= 0
for step, batch in enumerate(lowerCAmelCase_ ):
# We could avoid this line since we set the accelerator with `device_placement=True`.
batch.to(accelerator.device )
with torch.no_grad():
UpperCAmelCase_= model(**lowerCAmelCase_ )
UpperCAmelCase_= outputs.logits.argmax(dim=-1 )
# It is slightly faster to call this once, than multiple times
UpperCAmelCase_, UpperCAmelCase_= accelerator.gather(
(predictions, batch["""labels"""]) ) # If we are in a multiprocess environment, the last batch has duplicates
if accelerator.use_distributed:
if step == len(lowerCAmelCase_ ) - 1:
UpperCAmelCase_= predictions[: len(eval_dataloader.dataset ) - samples_seen]
UpperCAmelCase_= references[: len(eval_dataloader.dataset ) - samples_seen]
else:
samples_seen += references.shape[0]
metric.add_batch(
predictions=lowerCAmelCase_ ,references=lowerCAmelCase_ ,)
UpperCAmelCase_= metric.compute()
# Use accelerator.print to print only on the main process.
accelerator.print(F"""epoch {epoch}:""" ,lowerCAmelCase_ )
UpperCAmelCase_= eval_metric["""accuracy"""]
if best_performance < eval_metric["accuracy"]:
UpperCAmelCase_= eval_metric["""accuracy"""]
if args.performance_lower_bound is not None:
assert (
args.performance_lower_bound <= best_performance
), F"""Best performance metric {best_performance} is lower than the lower bound {args.performance_lower_bound}"""
accelerator.wait_for_everyone()
if accelerator.is_main_process:
with open(os.path.join(args.output_dir ,"""all_results.json""" ) ,"""w""" ) as f:
json.dump(lowerCAmelCase_ ,lowerCAmelCase_ )
def __a ( ) -> Optional[Any]:
'''simple docstring'''
UpperCAmelCase_= argparse.ArgumentParser(description="""Simple example of training script tracking peak GPU memory usage.""" )
parser.add_argument(
"""--model_name_or_path""" ,type=lowerCAmelCase_ ,default="""bert-base-cased""" ,help="""Path to pretrained model or model identifier from huggingface.co/models.""" ,required=lowerCAmelCase_ ,)
parser.add_argument(
"""--output_dir""" ,type=lowerCAmelCase_ ,default=""".""" ,help="""Optional save directory where all checkpoint folders will be stored. Default is the current working directory.""" ,)
parser.add_argument(
"""--performance_lower_bound""" ,type=lowerCAmelCase_ ,default=lowerCAmelCase_ ,help="""Optional lower bound for the performance metric. If set, the training will throw error when the performance metric drops below this value.""" ,)
parser.add_argument(
"""--num_epochs""" ,type=lowerCAmelCase_ ,default=3 ,help="""Number of train epochs.""" ,)
UpperCAmelCase_= parser.parse_args()
UpperCAmelCase_= {"""lr""": 2E-5, """num_epochs""": args.num_epochs, """seed""": 42, """batch_size""": 16}
training_function(lowerCAmelCase_ ,lowerCAmelCase_ )
if __name__ == "__main__":
main()
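# A hedged launch sketch for a script like this (config file name is illustrative):
#
#     accelerate launch --config_file deepspeed_config.yaml this_script.py \
#         --model_name_or_path bert-base-cased --num_epochs 3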
| 277 | 0 |
'''simple docstring'''
from typing import Optional
from torch import nn
from .transformer_ad import TransformeraDModel, TransformeraDModelOutput
class UpperCAmelCase__ ( nn.Module ):
"""simple docstring"""
def __init__( self : Optional[int] ,_a : Optional[int] = 16 ,_a : Optional[int] = 88 ,_a : Any = None ,_a : Any = 1 ,_a : Optional[int] = 0.0 ,_a : Tuple = 32 ,_a : List[str] = None ,_a : Union[str, Any] = False ,_a : Union[str, Any] = None ,_a : Optional[Any] = None ,_a : Dict = "geglu" ,_a : str = None ,):
'''simple docstring'''
super().__init__()
_a : Optional[Any] = nn.ModuleList(
[
TransformeraDModel(
num_attention_heads=a_ ,attention_head_dim=a_ ,in_channels=a_ ,num_layers=a_ ,dropout=a_ ,norm_num_groups=a_ ,cross_attention_dim=a_ ,attention_bias=a_ ,sample_size=a_ ,num_vector_embeds=a_ ,activation_fn=a_ ,num_embeds_ada_norm=a_ ,)
for _ in range(2 )
] )
# Variables that can be set by a pipeline:
# The ratio of transformer1 to transformer2's output states to be combined during inference
_a : Union[str, Any] = 0.5
# The shape of `encoder_hidden_states` is expected to be
# `(batch_size, condition_lengths[0]+condition_lengths[1], num_features)`
_a : List[Any] = [77, 257]
# Which transformer to use to encode which condition.
# E.g. `(1, 0)` means that we'll use `transformers[1](conditions[0])` and `transformers[0](conditions[1])`
_a : int = [1, 0]
def __lowercase ( self : Dict ,_a : Dict ,_a : Optional[int] ,_a : Dict=None ,_a : Any=None ,_a : List[Any]=None ,_a : Optional[Any] = True ,):
'''simple docstring'''
_a : Tuple = hidden_states
_a : Tuple = []
_a : Optional[Any] = 0
# attention_mask is not used yet
for i in range(2 ):
# for each of the two transformers, pass the corresponding condition tokens
_a : Any = encoder_hidden_states[:, tokens_start : tokens_start + self.condition_lengths[i]]
_a : Any = self.transformer_index_for_condition[i]
_a : Optional[Any] = self.transformers[transformer_index](
a_ ,encoder_hidden_states=a_ ,timestep=a_ ,cross_attention_kwargs=a_ ,return_dict=a_ ,)[0]
encoded_states.append(encoded_state - input_states )
tokens_start += self.condition_lengths[i]
_a : List[str] = encoded_states[0] * self.mix_ratio + encoded_states[1] * (1 - self.mix_ratio)
_a : Union[str, Any] = output_states + input_states
if not return_dict:
return (output_states,)
return TransformeraDModelOutput(sample=a_ )
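# The forward pass above subtracts the residual input from each transformer's
# output, blends the two, and adds the input back; algebraically this reduces to
#     output = mix_ratio * out_0 + (1 - mix_ratio) * out_1
# with mix_ratio (0.5 by default) settable by the calling pipeline.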
| 271 |
'''simple docstring'''
import inspect
import os
import re
from transformers.configuration_utils import PretrainedConfig
from transformers.utils import direct_transformers_import
# All paths are set with the intent you should run this script from the root of the repo with the command
# python utils/check_config_docstrings.py
A_ : List[str] = """src/transformers"""
# This is to make sure the transformers module imported is the one in the repo.
A_ : Optional[Any] = direct_transformers_import(PATH_TO_TRANSFORMERS)
A_ : Union[str, Any] = transformers.models.auto.configuration_auto.CONFIG_MAPPING
A_ : Dict = {
# used to compute the property `self.chunk_length`
"""EncodecConfig""": ["""overlap"""],
# used as `self.bert_model = BertModel(config, ...)`
"""DPRConfig""": True,
# not used in modeling files, but it's an important information
"""FSMTConfig""": ["""langs"""],
# used internally in the configuration class file
"""GPTNeoConfig""": ["""attention_types"""],
# used internally in the configuration class file
"""EsmConfig""": ["""is_folding_model"""],
    # used during training (although we don't have a training script for these models yet)
"""Mask2FormerConfig""": ["""ignore_value"""],
    # `ignore_value` used during training (although we don't have a training script for these models yet)
    # `norm` used in the conversion script (although not used in the modeling file)
"""OneFormerConfig""": ["""ignore_value""", """norm"""],
# used during preprocessing and collation, see `collating_graphormer.py`
"""GraphormerConfig""": ["""spatial_pos_max"""],
# used internally in the configuration class file
"""T5Config""": ["""feed_forward_proj"""],
# used internally in the configuration class file
# `tokenizer_class` get default value `T5Tokenizer` intentionally
"""MT5Config""": ["""feed_forward_proj""", """tokenizer_class"""],
"""UMT5Config""": ["""feed_forward_proj""", """tokenizer_class"""],
# used internally in the configuration class file
"""LongT5Config""": ["""feed_forward_proj"""],
# used internally in the configuration class file
"""SwitchTransformersConfig""": ["""feed_forward_proj"""],
# having default values other than `1e-5` - we can't fix them without breaking
"""BioGptConfig""": ["""layer_norm_eps"""],
# having default values other than `1e-5` - we can't fix them without breaking
"""GLPNConfig""": ["""layer_norm_eps"""],
# having default values other than `1e-5` - we can't fix them without breaking
"""SegformerConfig""": ["""layer_norm_eps"""],
# having default values other than `1e-5` - we can't fix them without breaking
"""CvtConfig""": ["""layer_norm_eps"""],
# having default values other than `1e-5` - we can't fix them without breaking
"""PerceiverConfig""": ["""layer_norm_eps"""],
# used internally to calculate the feature size
"""InformerConfig""": ["""num_static_real_features""", """num_time_features"""],
# used internally to calculate the feature size
"""TimeSeriesTransformerConfig""": ["""num_static_real_features""", """num_time_features"""],
# used internally to calculate the feature size
"""AutoformerConfig""": ["""num_static_real_features""", """num_time_features"""],
# used internally to calculate `mlp_dim`
"""SamVisionConfig""": ["""mlp_ratio"""],
# For (head) training, but so far not implemented
"""ClapAudioConfig""": ["""num_classes"""],
# Not used, but providing useful information to users
"""SpeechT5HifiGanConfig""": ["""sampling_rate"""],
}
# TODO (ydshieh): Check the failing cases, try to fix them or move some cases to the above block once we are sure
SPECIAL_CASES_TO_ALLOW.update(
{
"""CLIPSegConfig""": True,
"""DeformableDetrConfig""": True,
"""DetaConfig""": True,
"""DinatConfig""": True,
"""DonutSwinConfig""": True,
"""EfficientFormerConfig""": True,
"""FSMTConfig""": True,
"""JukeboxConfig""": True,
"""LayoutLMv2Config""": True,
"""MaskFormerSwinConfig""": True,
"""MT5Config""": True,
"""NatConfig""": True,
"""OneFormerConfig""": True,
"""PerceiverConfig""": True,
"""RagConfig""": True,
"""SpeechT5Config""": True,
"""SwinConfig""": True,
"""Swin2SRConfig""": True,
"""Swinv2Config""": True,
"""SwitchTransformersConfig""": True,
"""TableTransformerConfig""": True,
"""TapasConfig""": True,
"""TransfoXLConfig""": True,
"""UniSpeechConfig""": True,
"""UniSpeechSatConfig""": True,
"""WavLMConfig""": True,
"""WhisperConfig""": True,
# TODO: @Arthur (for `alignment_head` and `alignment_layer`)
"""JukeboxPriorConfig""": True,
# TODO: @Younes (for `is_decoder`)
"""Pix2StructTextConfig""": True,
}
)
def snake_case_ ( lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ )-> Dict:
'''simple docstring'''
_UpperCAmelCase : Optional[Any] = False
for attribute in attributes:
for modeling_source in source_strings:
# check if we can find `config.xxx`, `getattr(config, "xxx", ...)` or `getattr(self.config, "xxx", ...)`
if (
F'''config.{attribute}''' in modeling_source
or F'''getattr(config, "{attribute}"''' in modeling_source
or F'''getattr(self.config, "{attribute}"''' in modeling_source
):
_UpperCAmelCase : Tuple = True
# Deal with multi-line cases
elif (
re.search(
RF'''getattr[ \t\v\n\r\f]*\([ \t\v\n\r\f]*(self\.)?config,[ \t\v\n\r\f]*"{attribute}"''' , lowerCAmelCase_ , )
is not None
):
_UpperCAmelCase : Any = True
# `SequenceSummary` is called with `SequenceSummary(config)`
elif attribute in [
"summary_type",
"summary_use_proj",
"summary_activation",
"summary_last_dropout",
"summary_proj_to_labels",
"summary_first_dropout",
]:
if "SequenceSummary" in modeling_source:
_UpperCAmelCase : List[str] = True
if attribute_used:
break
if attribute_used:
break
# common and important attributes, even if they do not always appear in the modeling files
_UpperCAmelCase : Dict = [
"""bos_index""",
"""eos_index""",
"""pad_index""",
"""unk_index""",
"""mask_index""",
"""image_size""",
"""use_cache""",
"""out_features""",
"""out_indices""",
]
_UpperCAmelCase : int = ["""encoder_no_repeat_ngram_size"""]
# Special cases to be allowed
_UpperCAmelCase : Optional[Any] = True
if not attribute_used:
_UpperCAmelCase : List[Any] = False
for attribute in attributes:
# Allow if the default value in the configuration class is different from the one in `PretrainedConfig`
if attribute in ["is_encoder_decoder"] and default_value is True:
_UpperCAmelCase : Tuple = True
elif attribute in ["tie_word_embeddings"] and default_value is False:
_UpperCAmelCase : Any = True
# Allow cases without checking the default value in the configuration class
elif attribute in attributes_to_allow + attributes_used_in_generation:
_UpperCAmelCase : Dict = True
elif attribute.endswith("""_token_id""" ):
_UpperCAmelCase : Optional[int] = True
# configuration class specific cases
if not case_allowed:
_UpperCAmelCase : int = SPECIAL_CASES_TO_ALLOW.get(config_class.__name__ , [] )
_UpperCAmelCase : Union[str, Any] = allowed_cases is True or attribute in allowed_cases
return attribute_used or case_allowed
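# Illustrative (hypothetical) example: for attributes=["hidden_size"], the check
# above passes as soon as any modeling_*.py source for the model contains
# `config.hidden_size`, `getattr(config, "hidden_size", ...)`, or the multi-line
# getattr variant matched by the regex.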
def snake_case_ ( lowerCAmelCase_ )-> Optional[Any]:
'''simple docstring'''
_UpperCAmelCase : Optional[int] = dict(inspect.signature(config_class.__init__ ).parameters )
_UpperCAmelCase : Optional[int] = [x for x in list(signature.keys() ) if x not in ["""self""", """kwargs"""]]
_UpperCAmelCase : Optional[int] = [signature[param].default for param in parameter_names]
# If `attribute_map` exists, an attribute can have different names to be used in the modeling files, and as long
# as one variant is used, the test should pass
_UpperCAmelCase : List[Any] = {}
if len(config_class.attribute_map ) > 0:
_UpperCAmelCase : Optional[int] = {v: k for k, v in config_class.attribute_map.items()}
# Get the path to modeling source files
_UpperCAmelCase : int = inspect.getsourcefile(lowerCAmelCase_ )
_UpperCAmelCase : str = os.path.dirname(lowerCAmelCase_ )
# Let's check against all frameworks: as long as one framework uses an attribute, we are good.
_UpperCAmelCase : Optional[int] = [os.path.join(lowerCAmelCase_ , lowerCAmelCase_ ) for fn in os.listdir(lowerCAmelCase_ ) if fn.startswith("""modeling_""" )]
# Get the source code strings
_UpperCAmelCase : str = []
for path in modeling_paths:
if os.path.isfile(lowerCAmelCase_ ):
with open(lowerCAmelCase_ ) as fp:
modeling_sources.append(fp.read() )
_UpperCAmelCase : Any = []
for config_param, default_value in zip(lowerCAmelCase_ , lowerCAmelCase_ ):
# `attributes` here is all the variant names for `config_param`
_UpperCAmelCase : List[str] = [config_param]
# some configuration classes have non-empty `attribute_map`, and both names could be used in the
# corresponding modeling files. As long as one of them appears, it is fine.
if config_param in reversed_attribute_map:
attributes.append(reversed_attribute_map[config_param] )
if not check_attribute_being_used(lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ ):
unused_attributes.append(attributes[0] )
return sorted(lowerCAmelCase_ )
def snake_case_ ( )-> Optional[Any]:
'''simple docstring'''
_UpperCAmelCase : Dict = {}
for _config_class in list(CONFIG_MAPPING.values() ):
# Skip deprecated models
if "models.deprecated" in _config_class.__module__:
continue
# Some config classes are not in `CONFIG_MAPPING` (e.g. `CLIPVisionConfig`, `Blip2VisionConfig`, etc.)
_UpperCAmelCase : List[Any] = [
cls
for name, cls in inspect.getmembers(
inspect.getmodule(_config_class ) , lambda lowerCAmelCase_ : inspect.isclass(lowerCAmelCase_ )
and issubclass(lowerCAmelCase_ , lowerCAmelCase_ )
and inspect.getmodule(lowerCAmelCase_ ) == inspect.getmodule(_config_class ) , )
]
for config_class in config_classes_in_module:
_UpperCAmelCase : Optional[int] = check_config_attributes_being_used(lowerCAmelCase_ )
if len(lowerCAmelCase_ ) > 0:
_UpperCAmelCase : Tuple = unused_attributes
if len(lowerCAmelCase_ ) > 0:
_UpperCAmelCase : Dict = """The following configuration classes contain unused attributes in the corresponding modeling files:\n"""
for name, attributes in configs_with_unused_attributes.items():
error += F'''{name}: {attributes}\n'''
raise ValueError(lowerCAmelCase_ )
if __name__ == "__main__":
check_config_attributes()
| 215 | 0 |
import math
from typing import Union
import torch
from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import randn_tensor
from .scheduling_utils import SchedulerMixin
class UpperCamelCase__ ( _snake_case , _snake_case ):
"""simple docstring"""
UpperCAmelCase_ =1
@register_to_config
def __init__( self , _A=2000 , _A=0.1 , _A=20 , _A=1E-3 ) -> Tuple:
SCREAMING_SNAKE_CASE_ = None
SCREAMING_SNAKE_CASE_ = None
SCREAMING_SNAKE_CASE_ = None
def _UpperCamelCase ( self , _A , _A = None ) -> str:
SCREAMING_SNAKE_CASE_ = torch.linspace(1 , self.config.sampling_eps , UpperCamelCase__ , device=UpperCamelCase__ )
def _UpperCamelCase ( self , _A , _A , _A , _A=None ) -> Any:
if self.timesteps is None:
raise ValueError(
'''`self.timesteps` is not set, you need to run \'set_timesteps\' after creating the scheduler''' )
# TODO(Patrick) better comments + non-PyTorch
# postprocess model score
SCREAMING_SNAKE_CASE_ = (
-0.25 * t**2 * (self.config.beta_max - self.config.beta_min) - 0.5 * t * self.config.beta_min
)
SCREAMING_SNAKE_CASE_ = torch.sqrt(1.0 - torch.exp(2.0 * log_mean_coeff ) )
SCREAMING_SNAKE_CASE_ = std.flatten()
while len(std.shape ) < len(score.shape ):
SCREAMING_SNAKE_CASE_ = std.unsqueeze(-1 )
SCREAMING_SNAKE_CASE_ = -score / std
# compute
SCREAMING_SNAKE_CASE_ = -1.0 / len(self.timesteps )
SCREAMING_SNAKE_CASE_ = self.config.beta_min + t * (self.config.beta_max - self.config.beta_min)
SCREAMING_SNAKE_CASE_ = beta_t.flatten()
while len(beta_t.shape ) < len(x.shape ):
SCREAMING_SNAKE_CASE_ = beta_t.unsqueeze(-1 )
SCREAMING_SNAKE_CASE_ = -0.5 * beta_t * x
SCREAMING_SNAKE_CASE_ = torch.sqrt(UpperCamelCase__ )
SCREAMING_SNAKE_CASE_ = drift - diffusion**2 * score
SCREAMING_SNAKE_CASE_ = x + drift * dt
# add noise
SCREAMING_SNAKE_CASE_ = randn_tensor(x.shape , layout=x.layout , generator=UpperCamelCase__ , device=x.device , dtype=x.dtype )
SCREAMING_SNAKE_CASE_ = x_mean + diffusion * math.sqrt(-dt ) * noise
return x, x_mean
def __len__( self ) -> Optional[Any]:
return self.config.num_train_timesteps
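# The step method above integrates the reverse-time VP SDE of Song et al. (2021),
#     dx = [-1/2 * beta(t) * x - beta(t) * score(x, t)] dt + sqrt(beta(t)) dw,
# with an Euler-Maruyama step of size dt = -1/N and noise scaled by sqrt(-dt).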
| 363 |
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from timm import create_model
from timm.data import resolve_data_config
from timm.data.transforms_factory import create_transform
from transformers import BitConfig, BitForImageClassification, BitImageProcessor
from transformers.image_utils import PILImageResampling
from transformers.utils import logging
logging.set_verbosity_info()
__UpperCAmelCase = logging.get_logger(__name__)
def A__ ( __lowerCamelCase ):
SCREAMING_SNAKE_CASE_ = '''huggingface/label-files'''
SCREAMING_SNAKE_CASE_ = '''imagenet-1k-id2label.json'''
SCREAMING_SNAKE_CASE_ = json.load(open(hf_hub_download(__lowerCamelCase, __lowerCamelCase, repo_type='''dataset''' ), '''r''' ) )
SCREAMING_SNAKE_CASE_ = {int(__lowerCamelCase ): v for k, v in idalabel.items()}
SCREAMING_SNAKE_CASE_ = {v: k for k, v in idalabel.items()}
SCREAMING_SNAKE_CASE_ = '''std_conv''' if '''bit''' in model_name else False
# note that when using BiT as backbone for ViT-hybrid checkpoints,
# one needs to additionally set config.layer_type = "bottleneck", config.stem_type = "same",
# config.conv_layer = "std_conv_same"
SCREAMING_SNAKE_CASE_ = BitConfig(
conv_layer=__lowerCamelCase, num_labels=10_00, idalabel=__lowerCamelCase, labelaid=__lowerCamelCase, )
return config
def A__ ( __lowerCamelCase ):
if "stem.conv" in name:
SCREAMING_SNAKE_CASE_ = name.replace('''stem.conv''', '''bit.embedder.convolution''' )
if "blocks" in name:
SCREAMING_SNAKE_CASE_ = name.replace('''blocks''', '''layers''' )
if "head.fc" in name:
SCREAMING_SNAKE_CASE_ = name.replace('''head.fc''', '''classifier.1''' )
if name.startswith('''norm''' ):
SCREAMING_SNAKE_CASE_ = '''bit.''' + name
if "bit" not in name and "classifier" not in name:
SCREAMING_SNAKE_CASE_ = '''bit.encoder.''' + name
return name
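# Illustrative renamings produced by the function above:
#     "stem.conv.weight"        -> "bit.embedder.convolution.weight"
#     "blocks.0.0.conv1.weight" -> "bit.encoder.layers.0.0.conv1.weight"
#     "head.fc.weight"          -> "classifier.1.weight"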
def A__ ( ):
SCREAMING_SNAKE_CASE_ = '''http://images.cocodataset.org/val2017/000000039769.jpg'''
SCREAMING_SNAKE_CASE_ = Image.open(requests.get(__lowerCamelCase, stream=__lowerCamelCase ).raw )
return im
@torch.no_grad()
def A__ ( __lowerCamelCase, __lowerCamelCase, __lowerCamelCase=False ):
SCREAMING_SNAKE_CASE_ = get_config(__lowerCamelCase )
# load original model from timm
SCREAMING_SNAKE_CASE_ = create_model(__lowerCamelCase, pretrained=__lowerCamelCase )
timm_model.eval()
# load state_dict of original model
SCREAMING_SNAKE_CASE_ = timm_model.state_dict()
for key in state_dict.copy().keys():
SCREAMING_SNAKE_CASE_ = state_dict.pop(__lowerCamelCase )
SCREAMING_SNAKE_CASE_ = val.squeeze() if '''head''' in key else val
# load HuggingFace model
SCREAMING_SNAKE_CASE_ = BitForImageClassification(__lowerCamelCase )
model.eval()
model.load_state_dict(__lowerCamelCase )
# create image processor
SCREAMING_SNAKE_CASE_ = create_transform(**resolve_data_config({}, model=__lowerCamelCase ) )
SCREAMING_SNAKE_CASE_ = transform.transforms
SCREAMING_SNAKE_CASE_ = {
'''bilinear''': PILImageResampling.BILINEAR,
'''bicubic''': PILImageResampling.BICUBIC,
'''nearest''': PILImageResampling.NEAREST,
}
SCREAMING_SNAKE_CASE_ = BitImageProcessor(
do_resize=__lowerCamelCase, size={'''shortest_edge''': timm_transforms[0].size}, resample=pillow_resamplings[timm_transforms[0].interpolation.value], do_center_crop=__lowerCamelCase, crop_size={'''height''': timm_transforms[1].size[0], '''width''': timm_transforms[1].size[1]}, do_normalize=__lowerCamelCase, image_mean=timm_transforms[-1].mean.tolist(), image_std=timm_transforms[-1].std.tolist(), )
SCREAMING_SNAKE_CASE_ = prepare_img()
SCREAMING_SNAKE_CASE_ = transform(__lowerCamelCase ).unsqueeze(0 )
SCREAMING_SNAKE_CASE_ = processor(__lowerCamelCase, return_tensors='''pt''' ).pixel_values
# verify pixel values
assert torch.allclose(__lowerCamelCase, __lowerCamelCase )
# verify logits
with torch.no_grad():
SCREAMING_SNAKE_CASE_ = model(__lowerCamelCase )
SCREAMING_SNAKE_CASE_ = outputs.logits
print('''Logits:''', logits[0, :3] )
print('''Predicted class:''', model.config.idalabel[logits.argmax(-1 ).item()] )
SCREAMING_SNAKE_CASE_ = timm_model(__lowerCamelCase )
assert timm_logits.shape == outputs.logits.shape
assert torch.allclose(__lowerCamelCase, outputs.logits, atol=1E-3 )
print('''Looks ok!''' )
if pytorch_dump_folder_path is not None:
Path(__lowerCamelCase ).mkdir(exist_ok=__lowerCamelCase )
print(F'''Saving model {model_name} and processor to {pytorch_dump_folder_path}''' )
model.save_pretrained(__lowerCamelCase )
processor.save_pretrained(__lowerCamelCase )
if push_to_hub:
print(F'''Pushing model {model_name} and processor to the hub''' )
model.push_to_hub(F'''ybelkada/{model_name}''' )
processor.push_to_hub(F'''ybelkada/{model_name}''' )
if __name__ == "__main__":
__UpperCAmelCase = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--model_name",
default="resnetv2_50x1_bitm",
type=str,
help="Name of the BiT timm model you'd like to convert.",
)
parser.add_argument(
"--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model directory."
)
parser.add_argument(
"--push_to_hub",
action="store_true",
help="Whether to push the model to the hub.",
)
__UpperCAmelCase = parser.parse_args()
convert_bit_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
| 257 | 0 |
'''simple docstring'''
def __lowerCamelCase ( lowerCAmelCase_ ) -> bool:
    # Returns True if all characters in the input string are distinct, using a
    # bitmap with one bit per Unicode code point.
    bitmap = 0
    for ch in lowerCAmelCase_:
        ch_unicode = ord(ch )
        ch_bit_index_on = pow(2 , ch_unicode )
        # If we already turned on bit for current character's unicode
        if bitmap >> ch_unicode & 1 == 1:
            return False
        bitmap |= ch_bit_index_on
    return True
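# Example behaviour (names as above): __lowerCamelCase("abc") -> True,
# __lowerCamelCase("aba") -> False. Memory grows with the largest ord() seen,
# since one bit is reserved per code point up to that value.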
if __name__ == "__main__":
import doctest
doctest.testmod()
| 89 |
import argparse
import json
import os
import re
import torch
from transformers import BloomConfig, BloomModel
from transformers.file_utils import CONFIG_NAME, WEIGHTS_NAME
from transformers.utils import logging
logging.set_verbosity_info()
A_ :List[str] = [
'''word_embeddings_layernorm.weight''',
'''word_embeddings_layernorm.bias''',
'''input_layernorm.weight''',
'''input_layernorm.bias''',
'''post_attention_layernorm.weight''',
'''post_attention_layernorm.bias''',
'''self_attention.dense.bias''',
'''mlp.dense_4h_to_h.bias''',
'''ln_f.weight''',
'''ln_f.bias''',
]
A_ :Optional[Any] = [
'''mlp.dense_4h_to_h.weight''',
'''self_attention.dense.weight''',
]
def A ( a_ ,a_ ) -> str:
__UpperCamelCase : Any ={
'word_embeddings.weight': 'word_embeddings.weight',
'word_embeddings.norm.weight': 'word_embeddings_layernorm.weight',
'word_embeddings.norm.bias': 'word_embeddings_layernorm.bias',
'weight': 'ln_f.weight',
'bias': 'ln_f.bias',
}
if key in layer_rename_map:
return layer_rename_map[key]
# Handle transformer blocks
__UpperCamelCase : Tuple =int(re.match(r'.*layer_(\d*).*' ,a_ )[1] )
layer_number -= 3
return F'h.{layer_number}.' + key
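# Hedged example: for a checkpoint file named "layer_04-model_00-model_states.pt"
# and key "input_layernorm.weight", this returns "h.1.input_layernorm.weight";
# the first few Megatron layers hold embeddings/norm, hence the -3 offset.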
def A ( a_ ) -> Any:
if dtype == torch.bool:
return 1 / 8
__UpperCamelCase : Dict =re.search(r'[^\d](\d+)$' ,str(a_ ) )
if bit_search is None:
raise ValueError(F'`dtype` is not a valid dtype: {dtype}.' )
__UpperCamelCase : Tuple =int(bit_search.groups()[0] )
return bit_size // 8
def A ( a_ ,a_ ,a_ ,a_ ,a_ ) -> Dict:
# Construct model
if bloom_config_file == "":
__UpperCamelCase : List[Any] =BloomConfig()
else:
__UpperCamelCase : List[str] =BloomConfig.from_json_file(a_ )
if shard_model:
__UpperCamelCase : int =os.listdir(a_ )
        __UpperCamelCase : Union[str, Any] =sorted(filter(lambda s : s.startswith('layer' ) and "model_00" in s ,a_ ) )
__UpperCamelCase : Optional[Any] ={'weight_map': {}, 'metadata': {}}
__UpperCamelCase : Dict =0
__UpperCamelCase : int =None
__UpperCamelCase : Any =BloomConfig()
for j, file in enumerate(a_ ):
print('Processing file: {}'.format(a_ ) )
__UpperCamelCase : Optional[int] =None
for i in range(a_ ):
# load all TP files
__UpperCamelCase : Dict =file.replace('model_00' ,F'model_0{i}' )
__UpperCamelCase : Optional[Any] =torch.load(os.path.join(a_ ,a_ ) ,map_location='cpu' )
# Rename keys in the transformers names
__UpperCamelCase : int =list(temp.keys() )
for key in keys:
__UpperCamelCase : Dict =temp.pop(a_ )
if tensors is None:
__UpperCamelCase : Any =temp
else:
for key in tensors.keys():
                        if any(key.endswith(end ) for end in WEIGHTS_TO_AVERAGE_ENDSWITH ):
                            # We average (sum and then divide) some weights across TP ranks (see https://github.com/bigscience-workshop/Megatron-DeepSpeed/blob/olruwase/sync_layer_norms/megatron/training.py#L425)
tensors[key] += temp[key]
else:
# Some weights are RowParallelLinear in Megatron-Deepspeed, others are ColumnParallel
__UpperCamelCase : List[Any] =1 if any(text in key for text in WEIGHTS_WITH_ROW_PARALLELISM_CONTAIN ) else 0
                            # We concatenate these weights across TP ranks
__UpperCamelCase : Any =torch.cat([tensors[key], temp[key]] ,dim=a_ )
            # Divide the weights we want to average by the number of TP ranks
for key in tensors.keys():
                if any(key.endswith(end ) for end in WEIGHTS_TO_AVERAGE_ENDSWITH ):
__UpperCamelCase : Optional[Any] =tensors[key] / pretraining_tp
torch.save(
a_ ,os.path.join(
a_ ,'pytorch_model_{}-of-{}.bin'.format(str(j + 1 ).zfill(5 ) ,str(len(a_ ) ).zfill(5 ) ) ,) ,)
for key in tensors.keys():
__UpperCamelCase : Union[str, Any] =tensors[key]
total_size += value.numel() * get_dtype_size(value.dtype )
if key not in index_dict["weight_map"]:
__UpperCamelCase : int ='pytorch_model_{}-of-{}.bin'.format(
str(j + 1 ).zfill(5 ) ,str(len(a_ ) ).zfill(5 ) )
__UpperCamelCase : Union[str, Any] =BloomConfig()
__UpperCamelCase : Tuple =pytorch_dump_folder_path + '/' + CONFIG_NAME
__UpperCamelCase : Optional[int] =total_size
with open(a_ ,'w' ,encoding='utf-8' ) as f:
f.write(config.to_json_string() )
with open(os.path.join(a_ ,WEIGHTS_NAME + '.index.json' ) ,'w' ,encoding='utf-8' ) as f:
__UpperCamelCase : List[Any] =json.dumps(a_ ,indent=2 ,sort_keys=a_ ) + '\n'
f.write(a_ )
else:
__UpperCamelCase : List[Any] =BloomModel(a_ )
__UpperCamelCase : Optional[Any] =os.listdir(a_ )
        __UpperCamelCase : Dict =sorted(filter(lambda s : s.startswith('layer' ) and "model_00" in s ,a_ ) )
__UpperCamelCase : Any =None
for i, file in enumerate(a_ ):
__UpperCamelCase : Union[str, Any] =None
for i in range(a_ ):
# load all TP files
__UpperCamelCase : Optional[Any] =file.replace('model_00' ,F'model_0{i}' )
__UpperCamelCase : str =torch.load(os.path.join(a_ ,a_ ) ,map_location='cpu' )
# Rename keys in the transformers names
__UpperCamelCase : List[str] =list(temp.keys() )
for key in keys:
__UpperCamelCase : Union[str, Any] =temp.pop(a_ )
if tensors is None:
__UpperCamelCase : Optional[Any] =temp
else:
for key in tensors.keys():
                    # We average (sum and then divide) some weights across TP ranks (see https://github.com/bigscience-workshop/Megatron-DeepSpeed/blob/olruwase/sync_layer_norms/megatron/training.py#L425)
                    if any(key.endswith(end ) for end in WEIGHTS_TO_AVERAGE_ENDSWITH ):
tensors[key] += temp[key]
else:
# Some weights are RowParallelLinear in Megatron-Deepspeed, others are ColumnParallel
__UpperCamelCase : Optional[int] =1 if any(text in key for text in WEIGHTS_WITH_ROW_PARALLELISM_CONTAIN ) else 0
                        # We concatenate these weights across TP ranks
__UpperCamelCase : int =torch.cat([tensors[key], temp[key]] ,dim=a_ )
            # Divide the weights we want to average by the number of TP ranks
for key in tensors.keys():
                if any(key.endswith(end ) for end in WEIGHTS_TO_AVERAGE_ENDSWITH ):
__UpperCamelCase : Dict =tensors[key] / pretraining_tp
__UpperCamelCase : str =model.load_state_dict(a_ ,strict=a_ )
assert not other_keys.unexpected_keys, F'The keys {other_keys.unexpected_keys} are unexpected'
if missing_keys is None:
__UpperCamelCase : str =set(other_keys.missing_keys )
else:
__UpperCamelCase : int =missing_keys.intersection(set(other_keys.missing_keys ) )
assert not missing_keys, F'The keys {missing_keys} are missing'
# Save pytorch-model
os.makedirs(a_ ,exist_ok=a_ )
__UpperCamelCase : Optional[int] =pytorch_dump_folder_path + '/' + WEIGHTS_NAME
__UpperCamelCase : Dict =pytorch_dump_folder_path + '/' + CONFIG_NAME
print(F'Save PyTorch model to {pytorch_weights_dump_path} with dtype {config.torch_dtype}' )
if config.torch_dtype is not None:
__UpperCamelCase : List[str] =model.to(config.torch_dtype )
torch.save(model.state_dict() ,a_ )
print(F'Save configuration file to {pytorch_config_dump_path}' )
with open(a_ ,'w' ,encoding='utf-8' ) as f:
f.write(config.to_json_string() )
if __name__ == "__main__":
A_ :Tuple = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--bloom_checkpoint_path''',
default=None,
type=str,
required=True,
help='''Path to the Megatron-LM checkpoint path.''',
)
parser.add_argument(
'''--pytorch_dump_folder_path''', default=None, type=str, required=True, help='''Path to the output PyTorch model.'''
)
parser.add_argument(
'''--bloom_config_file''',
default='''''',
type=str,
help=(
'''An optional config json file corresponding to the pre-trained model. \n'''
'''This specifies the model architecture.'''
),
)
parser.add_argument(
'''--shard_model''',
action='''store_true''',
help='''An optional setting to shard the output model \nThis enables sharding the converted checkpoint''',
)
parser.add_argument(
'''--pretraining_tp''',
default=4,
type=int,
help='''Pretraining TP rank that has been used when training the model in Megatron-LM \n''',
)
A_ :str = parser.parse_args()
convert_bloom_checkpoint_to_pytorch(
args.bloom_checkpoint_path,
args.bloom_config_file,
args.pytorch_dump_folder_path,
args.shard_model,
args.pretraining_tp,
)
| 71 | 0 |
'''simple docstring'''
from __future__ import annotations
from PIL import Image
# Define glider example
lowercase : Optional[int] = [
[0, 1, 0, 0, 0, 0, 0, 0],
[0, 0, 1, 0, 0, 0, 0, 0],
[1, 1, 1, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0],
]
# Define blinker example
lowercase : Any = [[0, 1, 0], [0, 1, 0], [0, 1, 0]]
def new_generation(cells: list[list[int]]) -> list[list[int]]:
    next_generation = []
    for i in range(len(cells)):
        next_generation_row = []
        for j in range(len(cells[i])):
            # Get the number of live neighbours
            neighbour_count = 0
            if i > 0 and j > 0:
                neighbour_count += cells[i - 1][j - 1]
            if i > 0:
                neighbour_count += cells[i - 1][j]
            if i > 0 and j < len(cells[i]) - 1:
                neighbour_count += cells[i - 1][j + 1]
            if j > 0:
                neighbour_count += cells[i][j - 1]
            if j < len(cells[i]) - 1:
                neighbour_count += cells[i][j + 1]
            if i < len(cells) - 1 and j > 0:
                neighbour_count += cells[i + 1][j - 1]
            if i < len(cells) - 1:
                neighbour_count += cells[i + 1][j]
            if i < len(cells) - 1 and j < len(cells[i]) - 1:
                neighbour_count += cells[i + 1][j + 1]

            # Rules of the game of life (excerpt from Wikipedia):
            # 1. Any live cell with two or three live neighbours survives.
            # 2. Any dead cell with three live neighbours becomes a live cell.
            # 3. All other live cells die in the next generation.
            #    Similarly, all other dead cells stay dead.
            alive = cells[i][j] == 1
            if (
                (alive and 2 <= neighbour_count <= 3)
                or not alive
                and neighbour_count == 3
            ):
                next_generation_row.append(1)
            else:
                next_generation_row.append(0)

        next_generation.append(next_generation_row)
    return next_generation
def generate_images(cells: list[list[int]], frames: int) -> list[Image.Image]:
    images = []
    for _ in range(frames):
        # Create output image
        img = Image.new("RGB", (len(cells[0]), len(cells)))
        pixels = img.load()

        # Save cells to image
        for x in range(len(cells)):
            for y in range(len(cells[0])):
                colour = 255 - cells[y][x] * 255
                pixels[x, y] = (colour, colour, colour)

        # Save image
        images.append(img)
        cells = new_generation(cells)
    return images
if __name__ == "__main__":
    images = generate_images(GLIDER, 16)
images[0].save("out.gif", save_all=True, append_images=images[1:])
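    # Illustrative sanity check (not in the original script): the blinker is a
    # period-2 oscillator, so applying `new_generation` twice should reproduce
    # the starting pattern exactly.
    assert new_generation(new_generation(BLINKER)) == BLINKER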
'''simple docstring'''
import os
import sys
import unittest
git_repo_path = os.path.abspath(os.path.dirname(os.path.dirname(os.path.dirname(__file__))))
sys.path.append(os.path.join(git_repo_path, "utils"))
import check_dummies # noqa: E402
from check_dummies import create_dummy_files, create_dummy_object, find_backend, read_init # noqa: E402
# Align TRANSFORMERS_PATH in check_dummies with the current path
check_dummies.PATH_TO_DIFFUSERS = os.path.join(git_repo_path, "src", "diffusers")
class CheckDummiesTester(unittest.TestCase):
    def test_find_backend(self):
        simple_backend = find_backend("    if not is_torch_available():")
        self.assertEqual(simple_backend, "torch")

        # backend_with_underscore = find_backend("    if not is_tensorflow_text_available():")
        # self.assertEqual(backend_with_underscore, "tensorflow_text")

        double_backend = find_backend("    if not (is_torch_available() and is_transformers_available()):")
        self.assertEqual(double_backend, "torch_and_transformers")

        # double_backend_with_underscore = find_backend(
        #     "    if not (is_sentencepiece_available() and is_tensorflow_text_available()):"
        # )
        # self.assertEqual(double_backend_with_underscore, "sentencepiece_and_tensorflow_text")

        triple_backend = find_backend(
            "    if not (is_torch_available() and is_transformers_available() and is_onnx_available()):"
        )
        self.assertEqual(triple_backend, "torch_and_transformers_and_onnx")
    def test_read_init(self):
        objects = read_init()
        # We don't assert on the exact list of keys to allow for smooth grow of backend-specific objects
        self.assertIn("torch", objects)
        self.assertIn("torch_and_transformers", objects)
        self.assertIn("flax_and_transformers", objects)
        self.assertIn("torch_and_transformers_and_onnx", objects)

        # Likewise, we can't assert on the exact content of a key
        self.assertIn("UNet2DModel", objects["torch"])
        self.assertIn("FlaxUNet2DConditionModel", objects["flax"])
        self.assertIn("StableDiffusionPipeline", objects["torch_and_transformers"])
        self.assertIn("FlaxStableDiffusionPipeline", objects["flax_and_transformers"])
        self.assertIn("LMSDiscreteScheduler", objects["torch_and_scipy"])
        self.assertIn("OnnxStableDiffusionPipeline", objects["torch_and_transformers_and_onnx"])
    def test_create_dummy_object(self):
        dummy_constant = create_dummy_object("CONSTANT", "'torch'")
        self.assertEqual(dummy_constant, "\nCONSTANT = None\n")

        dummy_function = create_dummy_object("function", "'torch'")
        self.assertEqual(
            dummy_function, "\ndef function(*args, **kwargs):\n    requires_backends(function, 'torch')\n"
        )

        expected_dummy_class = """
class FakeClass(metaclass=DummyObject):
    _backends = 'torch'

    def __init__(self, *args, **kwargs):
        requires_backends(self, 'torch')

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, 'torch')

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, 'torch')
"""
        dummy_class = create_dummy_object("FakeClass", "'torch'")
        self.assertEqual(dummy_class, expected_dummy_class)
    def test_create_dummy_files(self):
        expected_dummy_pytorch_file = """# This file is autogenerated by the command `make fix-copies`, do not edit.
from ..utils import DummyObject, requires_backends


CONSTANT = None


def function(*args, **kwargs):
    requires_backends(function, ["torch"])


class FakeClass(metaclass=DummyObject):
    _backends = ["torch"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["torch"])

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ["torch"])

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["torch"])
"""
        dummy_files = create_dummy_files({"torch": ["CONSTANT", "function", "FakeClass"]})
        self.assertEqual(dummy_files["torch"], expected_dummy_pytorch_file)
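# Illustrative sketch (not part of the test file): every generated dummy above
# follows the same pattern -- a placeholder whose only job is to call
# `requires_backends`, which raises an ImportError naming the missing backend
# the first time the object is actually used. The core idea, in isolation:
def _requires_backends_sketch(obj_name: str, backends: list) -> None:
    import importlib.util

    missing = [b for b in backends if importlib.util.find_spec(b) is None]
    if missing:
        raise ImportError(f"{obj_name} requires the following backends: {missing}")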
'''simple docstring'''
from copy import deepcopy
import torch
import torch.nn.functional as F
from torch.optim import AdamW
from torch.optim.lr_scheduler import LambdaLR
from torch.utils.data import DataLoader
from accelerate.accelerator import Accelerator
from accelerate.state import GradientState
from accelerate.test_utils import RegressionDataset, RegressionModel
from accelerate.utils import DistributedType, is_torch_version, set_seed
def check_model_parameters(model_a, model_b, did_step, iteration):
    """Verify that gradients of the two models are (or are not) in sync, depending on `did_step`."""
for param, grad_param in zip(model_a.parameters() , model_b.parameters() ):
if not param.requires_grad:
continue
if not did_step:
# Grads should not be in sync
assert (
torch.allclose(param.grad , grad_param.grad ) is False
), f"Gradients in sync when they should not be at iteration {iteration}:\nmodel_a grad ({param.grad}) == model_b grad ({grad_param.grad})"
else:
# Grads should be in sync
assert (
torch.allclose(param.grad , grad_param.grad ) is True
), f"Gradients not in sync when they should be at iteration {iteration}:\nmodel_a grad ({param.grad}) != model_b grad ({grad_param.grad})"
def step_model(model, input, target, accelerator, do_backward=True):
    """Run a forward/backward pass, optionally routing the backward through the accelerator."""
    model.train()
    output = model(input)
    loss = F.mse_loss(output, target.to(output.device))
    if not do_backward:
        loss /= accelerator.gradient_accumulation_steps
        loss.backward()
    else:
        accelerator.backward(loss)
def get_training_setup(accelerator, sched=False):
    """Return everything needed to perform basic training."""
    set_seed(42)
    model = RegressionModel()
    ddp_model = deepcopy(model)
    dset = RegressionDataset(length=80)
    dataloader = DataLoader(dset, batch_size=16)
    model.to(accelerator.device)
    if sched:
        opt = AdamW(params=model.parameters(), lr=1e-3)
        ddp_opt = AdamW(params=ddp_model.parameters(), lr=1e-3)
        sched = LambdaLR(opt, lr_lambda=lambda epoch: epoch**0.65)
        ddp_sched = LambdaLR(ddp_opt, lr_lambda=lambda epoch: epoch**0.65)
    # Make a copy of `model`
    if sched:
        ddp_model, ddp_opt, ddp_sched, dataloader = accelerator.prepare(ddp_model, ddp_opt, ddp_sched, dataloader)
    else:
        ddp_model, dataloader = accelerator.prepare(ddp_model, dataloader)
    if sched:
        return (model, opt, sched, dataloader, ddp_model, ddp_opt, ddp_sched)
    return model, ddp_model, dataloader
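# For context (illustrative, not part of the test file): the pattern these
# tests exercise is the standard Accelerate gradient-accumulation loop, which
# in user code looks roughly like this:
#
#     accelerator = Accelerator(gradient_accumulation_steps=2)
#     model, optimizer, dataloader = accelerator.prepare(model, optimizer, dataloader)
#     for input, target in dataloader:
#         with accelerator.accumulate(model):  # skips gradient sync on non-step iterations
#             loss = F.mse_loss(model(input), target)
#             accelerator.backward(loss)
#             optimizer.step()
#             optimizer.zero_grad()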
def test_noop_sync(accelerator):
    # Test that on a single CPU or GPU the `no_sync` context manager does nothing
    model, ddp_model, dataloader = get_training_setup(accelerator)
    # Use a single batch
    ddp_input, ddp_target = next(iter(dataloader)).values()
    for iteration in range(3):
        # Gather the distributed inputs and targs for the base model
        input, target = accelerator.gather((ddp_input, ddp_target))
        input, target = input.to(accelerator.device), target.to(accelerator.device)
        # Perform our initial ground truth step in non "DDP"
        step_model(model, input, target, accelerator)
        # Do "gradient accumulation" (noop)
        if iteration % 2 == 0:
            # Accumulate grads locally
            with accelerator.no_sync(ddp_model):
                step_model(ddp_model, ddp_input, ddp_target, accelerator)
        else:
            # Sync grads
            step_model(ddp_model, ddp_input, ddp_target, accelerator)

        # Since `no_sync` is a noop, `ddp_model` and `model` grads should always be in sync
        check_model_parameters(model, ddp_model, True, iteration)
        for param, ddp_param in zip(model.parameters(), ddp_model.parameters()):
            if not param.requires_grad:
                continue
            assert torch.allclose(
                param.grad, ddp_param.grad
            ), f"Gradients not in sync when they should be:\nModel grad ({param.grad}) != DDP grad ({ddp_param.grad})"

        # Shuffle ddp_input on each iteration
        torch.manual_seed(1337 + iteration)
        ddp_input = ddp_input[torch.randperm(len(ddp_input))]
def test_distributed_sync(accelerator):
    # Test that on a distributed setup the `no_sync` context manager behaves properly
    model, ddp_model, dataloader = get_training_setup(accelerator)
    # Use a single batch
    ddp_input, ddp_target = next(iter(dataloader)).values()
    for iteration in range(3):
        # Gather the distributed inputs and targs for the base model
        input, target = accelerator.gather((ddp_input, ddp_target))
        input, target = input.to(accelerator.device), target.to(accelerator.device)
        # Perform our initial ground truth step in non "DDP"
        step_model(model, input, target, accelerator)
        # Do "gradient accumulation" (noop)
        if iteration % 2 == 0:
            # Accumulate grads locally
            with accelerator.no_sync(ddp_model):
                step_model(ddp_model, ddp_input, ddp_target, accelerator)
        else:
            # Sync grads
            step_model(ddp_model, ddp_input, ddp_target, accelerator)

        # DDP model and model should only be in sync when not (iteration % 2 == 0)
        for param, ddp_param in zip(model.parameters(), ddp_model.parameters()):
            if not param.requires_grad:
                continue
            if iteration % 2 == 0:
                # Grads should not be in sync
                assert (
                    torch.allclose(param.grad, ddp_param.grad) is False
                ), f"Gradients in sync when they should not be:\nModel grad ({param.grad}) == DDP grad ({ddp_param.grad})"
            else:
                # Grads should be in sync
                assert (
                    torch.allclose(param.grad, ddp_param.grad) is True
                ), f"Gradients not in sync when they should be:\nModel grad ({param.grad}) != DDP grad ({ddp_param.grad})"

        # Shuffle ddp_input on each iteration
        torch.manual_seed(1337 + iteration)
        ddp_input = ddp_input[torch.randperm(len(ddp_input))]
def test_gradient_accumulation(split_batches=False, dispatch_batches=False):
    accelerator = Accelerator(
        split_batches=split_batches, dispatch_batches=dispatch_batches, gradient_accumulation_steps=2
    )
    # Test that the `accumulate` context manager behaves properly
    model, ddp_model, dataloader = get_training_setup(accelerator)
    for iteration, batch in enumerate(dataloader):
        ddp_input, ddp_target = batch.values()
        # Gather the distributed inputs and targs for the base model
        input, target = accelerator.gather((ddp_input, ddp_target))
        input, target = input.to(accelerator.device), target.to(accelerator.device)
        # Perform our initial ground truth step in non "DDP"
        step_model(model, input, target, accelerator, False)
        # Do "gradient accumulation" (noop)
        with accelerator.accumulate(ddp_model):
            step_model(ddp_model, ddp_input, ddp_target, accelerator)

        # DDP model and model should only be in sync when not (iteration % 2 == 0)
        for param, ddp_param in zip(model.parameters(), ddp_model.parameters()):
            if not param.requires_grad:
                continue
            if ((iteration + 1) % 2 == 0) or (iteration == len(dataloader) - 1):
                # Grads should be in sync
                assert (
                    torch.allclose(param.grad, ddp_param.grad) is True
                ), f"Gradients not in sync when they should be at iteration {iteration}:\nModel grad ({param.grad}) != DDP grad ({ddp_param.grad})"
            else:
                # Grads should not be in sync
                assert (
                    torch.allclose(param.grad, ddp_param.grad) is False
                ), f"Gradients in sync when they should not be at iteration {iteration}:\nModel grad ({param.grad}) == DDP grad ({ddp_param.grad})"

        # Shuffle ddp_input on each iteration
        torch.manual_seed(1337 + iteration)
        ddp_input = ddp_input[torch.randperm(len(ddp_input))]
    GradientState._reset_state()
def test_gradient_accumulation_with_opt_and_scheduler(split_batches=False, dispatch_batches=False):
    accelerator = Accelerator(
        split_batches=split_batches, dispatch_batches=dispatch_batches, gradient_accumulation_steps=2
    )
    # Test that the `accumulate` context manager behaves properly
    model, opt, sched, dataloader, ddp_model, ddp_opt, ddp_sched = get_training_setup(accelerator, True)
    for iteration, batch in enumerate(dataloader):
        ddp_input, ddp_target = batch.values()
        # Gather the distributed inputs and targs for the base model
        input, target = accelerator.gather((ddp_input, ddp_target))
        input, target = input.to(accelerator.device), target.to(accelerator.device)
        # Perform our initial ground truth step in non "DDP"
        model.train()
        ddp_model.train()
        step_model(model, input, target, accelerator, False)
        opt.step()

        if ((iteration + 1) % 2 == 0) or ((iteration + 1) == len(dataloader)):
            if split_batches:
                sched.step()
            else:
                for _ in range(accelerator.num_processes):
                    sched.step()
        opt.zero_grad()
        # Perform gradient accumulation under wrapper
        with accelerator.accumulate(ddp_model):
            step_model(ddp_model, ddp_input, ddp_target, accelerator)
            ddp_opt.step()
            ddp_sched.step()
            ddp_opt.zero_grad()

        # Learning rates should be the same
        assert (
            opt.param_groups[0]["lr"] == ddp_opt.param_groups[0]["lr"]
        ), f"Learning rates found in each optimizer did not align\nopt: {opt.param_groups[0]['lr']}\nDDP opt: {ddp_opt.param_groups[0]['lr']}\n"
        did_step = (((iteration + 1) % 2) == 0) or ((iteration + 1) == len(dataloader))
        if accelerator.num_processes > 1:
            check_model_parameters(model, ddp_model, did_step, iteration)
        # Shuffle ddp_input on each iteration
        torch.manual_seed(1337 + iteration)
    GradientState._reset_state()
def test_dataloader_break():
    accelerator = Accelerator()

    first_dset = RegressionDataset(length=80)
    first_dataloader = DataLoader(first_dset, batch_size=16)
    second_dset = RegressionDataset(length=96)
    second_dataloader = DataLoader(second_dset, batch_size=16)
    first_dataloader, second_dataloader = accelerator.prepare(first_dataloader, second_dataloader)
    assert accelerator.gradient_state.active_dataloader is None
    for iteration, _ in enumerate(first_dataloader):
        assert id(accelerator.gradient_state.active_dataloader) == id(first_dataloader)
        if iteration < len(first_dataloader) - 1:
            assert not accelerator.gradient_state.end_of_dataloader
            if iteration == 1:
                for batch_num, _ in enumerate(second_dataloader):
                    assert id(accelerator.gradient_state.active_dataloader) == id(second_dataloader)
                    if batch_num < len(second_dataloader) - 1:
                        assert not accelerator.gradient_state.end_of_dataloader
                    else:
                        assert accelerator.gradient_state.end_of_dataloader
        else:
            assert accelerator.gradient_state.end_of_dataloader
    assert accelerator.gradient_state.active_dataloader is None
def main():
    accelerator = Accelerator()
    state = accelerator.state
    if state.local_process_index == 0:
        print("**Test `accumulate` gradient accumulation with dataloader break**")
    test_dataloader_break()
    if state.distributed_type == DistributedType.NO:
        if state.local_process_index == 0:
            print("**Test NOOP `no_sync` context manager**")
        test_noop_sync(accelerator)
    if state.distributed_type in (DistributedType.MULTI_GPU, DistributedType.MULTI_CPU):
        if state.local_process_index == 0:
            print("**Test Distributed `no_sync` context manager**")
        test_distributed_sync(accelerator)
    if state.distributed_type == DistributedType.MULTI_GPU:
        for split_batch in [True, False]:
            for dispatch_batches in [True, False]:
                if state.local_process_index == 0:
                    print(
                        "**Test `accumulate` gradient accumulation, ",
                        f"`split_batches={split_batch}` and `dispatch_batches={dispatch_batches}`**",
                    )
                test_gradient_accumulation(split_batch, dispatch_batches)
    # Currently will break on torch 2.0 +, need to investigate why
    if is_torch_version("<", "2.0") or state.distributed_type == DistributedType.NO:
        if state.local_process_index == 0:
            print(
                "**Test `accumulate` gradient accumulation with optimizer and scheduler, ",
                "`split_batches=False`, `dispatch_batches=False`**",
            )
        test_gradient_accumulation_with_opt_and_scheduler()
    if state.distributed_type == DistributedType.MULTI_GPU:
        for split_batch in [True, False]:
            for dispatch_batches in [True, False]:
                if not split_batch and not dispatch_batches:
                    continue
                if state.local_process_index == 0:
                    print(
                        "**Test `accumulate` gradient accumulation with optimizer and scheduler, ",
                        f"`split_batches={split_batch}` and `dispatch_batches={dispatch_batches}`**",
                    )
                test_gradient_accumulation_with_opt_and_scheduler(split_batch, dispatch_batches)
def _mp_fn(index):
    # For xla_spawn (TPUs)
    main()
if __name__ == "__main__":
main()
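# Usage note (illustrative): this script is meant to be launched through
# Accelerate so the distributed branches above are actually exercised, e.g.
#
#     accelerate launch --num_processes 2 test_sync.py
#
# Running it directly with `python test_sync.py` only covers the
# single-process (DistributedType.NO) code paths.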
import json
import pathlib
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision, slow
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import YolosImageProcessor
class YolosImageProcessingTester(unittest.TestCase):
    def __init__(
        self,
        parent,
        batch_size=7,
        num_channels=3,
        min_resolution=30,
        max_resolution=400,
        do_resize=True,
        size=None,
        do_normalize=True,
        image_mean=[0.5, 0.5, 0.5],
        image_std=[0.5, 0.5, 0.5],
        do_rescale=True,
        rescale_factor=1 / 255,
        do_pad=True,
    ):
        size = size if size is not None else {"shortest_edge": 18, "longest_edge": 1333}
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.size = size
        self.do_normalize = do_normalize
        self.image_mean = image_mean
        self.image_std = image_std
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_pad = do_pad

    def prepare_image_processor_dict(self):
        return {
            "do_resize": self.do_resize,
            "size": self.size,
            "do_normalize": self.do_normalize,
            "image_mean": self.image_mean,
            "image_std": self.image_std,
            "do_rescale": self.do_rescale,
            "rescale_factor": self.rescale_factor,
            "do_pad": self.do_pad,
        }
    def get_expected_values(self, image_inputs, batched=False):
        """
        Computes the expected height and width when providing images to YolosImageProcessor,
        assuming do_resize is set to True with a scalar size.
        """
        if not batched:
            image = image_inputs[0]
            if isinstance(image, Image.Image):
                w, h = image.size
            else:
                h, w = image.shape[1], image.shape[2]
            if w < h:
                expected_height = int(self.size["shortest_edge"] * h / w)
                expected_width = self.size["shortest_edge"]
            elif w > h:
                expected_height = self.size["shortest_edge"]
                expected_width = int(self.size["shortest_edge"] * w / h)
            else:
                expected_height = self.size["shortest_edge"]
                expected_width = self.size["shortest_edge"]
        else:
            expected_values = []
            for image in image_inputs:
                expected_height, expected_width = self.get_expected_values([image])
                expected_values.append((expected_height, expected_width))
            expected_height = max(expected_values, key=lambda item: item[0])[0]
            expected_width = max(expected_values, key=lambda item: item[1])[1]

        return expected_height, expected_width
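    # Worked example (illustrative, not part of the original test file): with
    # size = {"shortest_edge": 18}, a 30 x 40 (w x h) image is resized so its
    # short side becomes 18 while the aspect ratio is preserved:
    #     expected_width  = 18
    #     expected_height = int(18 * 40 / 30) = 24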
@require_torch
@require_vision
class YolosImageProcessingTest(ImageProcessingSavingTestMixin, unittest.TestCase):
    image_processing_class = YolosImageProcessor if is_vision_available() else None

    def setUp(self):
        self.image_processor_tester = YolosImageProcessingTester(self)
    @property
    def image_processor_dict(self):
        return self.image_processor_tester.prepare_image_processor_dict()
    def test_image_processor_properties(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        self.assertTrue(hasattr(image_processing, "image_mean"))
        self.assertTrue(hasattr(image_processing, "image_std"))
        self.assertTrue(hasattr(image_processing, "do_normalize"))
        self.assertTrue(hasattr(image_processing, "do_resize"))
        self.assertTrue(hasattr(image_processing, "size"))
    def test_image_processor_from_dict_with_kwargs(self):
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict)
        self.assertEqual(image_processor.size, {"shortest_edge": 18, "longest_edge": 1333})
        self.assertEqual(image_processor.do_pad, True)

        image_processor = self.image_processing_class.from_dict(
            self.image_processor_dict, size=42, max_size=84, pad_and_return_pixel_mask=False
        )
        self.assertEqual(image_processor.size, {"shortest_edge": 42, "longest_edge": 84})
        self.assertEqual(image_processor.do_pad, False)
    def test_batch_feature(self):
        pass
    def test_call_pil(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PIL images
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False)
        for image in image_inputs:
            self.assertIsInstance(image, Image.Image)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs)
        self.assertEqual(
            encoded_images.shape,
            (1, self.image_processor_tester.num_channels, expected_height, expected_width),
        )

        # Test batched
        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs, batched=True)
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                expected_height,
                expected_width,
            ),
        )
    def test_call_numpy(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random numpy tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, numpify=True)
        for image in image_inputs:
            self.assertIsInstance(image, np.ndarray)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs)
        self.assertEqual(
            encoded_images.shape,
            (1, self.image_processor_tester.num_channels, expected_height, expected_width),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs, batched=True)
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                expected_height,
                expected_width,
            ),
        )
    def test_call_pytorch(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PyTorch tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, torchify=True)
        for image in image_inputs:
            self.assertIsInstance(image, torch.Tensor)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs)
        self.assertEqual(
            encoded_images.shape,
            (1, self.image_processor_tester.num_channels, expected_height, expected_width),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs, batched=True)
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                expected_height,
                expected_width,
            ),
        )
    def test_equivalence_padding(self):
        # Initialize image_processings
        image_processing_1 = self.image_processing_class(**self.image_processor_dict)
        image_processing_2 = self.image_processing_class(do_resize=False, do_normalize=False, do_rescale=False)
        # create random PyTorch tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, torchify=True)
        for image in image_inputs:
            self.assertIsInstance(image, torch.Tensor)

        # Test whether the method "pad" and calling the image processor return the same tensors
        encoded_images_with_method = image_processing_1.pad(image_inputs, return_tensors="pt")
        encoded_images = image_processing_2(image_inputs, return_tensors="pt")

        self.assertTrue(
            torch.allclose(encoded_images_with_method["pixel_values"], encoded_images["pixel_values"], atol=1e-4)
        )
    @slow
    def test_call_pytorch_with_coco_detection_annotations(self):
        # prepare image and target
        image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
        with open("./tests/fixtures/tests_samples/COCO/coco_annotations.txt", "r") as f:
            target = json.loads(f.read())

        target = {"image_id": 39769, "annotations": target}

        # encode them
        image_processing = YolosImageProcessor.from_pretrained("hustvl/yolos-small")
        encoding = image_processing(images=image, annotations=target, return_tensors="pt")

        # verify pixel values
        expected_shape = torch.Size([1, 3, 800, 1066])
        self.assertEqual(encoding["pixel_values"].shape, expected_shape)

        expected_slice = torch.tensor([0.2796, 0.3138, 0.3481])
        self.assertTrue(torch.allclose(encoding["pixel_values"][0, 0, 0, :3], expected_slice, atol=1e-4))

        # verify area
        expected_area = torch.tensor([5887.9600, 11250.2061, 489353.8438, 837122.7500, 147967.5156, 165732.3438])
        self.assertTrue(torch.allclose(encoding["labels"][0]["area"], expected_area))
        # verify boxes
        expected_boxes_shape = torch.Size([6, 4])
        self.assertEqual(encoding["labels"][0]["boxes"].shape, expected_boxes_shape)
        expected_boxes_slice = torch.tensor([0.5503, 0.2765, 0.0604, 0.2215])
        self.assertTrue(torch.allclose(encoding["labels"][0]["boxes"][0], expected_boxes_slice, atol=1e-3))
        # verify image_id
        expected_image_id = torch.tensor([39769])
        self.assertTrue(torch.allclose(encoding["labels"][0]["image_id"], expected_image_id))
        # verify is_crowd
        expected_is_crowd = torch.tensor([0, 0, 0, 0, 0, 0])
        self.assertTrue(torch.allclose(encoding["labels"][0]["iscrowd"], expected_is_crowd))
        # verify class_labels
        expected_class_labels = torch.tensor([75, 75, 63, 65, 17, 17])
        self.assertTrue(torch.allclose(encoding["labels"][0]["class_labels"], expected_class_labels))
        # verify orig_size
        expected_orig_size = torch.tensor([480, 640])
        self.assertTrue(torch.allclose(encoding["labels"][0]["orig_size"], expected_orig_size))
        # verify size
        expected_size = torch.tensor([800, 1066])
        self.assertTrue(torch.allclose(encoding["labels"][0]["size"], expected_size))
    @slow
    def test_call_pytorch_with_coco_panoptic_annotations(self):
        # prepare image, target and masks_path
        image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
        with open("./tests/fixtures/tests_samples/COCO/coco_panoptic_annotations.txt", "r") as f:
            target = json.loads(f.read())

        target = {"file_name": "000000039769.png", "image_id": 39769, "segments_info": target}

        masks_path = pathlib.Path("./tests/fixtures/tests_samples/COCO/coco_panoptic")

        # encode them
        image_processing = YolosImageProcessor(format="coco_panoptic")
        encoding = image_processing(images=image, annotations=target, masks_path=masks_path, return_tensors="pt")

        # verify pixel values
        expected_shape = torch.Size([1, 3, 800, 1066])
        self.assertEqual(encoding["pixel_values"].shape, expected_shape)

        expected_slice = torch.tensor([0.2796, 0.3138, 0.3481])
        self.assertTrue(torch.allclose(encoding["pixel_values"][0, 0, 0, :3], expected_slice, atol=1e-4))

        # verify area
        expected_area = torch.tensor([147979.6875, 165527.0469, 484638.5938, 11292.9375, 5879.6562, 7634.1147])
        self.assertTrue(torch.allclose(encoding["labels"][0]["area"], expected_area))
        # verify boxes
        expected_boxes_shape = torch.Size([6, 4])
        self.assertEqual(encoding["labels"][0]["boxes"].shape, expected_boxes_shape)
        expected_boxes_slice = torch.tensor([0.2625, 0.5437, 0.4688, 0.8625])
        self.assertTrue(torch.allclose(encoding["labels"][0]["boxes"][0], expected_boxes_slice, atol=1e-3))
        # verify image_id
        expected_image_id = torch.tensor([39769])
        self.assertTrue(torch.allclose(encoding["labels"][0]["image_id"], expected_image_id))
        # verify is_crowd
        expected_is_crowd = torch.tensor([0, 0, 0, 0, 0, 0])
        self.assertTrue(torch.allclose(encoding["labels"][0]["iscrowd"], expected_is_crowd))
        # verify class_labels
        expected_class_labels = torch.tensor([17, 17, 63, 75, 75, 93])
        self.assertTrue(torch.allclose(encoding["labels"][0]["class_labels"], expected_class_labels))
        # verify masks
        expected_masks_sum = 822873
        self.assertEqual(encoding["labels"][0]["masks"].sum().item(), expected_masks_sum)
        # verify orig_size
        expected_orig_size = torch.tensor([480, 640])
        self.assertTrue(torch.allclose(encoding["labels"][0]["orig_size"], expected_orig_size))
        # verify size
        expected_size = torch.tensor([800, 1066])
        self.assertTrue(torch.allclose(encoding["labels"][0]["size"], expected_size))
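# Illustrative helper (not part of the test file): the `boxes` checked above
# are in normalized (center_x, center_y, width, height) format. Converting one
# back to absolute (x_min, y_min, x_max, y_max) pixel coordinates:
def _cxcywh_to_xyxy_sketch(box, image_width, image_height):
    cx, cy, w, h = box
    return (
        (cx - w / 2) * image_width,
        (cy - h / 2) * image_height,
        (cx + w / 2) * image_width,
        (cy + h / 2) * image_height,
    )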
'''simple docstring'''
import unittest
from dataclasses import dataclass
import pytest
from accelerate.commands.config.config_args import SageMakerConfig
from accelerate.utils import ComputeEnvironment
from accelerate.utils.launch import _convert_nargs_to_dict
@dataclass
class MockLaunchConfig(SageMakerConfig):
    compute_environment = ComputeEnvironment.AMAZON_SAGEMAKER
    fp16 = True
    ec2_instance_type = "ml.p3.2xlarge"
    iam_role_name = "accelerate_sagemaker_execution_role"
    profile = "hf-sm"
    region = "us-east-1"
    num_machines = 1
    base_job_name = "accelerate-sagemaker-1"
    pytorch_version = "1.6"
    transformers_version = "4.4"
    training_script = "train.py"
    success_training_script_args = [
        "--model_name_or_path",
        "bert",
        "--do_train",
        "False",
        "--epochs",
        "3",
        "--learning_rate",
        "5e-5",
        "--max_steps",
        "50.5",
    ]
    fail_training_script_args = [
        "--model_name_or_path",
        "bert",
        "--do_train",
        "--do_test",
        "False",
        "--do_predict",
        "--epochs",
        "3",
        "--learning_rate",
        "5e-5",
        "--max_steps",
        "50.5",
    ]
class SageMakerLaunch(unittest.TestCase):
    def test_args_convert(self):
        # If no defaults are changed, `to_kwargs` returns an empty dict.
        converted_args = _convert_nargs_to_dict(MockLaunchConfig.success_training_script_args)
        assert isinstance(converted_args["model_name_or_path"], str)
        assert isinstance(converted_args["do_train"], bool)
        assert isinstance(converted_args["epochs"], int)
        assert isinstance(converted_args["learning_rate"], float)
        assert isinstance(converted_args["max_steps"], float)

        with pytest.raises(ValueError):
            _convert_nargs_to_dict(MockLaunchConfig.fail_training_script_args)
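# For intuition (illustrative sketch, not the actual accelerate
# implementation): `_convert_nargs_to_dict` pairs `--name value` tokens into a
# dict and infers each value's type (bool, int, float, falling back to str).
# The real function additionally raises a ValueError for ambiguous inputs such
# as the bare flags mixed into `fail_training_script_args` above. Type
# inference alone could look like this:
def _infer_cli_value_sketch(token: str):
    if token in ("True", "False"):
        return token == "True"
    for cast in (int, float):
        try:
            return cast(token)
        except ValueError:
            pass
    return token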
'''simple docstring'''
import unittest
from transformers import MraConfig, is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
if is_torch_available():
import torch
from transformers import (
MraForMaskedLM,
MraForMultipleChoice,
MraForQuestionAnswering,
MraForSequenceClassification,
MraForTokenClassification,
MraModel,
)
from transformers.models.mra.modeling_mra import MRA_PRETRAINED_MODEL_ARCHIVE_LIST
class MraModelTester:
    def __init__(
        self,
        parent,
        batch_size=2,
        seq_length=8,
        is_training=True,
        use_input_mask=True,
        use_token_type_ids=True,
        use_labels=True,
        vocab_size=99,
        hidden_size=16,
        num_hidden_layers=5,
        num_attention_heads=2,
        intermediate_size=36,
        hidden_act="gelu",
        hidden_dropout_prob=0.0,
        attention_probs_dropout_prob=0.0,
        max_position_embeddings=512,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        num_labels=3,
        num_choices=4,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope
    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)

        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)

        config = self.get_config()

        return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    def get_config(self):
        return MraConfig(
            vocab_size=self.vocab_size,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            type_vocab_size=self.type_vocab_size,
            is_decoder=False,
            initializer_range=self.initializer_range,
        )
    def get_pipeline_config(self):
        config = self.get_config()
        config.vocab_size = 300
        return config
    def prepare_config_and_inputs_for_decoder(self):
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = self.prepare_config_and_inputs()

        config.is_decoder = True
        encoder_hidden_states = floats_tensor([self.batch_size, self.seq_length, self.hidden_size])
        encoder_attention_mask = ids_tensor([self.batch_size, self.seq_length], vocab_size=2)

        return (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
            encoder_hidden_states,
            encoder_attention_mask,
        )
    def create_and_check_model(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = MraModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids)
        result = model(input_ids, token_type_ids=token_type_ids)
        result = model(input_ids)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))
    def create_and_check_model_as_decoder(
        self,
        config,
        input_ids,
        token_type_ids,
        input_mask,
        sequence_labels,
        token_labels,
        choice_labels,
        encoder_hidden_states,
        encoder_attention_mask,
    ):
        config.add_cross_attention = True
        model = MraModel(config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids,
            attention_mask=input_mask,
            token_type_ids=token_type_ids,
            encoder_hidden_states=encoder_hidden_states,
            encoder_attention_mask=encoder_attention_mask,
        )
        result = model(
            input_ids,
            attention_mask=input_mask,
            token_type_ids=token_type_ids,
            encoder_hidden_states=encoder_hidden_states,
        )
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))
    def create_and_check_for_masked_lm(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = MraForMaskedLM(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))
    def create_and_check_for_question_answering(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = MraForQuestionAnswering(config=config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids,
            attention_mask=input_mask,
            token_type_ids=token_type_ids,
            start_positions=sequence_labels,
            end_positions=sequence_labels,
        )
        self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))
    def create_and_check_for_sequence_classification(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_labels = self.num_labels
        model = MraForSequenceClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=sequence_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))
    def create_and_check_for_token_classification(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_labels = self.num_labels
        model = MraForTokenClassification(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))
    def create_and_check_for_multiple_choice(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_choices = self.num_choices
        model = MraForMultipleChoice(config=config)
        model.to(torch_device)
        model.eval()
        multiple_choice_inputs_ids = input_ids.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        multiple_choice_token_type_ids = token_type_ids.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        multiple_choice_input_mask = input_mask.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        result = model(
            multiple_choice_inputs_ids,
            attention_mask=multiple_choice_input_mask,
            token_type_ids=multiple_choice_token_type_ids,
            labels=choice_labels,
        )
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_choices))
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": input_mask}
        return config, inputs_dict
@require_torch
class MraModelTest(ModelTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            MraModel,
            MraForMaskedLM,
            MraForMultipleChoice,
            MraForQuestionAnswering,
            MraForSequenceClassification,
            MraForTokenClassification,
        )
        if is_torch_available()
        else ()
    )
    test_pruning = False
    test_headmasking = False
    test_torchscript = False
    has_attentions = False
    all_generative_model_classes = ()

    def setUp(self):
        self.model_tester = MraModelTester(self)
        self.config_tester = ConfigTester(self, config_class=MraConfig, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_model_various_embeddings(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        for type in ["absolute", "relative_key", "relative_key_query"]:
            config_and_inputs[0].position_embedding_type = type
            self.model_tester.create_and_check_model(*config_and_inputs)

    def test_for_masked_lm(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_masked_lm(*config_and_inputs)

    def test_for_multiple_choice(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_multiple_choice(*config_and_inputs)

    def test_for_question_answering(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_question_answering(*config_and_inputs)

    def test_for_sequence_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_sequence_classification(*config_and_inputs)

    def test_for_token_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_token_classification(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in MRA_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = MraModel.from_pretrained(model_name)
            self.assertIsNotNone(model)

    @unittest.skip(reason="MRA does not output attentions")
    def test_attention_outputs(self):
        return
@require_torch
class MraModelIntegrationTest(unittest.TestCase):
    @slow
    def test_inference_no_head(self):
        model = MraModel.from_pretrained("uw-madison/mra-base-512-4")
        input_ids = torch.arange(256).unsqueeze(0)

        with torch.no_grad():
            output = model(input_ids)[0]

        expected_shape = torch.Size((1, 256, 768))
        self.assertEqual(output.shape, expected_shape)

        expected_slice = torch.tensor(
            [[[-0.0140, 0.0830, -0.0381], [0.1546, 0.1402, 0.0220], [0.1162, 0.0851, 0.0165]]]
        )
        self.assertTrue(torch.allclose(output[:, :3, :3], expected_slice, atol=1e-4))

    @slow
    def test_inference_masked_lm(self):
        model = MraForMaskedLM.from_pretrained("uw-madison/mra-base-512-4")
        input_ids = torch.arange(256).unsqueeze(0)

        with torch.no_grad():
            output = model(input_ids)[0]

        vocab_size = 50265

        expected_shape = torch.Size((1, 256, vocab_size))
        self.assertEqual(output.shape, expected_shape)

        expected_slice = torch.tensor(
            [[[9.2595, -3.6038, 11.8819], [9.3869, -3.2693, 11.0956], [11.8524, -3.4938, 13.1210]]]
        )
        self.assertTrue(torch.allclose(output[:, :3, :3], expected_slice, atol=1e-4))

    @slow
    def test_inference_masked_lm_long_input(self):
        model = MraForMaskedLM.from_pretrained("uw-madison/mra-base-4096-8-d3")
        input_ids = torch.arange(4096).unsqueeze(0)

        with torch.no_grad():
            output = model(input_ids)[0]

        vocab_size = 50265

        expected_shape = torch.Size((1, 4096, vocab_size))
        self.assertEqual(output.shape, expected_shape)

        expected_slice = torch.tensor(
            [[[5.4789, -2.3564, 7.5064], [7.9067, -1.3369, 9.9668], [9.0712, -1.8106, 7.0380]]]
        )
        self.assertTrue(torch.allclose(output[:, :3, :3], expected_slice, atol=1e-4))
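# Illustrative note (not part of the test file): the integration checks above
# compare only a small corner of the output tensor against hard-coded
# reference values. The pattern, in isolation:
#
#     output = model(input_ids)[0]            # (batch, seq_len, hidden or vocab)
#     expected_slice = torch.tensor([...])    # small (1, 3, 3) reference corner
#     torch.allclose(output[:, :3, :3], expected_slice, atol=1e-4)
#
# Comparing a fixed slice with a tolerance keeps the test robust to benign
# numerical drift while still catching real regressions.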
from .glue import glue_convert_examples_to_features, glue_output_modes, glue_processors, glue_tasks_num_labels
from .squad import SquadExample, SquadFeatures, SquadV1Processor, SquadV2Processor, squad_convert_examples_to_features
from .utils import DataProcessor, InputExample, InputFeatures, SingleSentenceClassificationProcessor
from .xnli import xnli_output_modes, xnli_processors, xnli_tasks_num_labels
import argparse
import json
from collections import OrderedDict
from functools import partial
from pathlib import Path
import timm
import torch
from huggingface_hub import hf_hub_download
from transformers import LevitConfig, LevitForImageClassificationWithTeacher, LevitImageProcessor
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger()
def convert_weight_and_push(
    hidden_sizes: int, name: str, config: LevitConfig, save_directory: Path, push_to_hub: bool = True
):
    print(f"Converting {name}...")

    with torch.no_grad():
        if hidden_sizes == 128:
            if name[-1] == "S":
                from_model = timm.create_model("levit_128s", pretrained=True)
            else:
                from_model = timm.create_model("levit_128", pretrained=True)
        if hidden_sizes == 192:
            from_model = timm.create_model("levit_192", pretrained=True)
        if hidden_sizes == 256:
            from_model = timm.create_model("levit_256", pretrained=True)
        if hidden_sizes == 384:
            from_model = timm.create_model("levit_384", pretrained=True)

        from_model.eval()
        our_model = LevitForImageClassificationWithTeacher(config).eval()
        huggingface_weights = OrderedDict()
        weights = from_model.state_dict()
        og_keys = list(from_model.state_dict().keys())
        new_keys = list(our_model.state_dict().keys())
        print(len(og_keys), len(new_keys))
        for i in range(len(og_keys)):
            huggingface_weights[new_keys[i]] = weights[og_keys[i]]
        our_model.load_state_dict(huggingface_weights)

        x = torch.randn((2, 3, 224, 224))
        out1 = from_model(x)
        out2 = our_model(x).logits

    assert torch.allclose(out1, out2), "The model logits don't match the original one."

    checkpoint_name = name
    print(checkpoint_name)

    if push_to_hub:
        our_model.save_pretrained(save_directory / checkpoint_name)
        image_processor = LevitImageProcessor()
        image_processor.save_pretrained(save_directory / checkpoint_name)
        print(f"Pushed {checkpoint_name}")
def convert_weights_and_push(save_directory: Path, model_name: str = None, push_to_hub: bool = True):
    filename = "imagenet-1k-id2label.json"
    num_labels = 1000
    expected_shape = (1, num_labels)

    repo_id = "huggingface/label-files"
    id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
    id2label = {int(k): v for k, v in id2label.items()}
    label2id = {v: k for k, v in id2label.items()}

    ImageNetPreTrainedConfig = partial(LevitConfig, num_labels=num_labels, id2label=id2label, label2id=label2id)

    names_to_hidden_sizes = {
        "levit-128S": 128,
        "levit-128": 128,
        "levit-192": 192,
        "levit-256": 256,
        "levit-384": 384,
    }

    names_to_config = {
        "levit-128S": ImageNetPreTrainedConfig(
            hidden_sizes=[128, 256, 384],
            num_attention_heads=[4, 6, 8],
            depths=[2, 3, 4],
            key_dim=[16, 16, 16],
            drop_path_rate=0,
        ),
        "levit-128": ImageNetPreTrainedConfig(
            hidden_sizes=[128, 256, 384],
            num_attention_heads=[4, 8, 12],
            depths=[4, 4, 4],
            key_dim=[16, 16, 16],
            drop_path_rate=0,
        ),
        "levit-192": ImageNetPreTrainedConfig(
            hidden_sizes=[192, 288, 384],
            num_attention_heads=[3, 5, 6],
            depths=[4, 4, 4],
            key_dim=[32, 32, 32],
            drop_path_rate=0,
        ),
        "levit-256": ImageNetPreTrainedConfig(
            hidden_sizes=[256, 384, 512],
            num_attention_heads=[4, 6, 8],
            depths=[4, 4, 4],
            key_dim=[32, 32, 32],
            drop_path_rate=0,
        ),
        "levit-384": ImageNetPreTrainedConfig(
            hidden_sizes=[384, 512, 768],
            num_attention_heads=[6, 9, 12],
            depths=[4, 4, 4],
            key_dim=[32, 32, 32],
            drop_path_rate=0.1,
        ),
    }

    if model_name:
        convert_weight_and_push(
            names_to_hidden_sizes[model_name], model_name, names_to_config[model_name], save_directory, push_to_hub
        )
    else:
        for model_name, config in names_to_config.items():
            convert_weight_and_push(names_to_hidden_sizes[model_name], model_name, config, save_directory, push_to_hub)
    return config, expected_shape
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--model_name''',
default=None,
type=str,
help='''The name of the model you wish to convert, it must be one of the supported Levit* architecture,''',
)
parser.add_argument(
'''--pytorch_dump_folder_path''',
default='''levit-dump-folder/''',
type=Path,
required=False,
help='''Path to the output PyTorch model directory.''',
)
parser.add_argument('''--push_to_hub''', action='''store_true''', help='''Push model and image processor to the hub''')
parser.add_argument(
'''--no-push_to_hub''',
dest='''push_to_hub''',
action='''store_false''',
help='''Do not push model and image processor to the hub''',
)
    args = parser.parse_args()
    pytorch_dump_folder_path = args.pytorch_dump_folder_path
pytorch_dump_folder_path.mkdir(exist_ok=True, parents=True)
convert_weights_and_push(pytorch_dump_folder_path, args.model_name, args.push_to_hub)
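# Usage note (illustrative; the script filename is assumed): converting a
# single checkpoint locally without pushing to the Hub would look like
#
#     python convert_levit_to_pytorch.py --model_name levit-128S \
#         --pytorch_dump_folder_path levit-dump-folder/ --no-push_to_hub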
"""simple docstring"""
import warnings
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding
class AltCLIPProcessor(ProcessorMixin):
    r"""
    Wraps a CLIP image processor and an XLM-Roberta tokenizer into a single processor.
    """

    attributes = ["image_processor", "tokenizer"]
    image_processor_class = "CLIPImageProcessor"
    tokenizer_class = ("XLMRobertaTokenizer", "XLMRobertaTokenizerFast")

    def __init__(self, image_processor=None, tokenizer=None, **kwargs):
        feature_extractor = None
        if "feature_extractor" in kwargs:
            warnings.warn(
                "The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`"
                " instead.",
                FutureWarning,
            )
            feature_extractor = kwargs.pop("feature_extractor")

        image_processor = image_processor if image_processor is not None else feature_extractor
        if image_processor is None:
            raise ValueError("You need to specify an `image_processor`.")
        if tokenizer is None:
            raise ValueError("You need to specify a `tokenizer`.")

        super().__init__(image_processor, tokenizer)

    def __call__(self, text=None, images=None, return_tensors=None, **kwargs):
        if text is None and images is None:
            raise ValueError("You have to specify either text or images. Both cannot be none.")

        if text is not None:
            encoding = self.tokenizer(text, return_tensors=return_tensors, **kwargs)

        if images is not None:
            image_features = self.image_processor(images, return_tensors=return_tensors, **kwargs)

        if text is not None and images is not None:
            encoding["pixel_values"] = image_features.pixel_values
            return encoding
        elif text is not None:
            return encoding
        else:
            return BatchEncoding(data=dict(**image_features), tensor_type=return_tensors)

    def batch_decode(self, *args, **kwargs):
        """Forwards all arguments to the tokenizer's `batch_decode`."""
        return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        """Forwards all arguments to the tokenizer's `decode`."""
        return self.tokenizer.decode(*args, **kwargs)

    @property
    def model_input_names(self):
        tokenizer_input_names = self.tokenizer.model_input_names
        image_processor_input_names = self.image_processor.model_input_names
        return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names))
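# Minimal usage sketch (illustrative; the checkpoint name is assumed):
#
#     from PIL import Image
#     processor = AltCLIPProcessor.from_pretrained("BAAI/AltCLIP")
#     inputs = processor(text=["a photo of a cat"], images=Image.open("cat.png"), return_tensors="pt")
#     # `inputs` now holds input_ids / attention_mask from the tokenizer and
#     # pixel_values from the image processor, ready to feed to the model.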
"""simple docstring"""
from __future__ import annotations
ELECTRON_CHARGE = 1.6021e-19  # units = C


def electric_conductivity(
    conductivity: float,
    electron_conc: float,
    mobility: float,
) -> tuple[str, float]:
if (conductivity, electron_conc, mobility).count(0 ) != 1:
raise ValueError('''You cannot supply more or less than 2 values''' )
elif conductivity < 0:
raise ValueError('''Conductivity cannot be negative''' )
elif electron_conc < 0:
raise ValueError('''Electron concentration cannot be negative''' )
elif mobility < 0:
raise ValueError('''mobility cannot be negative''' )
elif conductivity == 0:
return (
"conductivity",
mobility * electron_conc * ELECTRON_CHARGE,
)
elif electron_conc == 0:
return (
"electron_conc",
conductivity / (mobility * ELECTRON_CHARGE),
)
else:
return (
"mobility",
conductivity / (electron_conc * ELECTRON_CHARGE),
)
if __name__ == "__main__":
import doctest
doctest.testmod()
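# Worked example (illustrative): solving for mobility given conductivity and
# electron concentration. With sigma = 25.0 S/m and n = 1e20 per m^3:
#     mobility = sigma / (n * e) = 25.0 / (1e20 * 1.6021e-19) ~= 1.56 m^2/(V*s)
#
#     electric_conductivity(conductivity=25.0, electron_conc=1e20, mobility=0)
#     # -> ("mobility", 1.5604...)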
"""simple docstring"""
import warnings
from typing import Dict
import numpy as np
from ..utils import ExplicitEnum, add_end_docstrings, is_tf_available, is_torch_available
from .base import PIPELINE_INIT_ARGS, GenericTensor, Pipeline
if is_tf_available():
from ..models.auto.modeling_tf_auto import TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING
if is_torch_available():
from ..models.auto.modeling_auto import MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING
def sigmoid(_outputs):
    return 1.0 / (1.0 + np.exp(-_outputs))


def softmax(_outputs):
    maxes = np.max(_outputs, axis=-1, keepdims=True)
    shifted_exp = np.exp(_outputs - maxes)
    return shifted_exp / shifted_exp.sum(axis=-1, keepdims=True)
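# Why subtract the row-wise max before exponentiating? It makes the softmax
# numerically stable: exp() of large logits would overflow, while
# exp(x - max) never exceeds 1 and leaves the result unchanged. Quick check
# (illustrative):
#
#     softmax(np.array([[1000.0, 1001.0]]))  # stable, ~[[0.2689, 0.7311]]
#     np.exp(np.array([1000.0]))             # overflows to inf without the shift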
class ClassificationFunction(ExplicitEnum):
    SIGMOID = "sigmoid"
    SOFTMAX = "softmax"
    NONE = "none"
@add_end_docstrings(
    PIPELINE_INIT_ARGS,
    r"""
return_all_scores (`bool`, *optional*, defaults to `False`):
Whether to return all prediction scores or just the one of the predicted class.
function_to_apply (`str`, *optional*, defaults to `\"default\"`):
The function to apply to the model outputs in order to retrieve the scores. Accepts four different values:
- `\"default\"`: if the model has a single label, will apply the sigmoid function on the output. If the model
has several labels, will apply the softmax function on the output.
- `\"sigmoid\"`: Applies the sigmoid function on the output.
- `\"softmax\"`: Applies the softmax function on the output.
- `\"none\"`: Does not apply any function on the output.
""" , )
class TextClassificationPipeline(Pipeline):
    return_all_scores = False
    function_to_apply = ClassificationFunction.NONE
def __init__( self: Tuple , **snake_case: Any ) -> List[Any]:
super().__init__(**snake_case )
self.check_model_type(
TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING
if self.framework == """tf"""
else MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING )
def lowerCAmelCase_ ( self: str , snake_case: str=None , snake_case: Any=None , snake_case: Union[str, Any]="" , **snake_case: List[str] ) -> Union[str, Any]:
# Using "" as default argument because we're going to use `top_k=None` in user code to declare
# "No top_k"
snake_case_ :List[Any] = tokenizer_kwargs
snake_case_ :int = {}
if hasattr(self.model.config , """return_all_scores""" ) and return_all_scores is None:
snake_case_ :List[str] = self.model.config.return_all_scores
if isinstance(snake_case , snake_case ) or top_k is None:
snake_case_ :Optional[int] = top_k
snake_case_ :Optional[Any] = False
elif return_all_scores is not None:
warnings.warn(
"""`return_all_scores` is now deprecated, if want a similar functionality use `top_k=None` instead of"""
""" `return_all_scores=True` or `top_k=1` instead of `return_all_scores=False`.""" , snake_case , )
if return_all_scores:
snake_case_ :str = None
else:
snake_case_ :Tuple = 1
if isinstance(snake_case , snake_case ):
snake_case_ :List[Any] = ClassificationFunction[function_to_apply.upper()]
if function_to_apply is not None:
snake_case_ :Tuple = function_to_apply
return preprocess_params, {}, postprocess_params
def __call__( self: int , *snake_case: List[str] , **snake_case: List[Any] ) -> Union[str, Any]:
snake_case_ :str = super().__call__(*snake_case , **snake_case )
# TODO try and retrieve it in a nicer way from _sanitize_parameters.
snake_case_ :Any = """top_k""" not in kwargs
if isinstance(args[0] , snake_case ) and _legacy:
            # This pipeline is odd, and returns a list when a single item is run
return [result]
else:
return result
def lowerCAmelCase_ ( self: Tuple , snake_case: str , **snake_case: int ) -> Dict[str, GenericTensor]:
snake_case_ :int = self.framework
if isinstance(snake_case , snake_case ):
return self.tokenizer(**snake_case , return_tensors=snake_case , **snake_case )
elif isinstance(snake_case , snake_case ) and len(snake_case ) == 1 and isinstance(inputs[0] , snake_case ) and len(inputs[0] ) == 2:
# It used to be valid to use a list of list of list for text pairs, keeping this path for BC
return self.tokenizer(
text=inputs[0][0] , text_pair=inputs[0][1] , return_tensors=snake_case , **snake_case )
elif isinstance(snake_case , snake_case ):
# This is likely an invalid usage of the pipeline attempting to pass text pairs.
raise ValueError(
"""The pipeline received invalid inputs, if you are trying to send text pairs, you can try to send a"""
""" dictionary `{\"text\": \"My text\", \"text_pair\": \"My pair\"}` in order to send a text pair.""" )
return self.tokenizer(snake_case , return_tensors=snake_case , **snake_case )
def lowerCAmelCase_ ( self: Union[str, Any] , snake_case: List[str] ) -> List[str]:
return self.model(**snake_case )
def lowerCAmelCase_ ( self: Tuple , snake_case: List[str] , snake_case: Optional[int]=None , snake_case: List[Any]=1 , snake_case: str=True ) -> int:
        # `_legacy` determines whether we're running the bare pipeline in backward-compatibility
        # mode, or running it via `pipeline(..., top_k=1)`, which returns the more natural
        # list-shaped result.
# Default value before `set_parameters`
if function_to_apply is None:
if self.model.config.problem_type == "multi_label_classification" or self.model.config.num_labels == 1:
snake_case_ :Optional[Any] = ClassificationFunction.SIGMOID
elif self.model.config.problem_type == "single_label_classification" or self.model.config.num_labels > 1:
snake_case_ :Union[str, Any] = ClassificationFunction.SOFTMAX
elif hasattr(self.model.config , """function_to_apply""" ) and function_to_apply is None:
snake_case_ :int = self.model.config.function_to_apply
else:
snake_case_ :Tuple = ClassificationFunction.NONE
snake_case_ :str = model_outputs["""logits"""][0]
snake_case_ :List[str] = outputs.numpy()
if function_to_apply == ClassificationFunction.SIGMOID:
snake_case_ :List[str] = sigmoid(snake_case )
elif function_to_apply == ClassificationFunction.SOFTMAX:
snake_case_ :Any = softmax(snake_case )
elif function_to_apply == ClassificationFunction.NONE:
snake_case_ :Tuple = outputs
else:
raise ValueError(f"""Unrecognized `function_to_apply` argument: {function_to_apply}""" )
if top_k == 1 and _legacy:
return {"label": self.model.config.idalabel[scores.argmax().item()], "score": scores.max().item()}
snake_case_ :Union[str, Any] = [
{"""label""": self.model.config.idalabel[i], """score""": score.item()} for i, score in enumerate(snake_case )
]
if not _legacy:
dict_scores.sort(key=lambda snake_case : x["score"] , reverse=snake_case )
if top_k is not None:
snake_case_ :int = dict_scores[:top_k]
return dict_scores
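# Usage sketch for the text-classification pipeline this class backs
# (standard `transformers.pipeline` API; the checkpoint name is illustrative):
#   from transformers import pipeline
#   clf = pipeline("text-classification", model="distilbert-base-uncased-finetuned-sst-2-english")
#   clf("This movie was great!")              # -> [{"label": "POSITIVE", "score": ...}]
#   clf("This movie was great!", top_k=None)  # scores for every label, sorted descending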
| 66 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_tokenizers_available,
is_torch_available,
)
__a = {"configuration_reformer": ["REFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP", "ReformerConfig"]}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__a = ["ReformerTokenizer"]
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__a = ["ReformerTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__a = [
"REFORMER_PRETRAINED_MODEL_ARCHIVE_LIST",
"ReformerAttention",
"ReformerForMaskedLM",
"ReformerForQuestionAnswering",
"ReformerForSequenceClassification",
"ReformerLayer",
"ReformerModel",
"ReformerModelWithLMHead",
"ReformerPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_reformer import REFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, ReformerConfig
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_reformer import ReformerTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_reformer_fast import ReformerTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_reformer import (
REFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
ReformerAttention,
ReformerForMaskedLM,
ReformerForQuestionAnswering,
ReformerForSequenceClassification,
ReformerLayer,
ReformerModel,
ReformerModelWithLMHead,
ReformerPreTrainedModel,
)
else:
import sys
__a = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
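# Behavior sketch: with `_LazyModule`, importing the package is cheap and each
# symbol is resolved on first attribute access, e.g.
#   import transformers.models.reformer as reformer  # no torch import yet
#   reformer.ReformerConfig                           # triggers the real submodule import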
| 66 | 1 |
'''simple docstring'''
import unittest
from transformers import MPNetConfig, is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
MPNetForMaskedLM,
MPNetForMultipleChoice,
MPNetForQuestionAnswering,
MPNetForSequenceClassification,
MPNetForTokenClassification,
MPNetModel,
)
class __snake_case:
'''simple docstring'''
def __init__( self , A_ , A_=13 , A_=7 , A_=True , A_=True , A_=False , A_=True , A_=99 , A_=64 , A_=5 , A_=4 , A_=64 , A_="gelu" , A_=0.1 , A_=0.1 , A_=512 , A_=16 , A_=2 , A_=0.0_2 , A_=3 , A_=4 , A_=None , ) -> List[Any]:
lowerCAmelCase = parent
lowerCAmelCase = batch_size
lowerCAmelCase = seq_length
lowerCAmelCase = is_training
lowerCAmelCase = use_input_mask
lowerCAmelCase = use_token_type_ids
lowerCAmelCase = use_labels
lowerCAmelCase = vocab_size
lowerCAmelCase = hidden_size
lowerCAmelCase = num_hidden_layers
lowerCAmelCase = num_attention_heads
lowerCAmelCase = intermediate_size
lowerCAmelCase = hidden_act
lowerCAmelCase = hidden_dropout_prob
lowerCAmelCase = attention_probs_dropout_prob
lowerCAmelCase = max_position_embeddings
lowerCAmelCase = type_vocab_size
lowerCAmelCase = type_sequence_label_size
lowerCAmelCase = initializer_range
lowerCAmelCase = num_labels
lowerCAmelCase = num_choices
lowerCAmelCase = scope
def __snake_case ( self ) -> Optional[int]:
return MPNetConfig.from_pretrained("""microsoft/mpnet-base""" )
def __snake_case ( self ) -> str:
lowerCAmelCase = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
lowerCAmelCase = None
if self.use_input_mask:
lowerCAmelCase = random_attention_mask([self.batch_size, self.seq_length] )
lowerCAmelCase = None
lowerCAmelCase = None
lowerCAmelCase = None
if self.use_labels:
lowerCAmelCase = ids_tensor([self.batch_size] , self.type_sequence_label_size )
lowerCAmelCase = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
lowerCAmelCase = ids_tensor([self.batch_size] , self.num_choices )
lowerCAmelCase = self.get_config()
return config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
def __snake_case ( self ) -> str:
return MPNetConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , initializer_range=self.initializer_range , )
def __snake_case ( self , A_ , A_ , A_ , A_ , A_ , A_ ) -> Union[str, Any]:
lowerCAmelCase = MPNetModel(config=A_ )
model.to(A_ )
model.eval()
lowerCAmelCase = model(A_ , A_ )
lowerCAmelCase = model(A_ )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
self.parent.assertEqual(result.pooler_output.shape , (self.batch_size, self.hidden_size) )
def __snake_case ( self , A_ , A_ , A_ , A_ , A_ , A_ ) -> List[Any]:
lowerCAmelCase = MPNetForQuestionAnswering(config=A_ )
model.to(A_ )
model.eval()
lowerCAmelCase = model(
A_ , attention_mask=A_ , start_positions=A_ , end_positions=A_ , )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
def __snake_case ( self , A_ , A_ , A_ , A_ , A_ , A_ ) -> Optional[int]:
lowerCAmelCase = self.num_labels
lowerCAmelCase = MPNetForSequenceClassification(A_ )
model.to(A_ )
model.eval()
lowerCAmelCase = model(A_ , attention_mask=A_ , labels=A_ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def __snake_case ( self , A_ , A_ , A_ , A_ , A_ , A_ ) -> Any:
lowerCAmelCase = self.num_choices
lowerCAmelCase = MPNetForMultipleChoice(config=A_ )
model.to(A_ )
model.eval()
lowerCAmelCase = input_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
lowerCAmelCase = input_mask.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
lowerCAmelCase = model(
A_ , attention_mask=A_ , labels=A_ , )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) )
def __snake_case ( self , A_ , A_ , A_ , A_ , A_ , A_ ) -> Tuple:
lowerCAmelCase = self.num_labels
lowerCAmelCase = MPNetForTokenClassification(config=A_ )
model.to(A_ )
model.eval()
lowerCAmelCase = model(A_ , attention_mask=A_ , labels=A_ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def __snake_case ( self ) -> Union[str, Any]:
lowerCAmelCase = self.prepare_config_and_inputs()
((lowerCAmelCase), (lowerCAmelCase), (lowerCAmelCase), (lowerCAmelCase), (lowerCAmelCase), (lowerCAmelCase)) = config_and_inputs
lowerCAmelCase = {"""input_ids""": input_ids, """attention_mask""": input_mask}
return config, inputs_dict
@require_torch
class __snake_case( _lowerCAmelCase , _lowerCAmelCase , unittest.TestCase ):
'''simple docstring'''
UpperCAmelCase : List[str] = (
(
MPNetForMaskedLM,
MPNetForMultipleChoice,
MPNetForQuestionAnswering,
MPNetForSequenceClassification,
MPNetForTokenClassification,
MPNetModel,
)
if is_torch_available()
else ()
)
UpperCAmelCase : List[str] = (
{
"feature-extraction": MPNetModel,
"fill-mask": MPNetForMaskedLM,
"question-answering": MPNetForQuestionAnswering,
"text-classification": MPNetForSequenceClassification,
"token-classification": MPNetForTokenClassification,
"zero-shot": MPNetForSequenceClassification,
}
if is_torch_available()
else {}
)
UpperCAmelCase : Any = False
UpperCAmelCase : Union[str, Any] = True
def __snake_case ( self ) -> int:
lowerCAmelCase = MPNetModelTester(self )
lowerCAmelCase = ConfigTester(self , config_class=A_ , hidden_size=37 )
def __snake_case ( self ) -> int:
self.config_tester.run_common_tests()
def __snake_case ( self ) -> int:
lowerCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_mpnet_model(*A_ )
def __snake_case ( self ) -> str:
lowerCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_mpnet_for_sequence_classification(*A_ )
def __snake_case ( self ) -> Tuple:
lowerCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_mpnet_for_multiple_choice(*A_ )
def __snake_case ( self ) -> Optional[Any]:
lowerCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_mpnet_for_token_classification(*A_ )
def __snake_case ( self ) -> int:
lowerCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_mpnet_for_question_answering(*A_ )
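# The integration case below is gated behind @slow; a sketch of how it is
# usually run (standard transformers test convention):
#   RUN_SLOW=1 pytest tests/models/mpnet -k "IntegrationTest" -x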
@require_torch
class __snake_case( unittest.TestCase ):
'''simple docstring'''
@slow
def __snake_case ( self ) -> Optional[Any]:
lowerCAmelCase = MPNetModel.from_pretrained("""microsoft/mpnet-base""" )
lowerCAmelCase = torch.tensor([[0, 345, 232, 328, 740, 140, 1695, 69, 6078, 1588, 2]] )
lowerCAmelCase = model(A_ )[0]
lowerCAmelCase = torch.Size((1, 11, 768) )
self.assertEqual(output.shape , A_ )
lowerCAmelCase = torch.tensor(
[[[-0.0_5_5_0, 0.1_9_4_3, -0.0_7_4_0], [-0.0_5_6_2, 0.2_2_1_1, -0.0_5_7_9], [-0.0_4_3_7, 0.3_3_3_7, -0.0_6_4_1]]] )
# compare the actual values for a slice.
        self.assertTrue(torch.allclose(output[:, :3, :3] , A_ , atol=1e-4 ) )
 | 358 |
'''simple docstring'''
from torch import nn
def _snake_case ( _SCREAMING_SNAKE_CASE : Optional[int] ) -> Union[str, Any]:
"""simple docstring"""
if act_fn in ["swish", "silu"]:
return nn.SiLU()
elif act_fn == "mish":
return nn.Mish()
elif act_fn == "gelu":
return nn.GELU()
else:
        raise ValueError(f'Unsupported activation function: {act_fn}' )
 | 187 | 0 |
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
SCREAMING_SNAKE_CASE_ : Dict = logging.get_logger(__name__)
SCREAMING_SNAKE_CASE_ : Any = {
'microsoft/swinv2-tiny-patch4-window8-256': (
'https://huggingface.co/microsoft/swinv2-tiny-patch4-window8-256/resolve/main/config.json'
),
}
class a ( _lowerCamelCase ):
"""simple docstring"""
UpperCAmelCase = "swinv2"
UpperCAmelCase = {
"num_attention_heads": "num_heads",
"num_hidden_layers": "num_layers",
}
def __init__( self: Tuple , UpperCamelCase: int=2_24 , UpperCamelCase: Any=4 , UpperCamelCase: Union[str, Any]=3 , UpperCamelCase: Dict=96 , UpperCamelCase: Union[str, Any]=[2, 2, 6, 2] , UpperCamelCase: Any=[3, 6, 12, 24] , UpperCamelCase: int=7 , UpperCamelCase: Dict=4.0 , UpperCamelCase: Optional[Any]=True , UpperCamelCase: List[str]=0.0 , UpperCamelCase: List[str]=0.0 , UpperCamelCase: List[str]=0.1 , UpperCamelCase: Any="gelu" , UpperCamelCase: List[Any]=False , UpperCamelCase: Any=0.02 , UpperCamelCase: Tuple=1e-5 , UpperCamelCase: str=32 , **UpperCamelCase: Dict , ):
"""simple docstring"""
super().__init__(**UpperCamelCase )
A__ = image_size
A__ = patch_size
A__ = num_channels
A__ = embed_dim
A__ = depths
A__ = len(UpperCamelCase )
A__ = num_heads
A__ = window_size
A__ = mlp_ratio
A__ = qkv_bias
A__ = hidden_dropout_prob
A__ = attention_probs_dropout_prob
A__ = drop_path_rate
A__ = hidden_act
A__ = use_absolute_embeddings
A__ = layer_norm_eps
A__ = initializer_range
A__ = encoder_stride
# we set the hidden_size attribute in order to make Swinv2 work with VisionEncoderDecoderModel
# this indicates the channel dimension after the last stage of the model
A__ = int(embed_dim * 2 ** (len(UpperCamelCase ) - 1) )
A__ = (0, 0, 0, 0)
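# Instantiation sketch (the configuration class above is bound to the
# obfuscated name `a` in this copy; values mirror its tiny defaults):
#   config = a(embed_dim=96, depths=[2, 2, 6, 2], num_heads=[3, 6, 12, 24])
#   config.hidden_size  # 96 * 2 ** (len(depths) - 1) = 768, consumed by VisionEncoderDecoderModel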
| 335 |
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Optional[int] = tuple[float, float, float]
SCREAMING_SNAKE_CASE_ : Optional[int] = tuple[float, float, float]
def _snake_case ( UpperCAmelCase_ : Pointad , UpperCAmelCase_ : Pointad ):
A__ = end_pointa[0] - end_pointa[0]
A__ = end_pointa[1] - end_pointa[1]
A__ = end_pointa[2] - end_pointa[2]
return (x, y, z)
def _snake_case ( UpperCAmelCase_ : Vectorad , UpperCAmelCase_ : Vectorad ):
A__ = ab[1] * ac[2] - ab[2] * ac[1] # *i
A__ = (ab[0] * ac[2] - ab[2] * ac[0]) * -1 # *j
A__ = ab[0] * ac[1] - ab[1] * ac[0] # *k
return (x, y, z)
def _snake_case ( UpperCAmelCase_ : Vectorad , UpperCAmelCase_ : int ):
return tuple(round(UpperCAmelCase_ , UpperCAmelCase_ ) for x in vector ) == (0, 0, 0)
def _snake_case ( UpperCAmelCase_ : Pointad , UpperCAmelCase_ : Pointad , UpperCAmelCase_ : Pointad , UpperCAmelCase_ : int = 10 ):
A__ = create_vector(UpperCAmelCase_ , UpperCAmelCase_ )
A__ = create_vector(UpperCAmelCase_ , UpperCAmelCase_ )
return is_zero_vector(get_ad_vectors_cross(UpperCAmelCase_ , UpperCAmelCase_ ) , UpperCAmelCase_ )
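# Worked example (collinearity of three 3-D points): for A=(0,0,0),
# B=(1,1,1), C=(2,2,2) we get AB x AC = (0,0,0), so the zero-vector check
# above returns True; replacing C with (0,1,0) yields the nonzero cross
# product (-1, 0, 1) and the check returns False.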
| 335 | 1 |
from typing import Optional, Tuple, Union
import torch
from einops import rearrange, reduce
from diffusers import DDIMScheduler, DDPMScheduler, DiffusionPipeline, ImagePipelineOutput, UNetaDConditionModel
from diffusers.schedulers.scheduling_ddim import DDIMSchedulerOutput
from diffusers.schedulers.scheduling_ddpm import DDPMSchedulerOutput
A__ = 8
def _lowerCAmelCase ( __lowerCAmelCase , __lowerCAmelCase=BITS ) -> Union[str, Any]:
"""simple docstring"""
snake_case__ : Union[str, Any] = x.device
snake_case__ : Dict = (x * 255).int().clamp(0 , 255 )
snake_case__ : Optional[int] = 2 ** torch.arange(bits - 1 , -1 , -1 , device=__lowerCAmelCase )
snake_case__ : Optional[int] = rearrange(__lowerCAmelCase , '''d -> d 1 1''' )
snake_case__ : Dict = rearrange(__lowerCAmelCase , '''b c h w -> b c 1 h w''' )
snake_case__ : Tuple = ((x & mask) != 0).float()
snake_case__ : str = rearrange(__lowerCAmelCase , '''b c d h w -> b (c d) h w''' )
snake_case__ : List[Any] = bits * 2 - 1
return bits
def _lowerCAmelCase ( __lowerCAmelCase , __lowerCAmelCase=BITS ) -> List[str]:
"""simple docstring"""
snake_case__ : Any = x.device
snake_case__ : Union[str, Any] = (x > 0).int()
snake_case__ : List[Any] = 2 ** torch.arange(bits - 1 , -1 , -1 , device=__lowerCAmelCase , dtype=torch.intaa )
snake_case__ : str = rearrange(__lowerCAmelCase , '''d -> d 1 1''' )
snake_case__ : List[str] = rearrange(__lowerCAmelCase , '''b (c d) h w -> b c d h w''' , d=8 )
snake_case__ : int = reduce(x * mask , '''b c d h w -> b c h w''' , '''sum''' )
return (dec / 255).clamp(0.0 , 1.0 )
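# Round-trip sketch for the two converters above: pixels in [0, 1] are
# quantized to 255ths, expanded into 8 sign-coded bit planes per channel
# (values in {-1, +1}), then decoded back to the quantized values
# (`encode`/`decode` stand in for the obfuscated helper names in this copy):
#   x = torch.rand(1, 3, 8, 8)
#   planes = encode(x)                          # shape (1, 24, 8, 8)
#   assert torch.allclose(decode(planes), (x * 255).int() / 255)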
def _lowerCAmelCase ( self , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase = 0.0 , __lowerCAmelCase = True , __lowerCAmelCase=None , __lowerCAmelCase = True , ) -> Union[DDIMSchedulerOutput, Tuple]:
"""simple docstring"""
if self.num_inference_steps is None:
raise ValueError(
'''Number of inference steps is \'None\', you need to run \'set_timesteps\' after creating the scheduler''' )
# See formulas (12) and (16) of DDIM paper https://arxiv.org/pdf/2010.02502.pdf
        # Ideally, read the DDIM paper in detail to understand the steps below
        # Notation: <variable name> -> <name in paper>
# - pred_noise_t -> e_theta(x_t, t)
# - pred_original_sample -> f_theta(x_t, t) or x_0
# - std_dev_t -> sigma_t
# - eta -> η
# - pred_sample_direction -> "direction pointing to x_t"
# - pred_prev_sample -> "x_t-1"
# 1. get previous step value (=t-1)
snake_case__ : Optional[Any] = timestep - self.config.num_train_timesteps // self.num_inference_steps
# 2. compute alphas, betas
snake_case__ : Optional[int] = self.alphas_cumprod[timestep]
snake_case__ : Any = self.alphas_cumprod[prev_timestep] if prev_timestep >= 0 else self.final_alpha_cumprod
snake_case__ : Optional[Any] = 1 - alpha_prod_t
# 3. compute predicted original sample from predicted noise also called
# "predicted x_0" of formula (12) from https://arxiv.org/pdf/2010.02502.pdf
snake_case__ : str = (sample - beta_prod_t ** 0.5 * model_output) / alpha_prod_t ** 0.5
# 4. Clip "predicted x_0"
snake_case__ : Optional[int] = self.bit_scale
if self.config.clip_sample:
snake_case__ : Tuple = torch.clamp(__lowerCAmelCase , -scale , __lowerCAmelCase )
# 5. compute variance: "sigma_t(η)" -> see formula (16)
# σ_t = sqrt((1 − α_t−1)/(1 − α_t)) * sqrt(1 − α_t/α_t−1)
snake_case__ : Union[str, Any] = self._get_variance(__lowerCAmelCase , __lowerCAmelCase )
snake_case__ : Optional[Any] = eta * variance ** 0.5
if use_clipped_model_output:
# the model_output is always re-derived from the clipped x_0 in Glide
snake_case__ : Any = (sample - alpha_prod_t ** 0.5 * pred_original_sample) / beta_prod_t ** 0.5
# 6. compute "direction pointing to x_t" of formula (12) from https://arxiv.org/pdf/2010.02502.pdf
snake_case__ : Dict = (1 - alpha_prod_t_prev - std_dev_t**2) ** 0.5 * model_output
# 7. compute x_t without "random noise" of formula (12) from https://arxiv.org/pdf/2010.02502.pdf
snake_case__ : int = alpha_prod_t_prev ** 0.5 * pred_original_sample + pred_sample_direction
if eta > 0:
# randn_like does not support generator https://github.com/pytorch/pytorch/issues/27072
snake_case__ : List[Any] = model_output.device if torch.is_tensor(__lowerCAmelCase ) else '''cpu'''
snake_case__ : Optional[int] = torch.randn(model_output.shape , dtype=model_output.dtype , generator=__lowerCAmelCase ).to(__lowerCAmelCase )
snake_case__ : List[str] = self._get_variance(__lowerCAmelCase , __lowerCAmelCase ) ** 0.5 * eta * noise
snake_case__ : Optional[int] = prev_sample + variance
if not return_dict:
return (prev_sample,)
return DDIMSchedulerOutput(prev_sample=__lowerCAmelCase , pred_original_sample=__lowerCAmelCase )
def _lowerCAmelCase ( self , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase="epsilon" , __lowerCAmelCase=None , __lowerCAmelCase = True , ) -> Union[DDPMSchedulerOutput, Tuple]:
"""simple docstring"""
snake_case__ : Dict = timestep
if model_output.shape[1] == sample.shape[1] * 2 and self.variance_type in ["learned", "learned_range"]:
snake_case__ , snake_case__ : Tuple = torch.split(__lowerCAmelCase , sample.shape[1] , dim=1 )
else:
snake_case__ : Dict = None
# 1. compute alphas, betas
snake_case__ : Tuple = self.alphas_cumprod[t]
snake_case__ : str = self.alphas_cumprod[t - 1] if t > 0 else self.one
snake_case__ : int = 1 - alpha_prod_t
snake_case__ : int = 1 - alpha_prod_t_prev
# 2. compute predicted original sample from predicted noise also called
# "predicted x_0" of formula (15) from https://arxiv.org/pdf/2006.11239.pdf
if prediction_type == "epsilon":
snake_case__ : int = (sample - beta_prod_t ** 0.5 * model_output) / alpha_prod_t ** 0.5
elif prediction_type == "sample":
snake_case__ : Optional[int] = model_output
else:
raise ValueError(f"""Unsupported prediction_type {prediction_type}.""" )
# 3. Clip "predicted x_0"
snake_case__ : Optional[int] = self.bit_scale
if self.config.clip_sample:
snake_case__ : str = torch.clamp(__lowerCAmelCase , -scale , __lowerCAmelCase )
# 4. Compute coefficients for pred_original_sample x_0 and current sample x_t
# See formula (7) from https://arxiv.org/pdf/2006.11239.pdf
snake_case__ : str = (alpha_prod_t_prev ** 0.5 * self.betas[t]) / beta_prod_t
snake_case__ : List[Any] = self.alphas[t] ** 0.5 * beta_prod_t_prev / beta_prod_t
# 5. Compute predicted previous sample µ_t
# See formula (7) from https://arxiv.org/pdf/2006.11239.pdf
snake_case__ : Dict = pred_original_sample_coeff * pred_original_sample + current_sample_coeff * sample
# 6. Add noise
snake_case__ : List[str] = 0
if t > 0:
snake_case__ : Any = torch.randn(
model_output.size() , dtype=model_output.dtype , layout=model_output.layout , generator=__lowerCAmelCase ).to(model_output.device )
snake_case__ : str = (self._get_variance(__lowerCAmelCase , predicted_variance=__lowerCAmelCase ) ** 0.5) * noise
snake_case__ : str = pred_prev_sample + variance
if not return_dict:
return (pred_prev_sample,)
return DDPMSchedulerOutput(prev_sample=__lowerCAmelCase , pred_original_sample=__lowerCAmelCase )
class a ( __lowerCamelCase ):
def __init__( self :List[Any] ,__lowercase :UNetaDConditionModel ,__lowercase :Union[DDIMScheduler, DDPMScheduler] ,__lowercase :Optional[float] = 1.0 ,):
super().__init__()
snake_case__ : Optional[int] = bit_scale
snake_case__ : Optional[Any] = (
ddim_bit_scheduler_step if isinstance(__lowercase ,__lowercase ) else ddpm_bit_scheduler_step
)
self.register_modules(unet=__lowercase ,scheduler=__lowercase )
@torch.no_grad()
def __call__( self :Union[str, Any] ,__lowercase :Optional[int] = 2_5_6 ,__lowercase :Optional[int] = 2_5_6 ,__lowercase :Optional[int] = 5_0 ,__lowercase :Optional[torch.Generator] = None ,__lowercase :Optional[int] = 1 ,__lowercase :Optional[str] = "pil" ,__lowercase :bool = True ,**__lowercase :Optional[Any] ,):
snake_case__ : Tuple = torch.randn(
(batch_size, self.unet.config.in_channels, height, width) ,generator=__lowercase ,)
snake_case__ : int = decimal_to_bits(__lowercase ) * self.bit_scale
snake_case__ : Optional[Any] = latents.to(self.device )
self.scheduler.set_timesteps(__lowercase )
for t in self.progress_bar(self.scheduler.timesteps ):
# predict the noise residual
snake_case__ : List[Any] = self.unet(__lowercase ,__lowercase ).sample
# compute the previous noisy sample x_t -> x_t-1
snake_case__ : Optional[Any] = self.scheduler.step(__lowercase ,__lowercase ,__lowercase ).prev_sample
snake_case__ : List[str] = bits_to_decimal(__lowercase )
if output_type == "pil":
snake_case__ : Optional[int] = self.numpy_to_pil(__lowercase )
if not return_dict:
return (image,)
return ImagePipelineOutput(images=__lowercase )
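# Usage sketch for the bit-diffusion pipeline above (bound to the obfuscated
# class name `a` in this copy; unet/scheduler construction is illustrative):
#   pipe = a(unet=my_trained_unet, scheduler=DDIMScheduler())
#   image = pipe(height=64, width=64, num_inference_steps=10).images[0]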
| 44 |
import argparse
import json
from collections import OrderedDict
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import PoolFormerConfig, PoolFormerForImageClassification, PoolFormerImageProcessor
from transformers.utils import logging
logging.set_verbosity_info()
A__ = logging.get_logger(__name__)
def _lowerCAmelCase ( __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase ) -> Optional[Any]:
"""simple docstring"""
snake_case__ : Optional[Any] = original_name.split('''.''' )[0]
snake_case__ : List[str] = key.split('''.''' )
snake_case__ : Optional[int] = int(key_list[key_list.index(__lowerCAmelCase ) - 2] )
snake_case__ : Optional[int] = int(key_list[key_list.index(__lowerCAmelCase ) - 1] )
snake_case__ : Any = orig_block_num - offset
snake_case__ : Tuple = key.replace(f"""{orig_block_num}.{layer_num}.{original_name}""" , f"""block.{new_block_num}.{layer_num}.{new_name}""" )
return key
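# Renaming sketch for the helper above: with original_name "mlp.fc1",
# new_name "output.conv1" and offset 0, a key such as
# "poolformer.encoder.2.1.mlp.fc1.weight" becomes
# "poolformer.encoder.block.2.1.output.conv1.weight" (illustrative key; the
# surrounding rename pass supplies the real prefixes).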
def _lowerCAmelCase ( __lowerCAmelCase ) -> Dict:
"""simple docstring"""
snake_case__ : Optional[int] = OrderedDict()
snake_case__ , snake_case__ : List[str] = 0, 0
for key, value in state_dict.items():
if key.startswith('''network''' ):
snake_case__ : int = key.replace('''network''' , '''poolformer.encoder''' )
if "proj" in key:
# Works for the first embedding as well as the internal embedding layers
if key.endswith('''bias''' ) and "patch_embed" not in key:
patch_emb_offset += 1
snake_case__ : Tuple = key[: key.find('''proj''' )]
snake_case__ : Union[str, Any] = key.replace(__lowerCAmelCase , f"""patch_embeddings.{total_embed_found}.""" )
snake_case__ : Dict = key.replace('''proj''' , '''projection''' )
if key.endswith('''bias''' ):
total_embed_found += 1
if "patch_embeddings" in key:
snake_case__ : Optional[int] = '''poolformer.encoder.''' + key
if "mlp.fc1" in key:
snake_case__ : Optional[int] = replace_key_with_offset(__lowerCAmelCase , __lowerCAmelCase , '''mlp.fc1''' , '''output.conv1''' )
if "mlp.fc2" in key:
snake_case__ : Optional[Any] = replace_key_with_offset(__lowerCAmelCase , __lowerCAmelCase , '''mlp.fc2''' , '''output.conv2''' )
if "norm1" in key:
snake_case__ : int = replace_key_with_offset(__lowerCAmelCase , __lowerCAmelCase , '''norm1''' , '''before_norm''' )
if "norm2" in key:
snake_case__ : Tuple = replace_key_with_offset(__lowerCAmelCase , __lowerCAmelCase , '''norm2''' , '''after_norm''' )
if "layer_scale_1" in key:
snake_case__ : str = replace_key_with_offset(__lowerCAmelCase , __lowerCAmelCase , '''layer_scale_1''' , '''layer_scale_1''' )
if "layer_scale_2" in key:
snake_case__ : Optional[int] = replace_key_with_offset(__lowerCAmelCase , __lowerCAmelCase , '''layer_scale_2''' , '''layer_scale_2''' )
if "head" in key:
snake_case__ : Union[str, Any] = key.replace('''head''' , '''classifier''' )
snake_case__ : Union[str, Any] = value
return new_state_dict
def _lowerCAmelCase ( ) -> Union[str, Any]:
"""simple docstring"""
snake_case__ : str = '''http://images.cocodataset.org/val2017/000000039769.jpg'''
snake_case__ : List[str] = Image.open(requests.get(__lowerCAmelCase , stream=__lowerCAmelCase ).raw )
return image
@torch.no_grad()
def _lowerCAmelCase ( __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase ) -> Any:
"""simple docstring"""
snake_case__ : List[str] = PoolFormerConfig()
# set attributes based on model_name
snake_case__ : List[Any] = '''huggingface/label-files'''
snake_case__ : Union[str, Any] = model_name[-3:]
snake_case__ : List[Any] = 1000
snake_case__ : Tuple = '''imagenet-1k-id2label.json'''
snake_case__ : Optional[int] = (1, 1000)
# set config attributes
snake_case__ : Dict = json.load(open(hf_hub_download(__lowerCAmelCase , __lowerCAmelCase , repo_type='''dataset''' ) , '''r''' ) )
snake_case__ : Dict = {int(__lowerCAmelCase ): v for k, v in idalabel.items()}
snake_case__ : Tuple = idalabel
snake_case__ : List[Any] = {v: k for k, v in idalabel.items()}
if size == "s12":
snake_case__ : List[str] = [2, 2, 6, 2]
snake_case__ : Union[str, Any] = [64, 128, 320, 512]
snake_case__ : Optional[int] = 4.0
snake_case__ : Tuple = 0.9
elif size == "s24":
snake_case__ : Tuple = [4, 4, 12, 4]
snake_case__ : Tuple = [64, 128, 320, 512]
snake_case__ : List[Any] = 4.0
snake_case__ : Dict = 0.9
elif size == "s36":
snake_case__ : Optional[Any] = [6, 6, 18, 6]
snake_case__ : str = [64, 128, 320, 512]
snake_case__ : List[Any] = 4.0
snake_case__ : Any = 1E-6
snake_case__ : Any = 0.9
elif size == "m36":
snake_case__ : Any = [6, 6, 18, 6]
snake_case__ : Union[str, Any] = [96, 192, 384, 768]
snake_case__ : Dict = 4.0
snake_case__ : Union[str, Any] = 1E-6
snake_case__ : List[Any] = 0.95
elif size == "m48":
snake_case__ : Optional[int] = [8, 8, 24, 8]
snake_case__ : List[str] = [96, 192, 384, 768]
snake_case__ : str = 4.0
snake_case__ : str = 1E-6
snake_case__ : Any = 0.95
else:
raise ValueError(f"""Size {size} not supported""" )
# load image processor
snake_case__ : Optional[Any] = PoolFormerImageProcessor(crop_pct=__lowerCAmelCase )
# Prepare image
snake_case__ : Optional[int] = prepare_img()
snake_case__ : str = image_processor(images=__lowerCAmelCase , return_tensors='''pt''' ).pixel_values
logger.info(f"""Converting model {model_name}...""" )
# load original state dict
snake_case__ : List[str] = torch.load(__lowerCAmelCase , map_location=torch.device('''cpu''' ) )
# rename keys
snake_case__ : str = rename_keys(__lowerCAmelCase )
# create HuggingFace model and load state dict
snake_case__ : List[str] = PoolFormerForImageClassification(__lowerCAmelCase )
model.load_state_dict(__lowerCAmelCase )
model.eval()
# Define image processor
snake_case__ : int = PoolFormerImageProcessor(crop_pct=__lowerCAmelCase )
snake_case__ : str = image_processor(images=prepare_img() , return_tensors='''pt''' ).pixel_values
# forward pass
snake_case__ : Dict = model(__lowerCAmelCase )
snake_case__ : str = outputs.logits
# define expected logit slices for different models
if size == "s12":
snake_case__ : Tuple = torch.tensor([-0.3_045, -0.6_758, -0.4_869] )
elif size == "s24":
snake_case__ : Optional[int] = torch.tensor([0.4_402, -0.1_374, -0.8_045] )
elif size == "s36":
snake_case__ : int = torch.tensor([-0.6_080, -0.5_133, -0.5_898] )
elif size == "m36":
snake_case__ : Optional[int] = torch.tensor([0.3_952, 0.2_263, -1.2_668] )
elif size == "m48":
snake_case__ : List[str] = torch.tensor([0.1_167, -0.0_656, -0.3_423] )
else:
raise ValueError(f"""Size {size} not supported""" )
# verify logits
assert logits.shape == expected_shape
assert torch.allclose(logits[0, :3] , __lowerCAmelCase , atol=1E-2 )
# finally, save model and image processor
logger.info(f"""Saving PyTorch model and image processor to {pytorch_dump_folder_path}...""" )
Path(__lowerCAmelCase ).mkdir(exist_ok=__lowerCAmelCase )
model.save_pretrained(__lowerCAmelCase )
print(f"""Saving image processor to {pytorch_dump_folder_path}""" )
image_processor.save_pretrained(__lowerCAmelCase )
if __name__ == "__main__":
A__ = argparse.ArgumentParser()
parser.add_argument(
'''--model_name''',
default='''poolformer_s12''',
type=str,
help='''Name of the model you\'d like to convert.''',
)
parser.add_argument(
'''--checkpoint_path''', default=None, type=str, help='''Path to the original PyTorch checkpoint (.pth file).'''
)
parser.add_argument(
'''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the folder to output PyTorch model.'''
)
A__ = parser.parse_args()
convert_poolformer_checkpoint(args.model_name, args.checkpoint_path, args.pytorch_dump_folder_path)
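# CLI sketch (script and paths are illustrative):
#   python convert_poolformer_original_to_pytorch.py \
#       --model_name poolformer_s12 \
#       --checkpoint_path /path/to/poolformer_s12.pth.tar \
#       --pytorch_dump_folder_path ./poolformer-s12-hf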
| 44 | 1 |
from .constants import (
MODEL_NAME,
OPTIMIZER_NAME,
RNG_STATE_NAME,
SAFE_WEIGHTS_INDEX_NAME,
SAFE_WEIGHTS_NAME,
SCALER_NAME,
SCHEDULER_NAME,
TORCH_LAUNCH_PARAMS,
WEIGHTS_INDEX_NAME,
WEIGHTS_NAME,
)
from .dataclasses import (
BnbQuantizationConfig,
ComputeEnvironment,
CustomDtype,
DeepSpeedPlugin,
DistributedDataParallelKwargs,
DistributedType,
DynamoBackend,
FPaRecipeKwargs,
FullyShardedDataParallelPlugin,
GradientAccumulationPlugin,
GradScalerKwargs,
InitProcessGroupKwargs,
KwargsHandler,
LoggerType,
MegatronLMPlugin,
PrecisionType,
ProjectConfiguration,
RNGType,
SageMakerDistributedType,
TensorInformation,
TorchDynamoPlugin,
)
from .environment import get_int_from_env, parse_choice_from_env, parse_flag_from_env
from .imports import (
get_ccl_version,
is_abit_bnb_available,
is_abit_bnb_available,
is_aim_available,
is_bfaa_available,
is_bnb_available,
is_botoa_available,
is_ccl_available,
is_comet_ml_available,
is_datasets_available,
is_deepspeed_available,
is_fpa_available,
is_ipex_available,
is_megatron_lm_available,
is_mlflow_available,
is_mps_available,
is_npu_available,
is_rich_available,
is_safetensors_available,
is_sagemaker_available,
is_tensorboard_available,
is_tpu_available,
is_transformers_available,
is_wandb_available,
is_xpu_available,
)
from .modeling import (
check_device_map,
check_tied_parameters_in_config,
check_tied_parameters_on_same_device,
compute_module_sizes,
convert_file_size_to_int,
dtype_byte_size,
find_tied_parameters,
get_balanced_memory,
get_max_layer_size,
get_max_memory,
get_mixed_precision_context_manager,
id_tensor_storage,
infer_auto_device_map,
load_checkpoint_in_model,
load_offloaded_weights,
load_state_dict,
named_module_tensors,
retie_parameters,
set_module_tensor_to_device,
shard_checkpoint,
)
from .offload import (
OffloadedWeightsLoader,
PrefixedDataset,
extract_submodules_state_dict,
load_offloaded_weight,
offload_state_dict,
offload_weight,
save_offload_index,
)
from .operations import (
broadcast,
broadcast_object_list,
concatenate,
convert_outputs_to_fpaa,
convert_to_fpaa,
find_batch_size,
find_device,
gather,
gather_object,
get_data_structure,
honor_type,
initialize_tensors,
is_namedtuple,
is_tensor_information,
is_torch_tensor,
listify,
pad_across_processes,
recursively_apply,
reduce,
send_to_device,
slice_tensors,
)
from .versions import compare_versions, is_torch_version
if is_deepspeed_available():
from .deepspeed import (
DeepSpeedEngineWrapper,
DeepSpeedOptimizerWrapper,
DeepSpeedSchedulerWrapper,
DummyOptim,
DummyScheduler,
HfDeepSpeedConfig,
)
from .bnb import has_abit_bnb_layers, load_and_quantize_model
from .fsdp_utils import load_fsdp_model, load_fsdp_optimizer, save_fsdp_model, save_fsdp_optimizer
from .launch import (
PrepareForLaunch,
_filter_args,
prepare_deepspeed_cmd_env,
prepare_multi_gpu_env,
prepare_sagemager_args_inputs,
prepare_simple_launcher_cmd_env,
prepare_tpu,
)
from .megatron_lm import (
AbstractTrainStep,
BertTrainStep,
GPTTrainStep,
MegatronEngine,
MegatronLMDummyDataLoader,
MegatronLMDummyScheduler,
MegatronLMOptimizerWrapper,
MegatronLMSchedulerWrapper,
TaTrainStep,
avg_losses_across_data_parallel_group,
gather_across_data_parallel_groups,
)
from .megatron_lm import initialize as megatron_lm_initialize
from .megatron_lm import prepare_data_loader as megatron_lm_prepare_data_loader
from .megatron_lm import prepare_model as megatron_lm_prepare_model
from .megatron_lm import prepare_optimizer as megatron_lm_prepare_optimizer
from .megatron_lm import prepare_scheduler as megatron_lm_prepare_scheduler
from .memory import find_executable_batch_size, release_memory
from .other import (
extract_model_from_parallel,
get_pretty_name,
is_port_in_use,
merge_dicts,
patch_environment,
save,
wait_for_everyone,
write_basic_config,
)
from .random import set_seed, synchronize_rng_state, synchronize_rng_states
from .torch_xla import install_xla
from .tqdm import tqdm
from .transformer_engine import convert_model, has_transformer_engine_layers
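# Consumption sketch: downstream code reaches these helpers through the
# namespace assembled here, e.g. (both names are real accelerate.utils
# exports listed above):
#   from accelerate.utils import set_seed, send_to_device
#   set_seed(42)
#   batch = send_to_device(batch, "cuda:0")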
| 146 |
import inspect
import math
import tempfile
import unittest
import numpy as np
from transformers import ViTMAEConfig
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import ViTMAEForPreTraining, ViTMAEModel
from transformers.models.vit.modeling_vit import VIT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import ViTImageProcessor
class __magic_name__ :
def __init__( self : Tuple , lowerCamelCase__ : Optional[Any] , lowerCamelCase__ : int=13 , lowerCamelCase__ : Union[str, Any]=30 , lowerCamelCase__ : Union[str, Any]=2 , lowerCamelCase__ : List[Any]=3 , lowerCamelCase__ : str=True , lowerCamelCase__ : str=True , lowerCamelCase__ : Dict=32 , lowerCamelCase__ : str=5 , lowerCamelCase__ : Dict=4 , lowerCamelCase__ : Any=37 , lowerCamelCase__ : Optional[Any]="gelu" , lowerCamelCase__ : List[Any]=0.1 , lowerCamelCase__ : Dict=0.1 , lowerCamelCase__ : Tuple=10 , lowerCamelCase__ : List[Any]=0.02 , lowerCamelCase__ : List[Any]=3 , lowerCamelCase__ : str=0.6 , lowerCamelCase__ : int=None , ) -> Dict:
'''simple docstring'''
UpperCamelCase__ : Any = parent
UpperCamelCase__ : List[str] = batch_size
UpperCamelCase__ : List[Any] = image_size
UpperCamelCase__ : str = patch_size
UpperCamelCase__ : List[str] = num_channels
UpperCamelCase__ : int = is_training
UpperCamelCase__ : Dict = use_labels
UpperCamelCase__ : int = hidden_size
UpperCamelCase__ : Union[str, Any] = num_hidden_layers
UpperCamelCase__ : Tuple = num_attention_heads
UpperCamelCase__ : Union[str, Any] = intermediate_size
UpperCamelCase__ : Dict = hidden_act
UpperCamelCase__ : str = hidden_dropout_prob
UpperCamelCase__ : Tuple = attention_probs_dropout_prob
UpperCamelCase__ : Union[str, Any] = type_sequence_label_size
UpperCamelCase__ : str = initializer_range
UpperCamelCase__ : str = mask_ratio
UpperCamelCase__ : Tuple = scope
# in ViTMAE, the expected sequence length = (num_patches + 1) * (1 - config.mask_ratio), rounded above
# (we add 1 for the [CLS] token)
UpperCamelCase__ : Optional[int] = (image_size // patch_size) ** 2
UpperCamelCase__ : Dict = int(math.ceil((1 - mask_ratio) * (num_patches + 1) ) )
def UpperCAmelCase__ ( self : Union[str, Any] ) -> Any:
'''simple docstring'''
UpperCamelCase__ : Union[str, Any] = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
UpperCamelCase__ : List[str] = None
if self.use_labels:
UpperCamelCase__ : Dict = ids_tensor([self.batch_size] , self.type_sequence_label_size )
UpperCamelCase__ : Any = self.get_config()
return config, pixel_values, labels
def UpperCAmelCase__ ( self : Dict ) -> List[str]:
'''simple docstring'''
return ViTMAEConfig(
image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=lowerCamelCase__ , initializer_range=self.initializer_range , mask_ratio=self.mask_ratio , )
def UpperCAmelCase__ ( self : Tuple , lowerCamelCase__ : Dict , lowerCamelCase__ : Union[str, Any] , lowerCamelCase__ : List[str] ) -> Union[str, Any]:
'''simple docstring'''
UpperCamelCase__ : List[Any] = ViTMAEModel(config=lowerCamelCase__ )
model.to(lowerCamelCase__ )
model.eval()
UpperCamelCase__ : List[Any] = model(lowerCamelCase__ )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def UpperCAmelCase__ ( self : int , lowerCamelCase__ : str , lowerCamelCase__ : Optional[int] , lowerCamelCase__ : List[Any] ) -> List[Any]:
'''simple docstring'''
UpperCamelCase__ : Tuple = ViTMAEForPreTraining(lowerCamelCase__ )
model.to(lowerCamelCase__ )
model.eval()
UpperCamelCase__ : List[str] = model(lowerCamelCase__ )
UpperCamelCase__ : int = (self.image_size // self.patch_size) ** 2
UpperCamelCase__ : Optional[int] = self.patch_size**2 * self.num_channels
self.parent.assertEqual(result.logits.shape , (self.batch_size, num_patches, expected_num_channels) )
# test greyscale images
UpperCamelCase__ : List[Any] = 1
UpperCamelCase__ : int = ViTMAEForPreTraining(lowerCamelCase__ )
model.to(lowerCamelCase__ )
model.eval()
UpperCamelCase__ : Tuple = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
UpperCamelCase__ : Any = model(lowerCamelCase__ )
UpperCamelCase__ : Optional[Any] = self.patch_size**2
self.parent.assertEqual(result.logits.shape , (self.batch_size, num_patches, expected_num_channels) )
def UpperCAmelCase__ ( self : Optional[Any] ) -> str:
'''simple docstring'''
UpperCamelCase__ : Any = self.prepare_config_and_inputs()
UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ : Union[str, Any] = config_and_inputs
UpperCamelCase__ : Optional[int] = {'''pixel_values''': pixel_values}
return config, inputs_dict
@require_torch
class __magic_name__ ( __lowerCAmelCase , __lowerCAmelCase , unittest.TestCase):
A: Optional[Any] = (ViTMAEModel, ViTMAEForPreTraining) if is_torch_available() else ()
A: Union[str, Any] = {"feature-extraction": ViTMAEModel} if is_torch_available() else {}
A: Any = False
A: str = False
A: Optional[int] = False
A: Any = False
def UpperCAmelCase__ ( self : Optional[Any] ) -> int:
'''simple docstring'''
UpperCamelCase__ : Optional[int] = ViTMAEModelTester(self )
UpperCamelCase__ : Union[str, Any] = ConfigTester(self , config_class=lowerCamelCase__ , has_text_modality=lowerCamelCase__ , hidden_size=37 )
def UpperCAmelCase__ ( self : str ) -> Optional[int]:
'''simple docstring'''
self.config_tester.run_common_tests()
@unittest.skip(reason='''ViTMAE does not use inputs_embeds''' )
def UpperCAmelCase__ ( self : Tuple ) -> str:
'''simple docstring'''
pass
def UpperCAmelCase__ ( self : List[str] ) -> int:
'''simple docstring'''
UpperCamelCase__ , UpperCamelCase__ : Dict = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
UpperCamelCase__ : List[Any] = model_class(lowerCamelCase__ )
self.assertIsInstance(model.get_input_embeddings() , (nn.Module) )
UpperCamelCase__ : Optional[int] = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(lowerCamelCase__ , nn.Linear ) )
def UpperCAmelCase__ ( self : Optional[int] ) -> int:
'''simple docstring'''
UpperCamelCase__ , UpperCamelCase__ : Dict = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
UpperCamelCase__ : Optional[Any] = model_class(lowerCamelCase__ )
UpperCamelCase__ : List[Any] = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
UpperCamelCase__ : Optional[int] = [*signature.parameters.keys()]
UpperCamelCase__ : List[str] = ['''pixel_values''']
self.assertListEqual(arg_names[:1] , lowerCamelCase__ )
def UpperCAmelCase__ ( self : Dict ) -> str:
'''simple docstring'''
UpperCamelCase__ : str = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*lowerCamelCase__ )
def UpperCAmelCase__ ( self : int ) -> List[str]:
'''simple docstring'''
UpperCamelCase__ : Dict = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_pretraining(*lowerCamelCase__ )
def UpperCAmelCase__ ( self : int , lowerCamelCase__ : Union[str, Any] , lowerCamelCase__ : Any , lowerCamelCase__ : List[str] ) -> Tuple:
'''simple docstring'''
np.random.seed(2 )
UpperCamelCase__ : List[str] = int((pt_model.config.image_size // pt_model.config.patch_size) ** 2 )
UpperCamelCase__ : Any = np.random.uniform(size=(self.model_tester.batch_size, num_patches) )
UpperCamelCase__ : Optional[Any] = torch.from_numpy(lowerCamelCase__ )
# Add `noise` argument.
# PT inputs will be prepared in `super().check_pt_tf_models()` with this added `noise` argument
UpperCamelCase__ : Union[str, Any] = pt_noise
super().check_pt_tf_models(lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ )
def UpperCAmelCase__ ( self : Tuple ) -> List[str]:
'''simple docstring'''
UpperCamelCase__ , UpperCamelCase__ : int = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
UpperCamelCase__ : Tuple = model_class(lowerCamelCase__ )
model.to(lowerCamelCase__ )
model.eval()
# make random mask reproducible
torch.manual_seed(2 )
with torch.no_grad():
UpperCamelCase__ : Tuple = model(**self._prepare_for_class(lowerCamelCase__ , lowerCamelCase__ ) )
UpperCamelCase__ : int = outputs[0].cpu().numpy()
UpperCamelCase__ : Dict = 0
with tempfile.TemporaryDirectory() as tmpdirname:
model.save_pretrained(lowerCamelCase__ )
UpperCamelCase__ : Any = model_class.from_pretrained(lowerCamelCase__ )
model.to(lowerCamelCase__ )
# make random mask reproducible
torch.manual_seed(2 )
with torch.no_grad():
UpperCamelCase__ : Optional[Any] = model(**self._prepare_for_class(lowerCamelCase__ , lowerCamelCase__ ) )
# Make sure we don't have nans
UpperCamelCase__ : Union[str, Any] = after_outputs[0].cpu().numpy()
UpperCamelCase__ : Optional[Any] = 0
UpperCamelCase__ : List[Any] = np.amax(np.abs(out_a - out_a ) )
self.assertLessEqual(lowerCamelCase__ , 1E-5 )
@unittest.skip(
reason='''ViTMAE returns a random mask + ids_restore in each forward pass. See test_save_load
to get deterministic results.''' )
def UpperCAmelCase__ ( self : Union[str, Any] ) -> List[str]:
'''simple docstring'''
pass
@unittest.skip(
reason='''ViTMAE returns a random mask + ids_restore in each forward pass. See test_save_load
to get deterministic results.''' )
def UpperCAmelCase__ ( self : Tuple ) -> Optional[Any]:
'''simple docstring'''
pass
@unittest.skip(
reason='''ViTMAE returns a random mask + ids_restore in each forward pass. See test_save_load
to get deterministic results.''' )
def UpperCAmelCase__ ( self : List[Any] ) -> List[Any]:
'''simple docstring'''
pass
@unittest.skip(reason='''ViTMAE returns a random mask + ids_restore in each forward pass. See test_save_load''' )
def UpperCAmelCase__ ( self : int ) -> Optional[int]:
'''simple docstring'''
pass
@unittest.skip('''Will be fixed soon by reducing the size of the model used for common tests.''' )
def UpperCAmelCase__ ( self : List[Any] ) -> Union[str, Any]:
'''simple docstring'''
pass
@slow
def UpperCAmelCase__ ( self : Tuple ) -> Tuple:
'''simple docstring'''
for model_name in VIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
UpperCamelCase__ : Dict = ViTMAEModel.from_pretrained(lowerCamelCase__ )
self.assertIsNotNone(lowerCamelCase__ )
def _a ( ):
"""simple docstring"""
UpperCamelCase__ : Optional[Any] = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' )
return image
@require_torch
@require_vision
class __magic_name__ ( unittest.TestCase):
@cached_property
def UpperCAmelCase__ ( self : Optional[Any] ) -> str:
'''simple docstring'''
return ViTImageProcessor.from_pretrained('''facebook/vit-mae-base''' ) if is_vision_available() else None
@slow
def UpperCAmelCase__ ( self : str ) -> Any:
'''simple docstring'''
np.random.seed(2 )
UpperCamelCase__ : Dict = ViTMAEForPreTraining.from_pretrained('''facebook/vit-mae-base''' ).to(lowerCamelCase__ )
UpperCamelCase__ : Optional[Any] = self.default_image_processor
UpperCamelCase__ : int = prepare_img()
UpperCamelCase__ : str = image_processor(images=lowerCamelCase__ , return_tensors='''pt''' ).to(lowerCamelCase__ )
# prepare a noise vector that will be also used for testing the TF model
# (this way we can ensure that the PT and TF models operate on the same inputs)
UpperCamelCase__ : Tuple = ViTMAEConfig()
UpperCamelCase__ : List[Any] = int((vit_mae_config.image_size // vit_mae_config.patch_size) ** 2 )
UpperCamelCase__ : Dict = np.random.uniform(size=(1, num_patches) )
# forward pass
with torch.no_grad():
UpperCamelCase__ : List[Any] = model(**lowerCamelCase__ , noise=torch.from_numpy(lowerCamelCase__ ).to(device=lowerCamelCase__ ) )
# verify the logits
UpperCamelCase__ : Optional[Any] = torch.Size((1, 196, 768) )
self.assertEqual(outputs.logits.shape , lowerCamelCase__ )
UpperCamelCase__ : Dict = torch.tensor(
[[-0.0548, -1.7023, -0.9325], [0.3721, -0.5670, -0.2233], [0.8235, -1.3878, -0.3524]] )
self.assertTrue(torch.allclose(outputs.logits[0, :3, :3] , expected_slice.to(lowerCamelCase__ ) , atol=1E-4 ) )
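# Determinism sketch: ViTMAE samples a fresh random mask on every forward
# pass, so the integration check above pins np.random.seed(2) and feeds the
# same `noise` tensor explicitly; without both, the logits differ run to run:
#   noise = np.random.uniform(size=(1, num_patches))
#   outputs = model(**inputs, noise=torch.from_numpy(noise).to(device))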
| 146 | 1 |
import collections
import inspect
import unittest
from transformers import SwinvaConfig
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, _config_zero_init, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import SwinvaForImageClassification, SwinvaForMaskedImageModeling, SwinvaModel
from transformers.models.swinva.modeling_swinva import SWINV2_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class lowercase_ :
'''simple docstring'''
def __init__( self : Union[str, Any] , _UpperCAmelCase : List[Any] , _UpperCAmelCase : Dict=13 , _UpperCAmelCase : List[str]=32 , _UpperCAmelCase : Dict=2 , _UpperCAmelCase : str=3 , _UpperCAmelCase : Tuple=16 , _UpperCAmelCase : Dict=[1, 2, 1] , _UpperCAmelCase : Dict=[2, 2, 4] , _UpperCAmelCase : Optional[Any]=2 , _UpperCAmelCase : Optional[Any]=2.0 , _UpperCAmelCase : int=True , _UpperCAmelCase : Optional[Any]=0.0 , _UpperCAmelCase : str=0.0 , _UpperCAmelCase : Dict=0.1 , _UpperCAmelCase : Any="gelu" , _UpperCAmelCase : Dict=False , _UpperCAmelCase : str=True , _UpperCAmelCase : Union[str, Any]=0.02 , _UpperCAmelCase : Union[str, Any]=1E-5 , _UpperCAmelCase : Optional[Any]=True , _UpperCAmelCase : Any=None , _UpperCAmelCase : str=True , _UpperCAmelCase : Union[str, Any]=10 , _UpperCAmelCase : Optional[Any]=8 , ):
_A = parent
_A = batch_size
_A = image_size
_A = patch_size
_A = num_channels
_A = embed_dim
_A = depths
_A = num_heads
_A = window_size
_A = mlp_ratio
_A = qkv_bias
_A = hidden_dropout_prob
_A = attention_probs_dropout_prob
_A = drop_path_rate
_A = hidden_act
_A = use_absolute_embeddings
_A = patch_norm
_A = layer_norm_eps
_A = initializer_range
_A = is_training
_A = scope
_A = use_labels
_A = type_sequence_label_size
_A = encoder_stride
def lowerCAmelCase_ ( self : int ):
_A = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
_A = None
if self.use_labels:
_A = ids_tensor([self.batch_size] , self.type_sequence_label_size )
_A = self.get_config()
return config, pixel_values, labels
def lowerCAmelCase_ ( self : List[Any] ):
return SwinvaConfig(
image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , embed_dim=self.embed_dim , depths=self.depths , num_heads=self.num_heads , window_size=self.window_size , mlp_ratio=self.mlp_ratio , qkv_bias=self.qkv_bias , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , drop_path_rate=self.drop_path_rate , hidden_act=self.hidden_act , use_absolute_embeddings=self.use_absolute_embeddings , path_norm=self.patch_norm , layer_norm_eps=self.layer_norm_eps , initializer_range=self.initializer_range , encoder_stride=self.encoder_stride , )
def lowerCAmelCase_ ( self : Tuple , _UpperCAmelCase : Optional[Any] , _UpperCAmelCase : str , _UpperCAmelCase : Any ):
_A = SwinvaModel(config=_UpperCAmelCase )
model.to(_UpperCAmelCase )
model.eval()
_A = model(_UpperCAmelCase )
_A = ((config.image_size // config.patch_size) ** 2) // (4 ** (len(config.depths ) - 1))
_A = int(config.embed_dim * 2 ** (len(config.depths ) - 1) )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, expected_seq_len, expected_dim) )
def lowerCAmelCase_ ( self : List[Any] , _UpperCAmelCase : Union[str, Any] , _UpperCAmelCase : Any , _UpperCAmelCase : Tuple ):
_A = SwinvaForMaskedImageModeling(config=_UpperCAmelCase )
model.to(_UpperCAmelCase )
model.eval()
_A = model(_UpperCAmelCase )
self.parent.assertEqual(
result.logits.shape , (self.batch_size, self.num_channels, self.image_size, self.image_size) )
# test greyscale images
_A = 1
_A = SwinvaForMaskedImageModeling(_UpperCAmelCase )
model.to(_UpperCAmelCase )
model.eval()
_A = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
_A = model(_UpperCAmelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, 1, self.image_size, self.image_size) )
def lowerCAmelCase_ ( self : List[Any] , _UpperCAmelCase : Any , _UpperCAmelCase : str , _UpperCAmelCase : Optional[int] ):
_A = self.type_sequence_label_size
_A = SwinvaForImageClassification(_UpperCAmelCase )
model.to(_UpperCAmelCase )
model.eval()
_A = model(_UpperCAmelCase , labels=_UpperCAmelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
def lowerCAmelCase_ ( self : Union[str, Any] ):
_A = self.prepare_config_and_inputs()
_A , _A , _A = config_and_inputs
_A = {'pixel_values': pixel_values}
return config, inputs_dict
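# Consumption sketch for the tester above (bound to the obfuscated class name
# `lowercase_` in this copy; mirrors the common-test driver pattern below):
#   tester = lowercase_(self)
#   config, pixel_values, labels = tester.prepare_config_and_inputs()
#   tester.create_and_check_model(config, pixel_values, labels)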
@require_torch
class lowercase_ ( __lowerCAmelCase , __lowerCAmelCase , unittest.TestCase ):
'''simple docstring'''
UpperCAmelCase : Tuple = (
(SwinvaModel, SwinvaForImageClassification, SwinvaForMaskedImageModeling) if is_torch_available() else ()
)
UpperCAmelCase : Dict = (
{'''feature-extraction''': SwinvaModel, '''image-classification''': SwinvaForImageClassification}
if is_torch_available()
else {}
)
UpperCAmelCase : List[str] = False
UpperCAmelCase : List[str] = False
UpperCAmelCase : Any = False
UpperCAmelCase : Tuple = False
def lowerCAmelCase_ ( self : str ):
_A = SwinvaModelTester(self )
_A = ConfigTester(self , config_class=_UpperCAmelCase , embed_dim=37 )
def lowerCAmelCase_ ( self : Any ):
self.config_tester.create_and_test_config_to_json_string()
self.config_tester.create_and_test_config_to_json_file()
self.config_tester.create_and_test_config_from_and_save_pretrained()
self.config_tester.create_and_test_config_with_num_labels()
self.config_tester.check_config_can_be_init_without_params()
self.config_tester.check_config_arguments_init()
    def test_model ( self : List[Any] ):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs )
    @unittest.skip(reason='Got `CUDA error: misaligned address` with PyTorch 2.0.0.' )
    def test_multi_gpu_data_parallel_forward ( self : int ):
        pass
    @unittest.skip(reason='Swinv2 does not use inputs_embeds' )
    def test_inputs_embeds ( self : Optional[Any] ):
        pass
    def test_model_common_attributes ( self : Tuple ):
        config , inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            model = model_class(config )
            self.assertIsInstance(model.get_input_embeddings() , (nn.Module) )
            x = model.get_output_embeddings()
            self.assertTrue(x is None or isinstance(x , nn.Linear ) )
    def test_forward_signature ( self : Dict ):
        config , inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            model = model_class(config )
            signature = inspect.signature(model.forward )
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]
            expected_arg_names = ['pixel_values']
            self.assertListEqual(arg_names[:1] , expected_arg_names )
    def test_attention_outputs ( self : List[Any] ):
        config , inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.return_dict = True
        for model_class in self.all_model_classes:
            inputs_dict['output_attentions'] = True
            inputs_dict['output_hidden_states'] = False
            config.return_dict = True
            model = model_class(config )
            model.to(torch_device )
            model.eval()
            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict , model_class ) )
            attentions = outputs.attentions
            expected_num_attentions = len(self.model_tester.depths )
            self.assertEqual(len(attentions ) , expected_num_attentions )
            # check that output_attentions also work using config
            del inputs_dict["output_attentions"]
            config.output_attentions = True
            window_size_squared = config.window_size**2
            model = model_class(config )
            model.to(torch_device )
            model.eval()
            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict , model_class ) )
            attentions = outputs.attentions
            self.assertEqual(len(attentions ) , expected_num_attentions )
            self.assertListEqual(
                list(attentions[0].shape[-3:] ) , [self.model_tester.num_heads[0], window_size_squared, window_size_squared] , )
            out_len = len(outputs )
            # Check attention is always last and order is fine
            inputs_dict['output_attentions'] = True
            inputs_dict['output_hidden_states'] = True
            model = model_class(config )
            model.to(torch_device )
            model.eval()
            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict , model_class ) )
            if hasattr(self.model_tester , 'num_hidden_states_types' ):
                added_hidden_states = self.model_tester.num_hidden_states_types
            else:
                # also another +1 for reshaped_hidden_states
                added_hidden_states = 2
            self.assertEqual(out_len + added_hidden_states , len(outputs ) )
            self_attentions = outputs.attentions
            self.assertEqual(len(self_attentions ) , expected_num_attentions )
            self.assertListEqual(
                list(self_attentions[0].shape[-3:] ) , [self.model_tester.num_heads[0], window_size_squared, window_size_squared] , )
    def check_hidden_states_output ( self : Optional[Any] , inputs_dict : Dict , config : List[str] , model_class : Tuple , image_size : List[str] ):
        model = model_class(config )
        model.to(torch_device )
        model.eval()
        with torch.no_grad():
            outputs = model(**self._prepare_for_class(inputs_dict , model_class ) )
        hidden_states = outputs.hidden_states
        expected_num_layers = getattr(
            self.model_tester , 'expected_num_hidden_layers' , len(self.model_tester.depths ) + 1 )
        self.assertEqual(len(hidden_states ) , expected_num_layers )
        # Swinv2 has a different seq_length
        patch_size = (
            config.patch_size
            if isinstance(config.patch_size , collections.abc.Iterable )
            else (config.patch_size, config.patch_size)
        )
        num_patches = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0])
        self.assertListEqual(
            list(hidden_states[0].shape[-2:] ) , [num_patches, self.model_tester.embed_dim] , )
        reshaped_hidden_states = outputs.reshaped_hidden_states
        self.assertEqual(len(reshaped_hidden_states ) , expected_num_layers )
        batch_size , num_channels , height , width = reshaped_hidden_states[0].shape
        reshaped_hidden_states = (
            reshaped_hidden_states[0].view(batch_size , num_channels , height * width ).permute(0 , 2 , 1 )
        )
        self.assertListEqual(
            list(reshaped_hidden_states.shape[-2:] ) , [num_patches, self.model_tester.embed_dim] , )
    def test_hidden_states_output ( self : Optional[Any] ):
        config , inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        image_size = (
            self.model_tester.image_size
            if isinstance(self.model_tester.image_size , collections.abc.Iterable )
            else (self.model_tester.image_size, self.model_tester.image_size)
        )
        for model_class in self.all_model_classes:
            inputs_dict['output_hidden_states'] = True
            self.check_hidden_states_output(inputs_dict , config , model_class , image_size )
            # check that output_hidden_states also work using config
            del inputs_dict["output_hidden_states"]
            config.output_hidden_states = True
            self.check_hidden_states_output(inputs_dict , config , model_class , image_size )
    def test_hidden_states_output_with_padding ( self : Optional[int] ):
        config , inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.patch_size = 3
        image_size = (
            self.model_tester.image_size
            if isinstance(self.model_tester.image_size , collections.abc.Iterable )
            else (self.model_tester.image_size, self.model_tester.image_size)
        )
        patch_size = (
            config.patch_size
            if isinstance(config.patch_size , collections.abc.Iterable )
            else (config.patch_size, config.patch_size)
        )
        padded_height = image_size[0] + patch_size[0] - (image_size[0] % patch_size[0])
        padded_width = image_size[1] + patch_size[1] - (image_size[1] % patch_size[1])
        for model_class in self.all_model_classes:
            inputs_dict['output_hidden_states'] = True
            self.check_hidden_states_output(inputs_dict , config , model_class , (padded_height, padded_width) )
            # check that output_hidden_states also work using config
            del inputs_dict["output_hidden_states"]
            config.output_hidden_states = True
            self.check_hidden_states_output(inputs_dict , config , model_class , (padded_height, padded_width) )
    def test_for_masked_image_modeling ( self : List[Any] ):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_masked_image_modeling(*config_and_inputs )
    def test_for_image_classification ( self : Optional[Any] ):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*config_and_inputs )
    @slow
    def test_model_from_pretrained ( self : str ):
        for model_name in SWINV2_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = SwinvaModel.from_pretrained(model_name )
            self.assertIsNotNone(model )
    def test_initialization ( self : List[str] ):
        config , inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        configs_no_init = _config_zero_init(config )
        for model_class in self.all_model_classes:
            model = model_class(config=configs_no_init )
            for name, param in model.named_parameters():
                if "embeddings" not in name and "logit_scale" not in name and param.requires_grad:
                    self.assertIn(
                        ((param.data.mean() * 1E9).round() / 1E9).item() , [0.0, 1.0] , msg=F'''Parameter {name} of model {model_class} seems not properly initialized''' , )
@require_vision
@require_torch
class Swinv2ModelIntegrationTest ( unittest.TestCase ):
'''simple docstring'''
    @cached_property
    def default_image_processor ( self : Dict ):
        return (
            AutoImageProcessor.from_pretrained('microsoft/swinv2-tiny-patch4-window8-256' )
            if is_vision_available()
            else None
        )
    @slow
    def test_inference_image_classification_head ( self : Optional[Any] ):
        model = SwinvaForImageClassification.from_pretrained('microsoft/swinv2-tiny-patch4-window8-256' ).to(
            torch_device )
        image_processor = self.default_image_processor
        image = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' )
        inputs = image_processor(images=image , return_tensors='pt' ).to(torch_device )
        # forward pass
        with torch.no_grad():
            outputs = model(**inputs )
        # verify the logits
        expected_shape = torch.Size((1, 1_000) )
        self.assertEqual(outputs.logits.shape , expected_shape )
        expected_slice = torch.tensor([-0.3947, -0.4306, 0.0026] ).to(torch_device )
        self.assertTrue(torch.allclose(outputs.logits[0, :3] , expected_slice , atol=1E-4 ) )
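# --- Editor's note: the shape assertions above follow from Swin's patch-merging design:
# each of the (num_stages - 1) merge steps pools 2x2 tokens (dividing the sequence length
# by 4) and doubles the channel dimension. A minimal standalone sketch of that arithmetic
# (plain Python; the sample numbers are illustrative, not taken from this test file):
def swin_output_shape(image_size: int, patch_size: int, embed_dim: int, num_stages: int):
    seq_len = (image_size // patch_size) ** 2  # tokens after the patch embedding
    seq_len //= 4 ** (num_stages - 1)  # each merge stage keeps 1/4 of the tokens
    dim = int(embed_dim * 2 ** (num_stages - 1))  # channels double per merge stage
    return seq_len, dim
# e.g. a 256x256 image, 4x4 patches, embed_dim=96, 4 stages -> (64, 768)
assert swin_output_shape(256, 4, 96, 4) == (64, 768)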
| 360 |
"""simple docstring"""
import tempfile
import torch
from diffusers import IPNDMScheduler
from .test_schedulers import SchedulerCommonTest
class IPNDMSchedulerTest ( SchedulerCommonTest ):
    '''simple docstring'''
    scheduler_classes = (IPNDMScheduler,)
    forward_default_kwargs = (('''num_inference_steps''', 50),)
    def get_scheduler_config ( self : Union[str, Any] , **kwargs : List[Any] ):
        config = {'num_train_timesteps': 1_000}
        config.update(**kwargs )
        return config
    def check_over_configs ( self : Tuple , time_step : Optional[int]=0 , **config : Union[str, Any] ):
        kwargs = dict(self.forward_default_kwargs )
        num_inference_steps = kwargs.pop('num_inference_steps' , None )
        sample = self.dummy_sample
        residual = 0.1 * sample
        dummy_past_residuals = [residual + 0.2, residual + 0.15, residual + 0.1, residual + 0.05]
        for scheduler_class in self.scheduler_classes:
            scheduler_config = self.get_scheduler_config(**config )
            scheduler = scheduler_class(**scheduler_config )
            scheduler.set_timesteps(num_inference_steps )
            # copy over dummy past residuals
            scheduler.ets = dummy_past_residuals[:]
            if time_step is None:
                time_step = scheduler.timesteps[len(scheduler.timesteps ) // 2]
            with tempfile.TemporaryDirectory() as tmpdirname:
                scheduler.save_config(tmpdirname )
                new_scheduler = scheduler_class.from_pretrained(tmpdirname )
                new_scheduler.set_timesteps(num_inference_steps )
                # copy over dummy past residuals
                new_scheduler.ets = dummy_past_residuals[:]
            output = scheduler.step(residual , time_step , sample , **kwargs ).prev_sample
            new_output = new_scheduler.step(residual , time_step , sample , **kwargs ).prev_sample
            assert torch.sum(torch.abs(output - new_output ) ) < 1E-5, "Scheduler outputs are not identical"
            output = scheduler.step(residual , time_step , sample , **kwargs ).prev_sample
            new_output = new_scheduler.step(residual , time_step , sample , **kwargs ).prev_sample
            assert torch.sum(torch.abs(output - new_output ) ) < 1E-5, "Scheduler outputs are not identical"
    def test_from_save_pretrained ( self : str ):
        pass
    def check_over_forward ( self : Optional[Any] , time_step : Any=0 , **forward_kwargs : Any ):
        kwargs = dict(self.forward_default_kwargs )
        num_inference_steps = kwargs.pop('num_inference_steps' , None )
        sample = self.dummy_sample
        residual = 0.1 * sample
        dummy_past_residuals = [residual + 0.2, residual + 0.15, residual + 0.1, residual + 0.05]
        for scheduler_class in self.scheduler_classes:
            scheduler_config = self.get_scheduler_config()
            scheduler = scheduler_class(**scheduler_config )
            scheduler.set_timesteps(num_inference_steps )
            # copy over dummy past residuals (must be after setting timesteps)
            scheduler.ets = dummy_past_residuals[:]
            if time_step is None:
                time_step = scheduler.timesteps[len(scheduler.timesteps ) // 2]
            with tempfile.TemporaryDirectory() as tmpdirname:
                scheduler.save_config(tmpdirname )
                new_scheduler = scheduler_class.from_pretrained(tmpdirname )
                # copy over dummy past residuals
                new_scheduler.set_timesteps(num_inference_steps )
                # copy over dummy past residual (must be after setting timesteps)
                new_scheduler.ets = dummy_past_residuals[:]
            output = scheduler.step(residual , time_step , sample , **kwargs ).prev_sample
            new_output = new_scheduler.step(residual , time_step , sample , **kwargs ).prev_sample
            assert torch.sum(torch.abs(output - new_output ) ) < 1E-5, "Scheduler outputs are not identical"
            output = scheduler.step(residual , time_step , sample , **kwargs ).prev_sample
            new_output = new_scheduler.step(residual , time_step , sample , **kwargs ).prev_sample
            assert torch.sum(torch.abs(output - new_output ) ) < 1E-5, "Scheduler outputs are not identical"
    def full_loop ( self : List[str] , **config : Optional[int] ):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config(**config )
        scheduler = scheduler_class(**scheduler_config )
        num_inference_steps = 10
        model = self.dummy_model()
        sample = self.dummy_sample_deter
        scheduler.set_timesteps(num_inference_steps )
        for i, t in enumerate(scheduler.timesteps ):
            residual = model(sample , t )
            sample = scheduler.step(residual , t , sample ).prev_sample
        for i, t in enumerate(scheduler.timesteps ):
            residual = model(sample , t )
            sample = scheduler.step(residual , t , sample ).prev_sample
        return sample
    def test_step_shape ( self : Union[str, Any] ):
        kwargs = dict(self.forward_default_kwargs )
        num_inference_steps = kwargs.pop('num_inference_steps' , None )
        for scheduler_class in self.scheduler_classes:
            scheduler_config = self.get_scheduler_config()
            scheduler = scheduler_class(**scheduler_config )
            sample = self.dummy_sample
            residual = 0.1 * sample
            if num_inference_steps is not None and hasattr(scheduler , 'set_timesteps' ):
                scheduler.set_timesteps(num_inference_steps )
            elif num_inference_steps is not None and not hasattr(scheduler , 'set_timesteps' ):
                scheduler.num_inference_steps = num_inference_steps
            # copy over dummy past residuals (must be done after set_timesteps)
            dummy_past_residuals = [residual + 0.2, residual + 0.15, residual + 0.1, residual + 0.05]
            scheduler.ets = dummy_past_residuals[:]
            time_step_0 = scheduler.timesteps[5]
            time_step_1 = scheduler.timesteps[6]
            output_0 = scheduler.step(residual , time_step_0 , sample , **kwargs ).prev_sample
            output_1 = scheduler.step(residual , time_step_1 , sample , **kwargs ).prev_sample
            self.assertEqual(output_0.shape , sample.shape )
            self.assertEqual(output_0.shape , output_1.shape )
            output_0 = scheduler.step(residual , time_step_0 , sample , **kwargs ).prev_sample
            output_1 = scheduler.step(residual , time_step_1 , sample , **kwargs ).prev_sample
            self.assertEqual(output_0.shape , sample.shape )
            self.assertEqual(output_0.shape , output_1.shape )
    def test_timesteps ( self : Tuple ):
        for timesteps in [100, 1_000]:
            self.check_over_configs(num_train_timesteps=timesteps , time_step=None )
    def test_inference_steps ( self : List[Any] ):
        for t, num_inference_steps in zip([1, 5, 10] , [10, 50, 100] ):
            self.check_over_forward(num_inference_steps=num_inference_steps , time_step=None )
    def test_full_loop_no_noise ( self : Optional[int] ):
        sample = self.full_loop()
        result_mean = torch.mean(torch.abs(sample ) )
        assert abs(result_mean.item() - 2_540_529 ) < 10
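# --- Editor's note: the save/load round-trip that check_over_configs exercises can be
# reproduced directly against the public diffusers scheduler API; a hedged sketch
# (values and step counts are arbitrary):
def _example_roundtrip():
    import tempfile
    import torch
    from diffusers import IPNDMScheduler
    scheduler = IPNDMScheduler(num_train_timesteps=1_000)
    scheduler.set_timesteps(50)
    with tempfile.TemporaryDirectory() as tmpdirname:
        scheduler.save_config(tmpdirname)
        reloaded = IPNDMScheduler.from_pretrained(tmpdirname)
    reloaded.set_timesteps(50)
    assert torch.equal(scheduler.timesteps, reloaded.timesteps)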
| 271 | 0 |
'''simple docstring'''
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from timm import create_model
from timm.data import resolve_data_config
from timm.data.transforms_factory import create_transform
from transformers import BitConfig, BitForImageClassification, BitImageProcessor
from transformers.image_utils import PILImageResampling
from transformers.utils import logging
logging.set_verbosity_info()
lowerCAmelCase__ = logging.get_logger(__name__)
def get_config ( model_name ):
    """simple docstring"""
    repo_id = '''huggingface/label-files'''
    filename = '''imagenet-1k-id2label.json'''
    idalabel = json.load(open(hf_hub_download(repo_id , filename , repo_type='''dataset''' ) , '''r''' ) )
    idalabel = {int(k ): v for k, v in idalabel.items()}
    labelaid = {v: k for k, v in idalabel.items()}
    conv_layer = '''std_conv''' if '''bit''' in model_name else False
    # note that when using BiT as backbone for ViT-hybrid checkpoints,
    # one needs to additionally set config.layer_type = "bottleneck", config.stem_type = "same",
    # config.conv_layer = "std_conv_same"
    config = BitConfig(
        conv_layer=conv_layer , num_labels=1000 , id2label=idalabel , label2id=labelaid , )
    return config
def rename_key ( name ):
    """simple docstring"""
    if "stem.conv" in name:
        name = name.replace('''stem.conv''' , '''bit.embedder.convolution''' )
    if "blocks" in name:
        name = name.replace('''blocks''' , '''layers''' )
    if "head.fc" in name:
        name = name.replace('''head.fc''' , '''classifier.1''' )
    if name.startswith('''norm''' ):
        name = '''bit.''' + name
    if "bit" not in name and "classifier" not in name:
        name = '''bit.encoder.''' + name
    return name
def prepare_img ( ):
    """simple docstring"""
    url = '''http://images.cocodataset.org/val2017/000000039769.jpg'''
    im = Image.open(requests.get(url , stream=True ).raw )
    return im
@torch.no_grad()
def convert_bit_checkpoint ( model_name , pytorch_dump_folder_path , push_to_hub=False ):
    """simple docstring"""
    config = get_config(model_name )
    # load original model from timm
    timm_model = create_model(model_name , pretrained=True )
    timm_model.eval()
    # load state_dict of original model
    state_dict = timm_model.state_dict()
    for key in state_dict.copy().keys():
        val = state_dict.pop(key )
        state_dict[rename_key(key )] = val.squeeze() if '''head''' in key else val
    # load HuggingFace model
    model = BitForImageClassification(config )
    model.eval()
    model.load_state_dict(state_dict )
    # create image processor
    transform = create_transform(**resolve_data_config({} , model=timm_model ) )
    timm_transforms = transform.transforms
    pillow_resamplings = {
        '''bilinear''': PILImageResampling.BILINEAR,
        '''bicubic''': PILImageResampling.BICUBIC,
        '''nearest''': PILImageResampling.NEAREST,
    }
    processor = BitImageProcessor(
        do_resize=True , size={'''shortest_edge''': timm_transforms[0].size} , resample=pillow_resamplings[timm_transforms[0].interpolation.value] , do_center_crop=True , crop_size={'''height''': timm_transforms[1].size[0], '''width''': timm_transforms[1].size[1]} , do_normalize=True , image_mean=timm_transforms[-1].mean.tolist() , image_std=timm_transforms[-1].std.tolist() , )
    image = prepare_img()
    timm_pixel_values = transform(image ).unsqueeze(0 )
    pixel_values = processor(image , return_tensors='''pt''' ).pixel_values
    # verify pixel values
    assert torch.allclose(timm_pixel_values , pixel_values )
    # verify logits
    with torch.no_grad():
        outputs = model(pixel_values )
        logits = outputs.logits
    print('''Logits:''' , logits[0, :3] )
    print('''Predicted class:''' , model.config.id2label[logits.argmax(-1 ).item()] )
    timm_logits = timm_model(pixel_values )
    assert timm_logits.shape == outputs.logits.shape
    assert torch.allclose(timm_logits , outputs.logits , atol=1e-3 )
    print('''Looks ok!''' )
    if pytorch_dump_folder_path is not None:
        Path(pytorch_dump_folder_path ).mkdir(exist_ok=True )
        print(F"Saving model {model_name} and processor to {pytorch_dump_folder_path}" )
        model.save_pretrained(pytorch_dump_folder_path )
        processor.save_pretrained(pytorch_dump_folder_path )
    if push_to_hub:
        print(F"Pushing model {model_name} and processor to the hub" )
        model.push_to_hub(F"ybelkada/{model_name}" )
        processor.push_to_hub(F"ybelkada/{model_name}" )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--model_name''',
default='''resnetv2_50x1_bitm''',
type=str,
help='''Name of the BiT timm model you\'d like to convert.''',
)
parser.add_argument(
'''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model directory.'''
)
parser.add_argument(
'''--push_to_hub''',
action='''store_true''',
help='''Whether to push the model to the hub.''',
)
    args = parser.parse_args()
convert_bit_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
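# --- Editor's note: a quick sanity check of the rename_key mapping above, run on a few
# hypothetical timm-style keys (illustrative names, not read from a real checkpoint):
#   rename_key("stem.conv.weight")               -> "bit.embedder.convolution.weight"
#   rename_key("head.fc.weight")                 -> "classifier.1.weight"
#   rename_key("stages.0.blocks.0.conv1.weight") -> "bit.encoder.stages.0.layers.0.conv1.weight"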
| 104 |
import dataclasses
import json
import sys
import types
from argparse import ArgumentDefaultsHelpFormatter, ArgumentParser, ArgumentTypeError
from copy import copy
from enum import Enum
from inspect import isclass
from pathlib import Path
from typing import Any, Callable, Dict, Iterable, List, Literal, NewType, Optional, Tuple, Union, get_type_hints
import yaml
DataClass = NewType("DataClass", Any)
DataClassType = NewType("DataClassType", Any)
def string_to_bool ( v ):
    '''simple docstring'''
    if isinstance(v , bool ):
        return v
    if v.lower() in ("yes", "true", "t", "y", "1"):
        return True
    elif v.lower() in ("no", "false", "f", "n", "0"):
        return False
    else:
        raise ArgumentTypeError(
            f"Truthy value expected: got {v} but expected one of yes/no, true/false, t/f, y/n, 1/0 (case insensitive)." )
def make_choice_type_function ( choices ):
    '''simple docstring'''
    str_to_choice = {str(choice ): choice for choice in choices}
    return lambda arg : str_to_choice.get(arg , arg )
def HfArg ( *,
    aliases = None , help = None , default = dataclasses.MISSING , default_factory = dataclasses.MISSING , metadata = None , **kwargs , ):
    '''simple docstring'''
    if metadata is None:
        # Important, don't use as default param in function signature because dict is mutable and shared across function calls
        metadata = {}
    if aliases is not None:
        metadata['aliases'] = aliases
    if help is not None:
        metadata['help'] = help
    return dataclasses.field(metadata=metadata , default=default , default_factory=default_factory , **kwargs )
class HfArgumentParser ( ArgumentParser ):
    '''simple docstring'''
    dataclass_types : Iterable[DataClassType]
    def __init__( self : Optional[Any] , dataclass_types : Union[DataClassType, Iterable[DataClassType]] , **kwargs : Optional[int] ):
        """simple docstring"""
        if "formatter_class" not in kwargs:
            kwargs['formatter_class'] = ArgumentDefaultsHelpFormatter
        super().__init__(**kwargs )
        if dataclasses.is_dataclass(dataclass_types ):
            dataclass_types = [dataclass_types]
        self.dataclass_types = list(dataclass_types )
        for dtype in self.dataclass_types:
            self._add_dataclass_arguments(dtype )
    @staticmethod
    def _parse_dataclass_field ( parser : ArgumentParser , field : dataclasses.Field ):
        """simple docstring"""
        field_name = f"--{field.name}"
        kwargs = field.metadata.copy()
        # field.metadata is not used at all by Data Classes,
        # it is provided as a third-party extension mechanism.
        if isinstance(field.type , str ):
            raise RuntimeError(
                "Unresolved type detected, which should have been done with the help of "
                "`typing.get_type_hints` method by default" )
        aliases = kwargs.pop("aliases" , [] )
        if isinstance(aliases , str ):
            aliases = [aliases]
        origin_type = getattr(field.type , "__origin__" , field.type )
        if origin_type is Union or (hasattr(types , "UnionType" ) and isinstance(origin_type , types.UnionType )):
            if str not in field.type.__args__ and (
                len(field.type.__args__ ) != 2 or type(None ) not in field.type.__args__
            ):
                raise ValueError(
                    "Only `Union[X, NoneType]` (i.e., `Optional[X]`) is allowed for `Union` because"
                    " the argument parser only supports one type per argument."
                    f" Problem encountered in field '{field.name}'." )
            if type(None ) not in field.type.__args__:
                # filter `str` in Union
                field.type = field.type.__args__[0] if field.type.__args__[1] == str else field.type.__args__[1]
                origin_type = getattr(field.type , "__origin__" , field.type )
            elif bool not in field.type.__args__:
                # filter `NoneType` in Union (except for `Union[bool, NoneType]`)
                field.type = (
                    field.type.__args__[0] if isinstance(None , field.type.__args__[1] ) else field.type.__args__[1]
                )
                origin_type = getattr(field.type , "__origin__" , field.type )
        # A variable to store kwargs for a boolean field, if needed
        # so that we can init a `no_*` complement argument (see below)
        bool_kwargs = {}
        if origin_type is Literal or (isinstance(field.type , type ) and issubclass(field.type , Enum )):
            if origin_type is Literal:
                kwargs['choices'] = field.type.__args__
            else:
                kwargs['choices'] = [x.value for x in field.type]
            kwargs['type'] = make_choice_type_function(kwargs['choices'] )
            if field.default is not dataclasses.MISSING:
                kwargs['default'] = field.default
            else:
                kwargs['required'] = True
        elif field.type is bool or field.type == Optional[bool]:
            # Copy the currect kwargs to use to instantiate a `no_*` complement argument below.
            # We do not initialize it here because the `no_*` alternative must be instantiated after the real argument
            bool_kwargs = copy(kwargs )
            # Hack because type=bool in argparse does not behave as we want.
            kwargs['type'] = string_to_bool
            if field.type is bool or (field.default is not None and field.default is not dataclasses.MISSING):
                # Default value is False if we have no default when of type bool.
                default = False if field.default is dataclasses.MISSING else field.default
                # This is the value that will get picked if we don't include --field_name in any way
                kwargs['default'] = default
                # This tells argparse we accept 0 or 1 value after --field_name
                kwargs['nargs'] = "?"
                # This is the value that will get picked if we do --field_name (without value)
                kwargs['const'] = True
        elif isclass(origin_type ) and issubclass(origin_type , list ):
            kwargs['type'] = field.type.__args__[0]
            kwargs['nargs'] = "+"
            if field.default_factory is not dataclasses.MISSING:
                kwargs['default'] = field.default_factory()
            elif field.default is dataclasses.MISSING:
                kwargs['required'] = True
        else:
            kwargs['type'] = field.type
            if field.default is not dataclasses.MISSING:
                kwargs['default'] = field.default
            elif field.default_factory is not dataclasses.MISSING:
                kwargs['default'] = field.default_factory()
            else:
                kwargs['required'] = True
        parser.add_argument(field_name , *aliases , **kwargs )
        # Add a complement `no_*` argument for a boolean field AFTER the initial field has already been added.
        # Order is important for arguments with the same destination!
        # We use a copy of earlier kwargs because the original kwargs have changed a lot before reaching down
        # here and we do not need those changes/additional keys.
        if field.default is True and (field.type is bool or field.type == Optional[bool]):
            bool_kwargs['default'] = False
            parser.add_argument(f"--no_{field.name}" , action="store_false" , dest=field.name , **bool_kwargs )
    def _add_dataclass_arguments ( self : List[str] , dtype : DataClassType ):
        """simple docstring"""
        if hasattr(dtype , "_argument_group_name" ):
            parser = self.add_argument_group(dtype._argument_group_name )
        else:
            parser = self
        try:
            type_hints = get_type_hints(dtype )
        except NameError:
            raise RuntimeError(
                f"Type resolution failed for {dtype}. Try declaring the class in global scope or "
                "removing line of `from __future__ import annotations` which opts in Postponed "
                "Evaluation of Annotations (PEP 563)" )
        except TypeError as ex:
            # Remove this block when we drop Python 3.9 support
            if sys.version_info[:2] < (3, 10) and "unsupported operand type(s) for |" in str(ex ):
                python_version = ".".join(map(str , sys.version_info[:3] ) )
                raise RuntimeError(
                    f"Type resolution failed for {dtype} on Python {python_version}. Try removing "
                    "line of `from __future__ import annotations` which opts in union types as "
                    "`X | Y` (PEP 604) via Postponed Evaluation of Annotations (PEP 563). To "
                    "support Python versions that lower than 3.10, you need to use "
                    "`typing.Union[X, Y]` instead of `X | Y` and `typing.Optional[X]` instead of "
                    "`X | None`." ) from ex
            raise
        for field in dataclasses.fields(dtype ):
            if not field.init:
                continue
            field.type = type_hints[field.name]
            self._parse_dataclass_field(parser , field )
    def parse_args_into_dataclasses ( self : Union[str, Any] , args : Union[str, Any]=None , return_remaining_strings : Union[str, Any]=False , look_for_args_file : List[str]=True , args_filename : int=None , args_file_flag : Optional[int]=None , ):
        """simple docstring"""
        if args_file_flag or args_filename or (look_for_args_file and len(sys.argv )):
            args_files = []
            if args_filename:
                args_files.append(Path(args_filename ) )
            elif look_for_args_file and len(sys.argv ):
                args_files.append(Path(sys.argv[0] ).with_suffix(".args" ) )
            # args files specified via command line flag should overwrite default args files so we add them last
            if args_file_flag:
                # Create special parser just to extract the args_file_flag values
                args_file_parser = ArgumentParser()
                args_file_parser.add_argument(args_file_flag , type=str , action="append" )
                # Use only remaining args for further parsing (remove the args_file_flag)
                cfg , args = args_file_parser.parse_known_args(args=args )
                cmd_args_file_paths = vars(cfg ).get(args_file_flag.lstrip("-" ) , None )
                if cmd_args_file_paths:
                    args_files.extend([Path(p ) for p in cmd_args_file_paths] )
            file_args = []
            for args_file in args_files:
                if args_file.exists():
                    file_args += args_file.read_text().split()
            # in case of duplicate arguments the last one has precedence
            # args specified via the command line should overwrite args from files, so we add them last
            args = file_args + args if args is not None else file_args + sys.argv[1:]
        namespace , remaining_args = self.parse_known_args(args=args )
        outputs = []
        for dtype in self.dataclass_types:
            keys = {f.name for f in dataclasses.fields(dtype ) if f.init}
            inputs = {k: v for k, v in vars(namespace ).items() if k in keys}
            for k in keys:
                delattr(namespace , k )
            obj = dtype(**inputs )
            outputs.append(obj )
        if len(namespace.__dict__ ) > 0:
            # additional namespace.
            outputs.append(namespace )
        if return_remaining_strings:
            return (*outputs, remaining_args)
        else:
            if remaining_args:
                raise ValueError(f"Some specified arguments are not used by the HfArgumentParser: {remaining_args}" )
            return (*outputs,)
    def parse_dict ( self : int , args : Dict[str, Any] , allow_extra_keys : bool = False ):
        """simple docstring"""
        unused_keys = set(args.keys() )
        outputs = []
        for dtype in self.dataclass_types:
            keys = {f.name for f in dataclasses.fields(dtype ) if f.init}
            inputs = {k: v for k, v in args.items() if k in keys}
            unused_keys.difference_update(inputs.keys() )
            obj = dtype(**inputs )
            outputs.append(obj )
        if not allow_extra_keys and unused_keys:
            raise ValueError(f"Some keys are not used by the HfArgumentParser: {sorted(unused_keys )}" )
        return tuple(outputs )
    def parse_json_file ( self : List[Any] , json_file : str , allow_extra_keys : bool = False ):
        """simple docstring"""
        with open(Path(json_file ) , encoding="utf-8" ) as open_json_file:
            data = json.loads(open_json_file.read() )
        outputs = self.parse_dict(data , allow_extra_keys=allow_extra_keys )
        return tuple(outputs )
    def parse_yaml_file ( self : int , yaml_file : str , allow_extra_keys : bool = False ):
        """simple docstring"""
        outputs = self.parse_dict(yaml.safe_load(Path(yaml_file ).read_text() ) , allow_extra_keys=allow_extra_keys )
        return tuple(outputs )
| 187 | 0 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_flax_available, is_torch_available
_A = {"configuration_speech_encoder_decoder": ["SpeechEncoderDecoderConfig"]}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_A = ["SpeechEncoderDecoderModel"]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_A = ["FlaxSpeechEncoderDecoderModel"]
if TYPE_CHECKING:
from .configuration_speech_encoder_decoder import SpeechEncoderDecoderConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_speech_encoder_decoder import SpeechEncoderDecoderModel
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_speech_encoder_decoder import FlaxSpeechEncoderDecoderModel
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
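# --- Editor's note: the _LazyModule indirection above defers the heavy torch/flax imports
# until an attribute is first touched. The same idea in miniature, via a module-level
# __getattr__ (PEP 562); this standalone sketch is independent of the transformers helper:
#
# # lazy_pkg/__init__.py
# import importlib
# _import_structure = {"heavy_module": ["expensive_function"]}
# def __getattr__(name):
#     for module_name, symbols in _import_structure.items():
#         if name in symbols:
#             module = importlib.import_module(f".{module_name}", __name__)
#             return getattr(module, name)
#     raise AttributeError(f"module {__name__!r} has no attribute {name!r}")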
| 137 |
import importlib.metadata
import warnings
from copy import deepcopy
from packaging import version
from ..utils import logging
from .import_utils import is_accelerate_available, is_bitsandbytes_available
if is_bitsandbytes_available():
import bitsandbytes as bnb
import torch
import torch.nn as nn
    from ..pytorch_utils import Conv1D
if is_accelerate_available():
from accelerate import init_empty_weights
from accelerate.utils import find_tied_parameters
logger = logging.get_logger(__name__)
def set_module_quantized_tensor_to_device ( module , tensor_name , device , value=None , fp16_statistics=None ):
    """simple docstring"""
    if "." in tensor_name:
        splits = tensor_name.split("." )
        for split in splits[:-1]:
            new_module = getattr(module , split )
            if new_module is None:
                raise ValueError(F'{module} has no attribute {split}.' )
            module = new_module
        tensor_name = splits[-1]
    if tensor_name not in module._parameters and tensor_name not in module._buffers:
        raise ValueError(F'{module} does not have a parameter or a buffer named {tensor_name}.' )
    is_buffer = tensor_name in module._buffers
    old_value = getattr(module , tensor_name )
    if old_value.device == torch.device("meta" ) and device not in ["meta", torch.device("meta" )] and value is None:
        raise ValueError(F'{tensor_name} is on the meta device, we need a `value` to put in on {device}.' )
    is_4bit = False
    is_8bit = False
    if is_buffer or not is_bitsandbytes_available():
        is_8bit = False
        is_4bit = False
    else:
        is_4bit = hasattr(bnb.nn , "Params4bit" ) and isinstance(module._parameters[tensor_name] , bnb.nn.Params4bit )
        is_8bit = isinstance(module._parameters[tensor_name] , bnb.nn.Int8Params )
    if is_8bit or is_4bit:
        param = module._parameters[tensor_name]
        if param.device.type != "cuda":
            if value is None:
                new_value = old_value.to(device )
            elif isinstance(value , torch.Tensor ):
                new_value = value.to("cpu" )
                if value.dtype == torch.int8:
                    is_8bit_serializable = version.parse(importlib.metadata.version("bitsandbytes" ) ) > version.parse(
                        "0.37.2" )
                    if not is_8bit_serializable:
                        raise ValueError(
                            "Detected int8 weights but the version of bitsandbytes is not compatible with int8 serialization. "
                            "Make sure to download the latest `bitsandbytes` version. `pip install --upgrade bitsandbytes`." )
            else:
                new_value = torch.tensor(value , device="cpu" )
            # Support models using `Conv1D` in place of `nn.Linear` (e.g. gpt2) by transposing the weight matrix prior to quantization.
            # Since weights are saved in the correct "orientation", we skip transposing when loading.
            if issubclass(module.source_cls , Conv1D ) and fp16_statistics is None:
                new_value = new_value.T
            kwargs = old_value.__dict__
            if is_8bit:
                new_value = bnb.nn.Int8Params(new_value , requires_grad=False , **kwargs ).to(device )
            elif is_4bit:
                new_value = bnb.nn.Params4bit(new_value , requires_grad=False , **kwargs ).to(device )
            module._parameters[tensor_name] = new_value
            if fp16_statistics is not None:
                setattr(module.weight , "SCB" , fp16_statistics.to(device ) )
    else:
        if value is None:
            new_value = old_value.to(device )
        elif isinstance(value , torch.Tensor ):
            new_value = value.to(device )
        else:
            new_value = torch.tensor(value , device=device )
        if is_buffer:
            module._buffers[tensor_name] = new_value
        else:
            new_value = nn.Parameter(new_value , requires_grad=old_value.requires_grad )
            module._parameters[tensor_name] = new_value
def _replace_with_bnb_linear ( model , modules_to_not_convert=None , current_key_name=None , quantization_config=None , has_been_replaced=False ):
    """simple docstring"""
    for name, module in model.named_children():
        if current_key_name is None:
            current_key_name = []
        current_key_name.append(name )
        if (isinstance(module , nn.Linear ) or isinstance(module , Conv1D )) and name not in modules_to_not_convert:
            # Check if the current key is not in the `modules_to_not_convert`
            if not any(key in ".".join(current_key_name ) for key in modules_to_not_convert ):
                with init_empty_weights():
                    if isinstance(module , Conv1D ):
                        in_features , out_features = module.weight.shape
                    else:
                        in_features = module.in_features
                        out_features = module.out_features
                    if quantization_config.quantization_method() == "llm_int8":
                        model._modules[name] = bnb.nn.Linear8bitLt(
                            in_features , out_features , module.bias is not None , has_fp16_weights=quantization_config.llm_int8_has_fp16_weight , threshold=quantization_config.llm_int8_threshold , )
                        has_been_replaced = True
                    else:
                        if (
                            quantization_config.llm_int8_skip_modules is not None
                            and name in quantization_config.llm_int8_skip_modules
                        ):
                            pass
                        else:
                            model._modules[name] = bnb.nn.Linear4bit(
                                in_features , out_features , module.bias is not None , quantization_config.bnb_4bit_compute_dtype , compress_statistics=quantization_config.bnb_4bit_use_double_quant , quant_type=quantization_config.bnb_4bit_quant_type , )
                            has_been_replaced = True
                    # Store the module class in case we need to transpose the weight later
                    model._modules[name].source_cls = type(module )
                    # Force requires grad to False to avoid unexpected errors
                    model._modules[name].requires_grad_(False )
        if len(list(module.children() ) ) > 0:
            _ , has_been_replaced = _replace_with_bnb_linear(
                module , modules_to_not_convert , current_key_name , quantization_config , has_been_replaced=has_been_replaced , )
        # Remove the last key for recursion
        current_key_name.pop(-1 )
    return model, has_been_replaced
def replace_with_bnb_linear ( model , modules_to_not_convert=None , current_key_name=None , quantization_config=None ):
    """simple docstring"""
    modules_to_not_convert = ["lm_head"] if modules_to_not_convert is None else modules_to_not_convert
    model , has_been_replaced = _replace_with_bnb_linear(
        model , modules_to_not_convert , current_key_name , quantization_config )
    if not has_been_replaced:
        logger.warning(
            "You are loading your model in 8bit or 4bit but no linear modules were found in your model."
            " Please double check your model architecture, or submit an issue on github if you think this is"
            " a bug." )
    return model
def replace_8bit_linear ( *args , **kwargs ):
    """simple docstring"""
    warnings.warn(
        "`replace_8bit_linear` will be deprecated in a future version, please use `replace_with_bnb_linear` instead" , FutureWarning , )
    return replace_with_bnb_linear(*args , **kwargs )
def set_module_8bit_tensor_to_device ( *args , **kwargs ):
    """simple docstring"""
    warnings.warn(
        "`set_module_8bit_tensor_to_device` will be deprecated in a future version, please use `set_module_quantized_tensor_to_device` instead" , FutureWarning , )
    return set_module_quantized_tensor_to_device(*args , **kwargs )
def get_keys_to_not_convert ( model ):
    """simple docstring"""
    with init_empty_weights():
        tied_model = deepcopy(model ) # this has 0 cost since it is done inside `init_empty_weights` context manager
    tied_model.tie_weights()
    tied_params = find_tied_parameters(tied_model )
    # For compatibility with Accelerate < 0.18
    if isinstance(tied_params , dict ):
        tied_keys = sum(list(tied_params.values() ) , [] ) + list(tied_params.keys() )
    else:
        tied_keys = sum(tied_params , [] )
    has_tied_params = len(tied_keys ) > 0
    # Check if it is a base model
    is_base_model = not hasattr(model , model.base_model_prefix )
    # Ignore this for base models (BertModel, GPT2Model, etc.)
    if (not has_tied_params) and is_base_model:
        return []
    # otherwise they have an attached head
    list_modules = list(model.named_children() )
    list_last_module = [list_modules[-1][0]]
    # add last module together with tied weights
    intersection = set(list_last_module ) - set(tied_keys )
    list_untouched = list(set(tied_keys ) ) + list(intersection )
    # remove ".weight" from the keys
    names_to_remove = [".weight", ".bias"]
    filtered_module_names = []
    for name in list_untouched:
        for name_to_remove in names_to_remove:
            if name_to_remove in name:
                name = name.replace(name_to_remove , "" )
        filtered_module_names.append(name )
    return filtered_module_names
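# --- Editor's note: these helpers are normally reached through `from_pretrained` rather
# than called directly. A hedged usage sketch (assumes a CUDA machine with bitsandbytes
# installed; the checkpoint id is just an example):
#
# from transformers import AutoModelForCausalLM
# # Under the hood this runs replace_with_bnb_linear(), swapping nn.Linear layers for
# # bnb.nn.Linear8bitLt, except modules returned by get_keys_to_not_convert (e.g. lm_head).
# model = AutoModelForCausalLM.from_pretrained(
#     "bigscience/bloom-560m", load_in_8bit=True, device_map="auto"
# )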
| 137 | 1 |
'''simple docstring'''
import os
from tempfile import TemporaryDirectory
from unittest import TestCase
import pytest
from absl.testing import parameterized
from datasets import config
from datasets.arrow_reader import HF_GCP_BASE_URL
from datasets.builder import DatasetBuilder
from datasets.dataset_dict import IterableDatasetDict
from datasets.iterable_dataset import IterableDataset
from datasets.load import dataset_module_factory, import_main_class
from datasets.utils.file_utils import cached_path
DATASETS_ON_HF_GCP = [
{"""dataset""": """wikipedia""", """config_name""": """20220301.de"""},
{"""dataset""": """wikipedia""", """config_name""": """20220301.en"""},
{"""dataset""": """wikipedia""", """config_name""": """20220301.fr"""},
{"""dataset""": """wikipedia""", """config_name""": """20220301.frr"""},
{"""dataset""": """wikipedia""", """config_name""": """20220301.it"""},
{"""dataset""": """wikipedia""", """config_name""": """20220301.simple"""},
{"""dataset""": """snli""", """config_name""": """plain_text"""},
{"""dataset""": """eli5""", """config_name""": """LFQA_reddit"""},
{"""dataset""": """wiki40b""", """config_name""": """en"""},
{"""dataset""": """wiki_dpr""", """config_name""": """psgs_w100.nq.compressed"""},
{"""dataset""": """wiki_dpr""", """config_name""": """psgs_w100.nq.no_index"""},
{"""dataset""": """wiki_dpr""", """config_name""": """psgs_w100.multiset.no_index"""},
{"""dataset""": """natural_questions""", """config_name""": """default"""},
]
def list_datasets_on_hf_gcp_parameters ( with_config : Optional[Any]=True ):
    if with_config:
        return [
            {
                "testcase_name": d["dataset"] + "/" + d["config_name"],
                "dataset": d["dataset"],
                "config_name": d["config_name"],
            }
            for d in DATASETS_ON_HF_GCP
        ]
    else:
        return [
            {"testcase_name": dataset, "dataset": dataset} for dataset in {d["dataset"] for d in DATASETS_ON_HF_GCP}
        ]
@parameterized.named_parameters(list_datasets_on_hf_gcp_parameters(with_config=True ) )
class TestDatasetOnHfGcp ( TestCase ):
    dataset : Union[str, Any] = None
    config_name : Dict = None
    def test_dataset_info_available ( self : str , dataset : int , config_name : List[Any] ) -> int:
        with TemporaryDirectory() as tmp_dir:
            dataset_module = dataset_module_factory(dataset , cache_dir=tmp_dir )
            builder_cls = import_main_class(dataset_module.module_path , dataset=True )
            builder_instance = builder_cls(
                cache_dir=tmp_dir , config_name=config_name , hash=dataset_module.hash , )
            dataset_info_url = '/'.join(
                [
                    HF_GCP_BASE_URL,
                    builder_instance._relative_data_dir(with_hash=False ).replace(os.sep , '/' ),
                    config.DATASET_INFO_FILENAME,
                ] )
            dataset_info_path = cached_path(dataset_info_url , cache_dir=tmp_dir )
            self.assertTrue(os.path.exists(dataset_info_path ) )
@pytest.mark.integration
def test_as_dataset_from_hf_gcs ( tmp_path_factory : Tuple ):
    tmp_dir = tmp_path_factory.mktemp('test_hf_gcp' ) / 'test_wikipedia_simple'
    dataset_module = dataset_module_factory('wikipedia' , cache_dir=tmp_dir )
    builder_cls = import_main_class(dataset_module.module_path )
    builder_instance = builder_cls(
        cache_dir=tmp_dir , config_name='20220301.frr' , hash=dataset_module.hash , )
    # use the HF cloud storage, not the original download_and_prepare that uses apache-beam
    builder_instance._download_and_prepare = None
    builder_instance.download_and_prepare()
    ds = builder_instance.as_dataset()
    assert ds
@pytest.mark.integration
def test_as_streaming_dataset_from_hf_gcs ( tmp_path : Optional[int] ):
    dataset_module = dataset_module_factory('wikipedia' , cache_dir=tmp_path )
    builder_cls = import_main_class(dataset_module.module_path , dataset=True )
    builder_instance = builder_cls(
        cache_dir=tmp_path , config_name='20220301.frr' , hash=dataset_module.hash , )
    ds = builder_instance.as_streaming_dataset()
    assert ds
    assert isinstance(ds , IterableDatasetDict )
    assert "train" in ds
    assert isinstance(ds['train'] , IterableDataset )
    assert next(iter(ds['train'] ) )
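# --- Editor's note: the streaming path tested above maps to a one-liner in the public
# datasets API; a hedged sketch (config name taken from DATASETS_ON_HF_GCP above):
def _example_streaming():
    from datasets import load_dataset
    ds = load_dataset("wikipedia", "20220301.simple", split="train", streaming=True)
    return next(iter(ds))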
| 4 |
import re
from typing import Callable, List, Optional, Union
import tensorflow as tf
try:
from tensorflow.keras.optimizers.legacy import Adam
except ImportError:
from tensorflow.keras.optimizers import Adam
class WarmUp ( tf.keras.optimizers.schedules.LearningRateSchedule ):
    def __init__( self : Tuple , initial_learning_rate : float , decay_schedule_fn : Callable , warmup_steps : int , power : float = 1.0 , name : str = None , ) -> Union[str, Any]:
        super().__init__()
        self.initial_learning_rate = initial_learning_rate
        self.warmup_steps = warmup_steps
        self.power = power
        self.decay_schedule_fn = decay_schedule_fn
        self.name = name
    def __call__( self : int , step : Dict ) -> Optional[int]:
        with tf.name_scope(self.name or 'WarmUp' ) as name:
            # Implements polynomial warmup. i.e., if global_step < warmup_steps, the
            # learning rate will be `global_step/num_warmup_steps * init_lr`.
            global_step_float = tf.cast(step , tf.float32 )
            warmup_steps_float = tf.cast(self.warmup_steps , tf.float32 )
            warmup_percent_done = global_step_float / warmup_steps_float
            warmup_learning_rate = self.initial_learning_rate * tf.math.pow(warmup_percent_done , self.power )
            return tf.cond(
                global_step_float < warmup_steps_float , lambda: warmup_learning_rate , lambda: self.decay_schedule_fn(step - self.warmup_steps ) , name=name , )
    def get_config ( self : Tuple ) -> int:
return {
"initial_learning_rate": self.initial_learning_rate,
"decay_schedule_fn": self.decay_schedule_fn,
"warmup_steps": self.warmup_steps,
"power": self.power,
"name": self.name,
}
def create_optimizer ( init_lr : float , num_train_steps : int , num_warmup_steps : int , min_lr_ratio : float = 0.0 , adam_beta1 : float = 0.9 , adam_beta2 : float = 0.999 , adam_epsilon : float = 1E-8 , adam_clipnorm : Optional[float] = None , adam_global_clipnorm : Optional[float] = None , weight_decay_rate : float = 0.0 , power : float = 1.0 , include_in_weight_decay : Optional[List[str]] = None , ) -> Optional[Any]:
    """simple docstring"""
    lr_schedule = tf.keras.optimizers.schedules.PolynomialDecay(
        initial_learning_rate=init_lr , decay_steps=num_train_steps - num_warmup_steps , end_learning_rate=init_lr * min_lr_ratio , power=power , )
    if num_warmup_steps:
        lr_schedule = WarmUp(
            initial_learning_rate=init_lr , decay_schedule_fn=lr_schedule , warmup_steps=num_warmup_steps , )
    if weight_decay_rate > 0.0:
        optimizer = AdamWeightDecay(
            learning_rate=lr_schedule , weight_decay_rate=weight_decay_rate , beta_1=adam_beta1 , beta_2=adam_beta2 , epsilon=adam_epsilon , clipnorm=adam_clipnorm , global_clipnorm=adam_global_clipnorm , exclude_from_weight_decay=['LayerNorm', 'layer_norm', 'bias'] , include_in_weight_decay=include_in_weight_decay , )
    else:
        optimizer = tf.keras.optimizers.Adam(
            learning_rate=lr_schedule , beta_1=adam_beta1 , beta_2=adam_beta2 , epsilon=adam_epsilon , clipnorm=adam_clipnorm , global_clipnorm=adam_global_clipnorm , )
    # We return the optimizer and the LR scheduler in order to better track the
    # evolution of the LR independently of the optimizer.
    return optimizer, lr_schedule
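# --- Editor's note: a short sketch of wiring create_optimizer into a Keras training setup
# (the step counts below are arbitrary):
def _example_create_optimizer():
    optimizer, lr_schedule = create_optimizer(
        init_lr=5e-5,
        num_train_steps=10_000,
        num_warmup_steps=1_000,
        weight_decay_rate=0.01,
    )
    # model.compile(optimizer=optimizer, loss=...) would then train with warmup + decay
    return optimizer, lr_schedule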
class AdamWeightDecay ( Adam ):
    def __init__( self : Tuple , learning_rate : Union[float, tf.keras.optimizers.schedules.LearningRateSchedule] = 0.001 , beta_1 : float = 0.9 , beta_2 : float = 0.999 , epsilon : float = 1E-7 , amsgrad : bool = False , weight_decay_rate : float = 0.0 , include_in_weight_decay : Optional[List[str]] = None , exclude_from_weight_decay : Optional[List[str]] = None , name : str = "AdamWeightDecay" , **kwargs : str , ) -> int:
        super().__init__(learning_rate , beta_1 , beta_2 , epsilon , amsgrad , name , **kwargs )
        self.weight_decay_rate = weight_decay_rate
        self._include_in_weight_decay = include_in_weight_decay
        self._exclude_from_weight_decay = exclude_from_weight_decay
    @classmethod
    def from_config ( cls : str , config : Tuple ):
        custom_objects = {'WarmUp': WarmUp}
        return super(AdamWeightDecay , cls ).from_config(config , custom_objects=custom_objects )
    def _prepare_local ( self : Optional[int] , var_device : Any , var_dtype : int , apply_state : Optional[int] ):
        super(AdamWeightDecay , self )._prepare_local(var_device , var_dtype , apply_state )
        apply_state[(var_device, var_dtype)]['weight_decay_rate'] = tf.constant(
            self.weight_decay_rate , name='adam_weight_decay_rate' )
    def _decay_weights_op ( self : Dict , var : Optional[int] , learning_rate : List[str] , apply_state : Optional[Any] ):
        do_decay = self._do_use_weight_decay(var.name )
        if do_decay:
            return var.assign_sub(
                learning_rate * var * apply_state[(var.device, var.dtype.base_dtype)]['weight_decay_rate'] , use_locking=self._use_locking , )
        return tf.no_op()
    def apply_gradients ( self : Optional[int] , grads_and_vars : Optional[Any] , name : Optional[int]=None , **kwargs : Optional[Any] ):
        grads , tvars = list(zip(*grads_and_vars ) )
        return super(AdamWeightDecay , self ).apply_gradients(zip(grads , tvars ) , name=name , **kwargs )
    def _get_lr ( self : str , var_device : Optional[Any] , var_dtype : List[str] , apply_state : Any ):
        if apply_state is None:
            return self._decayed_lr_t[var_dtype], {}
        apply_state = apply_state or {}
        coefficients = apply_state.get((var_device, var_dtype) )
        if coefficients is None:
            coefficients = self._fallback_apply_state(var_device , var_dtype )
            apply_state[(var_device, var_dtype)] = coefficients
        return coefficients["lr_t"], {"apply_state": apply_state}
    def _resource_apply_dense ( self : str , grad : Tuple , var : Optional[Any] , apply_state : List[Any]=None ):
        lr_t , kwargs = self._get_lr(var.device , var.dtype.base_dtype , apply_state )
        decay = self._decay_weights_op(var , lr_t , apply_state )
        with tf.control_dependencies([decay] ):
            return super(AdamWeightDecay , self )._resource_apply_dense(grad , var , **kwargs )
    def _resource_apply_sparse ( self : List[Any] , grad : List[str] , var : Any , indices : str , apply_state : List[Any]=None ):
        lr_t , kwargs = self._get_lr(var.device , var.dtype.base_dtype , apply_state )
        decay = self._decay_weights_op(var , lr_t , apply_state )
        with tf.control_dependencies([decay] ):
            return super(AdamWeightDecay , self )._resource_apply_sparse(grad , var , indices , **kwargs )
    def get_config ( self : Union[str, Any] ):
        config = super().get_config()
        config.update({'weight_decay_rate': self.weight_decay_rate} )
        return config
    def _do_use_weight_decay ( self : Union[str, Any] , param_name : Union[str, Any] ):
        if self.weight_decay_rate == 0:
            return False
        if self._include_in_weight_decay:
            for r in self._include_in_weight_decay:
                if re.search(r , param_name ) is not None:
                    return True
        if self._exclude_from_weight_decay:
            for r in self._exclude_from_weight_decay:
                if re.search(r , param_name ) is not None:
                    return False
        return True
class GradientAccumulator :
    def __init__( self : int ) -> List[Any]:
        self._gradients = []
        self._accum_steps = None
    @property
    def step ( self : Dict ) -> List[Any]:
        if self._accum_steps is None:
            self._accum_steps = tf.Variable(
                tf.constant(0 , dtype=tf.int64 ) , trainable=False , synchronization=tf.VariableSynchronization.ON_READ , aggregation=tf.VariableAggregation.ONLY_FIRST_REPLICA , )
        return self._accum_steps.value()
    @property
    def gradients ( self : Union[str, Any] ) -> int:
        if not self._gradients:
            raise ValueError('The accumulator should be called first to initialize the gradients' )
        return [gradient.value() if gradient is not None else gradient for gradient in self._gradients]
    def __call__( self : Optional[Any] , gradients : Any ) -> Any:
        if not self._gradients:
            _ = self.step # Create the step variable.
            self._gradients.extend(
                [
                    tf.Variable(
                        tf.zeros_like(gradient ) , trainable=False , synchronization=tf.VariableSynchronization.ON_READ , aggregation=tf.VariableAggregation.ONLY_FIRST_REPLICA , )
                    if gradient is not None
                    else gradient
                    for gradient in gradients
                ] )
        if len(gradients ) != len(self._gradients ):
            raise ValueError(F'''Expected {len(self._gradients )} gradients, but got {len(gradients )}''' )
        for accum_gradient, gradient in zip(self._gradients , gradients ):
            if accum_gradient is not None and gradient is not None:
                accum_gradient.assign_add(gradient )
        self._accum_steps.assign_add(1 )
    def reset ( self : int ) -> int:
        if not self._gradients:
            return
        self._accum_steps.assign(0 )
        for gradient in self._gradients:
            if gradient is not None:
                gradient.assign(tf.zeros_like(gradient ) )
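# --- Editor's note: the accumulator is used by calling it with per-micro-batch gradients,
# then applying and resetting every N steps. A hedged toy sketch (not a full training loop):
def _example_accumulate():
    accumulator = GradientAccumulator()
    variables = [tf.Variable([1.0, 2.0])]
    for _ in range(4):  # four micro-batches
        accumulator([tf.constant([0.1, 0.1])])
    optimizer = tf.keras.optimizers.SGD(0.01)
    optimizer.apply_gradients(zip(accumulator.gradients, variables))
    accumulator.reset()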
| 322 | 0 |
import argparse
import os
import re
# All paths are set with the intent you should run this script from the root of the repo with the command
# python utils/check_dummies.py
PATH_TO_DIFFUSERS = '''src/diffusers'''
# Matches is_xxx_available()
_re_backend = re.compile(r'''is\_([a-z_]*)_available\(\)''')
# Matches from xxx import bla
_re_single_line_import = re.compile(r'''\s+from\s+\S*\s+import\s+([^\(\s].*)\n''')
DUMMY_CONSTANT = '''
{0} = None
'''
DUMMY_CLASS = '''
class {0}(metaclass=DummyObject):
_backends = {1}
def __init__(self, *args, **kwargs):
requires_backends(self, {1})
@classmethod
def from_config(cls, *args, **kwargs):
requires_backends(cls, {1})
@classmethod
def from_pretrained(cls, *args, **kwargs):
requires_backends(cls, {1})
'''
DUMMY_FUNCTION = '''
def {0}(*args, **kwargs):
requires_backends({0}, {1})
'''
def find_backend ( line ) -> List[Any]:
    backends = _re_backend.findall(line )
    if len(backends ) == 0:
        return None
    return "_and_".join(backends )
def read_init ( ) -> Tuple:
    with open(os.path.join(PATH_TO_DIFFUSERS , """__init__.py""" ) , """r""" , encoding="""utf-8""" , newline="""\n""" ) as f:
        lines = f.readlines()
    # Get to the point we do the actual imports for type checking
    line_index = 0
    backend_specific_objects = {}
    # Go through the end of the file
    while line_index < len(lines ):
        # If the line contains is_backend_available, we grab all objects associated with the `else` block
        backend = find_backend(lines[line_index] )
        if backend is not None:
            while not lines[line_index].startswith("""else:""" ):
                line_index += 1
            line_index += 1
            objects = []
            # Until we unindent, add backend objects to the list
            while line_index < len(lines ) and len(lines[line_index] ) > 1:
                line = lines[line_index]
                single_line_import_search = _re_single_line_import.search(line )
                if single_line_import_search is not None:
                    objects.extend(single_line_import_search.groups()[0].split(""", """ ) )
                elif line.startswith(""" """ * 8 ):
                    objects.append(line[8:-2] )
                line_index += 1
            if len(objects ) > 0:
                backend_specific_objects[backend] = objects
        else:
            line_index += 1
    return backend_specific_objects
def create_dummy_object ( name , backend_name ) -> List[str]:
    if name.isupper():
        return DUMMY_CONSTANT.format(name )
    elif name.islower():
        return DUMMY_FUNCTION.format(name , backend_name )
    else:
        return DUMMY_CLASS.format(name , backend_name )
def create_dummy_files ( backend_specific_objects=None ) -> Tuple:
    if backend_specific_objects is None:
        backend_specific_objects = read_init()
    # For special correspondence backend to module name as used in the function requires_modulename
    dummy_files = {}
    for backend, objects in backend_specific_objects.items():
        backend_name = """[""" + """, """.join(F"""\"{b}\"""" for b in backend.split("""_and_""" ) ) + """]"""
        dummy_file = """# This file is autogenerated by the command `make fix-copies`, do not edit.\n"""
        dummy_file += "from ..utils import DummyObject, requires_backends\n\n"
        dummy_file += "\n".join([create_dummy_object(o , backend_name ) for o in objects] )
        dummy_files[backend] = dummy_file
    return dummy_files
def check_dummies ( overwrite=False ) -> Optional[Any]:
    dummy_files = create_dummy_files()
    # For special correspondence backend to shortcut as used in utils/dummy_xxx_objects.py
    short_names = {"""torch""": """pt"""}
    # Locate actual dummy modules and read their content.
    path = os.path.join(PATH_TO_DIFFUSERS , """utils""" )
    dummy_file_paths = {
        backend: os.path.join(path , F"""dummy_{short_names.get(backend , backend )}_objects.py""" )
        for backend in dummy_files.keys()
    }
    actual_dummies = {}
    for backend, file_path in dummy_file_paths.items():
        if os.path.isfile(file_path ):
            with open(file_path , """r""" , encoding="""utf-8""" , newline="""\n""" ) as f:
                actual_dummies[backend] = f.read()
        else:
            actual_dummies[backend] = """"""
    for backend in dummy_files.keys():
        if dummy_files[backend] != actual_dummies[backend]:
            if overwrite:
                print(
                    F"""Updating diffusers.utils.dummy_{short_names.get(backend , backend )}_objects.py as the main """
                    """__init__ has new objects.""" )
                with open(dummy_file_paths[backend] , """w""" , encoding="""utf-8""" , newline="""\n""" ) as f:
                    f.write(dummy_files[backend] )
            else:
                raise ValueError(
                    """The main __init__ has objects that are not present in """
                    F"""diffusers.utils.dummy_{short_names.get(backend , backend )}_objects.py. Run `make fix-copies` """
                    """to fix this.""" )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument('''--fix_and_overwrite''', action='''store_true''', help='''Whether to fix inconsistencies.''')
    args = parser.parse_args()
check_dummies(args.fix_and_overwrite)
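# --- Editor's note: create_dummy_object() picks a template from the name's case: an
# UPPER_CASE name yields a constant stub, a lowercase name a function stub, and anything
# else a DummyObject class. A quick illustration (names are hypothetical):
#   create_dummy_object("FLAX_WEIGHTS_NAME", '["torch"]') -> "FLAX_WEIGHTS_NAME = None"
#   create_dummy_object("load_model", '["torch"]')        -> a stub function calling requires_backends
#   create_dummy_object("UNet2DModel", '["torch"]')       -> a DummyObject class with _backends = ["torch"]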
| 367 |
import pytest
import requests
from datasets.utils.file_utils import http_head
from .utils import OfflineSimulationMode, RequestWouldHangIndefinitelyError, offline
@pytest.mark.integration
def test_offline_with_timeout ( ) -> str:
    with offline(OfflineSimulationMode.CONNECTION_TIMES_OUT ):
        with pytest.raises(RequestWouldHangIndefinitelyError ):
            requests.request("""GET""" , """https://huggingface.co""" )
        with pytest.raises(requests.exceptions.ConnectTimeout ):
            requests.request("""GET""" , """https://huggingface.co""" , timeout=1.0 )
@pytest.mark.integration
def test_offline_with_connection_error ( ) -> Union[str, Any]:
    with offline(OfflineSimulationMode.CONNECTION_FAILS ):
        with pytest.raises(requests.exceptions.ConnectionError ):
            requests.request("""GET""" , """https://huggingface.co""" )
def test_offline_with_datasets_offline_mode ( ) -> int:
    with offline(OfflineSimulationMode.HF_DATASETS_OFFLINE_SET_TO_1 ):
        with pytest.raises(ConnectionError ):
            http_head("""https://huggingface.co""" )
| 262 | 0 |
'''simple docstring'''
def bead_sort(sequence: list) -> list:
    """Bead (gravity) sort; only defined for lists of non-negative integers."""
    if any(not isinstance(x, int) or x < 0 for x in sequence):
        raise TypeError("Sequence must be list of non-negative integers")
    for _ in range(len(sequence)):
        for i, (rod_upper, rod_lower) in enumerate(zip(sequence, sequence[1:])):
            if rod_upper > rod_lower:
                sequence[i] -= rod_upper - rod_lower
                sequence[i + 1] += rod_upper - rod_lower
    return sequence
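# Worked example: one outer pass over [4, 3] compares the adjacent "rods" (4, 3);
# since 4 > 3, one bead drops, giving [3, 4]. Running len(sequence) passes lets
# every bead settle, so bead_sort([5, 4, 3, 2, 1]) returns [1, 2, 3, 4, 5].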
if __name__ == "__main__":
assert bead_sort([5, 4, 3, 2, 1]) == [1, 2, 3, 4, 5]
assert bead_sort([7, 9, 4, 3, 5]) == [3, 4, 5, 7, 9]
| 174 |
'''simple docstring'''
import os
import posixpath
import shutil
import uuid
from dataclasses import dataclass
from typing import TYPE_CHECKING, Iterable, List, Optional, Tuple, Union
import numpy as np
import pyarrow as pa
import datasets
from datasets.arrow_writer import ArrowWriter, ParquetWriter
from datasets.config import MAX_SHARD_SIZE
from datasets.filesystems import (
is_remote_filesystem,
rename,
)
from datasets.iterable_dataset import _BaseExamplesIterable
from datasets.utils.py_utils import convert_file_size_to_int
logger = datasets.utils.logging.get_logger(__name__)
if TYPE_CHECKING:
import pyspark
@dataclass
class SparkConfig(datasets.BuilderConfig):
    """BuilderConfig for Spark."""

    features: Optional[datasets.Features] = None
def _generate_iterable_examples(df, partition_order):
    import pyspark

    def generate_fn():
        df_with_partition_id = df.select("*", pyspark.sql.functions.spark_partition_id().alias("part_id"))
        for partition_id in partition_order:
            partition_df = df_with_partition_id.select("*").where(f"part_id = {partition_id}").drop("part_id")
            rows = partition_df.collect()
            row_id = 0
            for row in rows:
                yield f"{partition_id}_{row_id}", row.asDict()
                row_id += 1

    return generate_fn
class SparkExamplesIterable(_BaseExamplesIterable):
    def __init__(self, df, partition_order=None):
        self.df = df
        self.partition_order = partition_order or range(self.df.rdd.getNumPartitions())
        self.generate_examples_fn = _generate_iterable_examples(self.df, self.partition_order)

    def __iter__(self):
        yield from self.generate_examples_fn()

    def shuffle_data_sources(self, generator):
        partition_order = list(range(self.df.rdd.getNumPartitions()))
        generator.shuffle(partition_order)
        return SparkExamplesIterable(self.df, partition_order=partition_order)

    def shard_data_sources(self, worker_id, num_workers):
        partition_order = self.split_shard_indices_by_worker(worker_id, num_workers)
        return SparkExamplesIterable(self.df, partition_order=partition_order)

    @property
    def n_shards(self) -> int:
        return len(self.partition_order)
class Spark(datasets.DatasetBuilder):
    BUILDER_CONFIG_CLASS = SparkConfig

    def __init__(self, df, cache_dir=None, working_dir=None, **config_kwargs):
        import pyspark

        self._spark = pyspark.sql.SparkSession.builder.getOrCreate()
        self.df = df
        self._working_dir = working_dir
        super().__init__(cache_dir=cache_dir, config_name=str(self.df.semanticHash()), **config_kwargs)
    def _validate_cache_dir(self):
        # Returns the path of the created file.
        def create_cache_and_write_probe(context):
            # makedirs with exist_ok will recursively create the directory. It will not throw an error if directories
            # already exist.
            os.makedirs(self._cache_dir, exist_ok=True)
            probe_file = os.path.join(self._cache_dir, "fs_test" + uuid.uuid4().hex)
            # Opening the file in append mode will create a new file unless it already exists, in which case it will not
            # change the file contents.
            open(probe_file, "a")
            return [probe_file]

        if self._spark.conf.get("spark.master", "").startswith("local"):
            return

        # If the cluster is multi-node, make sure that the user provided a cache_dir and that it is on an NFS
        # accessible to the driver.
        # TODO: Stream batches to the driver using ArrowCollectSerializer instead of throwing an error.
        if self._cache_dir:
            probe = (
                self._spark.sparkContext.parallelize(range(1), 1).mapPartitions(create_cache_and_write_probe).collect()
            )
            if os.path.isfile(probe[0]):
                return

        raise ValueError(
            "When using Dataset.from_spark on a multi-node cluster, the driver and all workers should be able to access cache_dir"
        )
    def _info(self):
        return datasets.DatasetInfo(features=self.config.features)

    def _split_generators(self, dl_manager):
        return [datasets.SplitGenerator(name=datasets.Split.TRAIN)]
    def _repartition_df_if_needed(self, max_shard_size):
        import pyspark

        def get_arrow_batch_size(it):
            for batch in it:
                yield pa.RecordBatch.from_pydict({"batch_bytes": [batch.nbytes]})

        df_num_rows = self.df.count()
        sample_num_rows = df_num_rows if df_num_rows <= 100 else 100
        # Approximate the size of each row (in Arrow format) by averaging over a max-100-row sample.
        approx_bytes_per_row = (
            self.df.limit(sample_num_rows)
            .repartition(1)
            .mapInArrow(get_arrow_batch_size, "batch_bytes: long")
            .agg(pyspark.sql.functions.sum("batch_bytes").alias("sample_bytes"))
            .collect()[0]
            .sample_bytes
            / sample_num_rows
        )
        approx_total_size = approx_bytes_per_row * df_num_rows
        if approx_total_size > max_shard_size:
            # Make sure there is at least one row per partition.
            new_num_partitions = min(df_num_rows, int(approx_total_size / max_shard_size))
            self.df = self.df.repartition(new_num_partitions)
    def _prepare_split_single(self, fpath, file_format, max_shard_size):
        import pyspark

        writer_class = ParquetWriter if file_format == "parquet" else ArrowWriter
        working_fpath = os.path.join(self._working_dir, os.path.basename(fpath)) if self._working_dir else fpath
        embed_local_files = file_format == "parquet"

        # Define these so that we don't reference self in write_arrow, which will result in a pickling error due to
        # pickling the SparkContext.
        features = self.config.features
        writer_batch_size = self._writer_batch_size
        storage_options = self._fs.storage_options

        def write_arrow(it):
            # Within the same SparkContext, no two task attempts will share the same attempt ID.
            task_id = pyspark.TaskContext().taskAttemptId()
            first_batch = next(it, None)
            if first_batch is None:
                # Some partitions might not receive any data.
                return pa.RecordBatch.from_arrays(
                    [[task_id], [0], [0]], names=["task_id", "num_examples", "num_bytes"], )
            shard_id = 0
            writer = writer_class(
                features=features, path=working_fpath.replace("SSSSS", f"{shard_id:05d}").replace("TTTTT", f"{task_id:05d}"), writer_batch_size=writer_batch_size, storage_options=storage_options, embed_local_files=embed_local_files, )
            table = pa.Table.from_batches([first_batch])
            writer.write_table(table)
            for batch in it:
                if max_shard_size is not None and writer._num_bytes >= max_shard_size:
                    num_examples, num_bytes = writer.finalize()
                    writer.close()
                    yield pa.RecordBatch.from_arrays(
                        [[task_id], [num_examples], [num_bytes]], names=["task_id", "num_examples", "num_bytes"], )
                    shard_id += 1
                    writer = writer_class(
                        features=writer._features, path=working_fpath.replace("SSSSS", f"{shard_id:05d}").replace("TTTTT", f"{task_id:05d}"), writer_batch_size=writer_batch_size, storage_options=storage_options, embed_local_files=embed_local_files, )
                table = pa.Table.from_batches([batch])
                writer.write_table(table)
            if writer._num_bytes > 0:
                num_examples, num_bytes = writer.finalize()
                writer.close()
                yield pa.RecordBatch.from_arrays(
                    [[task_id], [num_examples], [num_bytes]], names=["task_id", "num_examples", "num_bytes"], )

            if working_fpath != fpath:
                for file in os.listdir(os.path.dirname(working_fpath)):
                    dest = os.path.join(os.path.dirname(fpath), os.path.basename(file))
                    shutil.move(file, dest)

        stats = (
            self.df.mapInArrow(write_arrow, "task_id: long, num_examples: long, num_bytes: long")
            .groupBy("task_id")
            .agg(
                pyspark.sql.functions.sum("num_examples").alias("total_num_examples"), pyspark.sql.functions.sum("num_bytes").alias("total_num_bytes"), pyspark.sql.functions.count("num_bytes").alias("num_shards"), pyspark.sql.functions.collect_list("num_examples").alias("shard_lengths"), )
            .collect()
        )
        for row in stats:
            yield row.task_id, (row.total_num_examples, row.total_num_bytes, row.num_shards, row.shard_lengths)
    def _prepare_split(self, split_generator, file_format="arrow", max_shard_size=None, num_proc=None, **kwargs):
        self._validate_cache_dir()

        max_shard_size = convert_file_size_to_int(max_shard_size or MAX_SHARD_SIZE)
        self._repartition_df_if_needed(max_shard_size)

        is_local = not is_remote_filesystem(self._fs)
        path_join = os.path.join if is_local else posixpath.join

        SUFFIX = "-TTTTT-SSSSS-of-NNNNN"
        fname = f"{self.name}-{split_generator.name}{SUFFIX}.{file_format}"
        fpath = path_join(self._output_dir, fname)

        total_num_examples = 0
        total_num_bytes = 0
        total_shards = 0
        task_id_and_num_shards = []
        all_shard_lengths = []

        for task_id, content in self._prepare_split_single(fpath, file_format, max_shard_size):
            (
                num_examples,
                num_bytes,
                num_shards,
                shard_lengths,
            ) = content
            if num_bytes > 0:
                total_num_examples += num_examples
                total_num_bytes += num_bytes
                total_shards += num_shards
                task_id_and_num_shards.append((task_id, num_shards))
                all_shard_lengths.extend(shard_lengths)

        split_generator.split_info.num_examples = total_num_examples
        split_generator.split_info.num_bytes = total_num_bytes

        # should rename everything at the end
        logger.debug(f"Renaming {total_shards} shards.")

        if total_shards > 1:
            split_generator.split_info.shard_lengths = all_shard_lengths

            # Define fs outside of _rename_shard so that we don't reference self in the function, which will result in a
            # pickling error due to pickling the SparkContext.
            fs = self._fs

            # use the -SSSSS-of-NNNNN pattern
            def _rename_shard(task_id, shard_id, global_shard_id):
                rename(
                    fs, fpath.replace("SSSSS", f"{shard_id:05d}").replace("TTTTT", f"{task_id:05d}"), fpath.replace("TTTTT-SSSSS", f"{global_shard_id:05d}").replace("NNNNN", f"{total_shards:05d}"), )

            args = []
            global_shard_id = 0
            for i in range(len(task_id_and_num_shards)):
                task_id, num_shards = task_id_and_num_shards[i]
                for shard_id in range(num_shards):
                    args.append([task_id, shard_id, global_shard_id])
                    global_shard_id += 1
            self._spark.sparkContext.parallelize(args, len(args)).map(lambda args: _rename_shard(*args)).collect()
        else:
            # don't use any pattern
            shard_id = 0
            task_id = task_id_and_num_shards[0][0]
            self._rename(
                fpath.replace("SSSSS", f"{shard_id:05d}").replace("TTTTT", f"{task_id:05d}"), fpath.replace(SUFFIX, ""), )

    def _get_examples_iterable_for_split(self, split_generator):
        return SparkExamplesIterable(self.df)
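# A minimal usage sketch. The builder above is normally reached through
# `datasets.Dataset.from_spark` rather than instantiated directly (this example
# assumes a local SparkSession is available):
#
#   import pyspark
#   from datasets import Dataset
#
#   spark = pyspark.sql.SparkSession.builder.master("local[*]").getOrCreate()
#   df = spark.createDataFrame([("hello",), ("world",)], ["text"])
#   ds = Dataset.from_spark(df)  # materializes the DataFrame as an Arrow-backed dataset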
| 174 | 1 |
import argparse
import requests
import torch
from PIL import Image
from transformers import SwinConfig, SwinForMaskedImageModeling, ViTImageProcessor
def get_swin_config(model_name):
    config = SwinConfig(image_size=192)

    if "base" in model_name:
        window_size = 6
        embed_dim = 128
        depths = (2, 2, 18, 2)
        num_heads = (4, 8, 16, 32)
    elif "large" in model_name:
        window_size = 12
        embed_dim = 192
        depths = (2, 2, 18, 2)
        num_heads = (6, 12, 24, 48)
    else:
        raise ValueError("Model not supported, only supports base and large variants")

    config.window_size = window_size
    config.embed_dim = embed_dim
    config.depths = depths
    config.num_heads = num_heads

    return config
def rename_key(name):
    if "encoder.mask_token" in name:
        name = name.replace("encoder.mask_token", "embeddings.mask_token")
    if "encoder.patch_embed.proj" in name:
        name = name.replace("encoder.patch_embed.proj", "embeddings.patch_embeddings.projection")
    if "encoder.patch_embed.norm" in name:
        name = name.replace("encoder.patch_embed.norm", "embeddings.norm")
    if "attn.proj" in name:
        name = name.replace("attn.proj", "attention.output.dense")
    if "attn" in name:
        name = name.replace("attn", "attention.self")
    if "norm1" in name:
        name = name.replace("norm1", "layernorm_before")
    if "norm2" in name:
        name = name.replace("norm2", "layernorm_after")
    if "mlp.fc1" in name:
        name = name.replace("mlp.fc1", "intermediate.dense")
    if "mlp.fc2" in name:
        name = name.replace("mlp.fc2", "output.dense")

    if name == "encoder.norm.weight":
        name = "layernorm.weight"
    if name == "encoder.norm.bias":
        name = "layernorm.bias"

    if "decoder" in name:
        pass
    else:
        name = "swin." + name

    return name
def convert_state_dict(orig_state_dict, model):
    for key in orig_state_dict.copy().keys():
        val = orig_state_dict.pop(key)

        if "attn_mask" in key:
            pass
        elif "qkv" in key:
            key_split = key.split(".")
            layer_num = int(key_split[2])
            block_num = int(key_split[4])
            dim = model.swin.encoder.layers[layer_num].blocks[block_num].attention.self.all_head_size

            if "weight" in key:
                orig_state_dict[f"swin.encoder.layers.{layer_num}.blocks.{block_num}.attention.self.query.weight"] = val[:dim, :]
                orig_state_dict[f"swin.encoder.layers.{layer_num}.blocks.{block_num}.attention.self.key.weight"] = val[
                    dim : dim * 2, :
                ]
                orig_state_dict[f"swin.encoder.layers.{layer_num}.blocks.{block_num}.attention.self.value.weight"] = val[-dim:, :]
            else:
                orig_state_dict[f"swin.encoder.layers.{layer_num}.blocks.{block_num}.attention.self.query.bias"] = val[
                    :dim
                ]
                orig_state_dict[f"swin.encoder.layers.{layer_num}.blocks.{block_num}.attention.self.key.bias"] = val[
                    dim : dim * 2
                ]
                orig_state_dict[f"swin.encoder.layers.{layer_num}.blocks.{block_num}.attention.self.value.bias"] = val[
                    -dim:
                ]
        else:
            orig_state_dict[rename_key(key)] = val

    return orig_state_dict
def convert_swin_checkpoint(model_name, checkpoint_path, pytorch_dump_folder_path, push_to_hub):
    state_dict = torch.load(checkpoint_path, map_location="cpu")["model"]

    config = get_swin_config(model_name)
    model = SwinForMaskedImageModeling(config)
    model.eval()

    new_state_dict = convert_state_dict(state_dict, model)
    model.load_state_dict(new_state_dict)

    url = "http://images.cocodataset.org/val2017/000000039769.jpg"

    image_processor = ViTImageProcessor(size={"height": 192, "width": 192})
    image = Image.open(requests.get(url, stream=True).raw)
    inputs = image_processor(images=image, return_tensors="pt")

    with torch.no_grad():
        outputs = model(**inputs).logits

    print(outputs.keys())
    print("Looks ok!")

    if pytorch_dump_folder_path is not None:
        print(f"Saving model {model_name} to {pytorch_dump_folder_path}")
        model.save_pretrained(pytorch_dump_folder_path)

        print(f"Saving image processor to {pytorch_dump_folder_path}")
        image_processor.save_pretrained(pytorch_dump_folder_path)

    if push_to_hub:
        print(f"Pushing model and image processor for {model_name} to hub")
        model.push_to_hub(f"microsoft/{model_name}")
        image_processor.push_to_hub(f"microsoft/{model_name}")
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--model_name",
default="swin-base-simmim-window6-192",
type=str,
choices=["swin-base-simmim-window6-192", "swin-large-simmim-window12-192"],
help="Name of the Swin SimMIM model you\'d like to convert.",
)
parser.add_argument(
"--checkpoint_path",
default="/Users/nielsrogge/Documents/SwinSimMIM/simmim_pretrain__swin_base__img192_window6__100ep.pth",
type=str,
help="Path to the original PyTorch checkpoint (.pth file).",
)
parser.add_argument(
"--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model directory."
)
parser.add_argument(
"--push_to_hub", action="store_true", help="Whether or not to push the converted model to the 🤗 hub."
)
    args = parser.parse_args()
convert_swin_checkpoint(args.model_name, args.checkpoint_path, args.pytorch_dump_folder_path, args.push_to_hub)
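# Example invocation (the script filename and local paths are illustrative):
#
#   python convert_swin_simmim_to_pytorch.py \
#       --model_name swin-base-simmim-window6-192 \
#       --checkpoint_path ./simmim_pretrain__swin_base__img192_window6__100ep.pth \
#       --pytorch_dump_folder_path ./swin-simmim-converted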
| 351 |
import copy
import os
from typing import Union
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

PIX2STRUCT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "google/pix2struct-textcaps-base": (
        "https://huggingface.co/google/pix2struct-textcaps-base/resolve/main/config.json"
    ),
}
class Pix2StructTextConfig(PretrainedConfig):
    model_type = "pix2struct_text_model"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {
        "hidden_size": "hidden_size",
        "num_attention_heads": "num_heads",
        "num_hidden_layers": "num_layers",
    }

    def __init__(self, vocab_size=50244, hidden_size=768, d_kv=64, d_ff=2048, num_layers=12, num_heads=12, relative_attention_num_buckets=32, relative_attention_max_distance=128, dropout_rate=0.1, layer_norm_epsilon=1e-6, initializer_factor=1.0, dense_act_fn="gelu_new", decoder_start_token_id=0, use_cache=False, pad_token_id=0, eos_token_id=1, tie_word_embeddings=False, is_decoder=True, **kwargs):
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.d_kv = d_kv
        self.d_ff = d_ff
        self.num_layers = num_layers
        self.num_heads = num_heads
        self.relative_attention_num_buckets = relative_attention_num_buckets
        self.relative_attention_max_distance = relative_attention_max_distance
        self.dropout_rate = dropout_rate
        self.layer_norm_epsilon = layer_norm_epsilon
        self.initializer_factor = initializer_factor
        self.use_cache = use_cache
        self.eos_token_id = eos_token_id
        self.decoder_start_token_id = decoder_start_token_id
        # for backwards compatibility
        self.dense_act_fn = dense_act_fn
        super().__init__(
            pad_token_id=pad_token_id, eos_token_id=eos_token_id, decoder_start_token_id=decoder_start_token_id, tie_word_embeddings=tie_word_embeddings, is_decoder=is_decoder, **kwargs, )

    @classmethod
    def from_pretrained(cls, pretrained_model_name_or_path, **kwargs) -> "PretrainedConfig":
        cls._set_token_in_kwargs(kwargs)

        config_dict, kwargs = cls.get_config_dict(pretrained_model_name_or_path, **kwargs)

        # get the text config dict if we are loading from Pix2StructConfig
        if config_dict.get("model_type") == "pix2struct":
            config_dict = config_dict["text_config"]

        if "model_type" in config_dict and hasattr(cls, "model_type") and config_dict["model_type"] != cls.model_type:
            logger.warning(
                f"You are using a model of type {config_dict['model_type']} to instantiate a model of type "
                f"{cls.model_type}. This is not supported for all configurations of models and can yield errors.")

        return cls.from_dict(config_dict, **kwargs)
class Pix2StructVisionConfig(PretrainedConfig):
    model_type = "pix2struct_vision_model"

    def __init__(self, hidden_size=768, patch_embed_hidden_size=768, d_ff=2048, d_kv=64, num_hidden_layers=12, num_attention_heads=12, dense_act_fn="gelu_new", layer_norm_eps=1e-6, dropout_rate=0.0, attention_dropout=0.0, initializer_range=1e-10, initializer_factor=1.0, seq_len=4096, relative_attention_num_buckets=32, relative_attention_max_distance=128, **kwargs):
        super().__init__(**kwargs)

        self.hidden_size = hidden_size
        self.patch_embed_hidden_size = patch_embed_hidden_size
        self.d_ff = d_ff
        self.dropout_rate = dropout_rate
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.initializer_range = initializer_range
        self.initializer_factor = initializer_factor
        self.attention_dropout = attention_dropout
        self.layer_norm_eps = layer_norm_eps
        self.dense_act_fn = dense_act_fn
        self.seq_len = seq_len
        self.relative_attention_num_buckets = relative_attention_num_buckets
        self.relative_attention_max_distance = relative_attention_max_distance
        self.d_kv = d_kv

    @classmethod
    def from_pretrained(cls, pretrained_model_name_or_path, **kwargs) -> "PretrainedConfig":
        cls._set_token_in_kwargs(kwargs)

        config_dict, kwargs = cls.get_config_dict(pretrained_model_name_or_path, **kwargs)

        # get the vision config dict if we are loading from Pix2StructConfig
        if config_dict.get("model_type") == "pix2struct":
            config_dict = config_dict["vision_config"]

        if "model_type" in config_dict and hasattr(cls, "model_type") and config_dict["model_type"] != cls.model_type:
            logger.warning(
                f"You are using a model of type {config_dict['model_type']} to instantiate a model of type "
                f"{cls.model_type}. This is not supported for all configurations of models and can yield errors.")

        return cls.from_dict(config_dict, **kwargs)
class Pix2StructConfig(PretrainedConfig):
    model_type = "pix2struct"
    is_composition = True

    def __init__(self, text_config=None, vision_config=None, initializer_factor=1.0, initializer_range=0.02, is_vqa=False, tie_word_embeddings=False, is_encoder_decoder=True, **kwargs):
        super().__init__(tie_word_embeddings=tie_word_embeddings, is_encoder_decoder=is_encoder_decoder, **kwargs)

        if text_config is None:
            text_config = {}
            logger.info("text_config is None. Initializing the Pix2StructTextConfig with default values.")

        if vision_config is None:
            vision_config = {}
            logger.info("vision_config is None. Initializing the Pix2StructVisionConfig with default values.")

        self.text_config = Pix2StructTextConfig(**text_config)
        self.vision_config = Pix2StructVisionConfig(**vision_config)

        self.decoder_start_token_id = self.text_config.decoder_start_token_id
        self.pad_token_id = self.text_config.pad_token_id
        self.eos_token_id = self.text_config.eos_token_id

        self.initializer_factor = initializer_factor
        self.initializer_range = initializer_range

        self.text_config.initializer_range = self.initializer_range
        self.vision_config.initializer_range = self.initializer_range

        self.is_vqa = is_vqa

    @classmethod
    def from_text_vision_configs(cls, text_config: Pix2StructTextConfig, vision_config: Pix2StructVisionConfig, **kwargs):
        return cls(text_config=text_config.to_dict(), vision_config=vision_config.to_dict(), **kwargs)

    def to_dict(self):
        output = copy.deepcopy(self.__dict__)
        output["text_config"] = self.text_config.to_dict()
        output["vision_config"] = self.vision_config.to_dict()
        output["model_type"] = self.__class__.model_type
        return output
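# A usage sketch for composing the two sub-configs into the top-level config
# (all names follow the classes defined above):
#
#   text_config = Pix2StructTextConfig()
#   vision_config = Pix2StructVisionConfig()
#   config = Pix2StructConfig.from_text_vision_configs(text_config, vision_config)
#   config.to_dict()["model_type"]  # -> "pix2struct"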
| 250 | 0 |
import multiprocessing
from typing import TYPE_CHECKING, Optional, Union
from .. import Dataset, Features, config
from ..formatting import query_table
from ..packaged_modules.sql.sql import Sql
from ..utils import logging
from .abc import AbstractDatasetInputStream
if TYPE_CHECKING:
    import sqlite3
import sqlalchemy
class SqlDatasetReader(AbstractDatasetInputStream):
    def __init__(self, sql: Union[str, "sqlalchemy.sql.Selectable"], con: Union[str, "sqlalchemy.engine.Connection", "sqlalchemy.engine.Engine", "sqlite3.Connection"], features: Optional[Features] = None, cache_dir: str = None, keep_in_memory: bool = False, **kwargs):
        super().__init__(features=features, cache_dir=cache_dir, keep_in_memory=keep_in_memory, **kwargs)
        self.builder = Sql(
            cache_dir=cache_dir, features=features, sql=sql, con=con, **kwargs, )

    def read(self):
        download_config = None
        download_mode = None
        verification_mode = None
        base_path = None

        self.builder.download_and_prepare(
            download_config=download_config, download_mode=download_mode, verification_mode=verification_mode, base_path=base_path, )
        # Build dataset for splits
        dataset = self.builder.as_dataset(
            split="train", verification_mode=verification_mode, in_memory=self.keep_in_memory)
        return dataset
class SqlDatasetWriter:
    def __init__(self, dataset: Dataset, name: str, con: Union[str, "sqlalchemy.engine.Connection", "sqlalchemy.engine.Engine", "sqlite3.Connection"], batch_size: Optional[int] = None, num_proc: Optional[int] = None, **to_sql_kwargs):
        if num_proc is not None and num_proc <= 0:
            raise ValueError(f"num_proc {num_proc} must be an integer > 0.")

        self.dataset = dataset
        self.name = name
        self.con = con
        self.batch_size = batch_size if batch_size else config.DEFAULT_MAX_BATCH_SIZE
        self.num_proc = num_proc
        self.to_sql_kwargs = to_sql_kwargs

    def write(self) -> int:
        _ = self.to_sql_kwargs.pop("sql", None)
        _ = self.to_sql_kwargs.pop("con", None)
        index = self.to_sql_kwargs.pop("index", False)

        written = self._write(index=index, **self.to_sql_kwargs)
        return written

    def _batch_sql(self, args):
        offset, index, to_sql_kwargs = args
        to_sql_kwargs = {**to_sql_kwargs, "if_exists": "append"} if offset > 0 else to_sql_kwargs
        batch = query_table(
            table=self.dataset.data, key=slice(offset, offset + self.batch_size), indices=self.dataset._indices, )
        df = batch.to_pandas()
        num_rows = df.to_sql(self.name, self.con, index=index, **to_sql_kwargs)
        return num_rows or len(df)

    def _write(self, index, **to_sql_kwargs) -> int:
        written = 0

        if self.num_proc is None or self.num_proc == 1:
            for offset in logging.tqdm(
                range(0, len(self.dataset), self.batch_size), unit="ba", disable=not logging.is_progress_bar_enabled(), desc="Creating SQL from Arrow format", ):
                written += self._batch_sql((offset, index, to_sql_kwargs))
        else:
            num_rows, batch_size = len(self.dataset), self.batch_size
            with multiprocessing.Pool(self.num_proc) as pool:
                for num_rows_batch in logging.tqdm(
                    pool.imap(
                        self._batch_sql, [(offset, index, to_sql_kwargs) for offset in range(0, num_rows, batch_size)], ), total=(num_rows // batch_size) + 1 if num_rows % batch_size else num_rows // batch_size, unit="ba", disable=not logging.is_progress_bar_enabled(), desc="Creating SQL from Arrow format", ):
                    written += num_rows_batch

        return written
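# A minimal round-trip sketch. In practice these classes are reached through
# `Dataset.to_sql` / `Dataset.from_sql` (assumed entry points) rather than directly:
#
#   import sqlite3
#   from datasets import Dataset
#
#   ds = Dataset.from_dict({"text": ["a", "b"]})
#   con = sqlite3.connect(":memory:")
#   ds.to_sql("my_table", con)                # SqlDatasetWriter under the hood
#   ds2 = Dataset.from_sql("my_table", con)   # SqlDatasetReader under the hood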
| 24 |
import json
from typing import List, Optional, Tuple
from tokenizers import pre_tokenizers, processors
from ...tokenization_utils_base import AddedToken, BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_mvp import MvpTokenizer
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "vocab.json", "merges_file": "merges.txt", "tokenizer_file": "tokenizer.json"}

# See all MVP models at https://huggingface.co/models?filter=mvp
PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "RUCAIBox/mvp": "https://huggingface.co/RUCAIBox/mvp/resolve/main/vocab.json",
    },
    "added_tokens.json": {
        "RUCAIBox/mvp": "https://huggingface.co/RUCAIBox/mvp/resolve/main/added_tokens.json",
    },
    "merges_file": {
        "RUCAIBox/mvp": "https://huggingface.co/RUCAIBox/mvp/resolve/main/merges.txt",
    },
    "tokenizer_file": {
        "RUCAIBox/mvp": "https://huggingface.co/RUCAIBox/mvp/resolve/main/tokenizer.json",
    },
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "RUCAIBox/mvp": 1024,
}
class MvpTokenizerFast(PreTrainedTokenizerFast):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]
    slow_tokenizer_class = MvpTokenizer

    def __init__(self, vocab_file=None, merges_file=None, tokenizer_file=None, errors="replace", bos_token="<s>", eos_token="</s>", sep_token="</s>", cls_token="<s>", unk_token="<unk>", pad_token="<pad>", mask_token="<mask>", add_prefix_space=False, trim_offsets=True, **kwargs):
        super().__init__(
            vocab_file, merges_file, tokenizer_file=tokenizer_file, errors=errors, bos_token=bos_token, eos_token=eos_token, sep_token=sep_token, cls_token=cls_token, unk_token=unk_token, pad_token=pad_token, mask_token=mask_token, add_prefix_space=add_prefix_space, trim_offsets=trim_offsets, **kwargs, )
        pre_tok_state = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__())
        if pre_tok_state.get("add_prefix_space", add_prefix_space) != add_prefix_space:
            pre_tok_class = getattr(pre_tokenizers, pre_tok_state.pop("type"))
            pre_tok_state["add_prefix_space"] = add_prefix_space
            self.backend_tokenizer.pre_tokenizer = pre_tok_class(**pre_tok_state)

        self.add_prefix_space = add_prefix_space

        # the pre_tokenizer is already updated in the GPT2TokenizerFast `__init__`
        tokenizer_component = "post_processor"
        tokenizer_component_instance = getattr(self.backend_tokenizer, tokenizer_component, None)
        if tokenizer_component_instance:
            state = json.loads(tokenizer_component_instance.__getstate__())

            # The lists 'sep' and 'cls' must be cased in tuples for the object `post_processor_class`
            if "sep" in state:
                state["sep"] = tuple(state["sep"])
            if "cls" in state:
                state["cls"] = tuple(state["cls"])

            changes_to_apply = False

            if state.get("add_prefix_space", add_prefix_space) != add_prefix_space:
                state["add_prefix_space"] = add_prefix_space
                changes_to_apply = True

            if state.get("trim_offsets", trim_offsets) != trim_offsets:
                state["trim_offsets"] = trim_offsets
                changes_to_apply = True

            if changes_to_apply:
                component_class = getattr(processors, state.pop("type"))
                new_value = component_class(**state)
                setattr(self.backend_tokenizer, tokenizer_component, new_value)

    @property
    def mask_token(self) -> str:
        if self._mask_token is None:
            if self.verbose:
                logger.error("Using mask_token, but it is not set yet.")
            return None
        return str(self._mask_token)

    @mask_token.setter
    def mask_token(self, value):
        # The mask token behaves like a normal word, i.e. includes the space before it.
        value = AddedToken(value, lstrip=True, rstrip=False) if isinstance(value, str) else value
        self._mask_token = value

    def _batch_encode_plus(self, *args, **kwargs) -> BatchEncoding:
        is_split_into_words = kwargs.get("is_split_into_words", False)

        if is_split_into_words and not self.add_prefix_space:
            raise ValueError(
                f"You need to instantiate {self.__class__.__name__} with add_prefix_space=True "
                "to use it with pretokenized inputs.")

        return super()._batch_encode_plus(*args, **kwargs)

    def _encode_plus(self, *args, **kwargs) -> BatchEncoding:
        is_split_into_words = kwargs.get("is_split_into_words", False)

        if is_split_into_words and not self.add_prefix_space:
            raise ValueError(
                f"You need to instantiate {self.__class__.__name__} with add_prefix_space=True "
                "to use it with pretokenized inputs.")

        return super()._encode_plus(*args, **kwargs)

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        files = self._tokenizer.model.save(save_directory, name=filename_prefix)
        return tuple(files)

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        output = [self.bos_token_id] + token_ids_0 + [self.eos_token_id]
        if token_ids_1 is None:
            return output

        return output + [self.eos_token_id] + token_ids_1 + [self.eos_token_id]

    def create_token_type_ids_from_sequences(self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]

        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep) * [0]
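# A usage sketch (assuming the class above is exported as transformers.MvpTokenizerFast):
#
#   tokenizer = MvpTokenizerFast.from_pretrained("RUCAIBox/mvp")
#   encoding = tokenizer("Summarize: the weather is nice", return_tensors="pt")
#   encoding["input_ids"].shape  # (1, sequence_length), wrapped in <s> ... </s>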
| 140 | 0 |
'''simple docstring'''
import unittest
from transformers import is_torch_available
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow, torch_device
if is_torch_available():
    from transformers import AutoModelForSeq2SeqLM, AutoTokenizer
@require_torch
@require_sentencepiece
@require_tokenizers
class MT5IntegrationTest(unittest.TestCase):
    @slow
    def test_small_integration_test(self):
        model = AutoModelForSeq2SeqLM.from_pretrained("google/mt5-small", return_dict=True).to(torch_device)
        tokenizer = AutoTokenizer.from_pretrained("google/mt5-small")

        input_ids = tokenizer("Hello there", return_tensors="pt").input_ids
        labels = tokenizer("Hi I am", return_tensors="pt").input_ids

        loss = model(input_ids.to(torch_device), labels=labels.to(torch_device)).loss
        mtf_score = -(labels.shape[-1] * loss.item())

        EXPECTED_SCORE = -84.9127
        self.assertTrue(abs(mtf_score - EXPECTED_SCORE) < 1e-4)
| 174 |
'''simple docstring'''
import jax.numpy as jnp

from ...utils import logging
from ..t5.modeling_flax_t5 import FlaxT5EncoderModel, FlaxT5ForConditionalGeneration, FlaxT5Model
from .configuration_mt5 import MT5Config


logger = logging.get_logger(__name__)

_CONFIG_FOR_DOC = "T5Config"


def shift_tokens_right(input_ids: jnp.ndarray, pad_token_id: int, decoder_start_token_id: int) -> jnp.ndarray:
    """Shift input ids one token to the right."""
    shifted_input_ids = jnp.zeros_like(input_ids)
    shifted_input_ids = shifted_input_ids.at[:, 1:].set(input_ids[:, :-1])
    shifted_input_ids = shifted_input_ids.at[:, 0].set(decoder_start_token_id)

    shifted_input_ids = jnp.where(shifted_input_ids == -100, pad_token_id, shifted_input_ids)
    return shifted_input_ids
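# Worked example: with pad_token_id=0 and decoder_start_token_id=0,
# shift_tokens_right(jnp.array([[5, 6, 7]]), 0, 0) first produces [[0, 5, 6]]
# (drop the last token, prepend the start token), then replaces any -100 label
# positions with the pad token.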
class FlaxMT5Model(FlaxT5Model):
    model_type = "mt5"
    config_class = MT5Config


class FlaxMT5EncoderModel(FlaxT5EncoderModel):
    model_type = "mt5"
    config_class = MT5Config


class FlaxMT5ForConditionalGeneration(FlaxT5ForConditionalGeneration):
    model_type = "mt5"
    config_class = MT5Config
| 174 | 1 |
from __future__ import annotations
import unittest
import numpy as np
from transformers import OPTConfig, is_tf_available
from transformers.testing_utils import require_sentencepiece, require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
    from transformers import GPT2Tokenizer, TFOPTForCausalLM, TFOPTModel
def prepare_opt_inputs_dict(config, input_ids, attention_mask=None, head_mask=None):
    if attention_mask is None:
        attention_mask = tf.cast(tf.math.not_equal(input_ids, config.pad_token_id), tf.int8)
    return {"input_ids": input_ids, "attention_mask": attention_mask}
@require_tf
class TFOPTModelTester:
    config_cls = OPTConfig
    config_updates = {}
    hidden_act = "gelu"
def __init__( self : Dict , UpperCamelCase__ : Optional[int] , UpperCamelCase__ : List[Any]=13 , UpperCamelCase__ : List[str]=7 , UpperCamelCase__ : Optional[int]=True , UpperCamelCase__ : Any=False , UpperCamelCase__ : Tuple=99 , UpperCamelCase__ : str=16 , UpperCamelCase__ : Optional[Any]=2 , UpperCamelCase__ : List[Any]=4 , UpperCamelCase__ : int=4 , UpperCamelCase__ : Optional[int]="gelu" , UpperCamelCase__ : Optional[Any]=0.1 , UpperCamelCase__ : str=0.1 , UpperCamelCase__ : List[str]=20 , UpperCamelCase__ : int=2 , UpperCamelCase__ : str=1 , UpperCamelCase__ : Optional[int]=0 , UpperCamelCase__ : List[Any]=16 , UpperCamelCase__ : str=16 , ) -> str:
"""simple docstring"""
__magic_name__ = parent
__magic_name__ = batch_size
__magic_name__ = seq_length
__magic_name__ = is_training
__magic_name__ = use_labels
__magic_name__ = vocab_size
__magic_name__ = hidden_size
__magic_name__ = num_hidden_layers
__magic_name__ = num_attention_heads
__magic_name__ = intermediate_size
__magic_name__ = hidden_act
__magic_name__ = hidden_dropout_prob
__magic_name__ = attention_probs_dropout_prob
__magic_name__ = max_position_embeddings
__magic_name__ = eos_token_id
__magic_name__ = pad_token_id
__magic_name__ = bos_token_id
__magic_name__ = embed_dim
__magic_name__ = word_embed_proj_dim
__magic_name__ = False
def _lowercase ( self : int ) -> Optional[Any]:
"""simple docstring"""
__magic_name__ = ids_tensor([self.batch_size, self.seq_length - 1] , self.vocab_size )
__magic_name__ = tf.expand_dims(tf.constant([self.eos_token_id] * self.batch_size ) , 1 )
__magic_name__ = tf.concat([input_ids, eos_tensor] , axis=1 )
__magic_name__ = self.config_cls(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , ffn_dim=self.intermediate_size , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , eos_token_id=self.eos_token_id , bos_token_id=self.bos_token_id , pad_token_id=self.pad_token_id , embed_dim=self.embed_dim , word_embed_proj_dim=self.word_embed_proj_dim , is_encoder_decoder=UpperCamelCase__ , **self.config_updates , )
__magic_name__ = prepare_opt_inputs_dict(UpperCamelCase__ , UpperCamelCase__ )
return config, inputs_dict
    def check_decoder_model_past_large_inputs(self, config, inputs_dict):
        model = TFOPTModel(config=config)
        input_ids = inputs_dict["input_ids"]

        input_ids = input_ids[:1, :]
        attention_mask = inputs_dict["attention_mask"][:1, :]
        self.batch_size = 1

        # first forward pass
        outputs = model(input_ids, attention_mask=attention_mask, use_cache=True)

        output, past_key_values = outputs.to_tuple()

        # create hypothetical next token and extent to next_input_ids
        next_tokens = ids_tensor((self.batch_size, 3), config.vocab_size)
        next_attn_mask = tf.cast(ids_tensor((self.batch_size, 3), 2), tf.int8)

        # append to next input_ids and
        next_input_ids = tf.concat([input_ids, next_tokens], axis=-1)
        next_attention_mask = tf.concat([attention_mask, next_attn_mask], axis=-1)

        output_from_no_past = model(next_input_ids, attention_mask=next_attention_mask)[0]
        output_from_past = model(next_tokens, attention_mask=next_attention_mask, past_key_values=past_key_values)[0]

        self.parent.assertEqual(next_tokens.shape[1], output_from_past.shape[1])

        # select random slice
        random_slice_idx = int(ids_tensor((1,), output_from_past.shape[-1]))
        output_from_no_past_slice = output_from_no_past[:, -3:, random_slice_idx]
        output_from_past_slice = output_from_past[:, :, random_slice_idx]

        # test that outputs are equal for slice
        tf.debugging.assert_near(output_from_no_past_slice, output_from_past_slice, rtol=1e-3)
@require_tf
class TFOPTModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (TFOPTModel, TFOPTForCausalLM) if is_tf_available() else ()
    all_generative_model_classes = (TFOPTForCausalLM,) if is_tf_available() else ()
    pipeline_model_mapping = (
        {"feature-extraction": TFOPTModel, "text-generation": TFOPTForCausalLM} if is_tf_available() else {}
    )
    is_encoder_decoder = False
    test_pruning = False
    test_onnx = False
    onnx_min_opset = 10
    def setUp(self):
        self.model_tester = TFOPTModelTester(self)
        self.config_tester = ConfigTester(self, config_class=OPTConfig)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_decoder_model_past_large_inputs(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs_for_common()
        self.model_tester.check_decoder_model_past_large_inputs(*config_and_inputs)
    def test_resize_token_embeddings(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        def _get_word_embedding_weight(model, embedding_layer):
            if hasattr(embedding_layer, "weight"):
                return embedding_layer.weight
            else:
                # Here we build the word embeddings weights if not exists.
                # And then we retry to get the attribute once built.
                model.build()
                if hasattr(embedding_layer, "weight"):
                    return embedding_layer.weight
                else:
                    return None

        for model_class in self.all_model_classes:
            for size in [config.vocab_size - 10, config.vocab_size + 10]:
                # build the embeddings
                model = model_class(config=config)
                old_input_embeddings = _get_word_embedding_weight(model, model.get_input_embeddings())
                old_output_embeddings = _get_word_embedding_weight(model, model.get_output_embeddings())

                # reshape the embeddings
                model.resize_token_embeddings(size)
                new_input_embeddings = _get_word_embedding_weight(model, model.get_input_embeddings())
                new_output_embeddings = _get_word_embedding_weight(model, model.get_output_embeddings())

                # check that the resized embeddings size matches the desired size.
                assert_size = size if size is not None else config.vocab_size
                self.assertEqual(new_input_embeddings.shape[0], assert_size)

                # check that weights remain the same after resizing
                models_equal = True
                for p1, p2 in zip(old_input_embeddings.value(), new_input_embeddings.value()):
                    if tf.math.reduce_sum(tf.math.abs(p1 - p2)) > 0:
                        models_equal = False
                self.assertTrue(models_equal)

                if old_output_embeddings is not None and new_output_embeddings is not None:
                    self.assertEqual(new_output_embeddings.shape[0], assert_size)

                    models_equal = True
                    for p1, p2 in zip(old_output_embeddings.value(), new_output_embeddings.value()):
                        if tf.math.reduce_sum(tf.math.abs(p1 - p2)) > 0:
                            models_equal = False
                    self.assertTrue(models_equal)
def _long_tensor(tok_lst):
    return tf.constant(tok_lst, dtype=tf.int32)
@require_tf
class TFOPTHeadTests(unittest.TestCase):
    vocab_size = 99

    def _get_config_and_data(self):
        eos_column_vector = tf.ones((4, 1), dtype=tf.int32) * 2
        input_ids = tf.concat([ids_tensor((4, 6), self.vocab_size - 3) + 3, eos_column_vector], axis=1)
        batch_size = input_ids.shape[0]
        config = OPTConfig(
            vocab_size=self.vocab_size, hidden_size=24, num_hidden_layers=2, num_attention_heads=2, ffn_dim=32, max_position_embeddings=48, eos_token_id=2, pad_token_id=1, bos_token_id=0, )
        return config, input_ids, batch_size
@require_sentencepiece
@require_tf
class TFOPTModelIntegrationTests(unittest.TestCase):
    @slow
    def test_inference_no_head(self):
        model = TFOPTModel.from_pretrained("facebook/opt-350m")
        input_ids = _long_tensor([[0, 31414, 232, 328, 740, 1140, 12695, 69, 46078, 1588, 2]])
        attention_mask = tf.not_equal(input_ids, model.config.pad_token_id)
        with tf.GradientTape():
            output = model(input_ids=input_ids, attention_mask=attention_mask).last_hidden_state
        expected_shape = (1, 11, 512)
        self.assertEqual(output.shape, expected_shape)
        expected_slice = tf.constant(
            [[-0.2873, -1.9218, -0.3033], [-1.2710, -0.1338, -0.1902], [0.4095, 0.1214, -1.3121]])
        self.assertTrue(np.allclose(output[:, :3, :3], expected_slice, atol=4e-3))

        xla_generate = tf.function(model, jit_compile=True)
        output = xla_generate(input_ids, attention_mask)[0]
        self.assertTrue(np.allclose(output[:, :3, :3], expected_slice, atol=4e-2))
@require_tf
@slow
class TFOPTEmbeddingsTest(unittest.TestCase):
    def setUp(self):
        super().setUp()
        self.path_model = "facebook/opt-350m"

    def test_logits(self):
        model = TFOPTForCausalLM.from_pretrained(self.path_model)
        tokenizer = GPT2Tokenizer.from_pretrained(self.path_model)

        prompts = [
            "Today is a beautiful day and I want to",
            "In the city of",
            "Paris is the capital of France and",
            "Computers and mobile phones have taken",
        ]
        # verify that prompt without BOS token is identical to Metaseq -> add_special_tokens=False
        inputs = tokenizer(prompts, return_tensors="tf", padding=True, add_special_tokens=False)
        logits = tf.math.reduce_mean(model(inputs.input_ids, attention_mask=inputs.attention_mask)[0], axis=-1)
        logits_meta = tf.constant(
            [
                [1.3851, -13.8923, -10.5229, -10.7533, -0.2309, -10.2384, -0.5365, -9.0947, -5.1670],
                [-4.7073, -10.6276, -3.9415, -21.5242, -0.2822, -0.2822, -0.2822, -0.2822, -0.2822],
                [0.6247, -3.4229, -8.9179, -1.4297, -14.1650, 1.4146, -9.0218, -0.2703, -0.2703],
                [6.4783, -1.9913, -10.7926, -2.3336, 1.5092, -0.9974, -6.8213, 1.3477, 1.3477],
            ])
        self.assertTrue(np.allclose(logits, logits_meta, atol=1e-4))

        xla_generate = tf.function(model, jit_compile=True)
        logits = tf.math.reduce_mean(xla_generate(inputs.input_ids, attention_mask=inputs.attention_mask)[0], axis=-1)
        self.assertTrue(np.allclose(logits, logits_meta, atol=1e-4))
@require_tf
@slow
class TFOPTGenerationTest(unittest.TestCase):
    @property
    def prompts(self):
        return [
            "Today is a beautiful day and I want",
            "In the city of",
            "Paris is the capital of France and",
            "Computers and mobile phones have taken",
        ]

    def test_generation_pre_attn_layer_norm(self):
        model_id = "facebook/opt-125m"

        EXPECTED_OUTPUTS = [
            "Today is a beautiful day and I want to",
            "In the city of New York, the city",
            "Paris is the capital of France and the capital",
            "Computers and mobile phones have taken over the",
        ]

        predicted_outputs = []
        tokenizer = GPT2Tokenizer.from_pretrained(model_id)
        model = TFOPTForCausalLM.from_pretrained(model_id)

        for prompt in self.prompts:
            input_ids = tokenizer(prompt, return_tensors="tf").input_ids

            generated_ids = model.generate(input_ids, max_length=10)

            generated_string = tokenizer.batch_decode(generated_ids, skip_special_tokens=True)
            predicted_outputs += generated_string

        self.assertListEqual(predicted_outputs, EXPECTED_OUTPUTS)

    def test_batch_generation(self):
        model_id = "facebook/opt-350m"

        tokenizer = GPT2Tokenizer.from_pretrained(model_id)
        model = TFOPTForCausalLM.from_pretrained(model_id)
        tokenizer.padding_side = "left"

        # use different length sentences to test batching
        sentences = [
            "Hello, my dog is a little",
            "Today, I",
        ]

        inputs = tokenizer(sentences, return_tensors="tf", padding=True)
        input_ids = inputs["input_ids"]

        outputs = model.generate(input_ids=input_ids, attention_mask=inputs["attention_mask"])

        inputs_non_padded = tokenizer(sentences[0], return_tensors="tf").input_ids
        output_non_padded = model.generate(input_ids=inputs_non_padded)

        num_paddings = inputs_non_padded.shape[-1] - tf.math.reduce_sum(
            tf.cast(inputs["attention_mask"][-1], tf.int64))
        inputs_padded = tokenizer(sentences[1], return_tensors="tf").input_ids
        output_padded = model.generate(input_ids=inputs_padded, max_length=model.config.max_length - num_paddings)

        batch_out_sentence = tokenizer.batch_decode(outputs, skip_special_tokens=True)
        non_padded_sentence = tokenizer.decode(output_non_padded[0], skip_special_tokens=True)
        padded_sentence = tokenizer.decode(output_padded[0], skip_special_tokens=True)

        expected_output_sentence = [
            "Hello, my dog is a little bit of a dork.\nI'm a little bit",
            "Today, I was in the middle of a conversation with a friend about the",
        ]
        self.assertListEqual(expected_output_sentence, batch_out_sentence)
        self.assertListEqual(expected_output_sentence, [non_padded_sentence, padded_sentence])

    def test_generation_post_attn_layer_norm(self):
        model_id = "facebook/opt-350m"

        EXPECTED_OUTPUTS = [
            "Today is a beautiful day and I want to",
            "In the city of San Francisco, the city",
            "Paris is the capital of France and the capital",
            "Computers and mobile phones have taken over the",
        ]

        predicted_outputs = []
        tokenizer = GPT2Tokenizer.from_pretrained(model_id)
        model = TFOPTForCausalLM.from_pretrained(model_id)

        for prompt in self.prompts:
            input_ids = tokenizer(prompt, return_tensors="tf").input_ids

            generated_ids = model.generate(input_ids, max_length=10)

            generated_string = tokenizer.batch_decode(generated_ids, skip_special_tokens=True)
            predicted_outputs += generated_string

        self.assertListEqual(predicted_outputs, EXPECTED_OUTPUTS)
| 88 |
import pickle
import unittest
import torch
from accelerate import Accelerator
from accelerate.state import AcceleratorState
from accelerate.test_utils import require_cpu
@require_cpu
class OptimizerTester(unittest.TestCase):
    def test_accelerated_optimizer_pickling(self):
        model = torch.nn.Linear(10, 10)
        optimizer = torch.optim.SGD(model.parameters(), 0.1)
        accelerator = Accelerator()
        optimizer = accelerator.prepare(optimizer)
        try:
            pickle.loads(pickle.dumps(optimizer))
        except Exception as e:
            self.fail(f"Accelerated optimizer pickling failed with {e}")
        AcceleratorState._reset_state()
| 88 | 1 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
_import_structure = {"configuration_glpn": ["GLPN_PRETRAINED_CONFIG_ARCHIVE_MAP", "GLPNConfig"]}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["feature_extraction_glpn"] = ["GLPNFeatureExtractor"]
    _import_structure["image_processing_glpn"] = ["GLPNImageProcessor"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_glpn"] = [
        "GLPN_PRETRAINED_MODEL_ARCHIVE_LIST",
        "GLPNForDepthEstimation",
        "GLPNLayer",
        "GLPNModel",
        "GLPNPreTrainedModel",
    ]
if TYPE_CHECKING:
from .configuration_glpn import GLPN_PRETRAINED_CONFIG_ARCHIVE_MAP, GLPNConfig
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_glpn import GLPNFeatureExtractor
from .image_processing_glpn import GLPNImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_glpn import (
GLPN_PRETRAINED_MODEL_ARCHIVE_LIST,
GLPNForDepthEstimation,
GLPNLayer,
GLPNModel,
GLPNPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 371 |
from decimal import Decimal, getcontext
from math import ceil, factorial
def pi(precision: int) -> str:
    """Compute pi to `precision` digits with the Chudnovsky algorithm."""
    if not isinstance(precision, int):
        raise TypeError("Undefined for non-integers")
    elif precision < 1:
        raise ValueError("Undefined for non-natural numbers")

    getcontext().prec = precision
    num_iterations = ceil(precision / 14)
    constant_term = 426880 * Decimal(10005).sqrt()
    exponential_term = 1
    linear_term = 13591409
    partial_sum = Decimal(linear_term)
    for k in range(1, num_iterations):
        multinomial_term = factorial(6 * k) // (factorial(3 * k) * factorial(k) ** 3)
        linear_term += 545140134
        exponential_term *= -262537412640768000
        partial_sum += Decimal(multinomial_term * linear_term) / exponential_term
    return str(constant_term / partial_sum)[:-1]


if __name__ == "__main__":
    n = 50
    print(f"The first {n} digits of pi is: {pi(n)}")
| 138 | 0 |
from argparse import ArgumentParser
from . import BaseTransformersCLICommand
def download_command_factory(args):
    return DownloadCommand(args.model, args.cache_dir, args.force, args.trust_remote_code)
class DownloadCommand(BaseTransformersCLICommand):
    @staticmethod
    def register_subcommand(parser: ArgumentParser):
        download_parser = parser.add_parser("download")
        download_parser.add_argument(
            "--cache-dir", type=str, default=None, help="Path to location to store the models")
        download_parser.add_argument(
            "--force", action="store_true", help="Force the model to be download even if already in cache-dir")
        download_parser.add_argument(
            "--trust-remote-code", action="store_true", help="Whether or not to allow for custom models defined on the Hub in their own modeling files. Use only if you've reviewed the code as it will execute on your local machine", )
        download_parser.add_argument("model", type=str, help="Name of the model to download")
        download_parser.set_defaults(func=download_command_factory)

    def __init__(self, model: str, cache: str, force: bool, trust_remote_code: bool):
        self._model = model
        self._cache = cache
        self._force = force
        self._trust_remote_code = trust_remote_code
    def run(self):
        from ..models.auto import AutoModel, AutoTokenizer

        AutoModel.from_pretrained(
            self._model, cache_dir=self._cache, force_download=self._force, trust_remote_code=self._trust_remote_code)
        AutoTokenizer.from_pretrained(
            self._model, cache_dir=self._cache, force_download=self._force, trust_remote_code=self._trust_remote_code)
| 212 |
def heaps(arr: list) -> list:
    """Heap's algorithm: return all permutations of arr as tuples."""
    if len(arr) <= 1:
        return [tuple(arr)]

    res = []

    def generate(k: int, arr: list):
        if k == 1:
            res.append(tuple(arr[:]))
            return

        generate(k - 1, arr)

        for i in range(k - 1):
            if k % 2 == 0:  # k is even
                arr[i], arr[k - 1] = arr[k - 1], arr[i]
            else:  # k is odd
                arr[0], arr[k - 1] = arr[k - 1], arr[0]
            generate(k - 1, arr)

    generate(len(arr), arr)
    return res
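# Example: heaps([1, 2]) -> [(1, 2), (2, 1)]; in general the result holds all n!
# permutations of the input, with one swap between consecutive permutations.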
if __name__ == "__main__":
    user_input = input("Enter numbers separated by a comma:\n").strip()
    arr = [int(item) for item in user_input.split(",")]
    print(heaps(arr))
| 212 | 1 |
import warnings
from ...utils import logging
from .image_processing_videomae import VideoMAEImageProcessor
logger = logging.get_logger(__name__)


class VideoMAEFeatureExtractor(VideoMAEImageProcessor):
    def __init__(self, *args, **kwargs) -> None:
        warnings.warn(
            "The class VideoMAEFeatureExtractor is deprecated and will be removed in version 5 of Transformers."
            " Please use VideoMAEImageProcessor instead.",
            FutureWarning,
        )
        super().__init__(*args, **kwargs)
| 176 |
def solution(n: int = 1000) -> int:
    """Return the sum of all the multiples of 3 or 5 below n."""
    a = 3
    result = 0
    while a < n:
        if a % 3 == 0 or a % 5 == 0:
            result += a
        # multiples of 15 already satisfy the condition above and are counted
        # exactly once, so no separate branch is needed for them
        a += 1
    return result
if __name__ == "__main__":
print(f"""{solution() = }""")
| 176 | 1 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
_import_structure = {'configuration_glpn': ['GLPN_PRETRAINED_CONFIG_ARCHIVE_MAP', 'GLPNConfig']}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['feature_extraction_glpn'] = ['GLPNFeatureExtractor']
    _import_structure['image_processing_glpn'] = ['GLPNImageProcessor']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_glpn'] = [
        'GLPN_PRETRAINED_MODEL_ARCHIVE_LIST',
        'GLPNForDepthEstimation',
        'GLPNLayer',
        'GLPNModel',
        'GLPNPreTrainedModel',
    ]
if TYPE_CHECKING:
from .configuration_glpn import GLPN_PRETRAINED_CONFIG_ARCHIVE_MAP, GLPNConfig
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_glpn import GLPNFeatureExtractor
from .image_processing_glpn import GLPNImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_glpn import (
GLPN_PRETRAINED_MODEL_ARCHIVE_LIST,
GLPNForDepthEstimation,
GLPNLayer,
GLPNModel,
GLPNPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 27 |
import torch
from transformers import PreTrainedModel, XLMRobertaConfig, XLMRobertaModel
class MCLIPConfig(XLMRobertaConfig):
    model_type = "M-CLIP"

    def __init__(self, transformerDimSize=1024, imageDimSize=768, **kwargs):
        self.transformerDimensions = transformerDimSize
        self.numDims = imageDimSize
        super().__init__(**kwargs)
super().__init__(**__A )
class MultilingualCLIP(PreTrainedModel):
    config_class = MCLIPConfig

    def __init__(self, config, *args, **kwargs):
        super().__init__(config, *args, **kwargs)
        self.transformer = XLMRobertaModel(config)
        self.LinearTransformation = torch.nn.Linear(
            in_features=config.transformerDimensions, out_features=config.numDims)

    def forward(self, input_ids, attention_mask):
        embs = self.transformer(input_ids=input_ids, attention_mask=attention_mask)[0]
        # mean-pool the token embeddings, weighted by the attention mask
        embs = (embs * attention_mask.unsqueeze(2)).sum(dim=1) / attention_mask.sum(dim=1)[:, None]
        return self.LinearTransformation(embs), embs
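# A forward-pass sketch (the tokenizer choice is an assumption; M-CLIP text towers
# are XLM-R based, so an XLM-R tokenizer produces compatible inputs):
#
#   from transformers import AutoTokenizer
#
#   tokenizer = AutoTokenizer.from_pretrained("xlm-roberta-base")
#   batch = tokenizer(["a photo of a dog"], return_tensors="pt", padding=True)
#   projected, pooled = model(batch["input_ids"], batch["attention_mask"])
#   # `projected` lives in the shared image/text space of size config.numDims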
| 283 | 0 |
from ....utils import logging
logger = logging.get_logger(__name__)


class MMBTConfig:
    """Configuration class to store the configuration of a Multimodal Bitransformer (MMBT) model."""

    def __init__(self, config, num_labels=None, modal_hidden_size=2048):
        self.__dict__ = config.__dict__
        self.modal_hidden_size = modal_hidden_size
        if num_labels:
            self.num_labels = num_labels
| 350 |
import os
def solution(filename: str = "input.txt") -> int:
    """Return the minimal path sum from the left column to the right column of the
    matrix read from `filename`, moving up, down, or right at each step."""
    with open(os.path.join(os.path.dirname(__file__), filename)) as input_file:
        matrix = [
            [int(element) for element in line.split(',')]
            for line in input_file.readlines()
        ]

    rows = len(matrix)
    cols = len(matrix[0])

    minimal_path_sums = [[-1 for _ in range(cols)] for _ in range(rows)]
    for i in range(rows):
        minimal_path_sums[i][0] = matrix[i][0]

    for j in range(1, cols):
        for i in range(rows):
            minimal_path_sums[i][j] = minimal_path_sums[i][j - 1] + matrix[i][j]

        for i in range(1, rows):
            minimal_path_sums[i][j] = min(
                minimal_path_sums[i][j], minimal_path_sums[i - 1][j] + matrix[i][j])

        for i in range(rows - 2, -1, -1):
            minimal_path_sums[i][j] = min(
                minimal_path_sums[i][j], minimal_path_sums[i + 1][j] + matrix[i][j])

    return min(minimal_path_sums_row[-1] for minimal_path_sums_row in minimal_path_sums)
if __name__ == "__main__":
print(F'''{solution() = }''')
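

# Illustrative sanity check of the same three-direction DP on the 5x5 matrix
# from the Project Euler 82 statement (known minimal path sum: 994); the helper
# below is a self-contained restatement, not part of the original solution.
def _demo() -> None:
    example = [
        [131, 673, 234, 103, 18],
        [201, 96, 342, 965, 150],
        [630, 803, 746, 422, 111],
        [537, 699, 497, 121, 956],
        [805, 732, 524, 37, 331],
    ]
    rows, cols = len(example), len(example[0])
    sums = [[example[i][0]] + [0] * (cols - 1) for i in range(rows)]
    for j in range(1, cols):
        for i in range(rows):
            sums[i][j] = sums[i][j - 1] + example[i][j]
        for i in range(1, rows):
            sums[i][j] = min(sums[i][j], sums[i - 1][j] + example[i][j])
        for i in range(rows - 2, -1, -1):
            sums[i][j] = min(sums[i][j], sums[i + 1][j] + example[i][j])
    assert min(row[-1] for row in sums) == 994


if __name__ == "__main__":
    _demo()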
| 118 | 0 |
from typing import List, Optional, Union
import numpy as np
import tensorflow as tf
from .utils import logging
logger = logging.get_logger(__name__)


def shape_list(tensor: Union[tf.Tensor, np.ndarray]) -> List[int]:
    """Returns the shape of `tensor`, preferring static dimensions where known."""
    if isinstance(tensor, np.ndarray):
        return list(tensor.shape)

    dynamic = tf.shape(tensor)

    if tensor.shape == tf.TensorShape(None):
        return dynamic

    static = tensor.shape.as_list()

    return [dynamic[i] if s is None else s for i, s in enumerate(static)]

def stable_softmax(logits: tf.Tensor, axis: Optional[int] = None, name: Optional[str] = None) -> tf.Tensor:
    # The tiny epsilon works around an XLA compilation bug in tf.nn.softmax.
    return tf.nn.softmax(logits=logits + 1e-9, axis=axis, name=name)

def functional_layernorm(inputs, weight, bias, epsilon=1e-5, axis=-1):
    # This is a very simplified functional layernorm, designed to duplicate
    # the functionality of PyTorch nn.functional.layer_norm when this is needed to port
    # models in Transformers.
    if weight.shape.rank != 1 or bias.shape.rank != 1 or not isinstance(axis, int):
        raise NotImplementedError('Only 1D weight and bias tensors are supported for now, with only a single axis.')

    # Get mean and variance on the axis to be normalized
    mean, variance = tf.nn.moments(inputs, axes=[axis], keepdims=True)

    if axis != -1:
        # Reshape scale and weight to have the same rank as inputs, but with 1 dimensions
        # on every dimension except axis
        shape = [1] * inputs.shape.rank
        shape[axis] = shape_list(inputs)[axis]
        weight = tf.reshape(weight, shape)
        bias = tf.reshape(bias, shape)

    # Compute layer normalization using the batch_normalization
    # function.
    outputs = tf.nn.batch_normalization(
        inputs, mean, variance, offset=bias, scale=weight, variance_epsilon=epsilon,
    )
    return outputs

def flatten(input, start_dim=0, end_dim=-1):
    # Replicates the behavior of torch.flatten in TF
    # If end_dim or start_dim is negative, count them from the end
    if end_dim < 0:
        end_dim += input.shape.rank
    if start_dim < 0:
        start_dim += input.shape.rank

    if start_dim == end_dim:
        return input

    in_shape = tf.shape(input)
    flattened_dim = tf.math.reduce_prod(in_shape[start_dim : end_dim + 1])
    out_shape = tf.concat([in_shape[:start_dim], [flattened_dim], in_shape[end_dim + 1 :]], axis=0)
    return tf.reshape(input, out_shape)

def invert_attention_mask(encoder_attention_mask: tf.Tensor) -> tf.Tensor:
    if not isinstance(encoder_attention_mask, tf.Tensor):
        encoder_attention_mask = tf.convert_to_tensor(encoder_attention_mask)  # Catches stray NumPy inputs
    if encoder_attention_mask.shape.rank == 3:
        encoder_extended_attention_mask = encoder_attention_mask[:, None, :, :]
    if encoder_attention_mask.shape.rank == 2:
        encoder_extended_attention_mask = encoder_attention_mask[:, None, None, :]
    # T5 has a mask that can compare sequence ids, we can simulate this here with this transposition
    # Cf. https://github.com/tensorflow/mesh/blob/8d2465e9bc93129b913b5ccc6a59aa97abd96ec6/mesh_tensorflow
    # /transformer/transformer_layers.py#L270
    # encoder_extended_attention_mask = (encoder_extended_attention_mask ==
    # encoder_extended_attention_mask.transpose(-1, -2))
    encoder_extended_attention_mask = (
        tf.cast(1, encoder_attention_mask.dtype) - encoder_extended_attention_mask
    ) * encoder_extended_attention_mask.dtype.min

    return encoder_extended_attention_mask

def check_embeddings_within_bounds(tensor: tf.Tensor, embed_dim: int, tensor_name: str = "input_ids") -> None:
    tf.debugging.assert_less(
        tensor,
        tf.cast(embed_dim, dtype=tensor.dtype),
        message=(
            f"The maximum value of {tensor_name} ({tf.math.reduce_max(tensor)}) must be smaller than the embedding "
            f"layer's input dimension ({embed_dim}). The likely cause is some problem at tokenization time."
        ),
    )

def save_attributes_to_hdf5_group(group, name, data):
    HDF5_OBJECT_HEADER_LIMIT = 64512
    # Check that no item in `data` is larger than `HDF5_OBJECT_HEADER_LIMIT`
    # because in that case even chunking the array would not make the saving
    # possible.
    bad_attributes = [x for x in data if len(x) > HDF5_OBJECT_HEADER_LIMIT]

    # Expecting this to never be true.
    if bad_attributes:
        raise RuntimeError(
            "The following attributes cannot be saved to HDF5 file because "
            f"they are larger than {HDF5_OBJECT_HEADER_LIMIT} "
            f"bytes: {bad_attributes}"
        )

    data_npy = np.asarray(data)

    num_chunks = 1
    chunked_data = np.array_split(data_npy, num_chunks)

    # This will never loop forever thanks to the test above.
    while any(x.nbytes > HDF5_OBJECT_HEADER_LIMIT for x in chunked_data):
        num_chunks += 1
        chunked_data = np.array_split(data_npy, num_chunks)

    if num_chunks > 1:
        for chunk_id, chunk_data in enumerate(chunked_data):
            group.attrs["%s%d" % (name, chunk_id)] = chunk_data
    else:
        group.attrs[name] = data

def load_attributes_from_hdf5_group(group, name):
    if name in group.attrs:
        data = [n.decode("utf8") if hasattr(n, "decode") else n for n in group.attrs[name]]
    else:
        data = []
        chunk_id = 0
        while "%s%d" % (name, chunk_id) in group.attrs:
            data.extend(
                [n.decode("utf8") if hasattr(n, "decode") else n for n in group.attrs["%s%d" % (name, chunk_id)]]
            )
            chunk_id += 1
    return data

def expand_1d(data):
    def _expand_single_1d_tensor(t):
        if isinstance(t, tf.Tensor) and t.shape.rank == 1:
            return tf.expand_dims(t, axis=-1)
        return t

    return tf.nest.map_structure(_expand_single_1d_tensor, data)
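

# Illustrative checks for the helpers above (assumes TF 2.x eager mode; these
# lines are a sketch for manual runs, not part of the library module):
if __name__ == "__main__":
    x = tf.random.uniform((2, 3, 4))
    print(shape_list(x))                        # [2, 3, 4]
    print(shape_list(flatten(x, start_dim=1)))  # [2, 12], like torch.flatten(x, 1)
    mask = tf.constant([[1.0, 1.0, 0.0]])
    print(invert_attention_mask(mask)[0, 0, 0])  # ~dtype.min where the mask is 0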
| 195 |
import pprint
import requests
API_ENDPOINT_URL = "https://zenquotes.io/api"


def quote_of_the_day() -> list:
    return requests.get(API_ENDPOINT_URL + "/today").json()


def random_quotes() -> list:
    return requests.get(API_ENDPOINT_URL + "/random").json()


if __name__ == "__main__":
    response = random_quotes()
    pprint.pprint(response)
| 195 | 1 |
import os
from pathlib import Path
from unittest.mock import patch
import pytest
import zstandard as zstd
from datasets.download.download_config import DownloadConfig
from datasets.utils.file_utils import (
OfflineModeIsEnabled,
cached_path,
fsspec_get,
fsspec_head,
ftp_get,
ftp_head,
get_from_cache,
http_get,
http_head,
)
_A = "\\n Text data.\n Second line of data."
_A = "file"
@pytest.fixture(scope='''session''' )
def __UpperCamelCase ( _A ):
lowerCAmelCase_ = tmp_path_factory.mktemp('''data''' ) / (FILE_PATH + '''.zstd''')
lowerCAmelCase_ = bytes(_A , '''utf-8''' )
with zstd.open(_A , '''wb''' ) as f:
f.write(_A )
return path
@pytest.fixture
def __UpperCamelCase ( _A ):
with open(os.path.join(tmpfs.local_root_dir , _A ) , '''w''' ) as f:
f.write(_A )
return FILE_PATH
@pytest.mark.parametrize('''compression_format''' , ['''gzip''', '''xz''', '''zstd'''] )
def __UpperCamelCase ( _A , _A , _A , _A , _A , _A ):
lowerCAmelCase_ = {'''gzip''': gz_file, '''xz''': xz_file, '''zstd''': zstd_path}
lowerCAmelCase_ = input_paths[compression_format]
lowerCAmelCase_ = tmp_path / '''cache'''
lowerCAmelCase_ = DownloadConfig(cache_dir=_A , extract_compressed_file=_A )
lowerCAmelCase_ = cached_path(_A , download_config=_A )
with open(_A ) as f:
lowerCAmelCase_ = f.read()
with open(_A ) as f:
lowerCAmelCase_ = f.read()
assert extracted_file_content == expected_file_content
@pytest.mark.parametrize('''default_extracted''' , [True, False] )
@pytest.mark.parametrize('''default_cache_dir''' , [True, False] )
def __UpperCamelCase ( _A , _A , _A , _A , _A ):
lowerCAmelCase_ = '''custom_cache'''
lowerCAmelCase_ = '''custom_extracted_dir'''
lowerCAmelCase_ = tmp_path / '''custom_extracted_path'''
if default_extracted:
lowerCAmelCase_ = ('''downloads''' if default_cache_dir else custom_cache_dir, '''extracted''')
else:
monkeypatch.setattr('''datasets.config.EXTRACTED_DATASETS_DIR''' , _A )
monkeypatch.setattr('''datasets.config.EXTRACTED_DATASETS_PATH''' , str(_A ) )
lowerCAmelCase_ = custom_extracted_path.parts[-2:] if default_cache_dir else (custom_cache_dir, custom_extracted_dir)
lowerCAmelCase_ = xz_file
lowerCAmelCase_ = (
DownloadConfig(extract_compressed_file=_A )
if default_cache_dir
else DownloadConfig(cache_dir=tmp_path / custom_cache_dir , extract_compressed_file=_A )
)
lowerCAmelCase_ = cached_path(_A , download_config=_A )
assert Path(_A ).parent.parts[-2:] == expected
def __UpperCamelCase ( _A ):
lowerCAmelCase_ = str(Path(_A ).resolve() )
assert cached_path(_A ) == text_file
# relative path
lowerCAmelCase_ = str(Path(_A ).resolve().relative_to(Path(os.getcwd() ) ) )
assert cached_path(_A ) == text_file
def __UpperCamelCase ( _A ):
lowerCAmelCase_ = str(tmp_path.resolve() / '''__missing_file__.txt''' )
with pytest.raises(_A ):
cached_path(_A )
# relative path
lowerCAmelCase_ = '''./__missing_file__.txt'''
with pytest.raises(_A ):
cached_path(_A )
def __UpperCamelCase ( _A ):
lowerCAmelCase_ = get_from_cache(f"tmp://{tmpfs_file}" )
with open(_A ) as f:
lowerCAmelCase_ = f.read()
assert output_file_content == FILE_CONTENT
@patch('''datasets.config.HF_DATASETS_OFFLINE''' , _A )
def __UpperCamelCase ( ):
with pytest.raises(_A ):
cached_path('''https://huggingface.co''' )
@patch('''datasets.config.HF_DATASETS_OFFLINE''' , _A )
def __UpperCamelCase ( _A ):
lowerCAmelCase_ = tmp_path_factory.mktemp('''data''' ) / '''file.html'''
with pytest.raises(_A ):
http_get('''https://huggingface.co''' , temp_file=_A )
with pytest.raises(_A ):
http_head('''https://huggingface.co''' )
@patch('''datasets.config.HF_DATASETS_OFFLINE''' , _A )
def __UpperCamelCase ( _A ):
lowerCAmelCase_ = tmp_path_factory.mktemp('''data''' ) / '''file.html'''
with pytest.raises(_A ):
ftp_get('''ftp://huggingface.co''' , temp_file=_A )
with pytest.raises(_A ):
ftp_head('''ftp://huggingface.co''' )
@patch('''datasets.config.HF_DATASETS_OFFLINE''' , _A )
def __UpperCamelCase ( _A ):
lowerCAmelCase_ = tmp_path_factory.mktemp('''data''' ) / '''file.html'''
with pytest.raises(_A ):
fsspec_get('''s3://huggingface.co''' , temp_file=_A )
with pytest.raises(_A ):
fsspec_head('''s3://huggingface.co''' )
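

# Illustrative usage of the helper under test (the URL and filename are
# placeholders, not taken from this test module):
#
#   from datasets.download.download_config import DownloadConfig
#   from datasets.utils.file_utils import cached_path
#
#   local_path = cached_path(
#       "https://example.com/data.txt.gz",
#       download_config=DownloadConfig(extract_compressed_file=True),
#   )  # downloads and extracts once; later calls reuse the cache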
| 357 |
import json
import os
import shutil
import tempfile
import unittest
import numpy as np
import pytest
from transformers import CLIPTokenizer, CLIPTokenizerFast
from transformers.models.clip.tokenization_clip import VOCAB_FILES_NAMES
from transformers.testing_utils import require_vision
from transformers.utils import IMAGE_PROCESSOR_NAME, is_vision_available
if is_vision_available():
from PIL import Image
from transformers import CLIPSegProcessor, ViTImageProcessor
@require_vision
class CLIPSegProcessorTest(unittest.TestCase):
    def setUp(self):
        """simple docstring"""
        self.tmpdirname = tempfile.mkdtemp()
        # fmt: off
        vocab = ['l', 'o', 'w', 'e', 'r', 's', 't', 'i', 'd', 'n', 'lo', 'l</w>', 'w</w>', 'r</w>', 't</w>', 'low</w>', 'er</w>', 'lowest</w>', 'newer</w>', 'wider', '<unk>', '<|startoftext|>', '<|endoftext|>']
        # fmt: on
        vocab_tokens = dict(zip(vocab, range(len(vocab))))
        merges = ['#version: 0.2', 'l o', 'lo w</w>', 'e r</w>', '']
        self.special_tokens_map = {'unk_token': '<unk>'}

        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES['vocab_file'])
        self.merges_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES['merges_file'])
        with open(self.vocab_file, 'w', encoding='utf-8') as fp:
            fp.write(json.dumps(vocab_tokens) + '\n')
        with open(self.merges_file, 'w', encoding='utf-8') as fp:
            fp.write('\n'.join(merges))

        image_processor_map = {
            'do_resize': True,
            'size': 20,
            'do_center_crop': True,
            'crop_size': 18,
            'do_normalize': True,
            'image_mean': [0.48145466, 0.4578275, 0.40821073],
            'image_std': [0.26862954, 0.26130258, 0.27577711],
        }
        self.image_processor_file = os.path.join(self.tmpdirname, IMAGE_PROCESSOR_NAME)
        with open(self.image_processor_file, 'w', encoding='utf-8') as fp:
            json.dump(image_processor_map, fp)
    def get_tokenizer(self, **UpperCamelCase__):
"""simple docstring"""
return CLIPTokenizer.from_pretrained(self.tmpdirname, **UpperCamelCase__ )
    def get_rust_tokenizer(self, **UpperCamelCase__):
"""simple docstring"""
return CLIPTokenizerFast.from_pretrained(self.tmpdirname, **UpperCamelCase__ )
    def get_image_processor(self, **UpperCamelCase__):
"""simple docstring"""
return ViTImageProcessor.from_pretrained(self.tmpdirname, **UpperCamelCase__ )
    def tearDown(self):
"""simple docstring"""
shutil.rmtree(self.tmpdirname )
    def prepare_image_inputs(self):
        """simple docstring"""
        image_inputs = [np.random.randint(255, size=(3, 30, 400), dtype=np.uint8)]
        image_inputs = [Image.fromarray(np.moveaxis(x, 0, -1)) for x in image_inputs]
        return image_inputs
    def test_save_load_pretrained_default(self):
"""simple docstring"""
lowerCAmelCase_ = self.get_tokenizer()
lowerCAmelCase_ = self.get_rust_tokenizer()
lowerCAmelCase_ = self.get_image_processor()
lowerCAmelCase_ = CLIPSegProcessor(tokenizer=UpperCamelCase__, image_processor=UpperCamelCase__ )
processor_slow.save_pretrained(self.tmpdirname )
lowerCAmelCase_ = CLIPSegProcessor.from_pretrained(self.tmpdirname, use_fast=UpperCamelCase__ )
lowerCAmelCase_ = CLIPSegProcessor(tokenizer=UpperCamelCase__, image_processor=UpperCamelCase__ )
processor_fast.save_pretrained(self.tmpdirname )
lowerCAmelCase_ = CLIPSegProcessor.from_pretrained(self.tmpdirname )
self.assertEqual(processor_slow.tokenizer.get_vocab(), tokenizer_slow.get_vocab() )
self.assertEqual(processor_fast.tokenizer.get_vocab(), tokenizer_fast.get_vocab() )
self.assertEqual(tokenizer_slow.get_vocab(), tokenizer_fast.get_vocab() )
self.assertIsInstance(processor_slow.tokenizer, UpperCamelCase__ )
self.assertIsInstance(processor_fast.tokenizer, UpperCamelCase__ )
self.assertEqual(processor_slow.image_processor.to_json_string(), image_processor.to_json_string() )
self.assertEqual(processor_fast.image_processor.to_json_string(), image_processor.to_json_string() )
self.assertIsInstance(processor_slow.image_processor, UpperCamelCase__ )
self.assertIsInstance(processor_fast.image_processor, UpperCamelCase__ )
    def test_save_load_pretrained_additional_features(self):
"""simple docstring"""
lowerCAmelCase_ = CLIPSegProcessor(tokenizer=self.get_tokenizer(), image_processor=self.get_image_processor() )
processor.save_pretrained(self.tmpdirname )
lowerCAmelCase_ = self.get_tokenizer(bos_token='''(BOS)''', eos_token='''(EOS)''' )
lowerCAmelCase_ = self.get_image_processor(do_normalize=UpperCamelCase__, padding_value=1.0 )
lowerCAmelCase_ = CLIPSegProcessor.from_pretrained(
self.tmpdirname, bos_token='''(BOS)''', eos_token='''(EOS)''', do_normalize=UpperCamelCase__, padding_value=1.0 )
self.assertEqual(processor.tokenizer.get_vocab(), tokenizer_add_kwargs.get_vocab() )
self.assertIsInstance(processor.tokenizer, UpperCamelCase__ )
self.assertEqual(processor.image_processor.to_json_string(), image_processor_add_kwargs.to_json_string() )
self.assertIsInstance(processor.image_processor, UpperCamelCase__ )
    def test_image_processor(self):
"""simple docstring"""
lowerCAmelCase_ = self.get_image_processor()
lowerCAmelCase_ = self.get_tokenizer()
lowerCAmelCase_ = CLIPSegProcessor(tokenizer=UpperCamelCase__, image_processor=UpperCamelCase__ )
lowerCAmelCase_ = self.prepare_image_inputs()
lowerCAmelCase_ = image_processor(UpperCamelCase__, return_tensors='''np''' )
lowerCAmelCase_ = processor(images=UpperCamelCase__, return_tensors='''np''' )
for key in input_feat_extract.keys():
self.assertAlmostEqual(input_feat_extract[key].sum(), input_processor[key].sum(), delta=1E-2 )
    def test_tokenizer(self):
"""simple docstring"""
lowerCAmelCase_ = self.get_image_processor()
lowerCAmelCase_ = self.get_tokenizer()
lowerCAmelCase_ = CLIPSegProcessor(tokenizer=UpperCamelCase__, image_processor=UpperCamelCase__ )
lowerCAmelCase_ = '''lower newer'''
lowerCAmelCase_ = processor(text=UpperCamelCase__ )
lowerCAmelCase_ = tokenizer(UpperCamelCase__ )
for key in encoded_tok.keys():
self.assertListEqual(encoded_tok[key], encoded_processor[key] )
    def test_processor(self):
"""simple docstring"""
lowerCAmelCase_ = self.get_image_processor()
lowerCAmelCase_ = self.get_tokenizer()
lowerCAmelCase_ = CLIPSegProcessor(tokenizer=UpperCamelCase__, image_processor=UpperCamelCase__ )
lowerCAmelCase_ = '''lower newer'''
lowerCAmelCase_ = self.prepare_image_inputs()
lowerCAmelCase_ = processor(text=UpperCamelCase__, images=UpperCamelCase__ )
self.assertListEqual(list(inputs.keys() ), ['''input_ids''', '''attention_mask''', '''pixel_values'''] )
# test if it raises when no input is passed
with pytest.raises(UpperCamelCase__ ):
processor()
    def test_visual_prompt(self):
"""simple docstring"""
lowerCAmelCase_ = self.get_image_processor()
lowerCAmelCase_ = self.get_tokenizer()
lowerCAmelCase_ = CLIPSegProcessor(tokenizer=UpperCamelCase__, image_processor=UpperCamelCase__ )
lowerCAmelCase_ = self.prepare_image_inputs()
lowerCAmelCase_ = self.prepare_image_inputs()
lowerCAmelCase_ = processor(images=UpperCamelCase__, visual_prompt=UpperCamelCase__ )
self.assertListEqual(list(inputs.keys() ), ['''pixel_values''', '''conditional_pixel_values'''] )
# test if it raises when no input is passed
with pytest.raises(UpperCamelCase__ ):
processor()
    def test_tokenizer_decode(self):
"""simple docstring"""
lowerCAmelCase_ = self.get_image_processor()
lowerCAmelCase_ = self.get_tokenizer()
lowerCAmelCase_ = CLIPSegProcessor(tokenizer=UpperCamelCase__, image_processor=UpperCamelCase__ )
lowerCAmelCase_ = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]
lowerCAmelCase_ = processor.batch_decode(UpperCamelCase__ )
lowerCAmelCase_ = tokenizer.batch_decode(UpperCamelCase__ )
self.assertListEqual(UpperCamelCase__, UpperCamelCase__ )
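

# Illustrative end-to-end sketch of the processor under test (the checkpoint
# name is the public CLIPSeg one and is an assumption here, not taken from
# this file):
#
#   processor = CLIPSegProcessor.from_pretrained("CIDAS/clipseg-rd64-refined")
#   inputs = processor(text=["a cat"], images=image, return_tensors="pt")
#   # -> input_ids, attention_mask, pixel_values, matching the keys asserted above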
| 167 | 0 |
def euclidean_gcd(a: int, b: int) -> int:
    while b:
        a, b = b, a % b
    return a


def euclidean_gcd_recursive(a: int, b: int) -> int:
    return a if b == 0 else euclidean_gcd_recursive(b, a % b)


def main() -> None:
    print(f"euclidean_gcd(3, 5) = {euclidean_gcd(3, 5)}")
    print(f"euclidean_gcd(5, 3) = {euclidean_gcd(5, 3)}")
    print(f"euclidean_gcd(1, 3) = {euclidean_gcd(1, 3)}")
    print(f"euclidean_gcd(3, 6) = {euclidean_gcd(3, 6)}")
    print(f"euclidean_gcd(6, 3) = {euclidean_gcd(6, 3)}")
    print(f"euclidean_gcd_recursive(3, 5) = {euclidean_gcd_recursive(3, 5)}")
    print(f"euclidean_gcd_recursive(5, 3) = {euclidean_gcd_recursive(5, 3)}")
    print(f"euclidean_gcd_recursive(1, 3) = {euclidean_gcd_recursive(1, 3)}")
    print(f"euclidean_gcd_recursive(3, 6) = {euclidean_gcd_recursive(3, 6)}")
    print(f"euclidean_gcd_recursive(6, 3) = {euclidean_gcd_recursive(6, 3)}")


if __name__ == "__main__":
    main()
| 189 |
import copy
import os
from collections import OrderedDict
from typing import TYPE_CHECKING, Any, Dict, Mapping, Optional, Union
if TYPE_CHECKING:
from ...processing_utils import ProcessorMixin
from ...utils import TensorType
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
lowerCamelCase : Optional[Any] =logging.get_logger(__name__)
lowerCamelCase : Optional[int] ={
'''google/owlvit-base-patch32''': '''https://huggingface.co/google/owlvit-base-patch32/resolve/main/config.json''',
'''google/owlvit-base-patch16''': '''https://huggingface.co/google/owlvit-base-patch16/resolve/main/config.json''',
'''google/owlvit-large-patch14''': '''https://huggingface.co/google/owlvit-large-patch14/resolve/main/config.json''',
}
class OwlViTTextConfig(PretrainedConfig):
    model_type = 'owlvit_text_model'
    def __init__(self, vocab_size=49408, hidden_size=512, intermediate_size=2048, num_hidden_layers=12, num_attention_heads=8, max_position_embeddings=16, hidden_act="quick_gelu", layer_norm_eps=1e-5, attention_dropout=0.0, initializer_range=0.02, initializer_factor=1.0, pad_token_id=0, bos_token_id=49406, eos_token_id=49407, **kwargs):
        '''simple docstring'''
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)

        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.intermediate_size = intermediate_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.max_position_embeddings = max_position_embeddings
        self.hidden_act = hidden_act
        self.layer_norm_eps = layer_norm_eps
        self.attention_dropout = attention_dropout
        self.initializer_range = initializer_range
        self.initializer_factor = initializer_factor
@classmethod
    def from_pretrained(cls, pretrained_model_name_or_path: Union[str, os.PathLike], **kwargs):
        '''simple docstring'''
        cls._set_token_in_kwargs(kwargs)

        config_dict, kwargs = cls.get_config_dict(pretrained_model_name_or_path, **kwargs)

        # get the text config dict if we are loading from OwlViTConfig
        if config_dict.get("model_type") == "owlvit":
            config_dict = config_dict["text_config"]

        if "model_type" in config_dict and hasattr(cls, "model_type") and config_dict["model_type"] != cls.model_type:
            logger.warning(
                f'You are using a model of type {config_dict["model_type"]} to instantiate a model of type '
                f'{cls.model_type}. This is not supported for all configurations of models and can yield errors.')

        return cls.from_dict(config_dict, **kwargs)
class OwlViTVisionConfig(PretrainedConfig):
    model_type = 'owlvit_vision_model'
    def __init__(self, hidden_size=768, intermediate_size=3072, num_hidden_layers=12, num_attention_heads=12, num_channels=3, image_size=768, patch_size=32, hidden_act="quick_gelu", layer_norm_eps=1e-5, attention_dropout=0.0, initializer_range=0.02, initializer_factor=1.0, **kwargs):
        '''simple docstring'''
        super().__init__(**kwargs)

        self.hidden_size = hidden_size
        self.intermediate_size = intermediate_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.num_channels = num_channels
        self.image_size = image_size
        self.patch_size = patch_size
        self.hidden_act = hidden_act
        self.layer_norm_eps = layer_norm_eps
        self.attention_dropout = attention_dropout
        self.initializer_range = initializer_range
        self.initializer_factor = initializer_factor
@classmethod
    def from_pretrained(cls, pretrained_model_name_or_path: Union[str, os.PathLike], **kwargs):
        '''simple docstring'''
        cls._set_token_in_kwargs(kwargs)

        config_dict, kwargs = cls.get_config_dict(pretrained_model_name_or_path, **kwargs)

        # get the vision config dict if we are loading from OwlViTConfig
        if config_dict.get("model_type") == "owlvit":
            config_dict = config_dict["vision_config"]

        if "model_type" in config_dict and hasattr(cls, "model_type") and config_dict["model_type"] != cls.model_type:
            logger.warning(
                f'You are using a model of type {config_dict["model_type"]} to instantiate a model of type '
                f'{cls.model_type}. This is not supported for all configurations of models and can yield errors.')

        return cls.from_dict(config_dict, **kwargs)
class OwlViTConfig(PretrainedConfig):
    model_type = 'owlvit'
    is_composition = True
    def __init__(self, text_config=None, vision_config=None, projection_dim=512, logit_scale_init_value=2.6592, return_dict=True, **kwargs):
        '''simple docstring'''
        super().__init__(**kwargs)

        if text_config is None:
            text_config = {}
            logger.info("text_config is None. Initializing the OwlViTTextConfig with default values.")

        if vision_config is None:
            vision_config = {}
            logger.info("vision_config is None. initializing the OwlViTVisionConfig with default values.")

        self.text_config = OwlViTTextConfig(**text_config)
        self.vision_config = OwlViTVisionConfig(**vision_config)

        self.projection_dim = projection_dim
        self.logit_scale_init_value = logit_scale_init_value
        self.return_dict = return_dict
        self.initializer_factor = 1.0
@classmethod
    def from_pretrained(cls, pretrained_model_name_or_path: Union[str, os.PathLike], **kwargs):
        '''simple docstring'''
        cls._set_token_in_kwargs(kwargs)

        config_dict, kwargs = cls.get_config_dict(pretrained_model_name_or_path, **kwargs)

        if "model_type" in config_dict and hasattr(cls, "model_type") and config_dict["model_type"] != cls.model_type:
            logger.warning(
                f'You are using a model of type {config_dict["model_type"]} to instantiate a model of type '
                f'{cls.model_type}. This is not supported for all configurations of models and can yield errors.')

        return cls.from_dict(config_dict, **kwargs)
@classmethod
    def from_text_vision_configs(cls, text_config, vision_config, **kwargs):
        '''simple docstring'''
        config_dict = {}
        config_dict["text_config"] = text_config
        config_dict["vision_config"] = vision_config

        return cls.from_dict(config_dict, **kwargs)
    def to_dict(self):
        '''simple docstring'''
        output = copy.deepcopy(self.__dict__)
        output["text_config"] = self.text_config.to_dict()
        output["vision_config"] = self.vision_config.to_dict()
        output["model_type"] = self.__class__.model_type
        return output
class OwlViTOnnxConfig(OnnxConfig):
@property
    def inputs(self):
'''simple docstring'''
return OrderedDict(
[
("input_ids", {0: "batch", 1: "sequence"}),
("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}),
("attention_mask", {0: "batch", 1: "sequence"}),
] )
@property
    def outputs(self):
'''simple docstring'''
return OrderedDict(
[
("logits_per_image", {0: "batch"}),
("logits_per_text", {0: "batch"}),
("text_embeds", {0: "batch"}),
("image_embeds", {0: "batch"}),
] )
@property
    def atol_for_validation(self):
'''simple docstring'''
return 1e-4
    def generate_dummy_inputs(self, processor: "ProcessorMixin", batch_size: int = -1, seq_length: int = -1, framework: Optional["TensorType"] = None):
        '''simple docstring'''
        text_input_dict = super().generate_dummy_inputs(
            processor.tokenizer, batch_size=batch_size, seq_length=seq_length, framework=framework)
        image_input_dict = super().generate_dummy_inputs(
            processor.image_processor, batch_size=batch_size, framework=framework)
        return {**text_input_dict, **image_input_dict}
@property
    def default_onnx_opset(self):
        '''simple docstring'''
        return 14
| 189 | 1 |
"""simple docstring"""
import json
from typing import TYPE_CHECKING, List, Optional, Tuple
from tokenizers import pre_tokenizers, processors
from ...tokenization_utils_base import AddedToken, BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_blenderbot import BlenderbotTokenizer
if TYPE_CHECKING:
from transformers.pipelines.conversational import Conversation
lowercase__ = logging.get_logger(__name__)
lowercase__ = {
"""vocab_file""": """vocab.json""",
"""merges_file""": """merges.txt""",
"""tokenizer_config_file""": """tokenizer_config.json""",
}
lowercase__ = {
"""vocab_file""": {"""facebook/blenderbot-3B""": """https://huggingface.co/facebook/blenderbot-3B/resolve/main/vocab.json"""},
"""merges_file""": {"""facebook/blenderbot-3B""": """https://huggingface.co/facebook/blenderbot-3B/resolve/main/merges.txt"""},
"""tokenizer_config_file""": {
"""facebook/blenderbot-3B""": """https://huggingface.co/facebook/blenderbot-3B/resolve/main/tokenizer_config.json"""
},
}
lowercase__ = {"""facebook/blenderbot-3B""": 128}
class __lowerCamelCase ( A__ ):
'''simple docstring'''
a_ : Dict = VOCAB_FILES_NAMES
a_ : Tuple = PRETRAINED_VOCAB_FILES_MAP
a_ : Dict = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
a_ : Optional[int] = ["""input_ids""", """attention_mask"""]
a_ : int = BlenderbotTokenizer
def __init__( self : Optional[Any] , a_ : Union[str, Any]=None , a_ : Any=None , a_ : int=None , a_ : str="replace" , a_ : Tuple="<s>" , a_ : Optional[int]="</s>" , a_ : Union[str, Any]="</s>" , a_ : Union[str, Any]="<s>" , a_ : Optional[Any]="<unk>" , a_ : str="<pad>" , a_ : List[Any]="<mask>" , a_ : Tuple=False , a_ : Dict=True , **a_ : str , ):
super().__init__(
a_ , a_ , tokenizer_file=a_ , errors=a_ , bos_token=a_ , eos_token=a_ , sep_token=a_ , cls_token=a_ , unk_token=a_ , pad_token=a_ , mask_token=a_ , add_prefix_space=a_ , trim_offsets=a_ , **a_ , )
lowerCAmelCase_ : Any = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__() )
if pre_tok_state.get("add_prefix_space" , a_ ) != add_prefix_space:
lowerCAmelCase_ : str = getattr(a_ , pre_tok_state.pop("type" ) )
lowerCAmelCase_ : int = add_prefix_space
lowerCAmelCase_ : List[Any] = pre_tok_class(**a_ )
lowerCAmelCase_ : Any = add_prefix_space
lowerCAmelCase_ : str = "post_processor"
lowerCAmelCase_ : str = getattr(self.backend_tokenizer , a_ , a_ )
if tokenizer_component_instance:
lowerCAmelCase_ : str = json.loads(tokenizer_component_instance.__getstate__() )
# The lists 'sep' and 'cls' must be cased in tuples for the object `post_processor_class`
if "sep" in state:
lowerCAmelCase_ : Dict = tuple(state["sep"] )
if "cls" in state:
lowerCAmelCase_ : Optional[int] = tuple(state["cls"] )
lowerCAmelCase_ : Optional[int] = False
if state.get("add_prefix_space" , a_ ) != add_prefix_space:
lowerCAmelCase_ : List[str] = add_prefix_space
lowerCAmelCase_ : Any = True
if state.get("trim_offsets" , a_ ) != trim_offsets:
lowerCAmelCase_ : int = trim_offsets
lowerCAmelCase_ : List[str] = True
if changes_to_apply:
lowerCAmelCase_ : Optional[Any] = getattr(a_ , state.pop("type" ) )
lowerCAmelCase_ : Tuple = component_class(**a_ )
setattr(self.backend_tokenizer , a_ , a_ )
@property
# Copied from transformers.models.roberta.tokenization_roberta_fast.RobertaTokenizerFast.mask_token with Roberta->Blenderbot, RoBERTa->Blenderbot
def lowerCamelCase ( self : int ):
if self._mask_token is None:
if self.verbose:
logger.error("Using mask_token, but it is not set yet." )
return None
return str(self._mask_token )
@mask_token.setter
def lowerCamelCase ( self : int , a_ : List[Any] ):
lowerCAmelCase_ : Optional[Any] = AddedToken(a_ , lstrip=a_ , rstrip=a_ ) if isinstance(a_ , a_ ) else value
lowerCAmelCase_ : Tuple = value
def lowerCamelCase ( self : int , *a_ : List[str] , **a_ : Optional[int] ):
lowerCAmelCase_ : Tuple = kwargs.get("is_split_into_words" , a_ )
assert self.add_prefix_space or not is_split_into_words, (
f'''You need to instantiate {self.__class__.__name__} with add_prefix_space=True '''
"to use it with pretokenized inputs."
)
return super()._batch_encode_plus(*a_ , **a_ )
def lowerCamelCase ( self : str , *a_ : Union[str, Any] , **a_ : List[str] ):
lowerCAmelCase_ : Tuple = kwargs.get("is_split_into_words" , a_ )
assert self.add_prefix_space or not is_split_into_words, (
f'''You need to instantiate {self.__class__.__name__} with add_prefix_space=True '''
"to use it with pretokenized inputs."
)
return super()._encode_plus(*a_ , **a_ )
def lowerCamelCase ( self : int , a_ : str , a_ : Optional[str] = None ):
lowerCAmelCase_ : str = self._tokenizer.model.save(a_ , name=a_ )
return tuple(a_ )
def lowerCamelCase ( self : int , a_ : List[int] , a_ : Optional[List[int]] = None ):
lowerCAmelCase_ : Optional[int] = [self.sep_token_id]
lowerCAmelCase_ : Tuple = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
def lowerCamelCase ( self : List[Any] , a_ : List[int] , a_ : Optional[List[int]] = None ):
return token_ids_a + [self.eos_token_id]
def lowerCamelCase ( self : Union[str, Any] , a_ : "Conversation" ):
lowerCAmelCase_ : List[str] = []
for is_user, text in conversation.iter_texts():
if is_user:
# We need to space prefix as it's being done within blenderbot
inputs.append(" " + text )
else:
# Generated responses should contain them already.
inputs.append(a_ )
lowerCAmelCase_ : Tuple = " ".join(a_ )
lowerCAmelCase_ : Any = self.encode(a_ )
if len(a_ ) > self.model_max_length:
lowerCAmelCase_ : Optional[Any] = input_ids[-self.model_max_length :]
logger.warning(f'''Trimmed input from conversation as it was longer than {self.model_max_length} tokens.''' )
return input_ids
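

# Illustrative usage (checkpoint from the pretrained map above; the class name
# follows the public transformers API for this tokenizer):
#
#   tokenizer = BlenderbotTokenizerFast.from_pretrained('facebook/blenderbot-3B')
#   ids = tokenizer('Hello!').input_ids
#   assert ids[-1] == tokenizer.eos_token_id  # build_inputs_with_special_tokens appends EOS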
| 161 |
"""simple docstring"""
import itertools
import random
import unittest
import numpy as np
from transformers import ASTFeatureExtractor
from transformers.testing_utils import require_torch, require_torchaudio
from transformers.utils.import_utils import is_torch_available
from ...test_sequence_feature_extraction_common import SequenceFeatureExtractionTestMixin
global_rng = random.Random()
if is_torch_available():
import torch
def floats_list(shape, scale=1.0, rng=None, name=None):
    """Creates a random float32 tensor as nested Python lists."""
    if rng is None:
        rng = global_rng

    values = []
    for batch_idx in range(shape[0]):
        values.append([])
        for _ in range(shape[1]):
            values[-1].append(rng.random() * scale)

    return values
class ASTFeatureExtractionTester(unittest.TestCase):
'''simple docstring'''
    def __init__(self, parent, batch_size=7, min_seq_length=400, max_seq_length=2000, feature_size=1, padding_value=0.0, sampling_rate=16000, return_attention_mask=True, do_normalize=True):
        self.parent = parent
        self.batch_size = batch_size
        self.min_seq_length = min_seq_length
        self.max_seq_length = max_seq_length
        self.seq_length_diff = (self.max_seq_length - self.min_seq_length) // (self.batch_size - 1)
        self.feature_size = feature_size
        self.padding_value = padding_value
        self.sampling_rate = sampling_rate
        self.return_attention_mask = return_attention_mask
        self.do_normalize = do_normalize
    def prepare_feat_extract_dict(self):
return {
"feature_size": self.feature_size,
"padding_value": self.padding_value,
"sampling_rate": self.sampling_rate,
"return_attention_mask": self.return_attention_mask,
"do_normalize": self.do_normalize,
}
    def prepare_inputs_for_common(self, equal_length=False, numpify=False):
        def _flatten(list_of_lists):
            return list(itertools.chain(*list_of_lists))

        if equal_length:
            speech_inputs = floats_list((self.batch_size, self.max_seq_length))
        else:
            # make sure that inputs increase in size
            speech_inputs = [
                _flatten(floats_list((x, self.feature_size)))
                for x in range(self.min_seq_length, self.max_seq_length, self.seq_length_diff)
            ]
        if numpify:
            speech_inputs = [np.asarray(x) for x in speech_inputs]
        return speech_inputs
@require_torch
@require_torchaudio
class ASTFeatureExtractorTest(SequenceFeatureExtractionTestMixin, unittest.TestCase):
    '''simple docstring'''
    feature_extraction_class = ASTFeatureExtractor
    def setUp(self):
        self.feat_extract_tester = ASTFeatureExtractionTester(self)
    def test_call(self):
        # Tests that all call wrap to encode_plus and batch_encode_plus
        feat_extract = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict())
        # create three inputs of length 800, 1000, and 1200
        speech_inputs = [floats_list((1, x))[0] for x in range(800, 1400, 200)]
        np_speech_inputs = [np.asarray(speech_input) for speech_input in speech_inputs]

        # Test not batched input
        encoded_sequences_1 = feat_extract(speech_inputs[0], return_tensors="np").input_values
        encoded_sequences_2 = feat_extract(np_speech_inputs[0], return_tensors="np").input_values
        self.assertTrue(np.allclose(encoded_sequences_1, encoded_sequences_2, atol=1e-3))

        # Test batched
        encoded_sequences_1 = feat_extract(speech_inputs, padding=True, return_tensors="np").input_values
        encoded_sequences_2 = feat_extract(np_speech_inputs, padding=True, return_tensors="np").input_values
        for enc_seq_1, enc_seq_2 in zip(encoded_sequences_1, encoded_sequences_2):
            self.assertTrue(np.allclose(enc_seq_1, enc_seq_2, atol=1e-3))

        # Test 2-D numpy arrays are batched.
        speech_inputs = [floats_list((1, x))[0] for x in (800, 800, 800)]
        np_speech_inputs = np.asarray(speech_inputs)
        encoded_sequences_1 = feat_extract(speech_inputs, return_tensors="np").input_values
        encoded_sequences_2 = feat_extract(np_speech_inputs, return_tensors="np").input_values
        for enc_seq_1, enc_seq_2 in zip(encoded_sequences_1, encoded_sequences_2):
            self.assertTrue(np.allclose(enc_seq_1, enc_seq_2, atol=1e-3))
@require_torch
    def test_double_precision_pad(self):
        import torch

        feature_extractor = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict())
        np_speech_inputs = np.random.rand(100).astype(np.float64)
        py_speech_inputs = np_speech_inputs.tolist()

        for inputs in [py_speech_inputs, np_speech_inputs]:
            np_processed = feature_extractor.pad([{"input_values": inputs}], return_tensors="np")
            self.assertTrue(np_processed.input_values.dtype == np.float32)
            pt_processed = feature_extractor.pad([{"input_values": inputs}], return_tensors="pt")
            self.assertTrue(pt_processed.input_values.dtype == torch.float32)
    def _load_datasamples(self, num_samples):
        from datasets import load_dataset

        ds = load_dataset("hf-internal-testing/librispeech_asr_dummy", "clean", split="validation")
        # automatic decoding with librispeech
        speech_samples = ds.sort("id").select(range(num_samples))[:num_samples]["audio"]
        return [x["array"] for x in speech_samples]
@require_torch
    def test_integration(self):
        # fmt: off
        EXPECTED_INPUT_VALUES = torch.tensor(
            [-0.9894, -1.2776, -0.9066, -1.2776, -0.9349, -1.2609, -1.0386, -1.2776,
             -1.1561, -1.2776, -1.2052, -1.2723, -1.2190, -1.2132, -1.2776, -1.1133,
             -1.1953, -1.1343, -1.1584, -1.2203, -1.1770, -1.2474, -1.2381, -1.1936,
             -0.9270, -0.8317, -0.8049, -0.7706, -0.7565, -0.7869]
        )
        # fmt: on

        input_speech = self._load_datasamples(1)
        feature_extractor = ASTFeatureExtractor()
        input_values = feature_extractor(input_speech, return_tensors="pt").input_values
        self.assertEquals(input_values.shape, (1, 1024, 128))
        self.assertTrue(torch.allclose(input_values[0, 0, :30], EXPECTED_INPUT_VALUES, atol=1e-4))
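

# Standalone sketch of the integration check above, with random audio instead
# of the LibriSpeech sample (so only the output shape is meaningful here):
if __name__ == "__main__":
    raw = np.random.randn(16_000).astype(np.float32)
    feature_extractor = ASTFeatureExtractor()
    feats = feature_extractor(raw, sampling_rate=16_000, return_tensors="np")
    print(feats.input_values.shape)  # (1, 1024, 128)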
| 161 | 1 |
def solution(max_base: int = 10, max_power: int = 22) -> int:
    """Returns the count of positive integers that are both n-digit and an nth power."""
    bases = range(1, max_base)
    powers = range(1, max_power)
    return sum(
        1 for power in powers for base in bases if len(str(base**power)) == power
    )


if __name__ == "__main__":
    print(f"{solution(10, 22) = }")
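

# Worked check (illustrative): 9**4 = 6561 is a 4-digit fourth power and is
# counted, while 10**2 = 100 has 3 digits, which is why bases stop below
# max_base. With the default arguments the count is the known answer, 49.
assert len(str(9**4)) == 4
assert solution() == 49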
| 138 |
"""simple docstring"""
import torch
from diffusers import KDPMaDiscreteScheduler
from diffusers.utils import torch_device
from .test_schedulers import SchedulerCommonTest
class KDPMaDiscreteSchedulerTest(SchedulerCommonTest):
    scheduler_classes = (KDPMaDiscreteScheduler,)
    num_inference_steps = 10
    def get_scheduler_config(self, **kwargs):
        config = {
            "num_train_timesteps": 1100,
            "beta_start": 0.0001,
            "beta_end": 0.02,
            "beta_schedule": "linear",
        }

        config.update(**kwargs)
        return config
    def test_timesteps(self):
        for timesteps in [10, 50, 100, 1000]:
            self.check_over_configs(num_train_timesteps=timesteps)
    def test_betas(self):
        for beta_start, beta_end in zip([0.00001, 0.0001, 0.001], [0.0002, 0.002, 0.02]):
            self.check_over_configs(beta_start=beta_start, beta_end=beta_end)
    def test_schedules(self):
        for schedule in ["linear", "scaled_linear"]:
            self.check_over_configs(beta_schedule=schedule)
    def test_prediction_type(self):
        for prediction_type in ["epsilon", "v_prediction"]:
            self.check_over_configs(prediction_type=prediction_type)
    def test_full_loop_with_v_prediction(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config(prediction_type="v_prediction")
        scheduler = scheduler_class(**scheduler_config)

        scheduler.set_timesteps(self.num_inference_steps)

        model = self.dummy_model()
        sample = self.dummy_sample_deter * scheduler.init_noise_sigma
        sample = sample.to(torch_device)

        for i, t in enumerate(scheduler.timesteps):
            sample = scheduler.scale_model_input(sample, t)

            model_output = model(sample, t)

            output = scheduler.step(model_output, t, sample)
            sample = output.prev_sample

        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        if torch_device in ["cpu", "mps"]:
            assert abs(result_sum.item() - 4.6934e-07) < 1e-2
            assert abs(result_mean.item() - 6.1112e-10) < 1e-3
        else:
            # CUDA
            assert abs(result_sum.item() - 4.693428650170972e-07) < 1e-2
            assert abs(result_mean.item() - 0.0002) < 1e-3
    def test_full_loop_no_noise(self):
        if torch_device == "mps":
            return
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)

        scheduler.set_timesteps(self.num_inference_steps)

        model = self.dummy_model()
        sample = self.dummy_sample_deter * scheduler.init_noise_sigma
        sample = sample.to(torch_device)

        for i, t in enumerate(scheduler.timesteps):
            sample = scheduler.scale_model_input(sample, t)

            model_output = model(sample, t)

            output = scheduler.step(model_output, t, sample)
            sample = output.prev_sample

        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        if torch_device in ["cpu", "mps"]:
            assert abs(result_sum.item() - 20.4125) < 1e-2
            assert abs(result_mean.item() - 0.0266) < 1e-3
        else:
            # CUDA
            assert abs(result_sum.item() - 20.4125) < 1e-2
            assert abs(result_mean.item() - 0.0266) < 1e-3
    def test_full_loop_device(self):
        if torch_device == "mps":
            return
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)

        scheduler.set_timesteps(self.num_inference_steps, device=torch_device)

        model = self.dummy_model()
        sample = self.dummy_sample_deter.to(torch_device) * scheduler.init_noise_sigma

        for t in scheduler.timesteps:
            sample = scheduler.scale_model_input(sample, t)

            model_output = model(sample, t)

            output = scheduler.step(model_output, t, sample)
            sample = output.prev_sample

        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        if str(torch_device).startswith("cpu"):
            # The following sum varies between 148 and 156 on mps. Why?
            assert abs(result_sum.item() - 20.4125) < 1e-2
            assert abs(result_mean.item() - 0.0266) < 1e-3
        else:
            # CUDA
            assert abs(result_sum.item() - 20.4125) < 1e-2
            assert abs(result_mean.item() - 0.0266) < 1e-3
| 243 | 0 |
'''simple docstring'''
import unittest
import numpy as np
from transformers import RobertaPreLayerNormConfig, is_flax_available
from transformers.testing_utils import require_flax, slow
from ...test_modeling_flax_common import FlaxModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
if is_flax_available():
import jax.numpy as jnp
from transformers.models.roberta_prelayernorm.modeling_flax_roberta_prelayernorm import (
FlaxRobertaPreLayerNormForCausalLM,
FlaxRobertaPreLayerNormForMaskedLM,
FlaxRobertaPreLayerNormForMultipleChoice,
FlaxRobertaPreLayerNormForQuestionAnswering,
FlaxRobertaPreLayerNormForSequenceClassification,
FlaxRobertaPreLayerNormForTokenClassification,
FlaxRobertaPreLayerNormModel,
)
class FlaxRobertaPreLayerNormModelTester(unittest.TestCase):
'''simple docstring'''
def __init__( self ,__UpperCAmelCase ,__UpperCAmelCase=13 ,__UpperCAmelCase=7 ,__UpperCAmelCase=True ,__UpperCAmelCase=True ,__UpperCAmelCase=True ,__UpperCAmelCase=True ,__UpperCAmelCase=99 ,__UpperCAmelCase=32 ,__UpperCAmelCase=5 ,__UpperCAmelCase=4 ,__UpperCAmelCase=37 ,__UpperCAmelCase="gelu" ,__UpperCAmelCase=0.1 ,__UpperCAmelCase=0.1 ,__UpperCAmelCase=512 ,__UpperCAmelCase=16 ,__UpperCAmelCase=2 ,__UpperCAmelCase=0.0_2 ,__UpperCAmelCase=4 ,) -> Optional[int]:
lowerCAmelCase__ : Tuple = parent
lowerCAmelCase__ : Tuple = batch_size
lowerCAmelCase__ : Dict = seq_length
lowerCAmelCase__ : List[str] = is_training
lowerCAmelCase__ : List[Any] = use_attention_mask
lowerCAmelCase__ : Optional[int] = use_token_type_ids
lowerCAmelCase__ : List[Any] = use_labels
lowerCAmelCase__ : Any = vocab_size
lowerCAmelCase__ : Any = hidden_size
lowerCAmelCase__ : Union[str, Any] = num_hidden_layers
lowerCAmelCase__ : str = num_attention_heads
lowerCAmelCase__ : Any = intermediate_size
lowerCAmelCase__ : Optional[Any] = hidden_act
lowerCAmelCase__ : int = hidden_dropout_prob
lowerCAmelCase__ : List[str] = attention_probs_dropout_prob
lowerCAmelCase__ : List[str] = max_position_embeddings
lowerCAmelCase__ : Optional[Any] = type_vocab_size
lowerCAmelCase__ : List[Any] = type_sequence_label_size
lowerCAmelCase__ : int = initializer_range
lowerCAmelCase__ : str = num_choices
def UpperCAmelCase_ ( self ) -> str:
lowerCAmelCase__ : str = ids_tensor([self.batch_size, self.seq_length] ,self.vocab_size )
lowerCAmelCase__ : List[str] = None
if self.use_attention_mask:
lowerCAmelCase__ : Any = random_attention_mask([self.batch_size, self.seq_length] )
lowerCAmelCase__ : Union[str, Any] = None
if self.use_token_type_ids:
lowerCAmelCase__ : Dict = ids_tensor([self.batch_size, self.seq_length] ,self.type_vocab_size )
lowerCAmelCase__ : List[str] = RobertaPreLayerNormConfig(
vocab_size=self.vocab_size ,hidden_size=self.hidden_size ,num_hidden_layers=self.num_hidden_layers ,num_attention_heads=self.num_attention_heads ,intermediate_size=self.intermediate_size ,hidden_act=self.hidden_act ,hidden_dropout_prob=self.hidden_dropout_prob ,attention_probs_dropout_prob=self.attention_probs_dropout_prob ,max_position_embeddings=self.max_position_embeddings ,type_vocab_size=self.type_vocab_size ,is_decoder=__UpperCAmelCase ,initializer_range=self.initializer_range ,)
return config, input_ids, token_type_ids, attention_mask
def UpperCAmelCase_ ( self ) -> Optional[int]:
lowerCAmelCase__ : int = self.prepare_config_and_inputs()
lowerCAmelCase__ : Optional[Any] = config_and_inputs
lowerCAmelCase__ : str = {"""input_ids""": input_ids, """token_type_ids""": token_type_ids, """attention_mask""": attention_mask}
return config, inputs_dict
def UpperCAmelCase_ ( self ) -> Any:
lowerCAmelCase__ : List[Any] = self.prepare_config_and_inputs()
lowerCAmelCase__ : Dict = config_and_inputs
lowerCAmelCase__ : Union[str, Any] = True
lowerCAmelCase__ : Optional[Any] = floats_tensor([self.batch_size, self.seq_length, self.hidden_size] )
lowerCAmelCase__ : Tuple = ids_tensor([self.batch_size, self.seq_length] ,vocab_size=2 )
return (
config,
input_ids,
token_type_ids,
encoder_hidden_states,
encoder_attention_mask,
)
@require_flax
# Copied from tests.models.roberta.test_modelling_flax_roberta.FlaxRobertaPreLayerNormModelTest with ROBERTA->ROBERTA_PRELAYERNORM,Roberta->RobertaPreLayerNorm,roberta-base->andreasmadsen/efficient_mlm_m0.40
class FlaxRobertaPreLayerNormModelTest(FlaxModelTesterMixin, unittest.TestCase):
    '''simple docstring'''
    test_head_masking = True
    all_model_classes = (
(
FlaxRobertaPreLayerNormModel,
FlaxRobertaPreLayerNormForCausalLM,
FlaxRobertaPreLayerNormForMaskedLM,
FlaxRobertaPreLayerNormForSequenceClassification,
FlaxRobertaPreLayerNormForTokenClassification,
FlaxRobertaPreLayerNormForMultipleChoice,
FlaxRobertaPreLayerNormForQuestionAnswering,
)
if is_flax_available()
else ()
)
    def setUp(self):
        self.model_tester = FlaxRobertaPreLayerNormModelTester(self)
@slow
    def test_model_from_pretrained(self):
        for model_class_name in self.all_model_classes:
            model = model_class_name.from_pretrained("andreasmadsen/efficient_mlm_m0.40", from_pt=True)
            outputs = model(np.ones((1, 1)))
            self.assertIsNotNone(outputs)
@require_flax
class lowerCAmelCase_( unittest.TestCase ):
'''simple docstring'''
@slow
def UpperCAmelCase_ ( self ) -> str:
lowerCAmelCase__ : Any = FlaxRobertaPreLayerNormForMaskedLM.from_pretrained("""andreasmadsen/efficient_mlm_m0.40""" ,from_pt=__UpperCAmelCase )
lowerCAmelCase__ : List[Any] = np.array([[0, 3_1414, 232, 328, 740, 1140, 1_2695, 69, 4_6078, 1588, 2]] ,dtype=jnp.intaa )
lowerCAmelCase__ : Optional[int] = model(__UpperCAmelCase )[0]
lowerCAmelCase__ : Optional[Any] = [1, 11, 5_0265]
self.assertEqual(list(output.shape ) ,__UpperCAmelCase )
# compare the actual values for a slice.
lowerCAmelCase__ : Dict = np.array(
[[[40.4880, 18.0199, -5.2_3_6_7], [-1.8_8_7_7, -4.0_8_8_5, 10.7085], [-2.2_6_1_3, -5.6_1_1_0, 7.2_6_6_5]]] ,dtype=np.floataa )
self.assertTrue(np.allclose(output[:, :3, :3] ,__UpperCAmelCase ,atol=1E-4 ) )
@slow
def UpperCAmelCase_ ( self ) -> str:
lowerCAmelCase__ : Tuple = FlaxRobertaPreLayerNormModel.from_pretrained("""andreasmadsen/efficient_mlm_m0.40""" ,from_pt=__UpperCAmelCase )
lowerCAmelCase__ : Union[str, Any] = np.array([[0, 3_1414, 232, 328, 740, 1140, 1_2695, 69, 4_6078, 1588, 2]] ,dtype=jnp.intaa )
lowerCAmelCase__ : Any = model(__UpperCAmelCase )[0]
# compare the actual values for a slice.
lowerCAmelCase__ : List[str] = np.array(
[[[0.0_2_0_8, -0.0_3_5_6, 0.0_2_3_7], [-0.1_5_6_9, -0.0_4_1_1, -0.2_6_2_6], [0.1_8_7_9, 0.0_1_2_5, -0.0_0_8_9]]] ,dtype=np.floataa )
self.assertTrue(np.allclose(output[:, :3, :3] ,__UpperCAmelCase ,atol=1E-4 ) )
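

# Standalone sketch mirroring the slow integration tests above (same
# checkpoint; requires flax to be installed):
#
#   model = FlaxRobertaPreLayerNormModel.from_pretrained(
#       "andreasmadsen/efficient_mlm_m0.40", from_pt=True)
#   output = model(np.ones((1, 11), dtype="i4"))[0]
#   # output.shape == (1, 11, hidden_size), matching the slices checked above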
| 351 |
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices
_lowerCAmelCase = logging.get_logger(__name__)
_lowerCAmelCase = {
'''facebook/convnextv2-tiny-1k-224''': '''https://huggingface.co/facebook/convnextv2-tiny-1k-224/resolve/main/config.json''',
}
class ConvNextV2Config(BackboneConfigMixin, PretrainedConfig):
    '''simple docstring'''
    model_type = 'convnextv2'

    def __init__(self, num_channels=3, patch_size=4, num_stages=4, hidden_sizes=None, depths=None, hidden_act="gelu", initializer_range=0.02, layer_norm_eps=1e-12, drop_path_rate=0.0, image_size=224, out_features=None, out_indices=None, **kwargs):
        super().__init__(**kwargs)

        self.num_channels = num_channels
        self.patch_size = patch_size
        self.num_stages = num_stages
        self.hidden_sizes = [96, 192, 384, 768] if hidden_sizes is None else hidden_sizes
        self.depths = [3, 3, 9, 3] if depths is None else depths
        self.hidden_act = hidden_act
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.drop_path_rate = drop_path_rate
        self.image_size = image_size
        self.stage_names = ['stem'] + [f"stage{idx}" for idx in range(1, len(self.depths) + 1)]
        self._out_features, self._out_indices = get_aligned_output_features_output_indices(
            out_features=out_features, out_indices=out_indices, stage_names=self.stage_names)
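

# Illustrative usage of the config above (expected values assume the
# stage_names layout built in __init__):
#
#   cfg = ConvNextV2Config(out_features=['stage2', 'stage4'])
#   print(cfg.out_features, cfg.out_indices)  # ['stage2', 'stage4'] [2, 4]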
| 184 | 0 |
"""simple docstring"""
from typing import Optional, Tuple, Union
import tensorflow as tf
from ...activations_tf import ACTaFN
from ...file_utils import add_code_sample_docstrings, add_start_docstrings, add_start_docstrings_to_model_forward
from ...modeling_tf_outputs import (
TFBaseModelOutputWithNoAttention,
TFBaseModelOutputWithPoolingAndNoAttention,
TFSequenceClassifierOutput,
)
from ...modeling_tf_utils import TFPreTrainedModel, TFSequenceClassificationLoss, keras_serializable, unpack_inputs
from ...tf_utils import shape_list
from ...utils import logging
from .configuration_regnet import RegNetConfig
lowercase__ = logging.get_logger(__name__)
# General docstring
lowercase__ = """RegNetConfig"""
# Base docstring
lowercase__ = """facebook/regnet-y-040"""
lowercase__ = [1, 1088, 7, 7]
# Image classification docstring
lowercase__ = """facebook/regnet-y-040"""
lowercase__ = """tabby, tabby cat"""
lowercase__ = [
"""facebook/regnet-y-040""",
# See all regnet models at https://huggingface.co/models?filter=regnet
]
class __lowerCamelCase ( tf.keras.layers.Layer ):
'''simple docstring'''
def __init__( self : Tuple , a_ : int , a_ : int = 3 , a_ : int = 1 , a_ : int = 1 , a_ : Optional[str] = "relu" , **a_ : int , ):
super().__init__(**a_ )
# The padding and conv has been verified in
# https://colab.research.google.com/gist/sayakpaul/854bc10eeaf21c9ee2119e0b9f3841a7/scratchpad.ipynb
lowerCAmelCase_ : Tuple = tf.keras.layers.ZeroPaddingaD(padding=kernel_size // 2 )
lowerCAmelCase_ : List[Any] = tf.keras.layers.ConvaD(
filters=a_ , kernel_size=a_ , strides=a_ , padding="VALID" , groups=a_ , use_bias=a_ , name="convolution" , )
lowerCAmelCase_ : Optional[int] = tf.keras.layers.BatchNormalization(epsilon=1e-5 , momentum=0.9 , name="normalization" )
lowerCAmelCase_ : Dict = ACTaFN[activation] if activation is not None else tf.identity
def lowerCamelCase ( self : str , a_ : Any ):
lowerCAmelCase_ : Optional[int] = self.convolution(self.padding(a_ ) )
lowerCAmelCase_ : int = self.normalization(a_ )
lowerCAmelCase_ : Optional[Any] = self.activation(a_ )
return hidden_state
class __lowerCamelCase ( tf.keras.layers.Layer ):
'''simple docstring'''
def __init__( self : int , a_ : RegNetConfig , **a_ : Tuple ):
super().__init__(**a_ )
lowerCAmelCase_ : Union[str, Any] = config.num_channels
lowerCAmelCase_ : Union[str, Any] = TFRegNetConvLayer(
out_channels=config.embedding_size , kernel_size=3 , stride=2 , activation=config.hidden_act , name="embedder" , )
def lowerCamelCase ( self : Union[str, Any] , a_ : Any ):
lowerCAmelCase_ : int = shape_list(a_ )[1]
if tf.executing_eagerly() and num_channels != self.num_channels:
raise ValueError(
"Make sure that the channel dimension of the pixel values match with the one set in the configuration." )
# When running on CPU, `tf.keras.layers.Conv2D` doesn't support `NCHW` format.
# So change the input format from `NCHW` to `NHWC`.
# shape = (batch_size, in_height, in_width, in_channels=num_channels)
lowerCAmelCase_ : Dict = tf.transpose(a_ , perm=(0, 2, 3, 1) )
lowerCAmelCase_ : Any = self.embedder(a_ )
return hidden_state
class __lowerCamelCase ( tf.keras.layers.Layer ):
'''simple docstring'''
def __init__( self : List[str] , a_ : int , a_ : int = 2 , **a_ : Optional[Any] ):
super().__init__(**a_ )
lowerCAmelCase_ : Optional[int] = tf.keras.layers.ConvaD(
filters=a_ , kernel_size=1 , strides=a_ , use_bias=a_ , name="convolution" )
lowerCAmelCase_ : Union[str, Any] = tf.keras.layers.BatchNormalization(epsilon=1e-5 , momentum=0.9 , name="normalization" )
def lowerCamelCase ( self : Tuple , a_ : tf.Tensor , a_ : bool = False ):
return self.normalization(self.convolution(a_ ) , training=a_ )
class __lowerCamelCase ( tf.keras.layers.Layer ):
'''simple docstring'''
def __init__( self : Tuple , a_ : int , a_ : int , **a_ : Any ):
super().__init__(**a_ )
lowerCAmelCase_ : Tuple = tf.keras.layers.GlobalAveragePoolingaD(keepdims=a_ , name="pooler" )
lowerCAmelCase_ : int = [
tf.keras.layers.ConvaD(filters=a_ , kernel_size=1 , activation="relu" , name="attention.0" ),
tf.keras.layers.ConvaD(filters=a_ , kernel_size=1 , activation="sigmoid" , name="attention.2" ),
]
def lowerCamelCase ( self : Dict , a_ : List[str] ):
# [batch_size, h, w, num_channels] -> [batch_size, 1, 1, num_channels]
lowerCAmelCase_ : Optional[Any] = self.pooler(a_ )
for layer_module in self.attention:
lowerCAmelCase_ : List[Any] = layer_module(a_ )
lowerCAmelCase_ : str = hidden_state * pooled
return hidden_state
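# Added note: the squeeze-and-excitation block above pools (batch, h, w, channels) down to
# (batch, 1, 1, channels), runs the two 1x1 "attention" convolutions (relu, then sigmoid),
# and broadcast-multiplies the resulting per-channel gate back onto the full-resolution
# feature map, so each channel is rescaled by a value in (0, 1).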
class __lowerCamelCase ( tf.keras.layers.Layer ):
'''simple docstring'''
def __init__( self : Union[str, Any] , a_ : RegNetConfig , a_ : int , a_ : int , a_ : int = 1 , **a_ : Any ):
super().__init__(**a_ )
lowerCAmelCase_ : List[str] = in_channels != out_channels or stride != 1
lowerCAmelCase_ : Optional[int] = max(1 , out_channels // config.groups_width )
lowerCAmelCase_ : Optional[Any] = (
TFRegNetShortCut(a_ , stride=a_ , name="shortcut" )
if should_apply_shortcut
else tf.keras.layers.Activation("linear" , name="shortcut" )
)
# `self.layers` instead of `self.layer` because that is a reserved argument.
lowerCAmelCase_ : Dict = [
TFRegNetConvLayer(a_ , kernel_size=1 , activation=config.hidden_act , name="layer.0" ),
TFRegNetConvLayer(
a_ , stride=a_ , groups=a_ , activation=config.hidden_act , name="layer.1" ),
TFRegNetConvLayer(a_ , kernel_size=1 , activation=a_ , name="layer.2" ),
]
lowerCAmelCase_ : List[Any] = ACTaFN[config.hidden_act]
def lowerCamelCase ( self : Any , a_ : int ):
lowerCAmelCase_ : str = hidden_state
for layer_module in self.layers:
lowerCAmelCase_ : Any = layer_module(a_ )
lowerCAmelCase_ : List[str] = self.shortcut(a_ )
hidden_state += residual
lowerCAmelCase_ : Tuple = self.activation(a_ )
return hidden_state
class __lowerCamelCase ( tf.keras.layers.Layer ):
'''simple docstring'''
def __init__( self : List[str] , a_ : RegNetConfig , a_ : int , a_ : int , a_ : int = 1 , **a_ : List[Any] ):
super().__init__(**a_ )
lowerCAmelCase_ : Optional[Any] = in_channels != out_channels or stride != 1
lowerCAmelCase_ : List[Any] = max(1 , out_channels // config.groups_width )
lowerCAmelCase_ : List[Any] = (
TFRegNetShortCut(a_ , stride=a_ , name="shortcut" )
if should_apply_shortcut
else tf.keras.layers.Activation("linear" , name="shortcut" )
)
lowerCAmelCase_ : Tuple = [
TFRegNetConvLayer(a_ , kernel_size=1 , activation=config.hidden_act , name="layer.0" ),
TFRegNetConvLayer(
a_ , stride=a_ , groups=a_ , activation=config.hidden_act , name="layer.1" ),
TFRegNetSELayer(a_ , reduced_channels=int(round(in_channels / 4 ) ) , name="layer.2" ),
TFRegNetConvLayer(a_ , kernel_size=1 , activation=a_ , name="layer.3" ),
]
lowerCAmelCase_ : Any = ACTaFN[config.hidden_act]
def lowerCamelCase ( self : Dict , a_ : Tuple ):
lowerCAmelCase_ : str = hidden_state
for layer_module in self.layers:
lowerCAmelCase_ : Tuple = layer_module(a_ )
lowerCAmelCase_ : int = self.shortcut(a_ )
hidden_state += residual
lowerCAmelCase_ : Tuple = self.activation(a_ )
return hidden_state
class __lowerCamelCase ( tf.keras.layers.Layer ):
'''simple docstring'''
def __init__( self : Any , a_ : RegNetConfig , a_ : int , a_ : int , a_ : int = 2 , a_ : int = 2 , **a_ : str ):
super().__init__(**a_ )
lowerCAmelCase_ : Optional[int] = TFRegNetXLayer if config.layer_type == "x" else TFRegNetYLayer
lowerCAmelCase_ : Union[str, Any] = [
# downsampling is done in the first layer with stride of 2
layer(a_ , a_ , a_ , stride=a_ , name="layers.0" ),
*[layer(a_ , a_ , a_ , name=f'''layers.{i+1}''' ) for i in range(depth - 1 )],
]
def lowerCamelCase ( self : int , a_ : List[Any] ):
for layer_module in self.layers:
lowerCAmelCase_ : Optional[Any] = layer_module(a_ )
return hidden_state
class __lowerCamelCase ( tf.keras.layers.Layer ):
'''simple docstring'''
def __init__( self : List[str] , a_ : RegNetConfig , **a_ : Optional[Any] ):
super().__init__(**a_ )
lowerCAmelCase_ : Optional[int] = []
# based on `downsample_in_first_stage`, the first layer of the first stage may or may not downsample the input
self.stages.append(
TFRegNetStage(
a_ , config.embedding_size , config.hidden_sizes[0] , stride=2 if config.downsample_in_first_stage else 1 , depth=config.depths[0] , name="stages.0" , ) )
lowerCAmelCase_ : List[str] = zip(config.hidden_sizes , config.hidden_sizes[1:] )
for i, ((in_channels, out_channels), depth) in enumerate(zip(a_ , config.depths[1:] ) ):
self.stages.append(TFRegNetStage(a_ , a_ , a_ , depth=a_ , name=f'''stages.{i+1}''' ) )
def lowerCamelCase ( self : Tuple , a_ : tf.Tensor , a_ : bool = False , a_ : bool = True ):
lowerCAmelCase_ : Union[str, Any] = () if output_hidden_states else None
for stage_module in self.stages:
if output_hidden_states:
lowerCAmelCase_ : str = hidden_states + (hidden_state,)
lowerCAmelCase_ : List[Any] = stage_module(a_ )
if output_hidden_states:
lowerCAmelCase_ : List[Any] = hidden_states + (hidden_state,)
if not return_dict:
return tuple(v for v in [hidden_state, hidden_states] if v is not None )
return TFBaseModelOutputWithNoAttention(last_hidden_state=a_ , hidden_states=a_ )
@keras_serializable
class __lowerCamelCase ( tf.keras.layers.Layer ):
'''simple docstring'''
a_ : Optional[int] = RegNetConfig
def __init__( self : List[Any] , a_ : List[Any] , **a_ : Optional[Any] ):
super().__init__(**a_ )
lowerCAmelCase_ : int = config
lowerCAmelCase_ : Dict = TFRegNetEmbeddings(a_ , name="embedder" )
lowerCAmelCase_ : List[Any] = TFRegNetEncoder(a_ , name="encoder" )
lowerCAmelCase_ : Optional[Any] = tf.keras.layers.GlobalAveragePoolingaD(keepdims=a_ , name="pooler" )
@unpack_inputs
def lowerCamelCase ( self : List[str] , a_ : tf.Tensor , a_ : Optional[bool] = None , a_ : Optional[bool] = None , a_ : bool = False , ):
lowerCAmelCase_ : Any = (
output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
)
lowerCAmelCase_ : str = return_dict if return_dict is not None else self.config.use_return_dict
lowerCAmelCase_ : List[str] = self.embedder(a_ , training=a_ )
lowerCAmelCase_ : Union[str, Any] = self.encoder(
a_ , output_hidden_states=a_ , return_dict=a_ , training=a_ )
lowerCAmelCase_ : str = encoder_outputs[0]
lowerCAmelCase_ : Any = self.pooler(a_ )
        # Change to the NCHW output format to have uniformity in the modules
lowerCAmelCase_ : str = tf.transpose(a_ , perm=(0, 3, 1, 2) )
lowerCAmelCase_ : int = tf.transpose(a_ , perm=(0, 3, 1, 2) )
# Change the other hidden state outputs to NCHW as well
if output_hidden_states:
lowerCAmelCase_ : Optional[Any] = tuple([tf.transpose(a_ , perm=(0, 3, 1, 2) ) for h in encoder_outputs[1]] )
if not return_dict:
return (last_hidden_state, pooled_output) + encoder_outputs[1:]
return TFBaseModelOutputWithPoolingAndNoAttention(
last_hidden_state=a_ , pooler_output=a_ , hidden_states=hidden_states if output_hidden_states else encoder_outputs.hidden_states , )
class __lowerCamelCase ( A__ ):
'''simple docstring'''
a_ : str = RegNetConfig
a_ : List[str] = """regnet"""
a_ : Union[str, Any] = """pixel_values"""
@property
def lowerCamelCase ( self : Dict ):
return {"pixel_values": tf.TensorSpec(shape=(None, self.config.num_channels, 2_24, 2_24) , dtype=tf.floataa )}
lowercase__ = r"""
Parameters:
This model is a Tensorflow
[tf.keras.layers.Layer](https://www.tensorflow.org/api_docs/python/tf/keras/layers/Layer) sub-class. Use it as a
regular Tensorflow Module and refer to the Tensorflow documentation for all matter related to general usage and
behavior.
config ([`RegNetConfig`]): Model configuration class with all the parameters of the model.
Initializing with a config file does not load the weights associated with the model, only the
configuration. Check out the [`~TFPreTrainedModel.from_pretrained`] method to load the model weights.
"""
lowercase__ = r"""
Args:
pixel_values (`tf.Tensor` of shape `(batch_size, num_channels, height, width)`):
Pixel values. Pixel values can be obtained using [`AutoImageProcessor`]. See
            [`ConvNextImageProcessor.__call__`] for details.
output_hidden_states (`bool`, *optional*):
Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for
more detail.
return_dict (`bool`, *optional*):
Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
"""
@add_start_docstrings(
"""The bare RegNet model outputting raw features without any specific head on top.""" , A__ , )
class __lowerCamelCase ( A__ ):
'''simple docstring'''
def __init__( self : List[Any] , a_ : RegNetConfig , *a_ : str , **a_ : int ):
super().__init__(a_ , *a_ , **a_ )
lowerCAmelCase_ : Optional[Any] = TFRegNetMainLayer(a_ , name="regnet" )
@unpack_inputs
@add_start_docstrings_to_model_forward(a_ )
@add_code_sample_docstrings(
checkpoint=_CHECKPOINT_FOR_DOC , output_type=a_ , config_class=_CONFIG_FOR_DOC , modality="vision" , expected_output=_EXPECTED_OUTPUT_SHAPE , )
def lowerCamelCase ( self : Dict , a_ : tf.Tensor , a_ : Optional[bool] = None , a_ : Optional[bool] = None , a_ : int=False , ):
lowerCAmelCase_ : List[str] = (
output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
)
lowerCAmelCase_ : Any = return_dict if return_dict is not None else self.config.use_return_dict
lowerCAmelCase_ : str = self.regnet(
pixel_values=a_ , output_hidden_states=a_ , return_dict=a_ , training=a_ , )
if not return_dict:
return (outputs[0],) + outputs[1:]
return TFBaseModelOutputWithPoolingAndNoAttention(
last_hidden_state=outputs.last_hidden_state , pooler_output=outputs.pooler_output , hidden_states=outputs.hidden_states , )
@add_start_docstrings(
"""
RegNet Model with an image classification head on top (a linear layer on top of the pooled features), e.g. for
ImageNet.
""" , A__ , )
class __lowerCamelCase ( A__ , A__ ):
'''simple docstring'''
def __init__( self : Any , a_ : RegNetConfig , *a_ : str , **a_ : Union[str, Any] ):
super().__init__(a_ , *a_ , **a_ )
lowerCAmelCase_ : List[str] = config.num_labels
lowerCAmelCase_ : Dict = TFRegNetMainLayer(a_ , name="regnet" )
# classification head
lowerCAmelCase_ : Optional[int] = [
tf.keras.layers.Flatten(),
tf.keras.layers.Dense(config.num_labels , name="classifier.1" ) if config.num_labels > 0 else tf.identity,
]
@unpack_inputs
@add_start_docstrings_to_model_forward(a_ )
@add_code_sample_docstrings(
checkpoint=_IMAGE_CLASS_CHECKPOINT , output_type=a_ , config_class=_CONFIG_FOR_DOC , expected_output=_IMAGE_CLASS_EXPECTED_OUTPUT , )
def lowerCamelCase ( self : List[str] , a_ : tf.Tensor = None , a_ : tf.Tensor = None , a_ : bool = None , a_ : bool = None , a_ : List[str]=False , ):
lowerCAmelCase_ : Dict = (
output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
)
lowerCAmelCase_ : Union[str, Any] = return_dict if return_dict is not None else self.config.use_return_dict
lowerCAmelCase_ : Tuple = self.regnet(
a_ , output_hidden_states=a_ , return_dict=a_ , training=a_ )
lowerCAmelCase_ : Dict = outputs.pooler_output if return_dict else outputs[1]
lowerCAmelCase_ : List[Any] = self.classifier[0](a_ )
lowerCAmelCase_ : Union[str, Any] = self.classifier[1](a_ )
lowerCAmelCase_ : Any = None if labels is None else self.hf_compute_loss(labels=a_ , logits=a_ )
if not return_dict:
lowerCAmelCase_ : Union[str, Any] = (logits,) + outputs[2:]
return ((loss,) + output) if loss is not None else output
return TFSequenceClassifierOutput(loss=a_ , logits=a_ , hidden_states=outputs.hidden_states )
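# Added usage sketch (not part of the original file; the checkpoint name comes from the
# docstring constants above, and the helper name is illustrative):
def _example_regnet_inference(image):
    import tensorflow as tf
    from transformers import AutoImageProcessor, TFRegNetForImageClassification

    processor = AutoImageProcessor.from_pretrained("facebook/regnet-y-040")
    model = TFRegNetForImageClassification.from_pretrained("facebook/regnet-y-040")
    inputs = processor(images=image, return_tensors="tf")
    logits = model(**inputs).logits  # shape (1, 1000) over the ImageNet classes
    return model.config.id2label[int(tf.argmax(logits, axis=-1)[0])]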
| 241 |
"""simple docstring"""
import importlib.util
import os
import platform
from argparse import ArgumentParser
import huggingface_hub
from .. import __version__ as version
from ..utils import (
is_accelerate_available,
is_flax_available,
is_safetensors_available,
is_tf_available,
is_torch_available,
)
from . import BaseTransformersCLICommand
def info_command_factory(_):
    return EnvironmentCommand()


def download_command_factory(args):
    return EnvironmentCommand(args.accelerate_config_file)
class EnvironmentCommand(BaseTransformersCLICommand):
    @staticmethod
    def register_subcommand(parser: ArgumentParser):
        download_parser = parser.add_parser("env")
        download_parser.set_defaults(func=info_command_factory)
        download_parser.add_argument(
            "--accelerate-config_file",
            default=None,
            help="The accelerate config file to use for the default values in the launching script.",
        )
        download_parser.set_defaults(func=download_command_factory)
    def __init__(self, accelerate_config_file, *args) -> None:
        self._accelerate_config_file = accelerate_config_file
    def run(self):
        safetensors_version = "not installed"
        if is_safetensors_available():
            import safetensors

            safetensors_version = safetensors.__version__
        elif importlib.util.find_spec("safetensors") is not None:
            import safetensors

            safetensors_version = f"{safetensors.__version__} but is ignored because of PyTorch version too old."

        accelerate_version = "not installed"
        accelerate_config = accelerate_config_str = "not found"
        if is_accelerate_available():
            import accelerate
            from accelerate.commands.config import default_config_file, load_config_from_file

            accelerate_version = accelerate.__version__
            # Get the default from the config file.
            if self._accelerate_config_file is not None or os.path.isfile(default_config_file):
                accelerate_config = load_config_from_file(self._accelerate_config_file).to_dict()

            accelerate_config_str = (
                "\n".join([f"\t- {prop}: {val}" for prop, val in accelerate_config.items()])
                if isinstance(accelerate_config, dict)
                else f"\t{accelerate_config}"
            )

        pt_version = "not installed"
        pt_cuda_available = "NA"
        if is_torch_available():
            import torch

            pt_version = torch.__version__
            pt_cuda_available = torch.cuda.is_available()

        tf_version = "not installed"
        tf_cuda_available = "NA"
        if is_tf_available():
            import tensorflow as tf

            tf_version = tf.__version__
            try:
                # deprecated in v2.1
                tf_cuda_available = tf.test.is_gpu_available()
            except AttributeError:
                # returns list of devices, convert to bool
                tf_cuda_available = bool(tf.config.list_physical_devices("GPU"))

        flax_version = "not installed"
        jax_version = "not installed"
        jaxlib_version = "not installed"
        jax_backend = "NA"
        if is_flax_available():
            import flax
            import jax
            import jaxlib

            flax_version = flax.__version__
            jax_version = jax.__version__
            jaxlib_version = jaxlib.__version__
            jax_backend = jax.lib.xla_bridge.get_backend().platform

        info = {
            "`transformers` version": version,
            "Platform": platform.platform(),
            "Python version": platform.python_version(),
            "Huggingface_hub version": huggingface_hub.__version__,
            "Safetensors version": f"{safetensors_version}",
            "Accelerate version": f"{accelerate_version}",
            "Accelerate config": f"{accelerate_config_str}",
            "PyTorch version (GPU?)": f"{pt_version} ({pt_cuda_available})",
            "Tensorflow version (GPU?)": f"{tf_version} ({tf_cuda_available})",
            "Flax version (CPU?/GPU?/TPU?)": f"{flax_version} ({jax_backend})",
            "Jax version": f"{jax_version}",
            "JaxLib version": f"{jaxlib_version}",
            "Using GPU in script?": "<fill in>",
            "Using distributed or parallel set-up in script?": "<fill in>",
        }

        print("\nCopy-and-paste the text below in your GitHub issue and FILL OUT the two last points.\n")
        print(self.format_dict(info))

        return info
    @staticmethod
    def format_dict(d):
        return "\n".join([f"- {prop}: {val}" for prop, val in d.items()]) + "\n"
| 241 | 1 |
'''simple docstring'''
import warnings
from typing import Dict
import numpy as np
from ..utils import ExplicitEnum, add_end_docstrings, is_tf_available, is_torch_available
from .base import PIPELINE_INIT_ARGS, GenericTensor, Pipeline
if is_tf_available():
from ..models.auto.modeling_tf_auto import TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING
if is_torch_available():
from ..models.auto.modeling_auto import MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING
def sigmoid(_outputs):
    return 1.0 / (1.0 + np.exp(-_outputs))


def softmax(_outputs):
    maxes = np.max(_outputs, axis=-1, keepdims=True)
    shifted_exp = np.exp(_outputs - maxes)
    return shifted_exp / shifted_exp.sum(axis=-1, keepdims=True)
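# Added note: subtracting the row-wise max makes the softmax shift-invariant and
# numerically safe, e.g. softmax(np.array([[1000.0, 1000.0]])) evaluates to [[0.5, 0.5]]
# instead of overflowing inside np.exp.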
class ClassificationFunction(ExplicitEnum):
    SIGMOID = "sigmoid"
    SOFTMAX = "softmax"
    NONE = "none"
@add_end_docstrings(
    PIPELINE_INIT_ARGS,
    r"""
return_all_scores (`bool`, *optional*, defaults to `False`):
Whether to return all prediction scores or just the one of the predicted class.
function_to_apply (`str`, *optional*, defaults to `\"default\"`):
The function to apply to the model outputs in order to retrieve the scores. Accepts four different values:
- `\"default\"`: if the model has a single label, will apply the sigmoid function on the output. If the model
has several labels, will apply the softmax function on the output.
- `\"sigmoid\"`: Applies the sigmoid function on the output.
- `\"softmax\"`: Applies the softmax function on the output.
- `\"none\"`: Does not apply any function on the output.
""" , )
class TextClassificationPipeline(Pipeline):
    return_all_scores = False
    function_to_apply = ClassificationFunction.NONE
def __init__( self,**__SCREAMING_SNAKE_CASE ):
'''simple docstring'''
super().__init__(**__SCREAMING_SNAKE_CASE )
self.check_model_type(
TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING
if self.framework == """tf"""
else MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING )
    def _sanitize_parameters(self, return_all_scores=None, function_to_apply=None, top_k="", **tokenizer_kwargs):
        preprocess_params = tokenizer_kwargs

        postprocess_params = {}
        if hasattr(self.model.config, "return_all_scores") and return_all_scores is None:
            return_all_scores = self.model.config.return_all_scores

        if isinstance(top_k, int) or top_k is None:
            postprocess_params["top_k"] = top_k
            postprocess_params["_legacy"] = False
        elif return_all_scores is not None:
            warnings.warn(
                "`return_all_scores` is now deprecated, if you want a similar functionality use `top_k=None` instead"
                " of `return_all_scores=True` or `top_k=1` instead of `return_all_scores=False`.",
                UserWarning,
            )
            if return_all_scores:
                postprocess_params["top_k"] = None
            else:
                postprocess_params["top_k"] = 1

        if isinstance(function_to_apply, str):
            function_to_apply = ClassificationFunction[function_to_apply.upper()]

        if function_to_apply is not None:
            postprocess_params["function_to_apply"] = function_to_apply
        return preprocess_params, {}, postprocess_params
    def __call__(self, *args, **kwargs):
        result = super().__call__(*args, **kwargs)
        # TODO try and retrieve it in a nicer way from _sanitize_parameters.
        _legacy = "top_k" not in kwargs
        if isinstance(args[0], str) and _legacy:
            # This pipeline is odd, and returns a list when a single item is run
            return [result]
        else:
            return result
    def preprocess(self, inputs, **tokenizer_kwargs) -> Dict[str, GenericTensor]:
        return_tensors = self.framework
        if isinstance(inputs, dict):
            return self.tokenizer(**inputs, return_tensors=return_tensors, **tokenizer_kwargs)
        elif isinstance(inputs, list) and len(inputs) == 1 and isinstance(inputs[0], list) and len(inputs[0]) == 2:
            # It used to be valid to use a list of list of list for text pairs, keeping this path for BC
            return self.tokenizer(
                text=inputs[0][0], text_pair=inputs[0][1], return_tensors=return_tensors, **tokenizer_kwargs
            )
        elif isinstance(inputs, list):
            # This is likely an invalid usage of the pipeline attempting to pass text pairs.
            raise ValueError(
                "The pipeline received invalid inputs, if you are trying to send text pairs, you can try to send a"
                ' dictionary `{"text": "My text", "text_pair": "My pair"}` in order to send a text pair.'
            )
        return self.tokenizer(inputs, return_tensors=return_tensors, **tokenizer_kwargs)
    def _forward(self, model_inputs):
        return self.model(**model_inputs)
    def postprocess(self, model_outputs, function_to_apply=None, top_k=1, _legacy=True):
        if function_to_apply is None:
            if self.model.config.problem_type == "multi_label_classification" or self.model.config.num_labels == 1:
                function_to_apply = ClassificationFunction.SIGMOID
            elif self.model.config.problem_type == "single_label_classification" or self.model.config.num_labels > 1:
                function_to_apply = ClassificationFunction.SOFTMAX
            elif hasattr(self.model.config, "function_to_apply") and function_to_apply is None:
                function_to_apply = self.model.config.function_to_apply
            else:
                function_to_apply = ClassificationFunction.NONE

        outputs = model_outputs["logits"][0]
        outputs = outputs.numpy()

        if function_to_apply == ClassificationFunction.SIGMOID:
            scores = sigmoid(outputs)
        elif function_to_apply == ClassificationFunction.SOFTMAX:
            scores = softmax(outputs)
        elif function_to_apply == ClassificationFunction.NONE:
            scores = outputs
        else:
            raise ValueError(f"Unrecognized `function_to_apply` argument: {function_to_apply}")

        if top_k == 1 and _legacy:
            return {"label": self.model.config.id2label[scores.argmax().item()], "score": scores.max().item()}

        dict_scores = [
            {"label": self.model.config.id2label[i], "score": score.item()} for i, score in enumerate(scores)
        ]
        if not _legacy:
            dict_scores.sort(key=lambda x: x["score"], reverse=True)
            if top_k is not None:
                dict_scores = dict_scores[:top_k]
        return dict_scores
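# Added usage sketch (the default checkpoint resolved by `pipeline` is illustrative; any
# sequence-classification model works):
#
#     from transformers import pipeline
#
#     classifier = pipeline("text-classification")
#     classifier("I love this!")              # -> [{"label": "POSITIVE", "score": ...}]
#     classifier("I love this!", top_k=None)  # -> scores for every label, sorted by score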
| 46 |
'''simple docstring'''
import sys
def matrix_chain_order(array):
    """O(n^3) dynamic program for matrix chain multiplication: matrix[a][b] holds the
    minimum number of scalar multiplications needed to compute A_a ... A_b, and
    sol[a][b] records the split point that achieves it."""
    n = len(array)
    matrix = [[0 for x in range(n)] for x in range(n)]
    sol = [[0 for x in range(n)] for x in range(n)]

    for chain_length in range(2, n):
        for a in range(1, n - chain_length + 1):
            b = a + chain_length - 1

            matrix[a][b] = sys.maxsize
            for c in range(a, b):
                cost = (
                    matrix[a][c] + matrix[c + 1][b] + array[a - 1] * array[c] * array[b]
                )
                if cost < matrix[a][b]:
                    matrix[a][b] = cost
                    sol[a][b] = c
    return matrix, sol
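# Added worked example: for array = [30, 35, 15, 5, 10, 20, 25] (matrices A1..A6 where
# A_i has shape array[i-1] x array[i]) the recurrence above yields a minimum of 15125
# scalar multiplications, the classic CLRS result, with optimal parenthesization
# ((A1(A2A3))((A4A5)A6)).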
def print_optimal_solution(optimal_solution, i, j):
    """Print the optimal parenthesization of A_i ... A_j."""
    if i == j:
        print("A" + str(i), end=" ")
    else:
        print("(", end=" ")
        print_optimal_solution(optimal_solution, i, optimal_solution[i][j])
        print_optimal_solution(optimal_solution, optimal_solution[i][j] + 1, j)
        print(")", end=" ")
def main():
    array = [30, 35, 15, 5, 10, 20, 25]
    n = len(array)
    # Size of matrix created from above array will be
    # 30*35 35*15 15*5 5*10 10*20 20*25
    matrix, optimal_solution = matrix_chain_order(array)

    print("No. of Operation required: " + str(matrix[1][n - 1]))
    print_optimal_solution(optimal_solution, 1, n - 1)
if __name__ == "__main__":
main()
| 46 | 1 |
"""simple docstring"""
import copy
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
from ..auto import CONFIG_MAPPING
UpperCAmelCase : List[str] = logging.get_logger(__name__)
UpperCAmelCase : int = {
'microsoft/conditional-detr-resnet-50': (
'https://huggingface.co/microsoft/conditional-detr-resnet-50/resolve/main/config.json'
),
}
class lowerCamelCase__ ( A ):
"""simple docstring"""
__a = """conditional_detr"""
__a = ["""past_key_values"""]
__a = {
"""hidden_size""": """d_model""",
"""num_attention_heads""": """encoder_attention_heads""",
}
def __init__( self : Tuple , UpperCamelCase : int=True , UpperCamelCase : Union[str, Any]=None , UpperCamelCase : int=3 , UpperCamelCase : List[str]=300 , UpperCamelCase : Optional[Any]=6 , UpperCamelCase : int=2_048 , UpperCamelCase : List[Any]=8 , UpperCamelCase : Any=6 , UpperCamelCase : str=2_048 , UpperCamelCase : Optional[int]=8 , UpperCamelCase : Union[str, Any]=0.0 , UpperCamelCase : Dict=0.0 , UpperCamelCase : Any=True , UpperCamelCase : Optional[int]="relu" , UpperCamelCase : Any=256 , UpperCamelCase : List[Any]=0.1 , UpperCamelCase : str=0.0 , UpperCamelCase : List[Any]=0.0 , UpperCamelCase : Any=0.02 , UpperCamelCase : List[str]=1.0 , UpperCamelCase : List[str]=False , UpperCamelCase : List[str]="sine" , UpperCamelCase : Any="resnet50" , UpperCamelCase : Union[str, Any]=True , UpperCamelCase : Optional[int]=False , UpperCamelCase : Optional[Any]=2 , UpperCamelCase : Tuple=5 , UpperCamelCase : Union[str, Any]=2 , UpperCamelCase : List[Any]=1 , UpperCamelCase : Any=1 , UpperCamelCase : int=2 , UpperCamelCase : List[Any]=5 , UpperCamelCase : Optional[int]=2 , UpperCamelCase : Dict=0.25 , **UpperCamelCase : Union[str, Any] , ):
'''simple docstring'''
if backbone_config is not None and use_timm_backbone:
raise ValueError("""You can't specify both `backbone_config` and `use_timm_backbone`.""" )
if not use_timm_backbone:
if backbone_config is None:
logger.info("""`backbone_config` is `None`. Initializing the config with the default `ResNet` backbone.""" )
__UpperCAmelCase : List[Any] = CONFIG_MAPPING["""resnet"""](out_features=["""stage4"""] )
elif isinstance(UpperCamelCase , UpperCamelCase ):
__UpperCAmelCase : Any = backbone_config.get("""model_type""" )
__UpperCAmelCase : List[Any] = CONFIG_MAPPING[backbone_model_type]
__UpperCAmelCase : Dict = config_class.from_dict(UpperCamelCase )
__UpperCAmelCase : Union[str, Any] = use_timm_backbone
__UpperCAmelCase : Tuple = backbone_config
__UpperCAmelCase : int = num_channels
__UpperCAmelCase : Optional[int] = num_queries
__UpperCAmelCase : List[Any] = d_model
__UpperCAmelCase : List[str] = encoder_ffn_dim
__UpperCAmelCase : Dict = encoder_layers
__UpperCAmelCase : int = encoder_attention_heads
__UpperCAmelCase : Union[str, Any] = decoder_ffn_dim
__UpperCAmelCase : Any = decoder_layers
__UpperCAmelCase : int = decoder_attention_heads
__UpperCAmelCase : Optional[Any] = dropout
__UpperCAmelCase : List[str] = attention_dropout
__UpperCAmelCase : Optional[Any] = activation_dropout
__UpperCAmelCase : Optional[Any] = activation_function
__UpperCAmelCase : Any = init_std
__UpperCAmelCase : Tuple = init_xavier_std
__UpperCAmelCase : str = encoder_layerdrop
__UpperCAmelCase : str = decoder_layerdrop
__UpperCAmelCase : int = encoder_layers
__UpperCAmelCase : List[Any] = auxiliary_loss
__UpperCAmelCase : Dict = position_embedding_type
__UpperCAmelCase : List[str] = backbone
__UpperCAmelCase : Any = use_pretrained_backbone
__UpperCAmelCase : List[Any] = dilation
# Hungarian matcher
__UpperCAmelCase : List[str] = class_cost
__UpperCAmelCase : Any = bbox_cost
__UpperCAmelCase : str = giou_cost
# Loss coefficients
__UpperCAmelCase : str = mask_loss_coefficient
__UpperCAmelCase : List[str] = dice_loss_coefficient
__UpperCAmelCase : List[Any] = cls_loss_coefficient
__UpperCAmelCase : List[Any] = bbox_loss_coefficient
__UpperCAmelCase : str = giou_loss_coefficient
__UpperCAmelCase : int = focal_alpha
super().__init__(is_encoder_decoder=UpperCamelCase , **UpperCamelCase )
@property
def lowerCamelCase__ ( self : Dict ):
'''simple docstring'''
return self.encoder_attention_heads
@property
def lowerCamelCase__ ( self : Any ):
'''simple docstring'''
return self.d_model
def lowerCamelCase__ ( self : Any ):
'''simple docstring'''
__UpperCAmelCase : List[str] = copy.deepcopy(self.__dict__ )
if self.backbone_config is not None:
__UpperCAmelCase : Any = self.backbone_config.to_dict()
__UpperCAmelCase : Optional[int] = self.__class__.model_type
return output
class lowerCamelCase__ ( A ):
"""simple docstring"""
__a = version.parse("""1.11""" )
@property
def lowerCamelCase__ ( self : Tuple ):
'''simple docstring'''
return OrderedDict(
[
("""pixel_values""", {0: """batch""", 1: """num_channels""", 2: """height""", 3: """width"""}),
("""pixel_mask""", {0: """batch"""}),
] )
@property
def lowerCamelCase__ ( self : str ):
'''simple docstring'''
return 1e-5
@property
def lowerCamelCase__ ( self : Optional[int] ):
'''simple docstring'''
return 12
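# Added sketch (class names assume the upstream conditional DETR module, since the
# obfuscated names above shadow each other):
#
#     config = ConditionalDetrConfig()
#     onnx_config = ConditionalDetrOnnxConfig(config)
#     onnx_config.inputs               # pixel_values / pixel_mask with a dynamic batch axis
#     onnx_config.atol_for_validation  # 1e-5, used when validating the exported ONNX graph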
| 115 |
"""simple docstring"""
import numpy as np
SQUARE = [
['a', 'b', 'c', 'd', 'e'],
['f', 'g', 'h', 'i', 'k'],
['l', 'm', 'n', 'o', 'p'],
['q', 'r', 's', 't', 'u'],
['v', 'w', 'x', 'y', 'z'],
]
class BifidCipher:
    def __init__(self) -> None:
        self.SQUARE = np.array(SQUARE)

    def letter_to_numbers(self, letter: str) -> np.ndarray:
        # Row/column of `letter` in the Polybius square, 1-indexed.
        index1, index2 = np.where(letter == self.SQUARE)
        indexes = np.concatenate([index1 + 1, index2 + 1])
        return indexes

    def numbers_to_letter(self, index1: int, index2: int) -> str:
        return self.SQUARE[index1 - 1, index2 - 1]

    def encode(self, message: str) -> str:
        message = message.lower()
        message = message.replace(" ", "")
        message = message.replace("j", "i")

        first_step = np.empty((2, len(message)))
        for letter_index in range(len(message)):
            numbers = self.letter_to_numbers(message[letter_index])
            first_step[0, letter_index] = numbers[0]
            first_step[1, letter_index] = numbers[1]

        # Flatten row-major ([rows..., cols...]) and re-read as consecutive pairs.
        second_step = first_step.reshape(2 * len(message))
        encoded_message = ""
        for numbers_index in range(len(message)):
            index1 = int(second_step[numbers_index * 2])
            index2 = int(second_step[(numbers_index * 2) + 1])
            letter = self.numbers_to_letter(index1, index2)
            encoded_message = encoded_message + letter

        return encoded_message

    def decode(self, message: str) -> str:
        message = message.lower()
        message = message.replace(" ", "")
        first_step = np.empty(2 * len(message))
        for letter_index in range(len(message)):
            numbers = self.letter_to_numbers(message[letter_index])
            first_step[letter_index * 2] = numbers[0]
            first_step[letter_index * 2 + 1] = numbers[1]

        # Undo the encode-time flattening: rows in the first half, columns in the second.
        second_step = first_step.reshape((2, len(message)))
        decoded_message = ""
        for numbers_index in range(len(message)):
            index1 = int(second_step[0, numbers_index])
            index2 = int(second_step[1, numbers_index])
            letter = self.numbers_to_letter(index1, index2)
            decoded_message = decoded_message + letter

        return decoded_message
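# Added round-trip check for the repaired class above:
#
#     cipher = BifidCipher()
#     assert cipher.decode(cipher.encode("test message")) == "testmessage"
#
# (encoding lowercases, strips spaces, and folds "j" into "i", so decoding returns the
# normalized form of the input).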
| 115 | 1 |
import argparse
import torch
from torch import nn
from transformers import M2M100Config, M2M100ForConditionalGeneration
def remove_ignore_keys_(state_dict):
    """Drop fairseq-only keys that have no equivalent in the HF model."""
    ignore_keys = [
        "encoder.version",
        "decoder.version",
        "model.encoder.version",
        "model.decoder.version",
        "decoder.output_projection.weight",
        "_float_tensor",
        "encoder.embed_positions._float_tensor",
        "decoder.embed_positions._float_tensor",
    ]
    for k in ignore_keys:
        state_dict.pop(k, None)


def make_linear_from_emb(emb):
    # Tie the LM head to the token embeddings: nn.Linear applies x @ W.T, so sharing the
    # (vocab_size, emb_size) embedding weight yields logits over the vocabulary.
    vocab_size, emb_size = emb.weight.shape
    lin_layer = nn.Linear(vocab_size, emb_size, bias=False)
    lin_layer.weight.data = emb.weight.data
    return lin_layer


def convert_fairseq_m2m100_checkpoint_from_disk(checkpoint_path):
    m2m_100 = torch.load(checkpoint_path, map_location="cpu")
    args = m2m_100["args"] or m2m_100["cfg"]["model"]
    state_dict = m2m_100["model"]
    remove_ignore_keys_(state_dict)
    vocab_size = state_dict["encoder.embed_tokens.weight"].shape[0]

    config = M2M100Config(
        vocab_size=vocab_size,
        max_position_embeddings=1024,
        encoder_layers=args.encoder_layers,
        decoder_layers=args.decoder_layers,
        encoder_attention_heads=args.encoder_attention_heads,
        decoder_attention_heads=args.decoder_attention_heads,
        encoder_ffn_dim=args.encoder_ffn_embed_dim,
        decoder_ffn_dim=args.decoder_ffn_embed_dim,
        d_model=args.encoder_embed_dim,
        encoder_layerdrop=args.encoder_layerdrop,
        decoder_layerdrop=args.decoder_layerdrop,
        dropout=args.dropout,
        attention_dropout=args.attention_dropout,
        activation_dropout=args.activation_dropout,
        activation_function="relu",
    )

    state_dict["shared.weight"] = state_dict["decoder.embed_tokens.weight"]
    model = M2M100ForConditionalGeneration(config)
    model.model.load_state_dict(state_dict, strict=False)
    model.lm_head = make_linear_from_emb(model.model.shared)
    return model
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument("fairseq_path", type=str, help="path to a model.pt on local filesystem.")
    parser.add_argument("pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.")
    args = parser.parse_args()
    model = convert_fairseq_m2m100_checkpoint_from_disk(args.fairseq_path)
    model.save_pretrained(args.pytorch_dump_folder_path)
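# Added usage sketch (the script filename and paths are placeholders):
#
#     python convert_m2m100_original_checkpoint_to_pytorch.py /path/to/model.pt ./m2m100-hf
#
# The resulting folder can then be loaded with M2M100ForConditionalGeneration.from_pretrained.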
| 355 |
import copy
from dataclasses import dataclass, field
from typing import ClassVar, Dict
from ..features import ClassLabel, Features, Value
from .base import TaskTemplate
@dataclass(frozen=True)
class TextClassification(TaskTemplate):
    # `task` is not a ClassVar since we want it to be part of the `asdict` output for JSON serialization
    task: str = field(default="text-classification", metadata={"include_in_asdict_even_if_is_default": True})
    input_schema: ClassVar[Features] = Features({"text": Value("string")})
    label_schema: ClassVar[Features] = Features({"labels": ClassLabel})
    text_column: str = "text"
    label_column: str = "labels"
    def align_with_features(self, features):
        if self.label_column not in features:
            raise ValueError(f"Column {self.label_column} is not present in features.")
        if not isinstance(features[self.label_column], ClassLabel):
            raise ValueError(f"Column {self.label_column} is not a ClassLabel.")
        # The dataclass is frozen, so build a copy and patch its __dict__ directly.
        task_template = copy.deepcopy(self)
        label_schema = self.label_schema.copy()
        label_schema["labels"] = features[self.label_column]
        task_template.__dict__["label_schema"] = label_schema
        return task_template
    @property
    def column_mapping(self) -> Dict[str, str]:
        return {
            self.text_column: "text",
            self.label_column: "labels",
        }
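# Added sketch: aligning the template with a dataset's features swaps the generic
# ClassLabel for the concrete one, e.g.
#
#     features = Features({"text": Value("string"), "labels": ClassLabel(names=["neg", "pos"])})
#     task = TextClassification().align_with_features(features)
#     task.label_schema["labels"].names  # -> ["neg", "pos"]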
| 124 | 0 |
'''simple docstring'''
from __future__ import annotations
from collections import deque
from collections.abc import Sequence
from dataclasses import dataclass
from typing import Any
@dataclass
class Node:
    data: int
    left: Node | None = None
    right: Node | None = None
def make_tree() -> Node:
    r"""
    Build the example tree:
            1
           / \
          2   3
         / \
        4   5
    """
    tree = Node(1)
    tree.left = Node(2)
    tree.right = Node(3)
    tree.left.left = Node(4)
    tree.left.right = Node(5)
    return tree
def preorder(root: Node | None) -> list[int]:
    """Pre-order traversal: root, then left subtree, then right subtree."""
    return [root.data, *preorder(root.left), *preorder(root.right)] if root else []


def postorder(root: Node | None) -> list[int]:
    """Post-order traversal: left subtree, right subtree, then root."""
    return postorder(root.left) + postorder(root.right) + [root.data] if root else []


def inorder(root: Node | None) -> list[int]:
    """In-order traversal: left subtree, root, then right subtree."""
    return [*inorder(root.left), root.data, *inorder(root.right)] if root else []


def height(root: Node | None) -> int:
    """Number of nodes on the longest root-to-leaf path."""
    return (max(height(root.left), height(root.right)) + 1) if root else 0
def level_order(root: Node | None) -> list[Any]:
    """Breadth-first traversal using a queue."""
    output: list[Any] = []
    if root is None:
        return output
    process_queue = deque([root])
    while process_queue:
        node = process_queue.popleft()
        output.append(node.data)
        if node.left:
            process_queue.append(node.left)
        if node.right:
            process_queue.append(node.right)
    return output
def get_nodes_from_left_to_right(root: Node | None, level: int) -> list[Any]:
    """Collect the values of a single level, scanning left to right."""
    output: list[Any] = []

    def populate_output(root: Node | None, level: int) -> None:
        if not root:
            return
        if level == 1:
            output.append(root.data)
        elif level > 1:
            populate_output(root.left, level - 1)
            populate_output(root.right, level - 1)

    populate_output(root, level)
    return output
def get_nodes_from_right_to_left(root: Node | None, level: int) -> list[Any]:
    """Collect the values of a single level, scanning right to left."""
    output: list[Any] = []

    def populate_output(root: Node | None, level: int) -> None:
        if root is None:
            return
        if level == 1:
            output.append(root.data)
        elif level > 1:
            populate_output(root.right, level - 1)
            populate_output(root.left, level - 1)

    populate_output(root, level)
    return output
def zigzag(root: Node | None) -> list[list[Any]]:
    """Zigzag (spiral) level-order traversal, alternating scan direction per level."""
    if root is None:
        return []
    output: list[list[Any]] = []
    flag = 0
    height_tree = height(root)
    for h in range(1, height_tree + 1):
        if not flag:
            output.append(get_nodes_from_left_to_right(root, h))
            flag = 1
        else:
            output.append(get_nodes_from_right_to_left(root, h))
            flag = 0
    return output
def main() -> None:  # Main function for testing.
    root = make_tree()
    print(f"In-order Traversal: {inorder(root)}")
    print(f"Pre-order Traversal: {preorder(root)}")
    print(f"Post-order Traversal: {postorder(root)}", "\n")
    print(f"Height of Tree: {height(root)}", "\n")
    print("Complete Level Order Traversal: ")
    print(level_order(root), "\n")
    print("Level-wise order Traversal: ")
    for level in range(1, height(root) + 1):
        print(f"Level {level}:", get_nodes_from_left_to_right(root, level=level))
    print("\nZigZag order Traversal: ")
    print(zigzag(root))
if __name__ == "__main__":
import doctest
doctest.testmod()
main()
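# Added note: for the 5-node tree built by make_tree() the traversals above print
#   In-order:    [4, 2, 5, 1, 3]
#   Pre-order:   [1, 2, 4, 5, 3]
#   Post-order:  [4, 5, 2, 3, 1]
#   Level order: [1, 2, 3, 4, 5]
#   ZigZag:      [[1], [3, 2], [4, 5]]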
| 3 |
'''simple docstring'''
import logging
import random
import ray
from transformers import RagConfig, RagRetriever, RagTokenizer
from transformers.models.rag.retrieval_rag import CustomHFIndex
a__ : Optional[Any] = logging.getLogger(__name__)
class UpperCAmelCase__ :
def __init__( self ) -> Any:
__UpperCamelCase = False
def __lowerCamelCase ( self , lowercase , lowercase , lowercase , lowercase ) -> str:
if not self.initialized:
__UpperCamelCase = RagRetriever(
lowercase , question_encoder_tokenizer=lowercase , generator_tokenizer=lowercase , index=lowercase , init_retrieval=lowercase , )
__UpperCamelCase = True
def __lowerCamelCase ( self ) -> Optional[Any]:
self.retriever.index.init_index()
def __lowerCamelCase ( self , lowercase , lowercase ) -> Dict:
__UpperCamelCase , __UpperCamelCase = self.retriever._main_retrieve(lowercase , lowercase )
return doc_ids, retrieved_doc_embeds
class UpperCAmelCase__ ( UpperCAmelCase_):
def __init__( self , lowercase , lowercase , lowercase , lowercase , lowercase=None ) -> List[Any]:
if index is not None and index.is_initialized() and len(lowercase ) > 0:
raise ValueError(
"""When using Ray for distributed fine-tuning, """
"""you'll need to provide the paths instead, """
"""as the dataset and the index are loaded """
"""separately. More info in examples/rag/use_own_knowledge_dataset.py """ )
super().__init__(
lowercase , question_encoder_tokenizer=lowercase , generator_tokenizer=lowercase , index=lowercase , init_retrieval=lowercase , )
__UpperCamelCase = retrieval_workers
if len(self.retrieval_workers ) > 0:
ray.get(
[
worker.create_rag_retriever.remote(lowercase , lowercase , lowercase , lowercase )
for worker in self.retrieval_workers
] )
def __lowerCamelCase ( self ) -> Dict:
logger.info("""initializing retrieval""" )
if len(self.retrieval_workers ) > 0:
ray.get([worker.init_retrieval.remote() for worker in self.retrieval_workers] )
else:
# Non-distributed training. Load index into this same process.
self.index.init_index()
def __lowerCamelCase ( self , lowercase , lowercase ) -> List[str]:
if len(self.retrieval_workers ) > 0:
# Select a random retrieval actor.
__UpperCamelCase = self.retrieval_workers[random.randint(0 , len(self.retrieval_workers ) - 1 )]
__UpperCamelCase , __UpperCamelCase = ray.get(random_worker.retrieve.remote(lowercase , lowercase ) )
else:
__UpperCamelCase , __UpperCamelCase = self._main_retrieve(lowercase , lowercase )
return retrieved_doc_embeds, doc_ids, self.index.get_doc_dicts(lowercase )
@classmethod
def __lowerCamelCase ( cls , lowercase , lowercase=None , **lowercase ) -> Any:
return super(lowercase , cls ).get_tokenizers(lowercase , lowercase , **lowercase )
@classmethod
def __lowerCamelCase ( cls , lowercase , lowercase , lowercase=None , **lowercase ) -> int:
__UpperCamelCase = kwargs.pop("""config""" , lowercase ) or RagConfig.from_pretrained(lowercase , **lowercase )
__UpperCamelCase = RagTokenizer.from_pretrained(lowercase , config=lowercase )
__UpperCamelCase = rag_tokenizer.question_encoder
__UpperCamelCase = rag_tokenizer.generator
if indexed_dataset is not None:
__UpperCamelCase = """custom"""
__UpperCamelCase = CustomHFIndex(config.retrieval_vector_size , lowercase )
else:
__UpperCamelCase = cls._build_index(lowercase )
return cls(
lowercase , question_encoder_tokenizer=lowercase , generator_tokenizer=lowercase , retrieval_workers=lowercase , index=lowercase , )
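# Added sketch (assuming the upstream names RagRayDistributedRetriever / RayRetriever;
# the obfuscated class names above shadow each other). A typical setup spawns the
# retrieval actors before building the retriever:
#
#     workers = [ray.remote(RayRetriever).remote() for _ in range(4)]
#     retriever = RagRayDistributedRetriever.from_pretrained(
#         "facebook/rag-token-nq", retrieval_workers=workers
#     )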
| 349 | 0 |
'''simple docstring'''
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import (
MobileViTConfig,
MobileViTForImageClassification,
MobileViTForSemanticSegmentation,
MobileViTImageProcessor,
)
from transformers.utils import logging
logging.set_verbosity_info()
__lowerCAmelCase = logging.get_logger(__name__)
def __SCREAMING_SNAKE_CASE ( _SCREAMING_SNAKE_CASE ):
_snake_case = MobileViTConfig()
# size of the architecture
if "mobilevit_s" in mobilevit_name:
_snake_case = [144, 192, 240]
_snake_case = [16, 32, 64, 96, 128, 160, 640]
elif "mobilevit_xs" in mobilevit_name:
_snake_case = [96, 120, 144]
_snake_case = [16, 32, 48, 64, 80, 96, 384]
elif "mobilevit_xxs" in mobilevit_name:
_snake_case = [64, 80, 96]
_snake_case = [16, 16, 24, 48, 64, 80, 320]
_snake_case = 0.05
_snake_case = 2.0
if mobilevit_name.startswith("""deeplabv3_""" ):
_snake_case = 512
_snake_case = 16
_snake_case = 21
_snake_case = """pascal-voc-id2label.json"""
else:
_snake_case = 1000
_snake_case = """imagenet-1k-id2label.json"""
_snake_case = """huggingface/label-files"""
_snake_case = json.load(open(hf_hub_download(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , repo_type="""dataset""" ) , """r""" ) )
_snake_case = {int(_SCREAMING_SNAKE_CASE ): v for k, v in idalabel.items()}
_snake_case = idalabel
_snake_case = {v: k for k, v in idalabel.items()}
return config
def __SCREAMING_SNAKE_CASE ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE=False ):
for i in range(1 , 6 ):
if f"""layer_{i}.""" in name:
_snake_case = name.replace(f"""layer_{i}.""" , f"""encoder.layer.{i - 1}.""" )
if "conv_1." in name:
_snake_case = name.replace("""conv_1.""" , """conv_stem.""" )
if ".block." in name:
_snake_case = name.replace(""".block.""" , """.""" )
if "exp_1x1" in name:
_snake_case = name.replace("""exp_1x1""" , """expand_1x1""" )
if "red_1x1" in name:
_snake_case = name.replace("""red_1x1""" , """reduce_1x1""" )
if ".local_rep.conv_3x3." in name:
_snake_case = name.replace(""".local_rep.conv_3x3.""" , """.conv_kxk.""" )
if ".local_rep.conv_1x1." in name:
_snake_case = name.replace(""".local_rep.conv_1x1.""" , """.conv_1x1.""" )
if ".norm." in name:
_snake_case = name.replace(""".norm.""" , """.normalization.""" )
if ".conv." in name:
_snake_case = name.replace(""".conv.""" , """.convolution.""" )
if ".conv_proj." in name:
_snake_case = name.replace(""".conv_proj.""" , """.conv_projection.""" )
for i in range(0 , 2 ):
for j in range(0 , 4 ):
if f""".{i}.{j}.""" in name:
_snake_case = name.replace(f""".{i}.{j}.""" , f""".{i}.layer.{j}.""" )
for i in range(2 , 6 ):
for j in range(0 , 4 ):
if f""".{i}.{j}.""" in name:
_snake_case = name.replace(f""".{i}.{j}.""" , f""".{i}.""" )
if "expand_1x1" in name:
_snake_case = name.replace("""expand_1x1""" , """downsampling_layer.expand_1x1""" )
if "conv_3x3" in name:
_snake_case = name.replace("""conv_3x3""" , """downsampling_layer.conv_3x3""" )
if "reduce_1x1" in name:
_snake_case = name.replace("""reduce_1x1""" , """downsampling_layer.reduce_1x1""" )
for i in range(2 , 5 ):
if f""".global_rep.{i}.weight""" in name:
_snake_case = name.replace(f""".global_rep.{i}.weight""" , """.layernorm.weight""" )
if f""".global_rep.{i}.bias""" in name:
_snake_case = name.replace(f""".global_rep.{i}.bias""" , """.layernorm.bias""" )
if ".global_rep." in name:
_snake_case = name.replace(""".global_rep.""" , """.transformer.""" )
if ".pre_norm_mha.0." in name:
_snake_case = name.replace(""".pre_norm_mha.0.""" , """.layernorm_before.""" )
if ".pre_norm_mha.1.out_proj." in name:
_snake_case = name.replace(""".pre_norm_mha.1.out_proj.""" , """.attention.output.dense.""" )
if ".pre_norm_ffn.0." in name:
_snake_case = name.replace(""".pre_norm_ffn.0.""" , """.layernorm_after.""" )
if ".pre_norm_ffn.1." in name:
_snake_case = name.replace(""".pre_norm_ffn.1.""" , """.intermediate.dense.""" )
if ".pre_norm_ffn.4." in name:
_snake_case = name.replace(""".pre_norm_ffn.4.""" , """.output.dense.""" )
if ".transformer." in name:
_snake_case = name.replace(""".transformer.""" , """.transformer.layer.""" )
if ".aspp_layer." in name:
_snake_case = name.replace(""".aspp_layer.""" , """.""" )
if ".aspp_pool." in name:
_snake_case = name.replace(""".aspp_pool.""" , """.""" )
if "seg_head." in name:
_snake_case = name.replace("""seg_head.""" , """segmentation_head.""" )
if "segmentation_head.classifier.classifier." in name:
_snake_case = name.replace("""segmentation_head.classifier.classifier.""" , """segmentation_head.classifier.""" )
if "classifier.fc." in name:
_snake_case = name.replace("""classifier.fc.""" , """classifier.""" )
elif (not base_model) and ("segmentation_head." not in name):
_snake_case = """mobilevit.""" + name
return name
def __SCREAMING_SNAKE_CASE ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE=False ):
if base_model:
_snake_case = """"""
else:
_snake_case = """mobilevit."""
for key in orig_state_dict.copy().keys():
_snake_case = orig_state_dict.pop(_SCREAMING_SNAKE_CASE )
if key[:8] == "encoder.":
_snake_case = key[8:]
if "qkv" in key:
_snake_case = key.split(""".""" )
_snake_case = int(key_split[0][6:] ) - 1
_snake_case = int(key_split[3] )
_snake_case = model.get_submodule(f"""{model_prefix}encoder.layer.{layer_num}""" )
_snake_case = layer.transformer.layer[transformer_num].attention.attention.all_head_size
_snake_case = (
f"""{model_prefix}encoder.layer.{layer_num}.transformer.layer.{transformer_num}.attention.attention."""
)
if "weight" in key:
_snake_case = val[:dim, :]
_snake_case = val[dim : dim * 2, :]
_snake_case = val[-dim:, :]
else:
_snake_case = val[:dim]
_snake_case = val[dim : dim * 2]
_snake_case = val[-dim:]
else:
_snake_case = val
return orig_state_dict
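# Added note: the qkv branch above splits a fused attention projection of shape
# (3*dim, dim): rows [:dim] become the query weights, rows [dim:2*dim] the keys, and
# rows [-dim:] the values (and likewise for the bias), matching the separate q/k/v
# linear layers in the HF MobileViT implementation.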
def __SCREAMING_SNAKE_CASE ( ):
_snake_case = """http://images.cocodataset.org/val2017/000000039769.jpg"""
_snake_case = Image.open(requests.get(_SCREAMING_SNAKE_CASE , stream=_SCREAMING_SNAKE_CASE ).raw )
return im
@torch.no_grad()
def __SCREAMING_SNAKE_CASE ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE=False ):
_snake_case = get_mobilevit_config(_SCREAMING_SNAKE_CASE )
# load original state_dict
_snake_case = torch.load(_SCREAMING_SNAKE_CASE , map_location="""cpu""" )
# load 🤗 model
if mobilevit_name.startswith("""deeplabv3_""" ):
_snake_case = MobileViTForSemanticSegmentation(_SCREAMING_SNAKE_CASE ).eval()
else:
_snake_case = MobileViTForImageClassification(_SCREAMING_SNAKE_CASE ).eval()
_snake_case = convert_state_dict(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
model.load_state_dict(_SCREAMING_SNAKE_CASE )
# Check outputs on an image, prepared by MobileViTImageProcessor
_snake_case = MobileViTImageProcessor(crop_size=config.image_size , size=config.image_size + 32 )
_snake_case = image_processor(images=prepare_img() , return_tensors="""pt""" )
_snake_case = model(**_SCREAMING_SNAKE_CASE )
_snake_case = outputs.logits
if mobilevit_name.startswith("""deeplabv3_""" ):
assert logits.shape == (1, 21, 32, 32)
if mobilevit_name == "deeplabv3_mobilevit_s":
_snake_case = torch.tensor(
[
[[6.2065, 6.1292, 6.2070], [6.1079, 6.1254, 6.1747], [6.0042, 6.1071, 6.1034]],
[[-6.9253, -6.8653, -7.0398], [-7.3218, -7.3983, -7.3670], [-7.1961, -7.2482, -7.1569]],
[[-4.4723, -4.4348, -4.3769], [-5.3629, -5.4632, -5.4598], [-5.1587, -5.3402, -5.5059]],
] )
elif mobilevit_name == "deeplabv3_mobilevit_xs":
_snake_case = torch.tensor(
[
[[5.4449, 5.5733, 5.6314], [5.1815, 5.3930, 5.5963], [5.1656, 5.4333, 5.4853]],
[[-9.4423, -9.7766, -9.6714], [-9.1581, -9.5720, -9.5519], [-9.1006, -9.6458, -9.5703]],
[[-7.7721, -7.3716, -7.1583], [-8.4599, -8.0624, -7.7944], [-8.4172, -7.8366, -7.5025]],
] )
elif mobilevit_name == "deeplabv3_mobilevit_xxs":
_snake_case = torch.tensor(
[
[[6.9811, 6.9743, 7.3123], [7.1777, 7.1931, 7.3938], [7.5633, 7.8050, 7.8901]],
[[-10.5536, -10.2332, -10.2924], [-10.2336, -9.8624, -9.5964], [-10.8840, -10.8158, -10.6659]],
[[-3.4938, -3.0631, -2.8620], [-3.4205, -2.8135, -2.6875], [-3.4179, -2.7945, -2.8750]],
] )
else:
raise ValueError(f"""Unknown mobilevit_name: {mobilevit_name}""" )
assert torch.allclose(logits[0, :3, :3, :3] , _SCREAMING_SNAKE_CASE , atol=1E-4 )
else:
assert logits.shape == (1, 1000)
if mobilevit_name == "mobilevit_s":
_snake_case = torch.tensor([-0.9866, 0.2392, -1.1241] )
elif mobilevit_name == "mobilevit_xs":
_snake_case = torch.tensor([-2.4761, -0.9399, -1.9587] )
elif mobilevit_name == "mobilevit_xxs":
_snake_case = torch.tensor([-1.9364, -1.2327, -0.4653] )
else:
raise ValueError(f"""Unknown mobilevit_name: {mobilevit_name}""" )
assert torch.allclose(logits[0, :3] , _SCREAMING_SNAKE_CASE , atol=1E-4 )
Path(_SCREAMING_SNAKE_CASE ).mkdir(exist_ok=_SCREAMING_SNAKE_CASE )
print(f"""Saving model {mobilevit_name} to {pytorch_dump_folder_path}""" )
model.save_pretrained(_SCREAMING_SNAKE_CASE )
print(f"""Saving image processor to {pytorch_dump_folder_path}""" )
image_processor.save_pretrained(_SCREAMING_SNAKE_CASE )
if push_to_hub:
_snake_case = {
"""mobilevit_s""": """mobilevit-small""",
"""mobilevit_xs""": """mobilevit-x-small""",
"""mobilevit_xxs""": """mobilevit-xx-small""",
"""deeplabv3_mobilevit_s""": """deeplabv3-mobilevit-small""",
"""deeplabv3_mobilevit_xs""": """deeplabv3-mobilevit-x-small""",
"""deeplabv3_mobilevit_xxs""": """deeplabv3-mobilevit-xx-small""",
}
print("""Pushing to the hub...""" )
_snake_case = model_mapping[mobilevit_name]
image_processor.push_to_hub(_SCREAMING_SNAKE_CASE , organization="""apple""" )
model.push_to_hub(_SCREAMING_SNAKE_CASE , organization="""apple""" )
if __name__ == "__main__":
__lowerCAmelCase = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--mobilevit_name',
default='mobilevit_s',
type=str,
help=(
'Name of the MobileViT model you\'d like to convert. Should be one of \'mobilevit_s\', \'mobilevit_xs\','
' \'mobilevit_xxs\', \'deeplabv3_mobilevit_s\', \'deeplabv3_mobilevit_xs\', \'deeplabv3_mobilevit_xxs\'.'
),
)
parser.add_argument(
'--checkpoint_path', required=True, type=str, help='Path to the original state dict (.pt file).'
)
parser.add_argument(
'--pytorch_dump_folder_path', required=True, type=str, help='Path to the output PyTorch model directory.'
)
parser.add_argument(
'--push_to_hub', action='store_true', help='Whether or not to push the converted model to the 🤗 hub.'
)
__lowerCAmelCase = parser.parse_args()
convert_movilevit_checkpoint(
args.mobilevit_name, args.checkpoint_path, args.pytorch_dump_folder_path, args.push_to_hub
) | 270 |
'''simple docstring'''
def longest_common_substring(text1: str, text2: str) -> str:
    """Return the longest common substring of text1 and text2 via dynamic programming:
    dp[i][j] is the length of the common suffix of text1[:i] and text2[:j]."""
    if not (isinstance(text1, str) and isinstance(text2, str)):
        raise ValueError("longest_common_substring() takes two strings for inputs")

    text1_length = len(text1)
    text2_length = len(text2)

    dp = [[0] * (text2_length + 1) for _ in range(text1_length + 1)]
    ans_index = 0
    ans_length = 0

    for i in range(1, text1_length + 1):
        for j in range(1, text2_length + 1):
            if text1[i - 1] == text2[j - 1]:
                dp[i][j] = 1 + dp[i - 1][j - 1]
                if dp[i][j] > ans_length:
                    ans_index = i
                    ans_length = dp[i][j]

    return text1[ans_index - ans_length : ans_index]
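# Added worked example: longest_common_substring("abcdef", "xabded") returns "ab";
# the DP fills dp[i][j] with common-suffix lengths, and the best (length, end index)
# pair is tracked in ans_length / ans_index as the table is scanned.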
if __name__ == "__main__":
import doctest
doctest.testmod() | 270 | 1 |
'''simple docstring'''
import gc
import unittest
import numpy as np
import torch
import torch.nn.functional as F
from transformers import (
ClapTextConfig,
ClapTextModelWithProjection,
RobertaTokenizer,
SpeechTaHifiGan,
SpeechTaHifiGanConfig,
)
from diffusers import (
AudioLDMPipeline,
AutoencoderKL,
DDIMScheduler,
LMSDiscreteScheduler,
PNDMScheduler,
UNetaDConditionModel,
)
from diffusers.utils import is_xformers_available, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism
from ..pipeline_params import TEXT_TO_AUDIO_BATCH_PARAMS, TEXT_TO_AUDIO_PARAMS
from ..test_pipelines_common import PipelineTesterMixin
enable_full_determinism()
class lowerCAmelCase_ ( __lowerCAmelCase ,unittest.TestCase ):
__lowerCamelCase : Any = AudioLDMPipeline
__lowerCamelCase : Union[str, Any] = TEXT_TO_AUDIO_PARAMS
__lowerCamelCase : Dict = TEXT_TO_AUDIO_BATCH_PARAMS
__lowerCamelCase : List[Any] = frozenset(
[
"num_inference_steps",
"num_waveforms_per_prompt",
"generator",
"latents",
"output_type",
"return_dict",
"callback",
"callback_steps",
] )
def _snake_case ( self ) -> Optional[Any]:
torch.manual_seed(0 )
_lowerCAmelCase = UNetaDConditionModel(
block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=("DownBlock2D", "CrossAttnDownBlock2D") , up_block_types=("CrossAttnUpBlock2D", "UpBlock2D") , cross_attention_dim=(32, 64) , class_embed_type="simple_projection" , projection_class_embeddings_input_dim=32 , class_embeddings_concat=lowerCamelCase_ , )
_lowerCAmelCase = DDIMScheduler(
beta_start=0.00085 , beta_end=0.012 , beta_schedule="scaled_linear" , clip_sample=lowerCamelCase_ , set_alpha_to_one=lowerCamelCase_ , )
torch.manual_seed(0 )
_lowerCAmelCase = AutoencoderKL(
block_out_channels=[32, 64] , in_channels=1 , out_channels=1 , down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"] , up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"] , latent_channels=4 , )
torch.manual_seed(0 )
_lowerCAmelCase = ClapTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1E-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1000 , projection_dim=32 , )
_lowerCAmelCase = ClapTextModelWithProjection(lowerCamelCase_ )
_lowerCAmelCase = RobertaTokenizer.from_pretrained("hf-internal-testing/tiny-random-roberta" , model_max_length=77 )
_lowerCAmelCase = SpeechTaHifiGanConfig(
model_in_dim=8 , sampling_rate=16000 , upsample_initial_channel=16 , upsample_rates=[2, 2] , upsample_kernel_sizes=[4, 4] , resblock_kernel_sizes=[3, 7] , resblock_dilation_sizes=[[1, 3, 5], [1, 3, 5]] , normalize_before=lowerCamelCase_ , )
_lowerCAmelCase = SpeechTaHifiGan(lowerCamelCase_ )
_lowerCAmelCase = {
"unet": unet,
"scheduler": scheduler,
"vae": vae,
"text_encoder": text_encoder,
"tokenizer": tokenizer,
"vocoder": vocoder,
}
return components
def _snake_case ( self , _lowerCAmelCase , _lowerCAmelCase=0 ) -> Any:
if str(lowerCamelCase_ ).startswith("mps" ):
_lowerCAmelCase = torch.manual_seed(lowerCamelCase_ )
else:
_lowerCAmelCase = torch.Generator(device=lowerCamelCase_ ).manual_seed(lowerCamelCase_ )
_lowerCAmelCase = {
"prompt": "A hammer hitting a wooden surface",
"generator": generator,
"num_inference_steps": 2,
"guidance_scale": 6.0,
}
return inputs
def _snake_case ( self ) -> int:
_lowerCAmelCase = "cpu" # ensure determinism for the device-dependent torch.Generator
_lowerCAmelCase = self.get_dummy_components()
_lowerCAmelCase = AudioLDMPipeline(**lowerCamelCase_ )
_lowerCAmelCase = audioldm_pipe.to(lowerCamelCase_ )
audioldm_pipe.set_progress_bar_config(disable=lowerCamelCase_ )
_lowerCAmelCase = self.get_dummy_inputs(lowerCamelCase_ )
_lowerCAmelCase = audioldm_pipe(**lowerCamelCase_ )
_lowerCAmelCase = output.audios[0]
assert audio.ndim == 1
assert len(lowerCamelCase_ ) == 256
_lowerCAmelCase = audio[:10]
_lowerCAmelCase = np.array(
[-0.0050, 0.0050, -0.0060, 0.0033, -0.0026, 0.0033, -0.0027, 0.0033, -0.0028, 0.0033] )
assert np.abs(audio_slice - expected_slice ).max() < 1E-2
    def test_audioldm_prompt_embeds(self):
        components = self.get_dummy_components()
        audioldm_pipe = AudioLDMPipeline(**components)
        audioldm_pipe = audioldm_pipe.to(torch_device)
        audioldm_pipe = audioldm_pipe.to(torch_device)
        audioldm_pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs(torch_device)
        inputs["prompt"] = 3 * [inputs["prompt"]]

        # forward
        output = audioldm_pipe(**inputs)
        audio_1 = output.audios[0]

        inputs = self.get_dummy_inputs(torch_device)
        prompt = 3 * [inputs.pop("prompt")]

        text_inputs = audioldm_pipe.tokenizer(
            prompt, padding="max_length", max_length=audioldm_pipe.tokenizer.model_max_length,
            truncation=True, return_tensors="pt",
        )
        text_inputs = text_inputs["input_ids"].to(torch_device)

        prompt_embeds = audioldm_pipe.text_encoder(
            text_inputs,
        )
        prompt_embeds = prompt_embeds.text_embeds
        # additional L_2 normalization over each hidden-state
        prompt_embeds = F.normalize(prompt_embeds, dim=-1)

        inputs["prompt_embeds"] = prompt_embeds

        # forward
        output = audioldm_pipe(**inputs)
        audio_2 = output.audios[0]

        assert np.abs(audio_1 - audio_2).max() < 1e-2
    def test_audioldm_negative_prompt_embeds(self):
        components = self.get_dummy_components()
        audioldm_pipe = AudioLDMPipeline(**components)
        audioldm_pipe = audioldm_pipe.to(torch_device)
        audioldm_pipe = audioldm_pipe.to(torch_device)
        audioldm_pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs(torch_device)
        negative_prompt = 3 * ["this is a negative prompt"]
        inputs["negative_prompt"] = negative_prompt
        inputs["prompt"] = 3 * [inputs["prompt"]]

        # forward
        output = audioldm_pipe(**inputs)
        audio_1 = output.audios[0]

        inputs = self.get_dummy_inputs(torch_device)
        prompt = 3 * [inputs.pop("prompt")]

        embeds = []
        for p in [prompt, negative_prompt]:
            text_inputs = audioldm_pipe.tokenizer(
                p, padding="max_length", max_length=audioldm_pipe.tokenizer.model_max_length,
                truncation=True, return_tensors="pt",
            )
            text_inputs = text_inputs["input_ids"].to(torch_device)

            text_embeds = audioldm_pipe.text_encoder(
                text_inputs,
            )
            text_embeds = text_embeds.text_embeds
            # additional L_2 normalization over each hidden-state
            text_embeds = F.normalize(text_embeds, dim=-1)

            embeds.append(text_embeds)

        inputs["prompt_embeds"], inputs["negative_prompt_embeds"] = embeds

        # forward
        output = audioldm_pipe(**inputs)
        audio_2 = output.audios[0]

        assert np.abs(audio_1 - audio_2).max() < 1e-2
    def test_audioldm_negative_prompt(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components()
        components["scheduler"] = PNDMScheduler(skip_prk_steps=True)
        audioldm_pipe = AudioLDMPipeline(**components)
        audioldm_pipe = audioldm_pipe.to(device)
        audioldm_pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs(device)
        negative_prompt = "egg cracking"
        output = audioldm_pipe(**inputs, negative_prompt=negative_prompt)
        audio = output.audios[0]

        assert audio.ndim == 1
        assert len(audio) == 256

        audio_slice = audio[:10]
        expected_slice = np.array(
            [-0.0051, 0.0050, -0.0060, 0.0034, -0.0026, 0.0033, -0.0027, 0.0033, -0.0028, 0.0032]
        )

        assert np.abs(audio_slice - expected_slice).max() < 1e-2
    def test_audioldm_num_waveforms_per_prompt(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components()
        components["scheduler"] = PNDMScheduler(skip_prk_steps=True)
        audioldm_pipe = AudioLDMPipeline(**components)
        audioldm_pipe = audioldm_pipe.to(device)
        audioldm_pipe.set_progress_bar_config(disable=None)

        prompt = "A hammer hitting a wooden surface"

        # test num_waveforms_per_prompt=1 (default)
        audios = audioldm_pipe(prompt, num_inference_steps=2).audios

        assert audios.shape == (1, 256)

        # test num_waveforms_per_prompt=1 (default) for batch of prompts
        batch_size = 2
        audios = audioldm_pipe([prompt] * batch_size, num_inference_steps=2).audios

        assert audios.shape == (batch_size, 256)

        # test num_waveforms_per_prompt for single prompt
        num_waveforms_per_prompt = 2
        audios = audioldm_pipe(prompt, num_inference_steps=2, num_waveforms_per_prompt=num_waveforms_per_prompt).audios

        assert audios.shape == (num_waveforms_per_prompt, 256)

        # test num_waveforms_per_prompt for batch of prompts
        batch_size = 2
        audios = audioldm_pipe(
            [prompt] * batch_size, num_inference_steps=2, num_waveforms_per_prompt=num_waveforms_per_prompt
        ).audios

        assert audios.shape == (batch_size * num_waveforms_per_prompt, 256)
    def test_audioldm_audio_length_in_s(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components()
        audioldm_pipe = AudioLDMPipeline(**components)
        audioldm_pipe = audioldm_pipe.to(device)
        audioldm_pipe.set_progress_bar_config(disable=None)
        vocoder_sampling_rate = audioldm_pipe.vocoder.config.sampling_rate

        inputs = self.get_dummy_inputs(device)
        output = audioldm_pipe(audio_length_in_s=0.016, **inputs)
        audio = output.audios[0]

        assert audio.ndim == 1
        assert len(audio) / vocoder_sampling_rate == 0.016

        output = audioldm_pipe(audio_length_in_s=0.032, **inputs)
        audio = output.audios[0]

        assert audio.ndim == 1
        assert len(audio) / vocoder_sampling_rate == 0.032
    def test_audioldm_vocoder_model_in_dim(self):
        components = self.get_dummy_components()
        audioldm_pipe = AudioLDMPipeline(**components)
        audioldm_pipe = audioldm_pipe.to(torch_device)
        audioldm_pipe.set_progress_bar_config(disable=None)

        prompt = ["hey"]

        output = audioldm_pipe(prompt, num_inference_steps=1)
        audio_shape = output.audios.shape
        assert audio_shape == (1, 256)

        config = audioldm_pipe.vocoder.config
        config.model_in_dim *= 2
        audioldm_pipe.vocoder = SpeechT5HifiGan(config).to(torch_device)
        output = audioldm_pipe(prompt, num_inference_steps=1)
        audio_shape = output.audios.shape
        # waveform shape is unchanged, we just have 2x the number of mel channels in the spectrogram
        assert audio_shape == (1, 256)
    def test_attention_slicing_forward_pass(self):
        self._test_attention_slicing_forward_pass(test_mean_pixel_difference=False)

    def test_inference_batch_single_identical(self):
        self._test_inference_batch_single_identical(test_mean_pixel_difference=False)

    @unittest.skipIf(
        torch_device != "cuda" or not is_xformers_available(),
        reason="XFormers attention is only available with CUDA and `xformers` installed",
    )
    def test_xformers_attention_forwardGenerator_pass(self):
        self._test_xformers_attention_forwardGenerator_pass(test_mean_pixel_difference=False)
@slow
class AudioLDMPipelineSlowTests(unittest.TestCase):
    def tearDown(self):
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def get_inputs(self, device, generator_device="cpu", dtype=torch.float32, seed=0):
        generator = torch.Generator(device=generator_device).manual_seed(seed)
        latents = np.random.RandomState(seed).standard_normal((1, 8, 128, 16))
        latents = torch.from_numpy(latents).to(device=device, dtype=dtype)
        inputs = {
            "prompt": "A hammer hitting a wooden surface",
            "latents": latents,
            "generator": generator,
            "num_inference_steps": 3,
            "guidance_scale": 2.5,
        }
        return inputs
    def test_audioldm(self):
        audioldm_pipe = AudioLDMPipeline.from_pretrained("cvssp/audioldm")
        audioldm_pipe = audioldm_pipe.to(torch_device)
        audioldm_pipe.set_progress_bar_config(disable=None)

        inputs = self.get_inputs(torch_device)
        inputs["num_inference_steps"] = 25
        audio = audioldm_pipe(**inputs).audios[0]

        assert audio.ndim == 1
        assert len(audio) == 81920

        audio_slice = audio[77230:77240]
        expected_slice = np.array(
            [-0.4884, -0.4607, 0.0023, 0.5007, 0.5896, 0.5151, 0.3813, -0.0208, -0.3687, -0.4315]
        )
        max_diff = np.abs(expected_slice - audio_slice).max()
        assert max_diff < 1e-2
    def test_audioldm_lms(self):
        audioldm_pipe = AudioLDMPipeline.from_pretrained("cvssp/audioldm")
        audioldm_pipe.scheduler = LMSDiscreteScheduler.from_config(audioldm_pipe.scheduler.config)
        audioldm_pipe = audioldm_pipe.to(torch_device)
        audioldm_pipe.set_progress_bar_config(disable=None)

        inputs = self.get_inputs(torch_device)
        audio = audioldm_pipe(**inputs).audios[0]

        assert audio.ndim == 1
        assert len(audio) == 81920

        audio_slice = audio[27780:27790]
        expected_slice = np.array([-0.2131, -0.0873, -0.0124, -0.0189, 0.0569, 0.1373, 0.1883, 0.2886, 0.3297, 0.2212])
        max_diff = np.abs(expected_slice - audio_slice).max()
        assert max_diff < 3e-2
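
# A minimal end-to-end usage sketch for the pipeline exercised above. The
# "cvssp/audioldm" checkpoint and the 16 kHz output rate come from the slow tests;
# the step count, clip length and the scipy dependency are illustrative assumptions.
if __name__ == "__main__":
    import scipy.io.wavfile

    pipe = AudioLDMPipeline.from_pretrained("cvssp/audioldm")
    result = pipe("A hammer hitting a wooden surface", num_inference_steps=10, audio_length_in_s=5.12)
    # the pipeline returns mono float waveforms sampled at the vocoder's 16 kHz rate
    scipy.io.wavfile.write("hammer.wav", rate=16000, data=result.audios[0])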
import json
import sys
import tempfile
import unittest
from pathlib import Path

import transformers
from transformers import (
    CONFIG_MAPPING,
    FEATURE_EXTRACTOR_MAPPING,
    AutoConfig,
    AutoFeatureExtractor,
    Wav2Vec2Config,
    Wav2Vec2FeatureExtractor,
)
from transformers.testing_utils import DUMMY_UNKNOWN_IDENTIFIER, get_tests_dir

sys.path.append(str(Path(__file__).parent.parent.parent.parent / "utils"))

from test_module.custom_configuration import CustomConfig  # noqa E402
from test_module.custom_feature_extraction import CustomFeatureExtractor  # noqa E402

SAMPLE_FEATURE_EXTRACTION_CONFIG_DIR = get_tests_dir("fixtures")
SAMPLE_FEATURE_EXTRACTION_CONFIG = get_tests_dir("fixtures/dummy_feature_extractor_config.json")
SAMPLE_CONFIG = get_tests_dir("fixtures/dummy-config.json")
class AutoFeatureExtractorTest(unittest.TestCase):
    def setUp(self):
        transformers.dynamic_module_utils.TIME_OUT_REMOTE_CODE = 0

    def test_feature_extractor_from_model_shortcut(self):
        config = AutoFeatureExtractor.from_pretrained("facebook/wav2vec2-base-960h")
        self.assertIsInstance(config, Wav2Vec2FeatureExtractor)

    def test_feature_extractor_from_local_directory_from_key(self):
        config = AutoFeatureExtractor.from_pretrained(SAMPLE_FEATURE_EXTRACTION_CONFIG_DIR)
        self.assertIsInstance(config, Wav2Vec2FeatureExtractor)
    def test_feature_extractor_from_local_directory_from_config(self):
        with tempfile.TemporaryDirectory() as tmpdirname:
            model_config = Wav2Vec2Config()

            # remove feature_extractor_type to make sure config.json alone is enough to load feature processor locally
            config_dict = AutoFeatureExtractor.from_pretrained(SAMPLE_FEATURE_EXTRACTION_CONFIG_DIR).to_dict()
            config_dict.pop("feature_extractor_type")
            config = Wav2Vec2FeatureExtractor(**config_dict)

            # save in new folder
            model_config.save_pretrained(tmpdirname)
            config.save_pretrained(tmpdirname)

            config = AutoFeatureExtractor.from_pretrained(tmpdirname)

            # make sure private variable is not incorrectly saved
            dict_as_saved = json.loads(config.to_json_string())
            self.assertTrue("_processor_class" not in dict_as_saved)

            self.assertIsInstance(config, Wav2Vec2FeatureExtractor)
    def test_feature_extractor_from_local_file(self):
        config = AutoFeatureExtractor.from_pretrained(SAMPLE_FEATURE_EXTRACTION_CONFIG)
        self.assertIsInstance(config, Wav2Vec2FeatureExtractor)

    def test_repo_not_found(self):
        with self.assertRaisesRegex(
            EnvironmentError, "bert-base is not a local folder and is not a valid model identifier"
        ):
            _ = AutoFeatureExtractor.from_pretrained("bert-base")

    def test_revision_not_found(self):
        with self.assertRaisesRegex(
            EnvironmentError, r"aaaaaa is not a valid git identifier \(branch name, tag name or commit id\)"
        ):
            _ = AutoFeatureExtractor.from_pretrained(DUMMY_UNKNOWN_IDENTIFIER, revision="aaaaaa")

    def test_feature_extractor_not_found(self):
        with self.assertRaisesRegex(
            EnvironmentError,
            "hf-internal-testing/config-no-model does not appear to have a file named preprocessor_config.json.",
        ):
            _ = AutoFeatureExtractor.from_pretrained("hf-internal-testing/config-no-model")
    def test_from_pretrained_dynamic_feature_extractor(self):
        # If remote code is not set, we will time out when asking whether to load the model.
        with self.assertRaises(ValueError):
            feature_extractor = AutoFeatureExtractor.from_pretrained(
                "hf-internal-testing/test_dynamic_feature_extractor"
            )
        # If remote code is disabled, we can't load this config.
        with self.assertRaises(ValueError):
            feature_extractor = AutoFeatureExtractor.from_pretrained(
                "hf-internal-testing/test_dynamic_feature_extractor", trust_remote_code=False
            )

        feature_extractor = AutoFeatureExtractor.from_pretrained(
            "hf-internal-testing/test_dynamic_feature_extractor", trust_remote_code=True
        )
        self.assertEqual(feature_extractor.__class__.__name__, "NewFeatureExtractor")

        # Test feature extractor can be reloaded.
        with tempfile.TemporaryDirectory() as tmp_dir:
            feature_extractor.save_pretrained(tmp_dir)
            reloaded_feature_extractor = AutoFeatureExtractor.from_pretrained(tmp_dir, trust_remote_code=True)
        self.assertEqual(reloaded_feature_extractor.__class__.__name__, "NewFeatureExtractor")
    def test_new_feature_extractor_registration(self):
        try:
            AutoConfig.register("custom", CustomConfig)
            AutoFeatureExtractor.register(CustomConfig, CustomFeatureExtractor)
            # Trying to register something existing in the Transformers library will raise an error
            with self.assertRaises(ValueError):
                AutoFeatureExtractor.register(Wav2Vec2Config, Wav2Vec2FeatureExtractor)

            # Now that the config is registered, it can be used as any other config with the auto-API
            feature_extractor = CustomFeatureExtractor.from_pretrained(SAMPLE_FEATURE_EXTRACTION_CONFIG_DIR)
            with tempfile.TemporaryDirectory() as tmp_dir:
                feature_extractor.save_pretrained(tmp_dir)
                new_feature_extractor = AutoFeatureExtractor.from_pretrained(tmp_dir)
                self.assertIsInstance(new_feature_extractor, CustomFeatureExtractor)
        finally:
            if "custom" in CONFIG_MAPPING._extra_content:
                del CONFIG_MAPPING._extra_content["custom"]
            if CustomConfig in FEATURE_EXTRACTOR_MAPPING._extra_content:
                del FEATURE_EXTRACTOR_MAPPING._extra_content[CustomConfig]
    def test_from_pretrained_dynamic_feature_extractor_conflict(self):
        class NewFeatureExtractor(Wav2Vec2FeatureExtractor):
            is_local = True

        try:
            AutoConfig.register("custom", CustomConfig)
            AutoFeatureExtractor.register(CustomConfig, NewFeatureExtractor)
            # If remote code is not set, the default is to use local
            feature_extractor = AutoFeatureExtractor.from_pretrained(
                "hf-internal-testing/test_dynamic_feature_extractor"
            )
            self.assertEqual(feature_extractor.__class__.__name__, "NewFeatureExtractor")
            self.assertTrue(feature_extractor.is_local)

            # If remote code is disabled, we load the local one.
            feature_extractor = AutoFeatureExtractor.from_pretrained(
                "hf-internal-testing/test_dynamic_feature_extractor", trust_remote_code=False
            )
            self.assertEqual(feature_extractor.__class__.__name__, "NewFeatureExtractor")
            self.assertTrue(feature_extractor.is_local)

            # If remote is enabled, we load from the Hub
            feature_extractor = AutoFeatureExtractor.from_pretrained(
                "hf-internal-testing/test_dynamic_feature_extractor", trust_remote_code=True
            )
            self.assertEqual(feature_extractor.__class__.__name__, "NewFeatureExtractor")
            self.assertTrue(not hasattr(feature_extractor, "is_local"))
        finally:
            if "custom" in CONFIG_MAPPING._extra_content:
                del CONFIG_MAPPING._extra_content["custom"]
            if CustomConfig in FEATURE_EXTRACTOR_MAPPING._extra_content:
                del FEATURE_EXTRACTOR_MAPPING._extra_content[CustomConfig]
from math import ceil


def solution(n: int = 1001) -> int:
    """Return the sum of the numbers on the diagonals of an n by n number spiral."""
    total = 1
    for i in range(1, int(ceil(n / 2.0))):
        odd = 2 * i + 1
        even = 2 * i
        total = total + 4 * odd**2 - 6 * even

    return total


if __name__ == "__main__":
    import sys

    if len(sys.argv) == 1:
        print(solution())
    else:
        try:
            n = int(sys.argv[1])
            print(solution(n))
        except ValueError:
            print("Invalid entry - please enter a number")
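
# A small worked example of the corner formula: the ring with odd side length s = 2*i + 1
# has corners s**2, s**2 - (s - 1), s**2 - 2*(s - 1) and s**2 - 3*(s - 1), summing to
# 4*s**2 - 6*(s - 1) -- exactly the `4 * odd**2 - 6 * even` term accumulated above.
# For a 5x5 spiral the diagonal values are 1 | 3, 5, 7, 9 | 13, 17, 21, 25 = 101:
assert solution(5) == 101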
from collections.abc import Sequence


def evaluate_poly(poly: Sequence[float], x: float) -> float:
    """Evaluate a polynomial given its coefficients in order of increasing degree."""
    return sum(c * (x**i) for i, c in enumerate(poly))


def horner(poly: Sequence[float], x: float) -> float:
    """Evaluate the same polynomial with Horner's method."""
    result = 0.0
    for coeff in reversed(poly):
        result = result * x + coeff
    return result


if __name__ == "__main__":
    poly = (0.0, 0.0, 5.0, 9.3, 7.0)
    x = 10.0
    print(evaluate_poly(poly, x))
    print(horner(poly, x))
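
# Horner's rule rewrites c0 + c1*x + ... + cn*x**n as (...(cn*x + cn-1)*x + ...)*x + c0,
# using one multiply-add per coefficient instead of explicit powers. For the tuple above,
# both evaluators compute 5*10**2 + 9.3*10**3 + 7*10**4 = 79_800 (compared with isclose,
# since the two evaluation orders can differ in the last ulp):
import math

assert math.isclose(evaluate_poly((0.0, 0.0, 5.0, 9.3, 7.0), 10.0), horner((0.0, 0.0, 5.0, 9.3, 7.0), 10.0))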
from __future__ import annotations


def fractional_knapsack(
    value: list[int], weight: list[int], capacity: int
) -> tuple[float, list[float]]:
    """Greedy fractional knapsack: return the best value and the fraction taken of each item."""
    index = list(range(len(value)))
    ratio = [v / w for v, w in zip(value, weight)]
    index.sort(key=lambda i: ratio[i], reverse=True)

    max_value: float = 0
    fractions: list[float] = [0] * len(value)
    for i in index:
        if weight[i] <= capacity:
            fractions[i] = 1
            max_value += value[i]
            capacity -= weight[i]
        else:
            fractions[i] = capacity / weight[i]
            max_value += value[i] * capacity / weight[i]
            break

    return max_value, fractions


if __name__ == "__main__":
    import doctest

    doctest.testmod()
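
# A hand-checkable example (the values and weights are made up for illustration): the
# items have value/weight ratios 6, 5 and 4 and are taken greedily; only 20/30 of the
# last item fits, so the total is 60 + 100 + 120 * 20/30 = 240.
assert fractional_knapsack([60, 100, 120], [10, 20, 30], 50) == (240.0, [1, 1, 2 / 3])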
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from ..models.whisper import WhisperForConditionalGeneration, WhisperProcessor
from .base import PipelineTool


class SpeechToTextTool(PipelineTool):
    default_checkpoint = "openai/whisper-base"
    description = (
        "This is a tool that transcribes an audio into text. It takes an input named `audio` and returns the "
        "transcribed text."
    )
    name = "transcriber"
    pre_processor_class = WhisperProcessor
    model_class = WhisperForConditionalGeneration

    inputs = ["audio"]
    outputs = ["text"]

    def encode(self, audio):
        return self.pre_processor(audio, return_tensors="pt").input_features

    def forward(self, inputs):
        return self.model.generate(inputs=inputs)

    def decode(self, outputs):
        return self.pre_processor.batch_decode(outputs, skip_special_tokens=True)[0]
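
# A minimal usage sketch, assuming the tool is loaded through the transformers agents
# API; `load_tool("speech-to-text")` and the 16 kHz mono float input are assumptions
# about that entry point, and the silent clip below is a stand-in for real speech:
if __name__ == "__main__":
    import numpy as np
    from transformers import load_tool

    transcriber = load_tool("speech-to-text")
    audio = np.zeros(16000, dtype=np.float32)  # one second of silence at 16 kHz
    print(transcriber(audio))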
import unicodedata
from dataclasses import dataclass
from typing import Optional, Union

import numpy as np

from transformers.data.data_collator import DataCollatorMixin
from transformers.file_utils import PaddingStrategy
from transformers.tokenization_utils_base import PreTrainedTokenizerBase


def padding_tensor(sequences, padding_value, padding_side, sequence_length):
    if isinstance(padding_value, tuple):
        out_tensor = np.full((len(sequences), sequence_length, 2), padding_value)
    else:
        out_tensor = np.full((len(sequences), sequence_length), padding_value)

    for i, tensor in enumerate(sequences):
        if padding_side == "right":
            if isinstance(padding_value, tuple):
                out_tensor[i, : len(tensor[:sequence_length]), :2] = tensor[:sequence_length]
            else:
                out_tensor[i, : len(tensor[:sequence_length])] = tensor[:sequence_length]
        else:
            if isinstance(padding_value, tuple):
                out_tensor[i, len(tensor[:sequence_length]) - 1 :, :2] = tensor[:sequence_length]
            else:
                out_tensor[i, len(tensor[:sequence_length]) - 1 :] = tensor[:sequence_length]

    return out_tensor.tolist()
def is_punctuation(char):
    cp = ord(char)
    if (cp >= 33 and cp <= 47) or (cp >= 58 and cp <= 64) or (cp >= 91 and cp <= 96) or (cp >= 123 and cp <= 126):
        return True
    cat = unicodedata.category(char)
    if cat.startswith("P"):
        return True
    return False
@dataclass
class DataCollatorForLukeTokenClassification(DataCollatorMixin):
    tokenizer: PreTrainedTokenizerBase
    padding: Union[bool, str, PaddingStrategy] = True
    max_length: Optional[int] = None
    pad_to_multiple_of: Optional[int] = None
    label_pad_token_id: int = -100
    return_tensors: str = "pt"

    def torch_call(self, features):
        import torch

        label_name = "label" if "label" in features[0].keys() else "labels"
        labels = [feature[label_name] for feature in features] if label_name in features[0].keys() else None
        batch = self.tokenizer.pad(
            features,
            padding=self.padding,
            max_length=self.max_length,
            pad_to_multiple_of=self.pad_to_multiple_of,
            # Conversion to tensors will fail if we have labels as they are not of the same length yet.
            return_tensors="pt" if labels is None else None,
        )

        if labels is None:
            return batch

        sequence_length = torch.tensor(batch["entity_ids"]).shape[1]
        padding_side = self.tokenizer.padding_side
        if padding_side == "right":
            batch[label_name] = [
                list(label) + [self.label_pad_token_id] * (sequence_length - len(label)) for label in labels
            ]
        else:
            batch[label_name] = [
                [self.label_pad_token_id] * (sequence_length - len(label)) + list(label) for label in labels
            ]

        ner_tags = [feature["ner_tags"] for feature in features]
        batch["ner_tags"] = padding_tensor(ner_tags, -1, padding_side, sequence_length)
        original_entity_spans = [feature["original_entity_spans"] for feature in features]
        batch["original_entity_spans"] = padding_tensor(original_entity_spans, (-1, -1), padding_side, sequence_length)
        batch = {k: torch.tensor(v, dtype=torch.int64) for k, v in batch.items()}

        return batch
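
# A small self-contained check of `padding_tensor` with right padding (shapes only;
# the -1 fill value mirrors how the collator pads `ner_tags` above):
if __name__ == "__main__":
    padded = padding_tensor([[1, 2], [3]], -1, "right", 4)
    assert padded == [[1, 2, -1, -1], [3, -1, -1, -1]]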
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available


_import_structure = {
    "configuration_blip_2": [
        "BLIP_2_PRETRAINED_CONFIG_ARCHIVE_MAP",
        "Blip2Config",
        "Blip2QFormerConfig",
        "Blip2VisionConfig",
    ],
    "processing_blip_2": ["Blip2Processor"],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_blip_2"] = [
        "BLIP_2_PRETRAINED_MODEL_ARCHIVE_LIST",
        "Blip2Model",
        "Blip2QFormerModel",
        "Blip2PreTrainedModel",
        "Blip2ForConditionalGeneration",
        "Blip2VisionModel",
    ]

if TYPE_CHECKING:
    from .configuration_blip_2 import (
        BLIP_2_PRETRAINED_CONFIG_ARCHIVE_MAP,
        Blip2Config,
        Blip2QFormerConfig,
        Blip2VisionConfig,
    )
    from .processing_blip_2 import Blip2Processor

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_blip_2 import (
            BLIP_2_PRETRAINED_MODEL_ARCHIVE_LIST,
            Blip2ForConditionalGeneration,
            Blip2Model,
            Blip2PreTrainedModel,
            Blip2QFormerModel,
            Blip2VisionModel,
        )
else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
import pytest

import datasets.config
from datasets.utils.info_utils import is_small_dataset


@pytest.mark.parametrize("dataset_size", [None, 400 * 2**20, 600 * 2**20])
@pytest.mark.parametrize("input_in_memory_max_size", ["default", 0, 100 * 2**20, 900 * 2**20])
def test_is_small_dataset(dataset_size, input_in_memory_max_size, monkeypatch):
    if input_in_memory_max_size != "default":
        monkeypatch.setattr(datasets.config, "IN_MEMORY_MAX_SIZE", input_in_memory_max_size)

    in_memory_max_size = datasets.config.IN_MEMORY_MAX_SIZE
    if input_in_memory_max_size == "default":
        assert in_memory_max_size == 0
    else:
        assert in_memory_max_size == input_in_memory_max_size
    if dataset_size and in_memory_max_size:
        expected = dataset_size < in_memory_max_size
    else:
        expected = False
    result = is_small_dataset(dataset_size)
    assert result == expected
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_funnel import FunnelTokenizer
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "vocab.txt", "tokenizer_file": "tokenizer.json"}

_model_names = [
    "small",
    "small-base",
    "medium",
    "medium-base",
    "intermediate",
    "intermediate-base",
    "large",
    "large-base",
    "xlarge",
    "xlarge-base",
]
PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "funnel-transformer/small": "https://huggingface.co/funnel-transformer/small/resolve/main/vocab.txt",
        "funnel-transformer/small-base": "https://huggingface.co/funnel-transformer/small-base/resolve/main/vocab.txt",
        "funnel-transformer/medium": "https://huggingface.co/funnel-transformer/medium/resolve/main/vocab.txt",
        "funnel-transformer/medium-base": "https://huggingface.co/funnel-transformer/medium-base/resolve/main/vocab.txt",
        "funnel-transformer/intermediate": "https://huggingface.co/funnel-transformer/intermediate/resolve/main/vocab.txt",
        "funnel-transformer/intermediate-base": "https://huggingface.co/funnel-transformer/intermediate-base/resolve/main/vocab.txt",
        "funnel-transformer/large": "https://huggingface.co/funnel-transformer/large/resolve/main/vocab.txt",
        "funnel-transformer/large-base": "https://huggingface.co/funnel-transformer/large-base/resolve/main/vocab.txt",
        "funnel-transformer/xlarge": "https://huggingface.co/funnel-transformer/xlarge/resolve/main/vocab.txt",
        "funnel-transformer/xlarge-base": "https://huggingface.co/funnel-transformer/xlarge-base/resolve/main/vocab.txt",
    },
    "tokenizer_file": {
        "funnel-transformer/small": "https://huggingface.co/funnel-transformer/small/resolve/main/tokenizer.json",
        "funnel-transformer/small-base": "https://huggingface.co/funnel-transformer/small-base/resolve/main/tokenizer.json",
        "funnel-transformer/medium": "https://huggingface.co/funnel-transformer/medium/resolve/main/tokenizer.json",
        "funnel-transformer/medium-base": "https://huggingface.co/funnel-transformer/medium-base/resolve/main/tokenizer.json",
        "funnel-transformer/intermediate": "https://huggingface.co/funnel-transformer/intermediate/resolve/main/tokenizer.json",
        "funnel-transformer/intermediate-base": "https://huggingface.co/funnel-transformer/intermediate-base/resolve/main/tokenizer.json",
        "funnel-transformer/large": "https://huggingface.co/funnel-transformer/large/resolve/main/tokenizer.json",
        "funnel-transformer/large-base": "https://huggingface.co/funnel-transformer/large-base/resolve/main/tokenizer.json",
        "funnel-transformer/xlarge": "https://huggingface.co/funnel-transformer/xlarge/resolve/main/tokenizer.json",
        "funnel-transformer/xlarge-base": "https://huggingface.co/funnel-transformer/xlarge-base/resolve/main/tokenizer.json",
    },
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {f"funnel-transformer/{name}": 512 for name in _model_names}
PRETRAINED_INIT_CONFIGURATION = {f"funnel-transformer/{name}": {"do_lower_case": True} for name in _model_names}
class FunnelTokenizerFast(PreTrainedTokenizerFast):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    pretrained_init_configuration = PRETRAINED_INIT_CONFIGURATION
    slow_tokenizer_class = FunnelTokenizer
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    cls_token_type_id: int = 2

    def __init__(self, vocab_file=None, tokenizer_file=None, do_lower_case=True, unk_token="<unk>", sep_token="<sep>", pad_token="<pad>", cls_token="<cls>", mask_token="<mask>", bos_token="<s>", eos_token="</s>", clean_text=True, tokenize_chinese_chars=True, strip_accents=None, wordpieces_prefix="##", **kwargs):
        super().__init__(vocab_file, tokenizer_file=tokenizer_file, do_lower_case=do_lower_case, unk_token=unk_token, sep_token=sep_token, pad_token=pad_token, cls_token=cls_token, mask_token=mask_token, bos_token=bos_token, eos_token=eos_token, clean_text=clean_text, tokenize_chinese_chars=tokenize_chinese_chars, strip_accents=strip_accents, wordpieces_prefix=wordpieces_prefix, **kwargs)
        normalizer_state = json.loads(self.backend_tokenizer.normalizer.__getstate__())
        if (
            normalizer_state.get("lowercase", do_lower_case) != do_lower_case
            or normalizer_state.get("strip_accents", strip_accents) != strip_accents
            or normalizer_state.get("handle_chinese_chars", tokenize_chinese_chars) != tokenize_chinese_chars
        ):
            normalizer_class = getattr(normalizers, normalizer_state.pop("type"))
            normalizer_state["lowercase"] = do_lower_case
            normalizer_state["strip_accents"] = strip_accents
            normalizer_state["handle_chinese_chars"] = tokenize_chinese_chars
            self.backend_tokenizer.normalizer = normalizer_class(**normalizer_state)

        self.do_lower_case = do_lower_case
    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        output = [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
        if token_ids_1:
            output += token_ids_1 + [self.sep_token_id]

        return output

    def create_token_type_ids_from_sequences(self, token_ids_0, token_ids_1=None) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls) * [self.cls_token_type_id] + len(token_ids_0 + sep) * [0]
        return len(cls) * [self.cls_token_type_id] + len(token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1]

    def save_vocabulary(self, save_directory, filename_prefix=None) -> Tuple[str]:
        files = self._tokenizer.model.save(save_directory, name=filename_prefix)
        return tuple(files)
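
# Funnel uses token type id 2 for the [CLS] position (unlike BERT's 0), so a sequence
# pair A, B yields [2] + [0]*(len(A) + 1) + [1]*(len(B) + 1). A sketch, assuming a
# trained vocab file is available locally (the token ids below are arbitrary):
#
#     tokenizer = FunnelTokenizerFast("vocab.txt")
#     ids = tokenizer.create_token_type_ids_from_sequences([5, 6], [7])
#     assert ids == [2, 0, 0, 0, 1, 1]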
"""simple docstring"""
import argparse
import hashlib # hashlib is only used inside the Test class
import struct
class UpperCamelCase_ :
def __init__( self , snake_case__ ) -> Dict:
"""simple docstring"""
UpperCAmelCase = data
UpperCAmelCase = [0x6_7_4_5_2_3_0_1, 0xE_F_C_D_A_B_8_9, 0x9_8_B_A_D_C_F_E, 0x1_0_3_2_5_4_7_6, 0xC_3_D_2_E_1_F_0]
@staticmethod
def UpperCamelCase_ ( snake_case__ , snake_case__ ) -> List[str]:
"""simple docstring"""
return ((n << b) | (n >> (32 - b))) & 0xF_F_F_F_F_F_F_F
def UpperCamelCase_ ( self ) -> Tuple:
"""simple docstring"""
UpperCAmelCase = B"""\x80""" + B"""\x00""" * (63 - (len(self.data ) + 8) % 64)
UpperCAmelCase = self.data + padding + struct.pack(""">Q""" , 8 * len(self.data ) )
return padded_data
def UpperCamelCase_ ( self ) -> Union[str, Any]:
"""simple docstring"""
return [
self.padded_data[i : i + 64] for i in range(0 , len(self.padded_data ) , 64 )
]
def UpperCamelCase_ ( self , snake_case__ ) -> Any:
"""simple docstring"""
UpperCAmelCase = list(struct.unpack(""">16L""" , snake_case__ ) ) + [0] * 64
for i in range(16 , 80 ):
UpperCAmelCase = self.rotate((w[i - 3] ^ w[i - 8] ^ w[i - 14] ^ w[i - 16]) , 1 )
return w
    def final_hash(self):
        self.padded_data = self.padding()
        self.blocks = self.split_blocks()
        for block in self.blocks:
            expanded_block = self.expand_block(block)
            a, b, c, d, e = self.h
            for i in range(0, 80):
                if 0 <= i < 20:
                    f = (b & c) | ((~b) & d)
                    k = 0x5A827999
                elif 20 <= i < 40:
                    f = b ^ c ^ d
                    k = 0x6ED9EBA1
                elif 40 <= i < 60:
                    f = (b & c) | (b & d) | (c & d)
                    k = 0x8F1BBCDC
                elif 60 <= i < 80:
                    f = b ^ c ^ d
                    k = 0xCA62C1D6
                a, b, c, d, e = (
                    self.rotate(a, 5) + f + e + k + expanded_block[i] & 0xFFFFFFFF,
                    a,
                    self.rotate(b, 30),
                    c,
                    d,
                )
            self.h = (
                self.h[0] + a & 0xFFFFFFFF,
                self.h[1] + b & 0xFFFFFFFF,
                self.h[2] + c & 0xFFFFFFFF,
                self.h[3] + d & 0xFFFFFFFF,
                self.h[4] + e & 0xFFFFFFFF,
            )
        return ("{:08x}" * 5).format(*self.h)
def test_sha1_hash():
    msg = b"Test String"
    assert SHA1Hash(msg).final_hash() == hashlib.sha1(msg).hexdigest()  # noqa: S324


def main():
    parser = argparse.ArgumentParser(description="Process some strings or files")
    parser.add_argument(
        "--string",
        dest="input_string",
        default="Hello World!! Welcome to Cryptography",
        help="Hash the string",
    )
    parser.add_argument("--file", dest="input_file", help="Hash contents of a file")
    args = parser.parse_args()
    input_string = args.input_string
    # In any case hash input should be a bytestring
    if args.input_file:
        with open(args.input_file, "rb") as f:
            hash_input = f.read()
    else:
        hash_input = bytes(input_string, "utf-8")
    print(SHA1Hash(hash_input).final_hash())


if __name__ == "__main__":
    main()
    import doctest

    doctest.testmod()
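
# A quick end-to-end check against a well-known digest: the SHA-1 of the empty string
# is a published constant, so this exercises padding, expansion and compression at once.
assert SHA1Hash(b"").final_hash() == "da39a3ee5e6b4b0d3255bfef95601890afd80709"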
"""simple docstring"""
import math
import random
from typing import Any
from .hill_climbing import SearchProblem
def _lowerCAmelCase ( lowerCAmelCase , lowerCAmelCase = True , lowerCAmelCase = math.inf , lowerCAmelCase = -math.inf , lowerCAmelCase = math.inf , lowerCAmelCase = -math.inf , lowerCAmelCase = False , lowerCAmelCase = 100 , lowerCAmelCase = 0.01 , lowerCAmelCase = 1 , ):
'''simple docstring'''
UpperCAmelCase = False
UpperCAmelCase = search_prob
UpperCAmelCase = start_temperate
UpperCAmelCase = []
UpperCAmelCase = 0
UpperCAmelCase = None
while not search_end:
UpperCAmelCase = current_state.score()
if best_state is None or current_score > best_state.score():
UpperCAmelCase = current_state
scores.append(lowerCAmelCase )
iterations += 1
UpperCAmelCase = None
UpperCAmelCase = current_state.get_neighbors()
while (
next_state is None and neighbors
): # till we do not find a neighbor that we can move to
UpperCAmelCase = random.randint(0 , len(lowerCAmelCase ) - 1 ) # picking a random neighbor
UpperCAmelCase = neighbors.pop(lowerCAmelCase )
UpperCAmelCase = picked_neighbor.score() - current_score
if (
picked_neighbor.x > max_x
or picked_neighbor.x < min_x
or picked_neighbor.y > max_y
or picked_neighbor.y < min_y
):
continue # neighbor outside our bounds
if not find_max:
UpperCAmelCase = change * -1 # in case we are finding minimum
if change > 0: # improves the solution
UpperCAmelCase = picked_neighbor
else:
UpperCAmelCase = (math.e) ** (
change / current_temp
) # probability generation function
if random.random() < probability: # random number within probability
UpperCAmelCase = picked_neighbor
UpperCAmelCase = current_temp - (current_temp * rate_of_decrease)
if current_temp < threshold_temp or next_state is None:
# temperature below threshold, or could not find a suitable neighbor
UpperCAmelCase = True
else:
UpperCAmelCase = next_state
if visualization:
from matplotlib import pyplot as plt
plt.plot(range(lowerCAmelCase ) , lowerCAmelCase )
plt.xlabel("""Iterations""" )
plt.ylabel("""Function values""" )
plt.show()
return best_state
if __name__ == "__main__":
def _lowerCAmelCase ( lowerCAmelCase , lowerCAmelCase ):
'''simple docstring'''
return (x**2) + (y**2)
# starting the problem with initial coordinates (12, 47)
lowerCAmelCase_ : List[str] = SearchProblem(x=1_2, y=4_7, step_size=1, function_to_optimize=test_fa)
lowerCAmelCase_ : List[str] = simulated_annealing(
prob, find_max=False, max_x=1_0_0, min_x=5, max_y=5_0, min_y=-5, visualization=True
)
print(
'''The minimum score for f(x, y) = x^2 + y^2 with the domain 100 > x > 5 '''
F'and 50 > y > - 5 found via hill climbing: {local_min.score()}'
)
# starting the problem with initial coordinates (12, 47)
lowerCAmelCase_ : int = SearchProblem(x=1_2, y=4_7, step_size=1, function_to_optimize=test_fa)
lowerCAmelCase_ : Optional[Any] = simulated_annealing(
prob, find_max=True, max_x=1_0_0, min_x=5, max_y=5_0, min_y=-5, visualization=True
)
print(
'''The maximum score for f(x, y) = x^2 + y^2 with the domain 100 > x > 5 '''
F'and 50 > y > - 5 found via hill climbing: {local_min.score()}'
)
def _lowerCAmelCase ( lowerCAmelCase , lowerCAmelCase ):
'''simple docstring'''
return (3 * x**2) - (6 * y)
lowerCAmelCase_ : Dict = SearchProblem(x=3, y=4, step_size=1, function_to_optimize=test_fa)
lowerCAmelCase_ : List[Any] = simulated_annealing(prob, find_max=False, visualization=True)
print(
'''The minimum score for f(x, y) = 3*x^2 - 6*y found via hill climbing: '''
F'{local_min.score()}'
)
lowerCAmelCase_ : List[str] = SearchProblem(x=3, y=4, step_size=1, function_to_optimize=test_fa)
lowerCAmelCase_ : List[Any] = simulated_annealing(prob, find_max=True, visualization=True)
print(
'''The maximum score for f(x, y) = 3*x^2 - 6*y found via hill climbing: '''
F'{local_min.score()}'
)
import inspect
import tempfile
from collections import OrderedDict, UserDict
from collections.abc import MutableMapping
from contextlib import ExitStack, contextmanager
from dataclasses import fields
from enum import Enum
from typing import Any, ContextManager, List, Tuple

import numpy as np

from .import_utils import is_flax_available, is_tf_available, is_torch_available, is_torch_fx_proxy


if is_flax_available():
    import jax.numpy as jnp


class cached_property(property):
    """A property whose value is computed once and then cached as a regular attribute."""

    def __get__(self, obj, objtype=None):
        if obj is None:
            return self
        if self.fget is None:
            raise AttributeError("unreadable attribute")
        attr = "__cached_" + self.fget.__name__
        cached = getattr(obj, attr, None)
        if cached is None:
            cached = self.fget(obj)
            setattr(obj, attr, cached)
        return cached
def strtobool(val):
    """Convert a string representation of truth to 1 (true) or 0 (false)."""
    val = val.lower()
    if val in {"y", "yes", "t", "true", "on", "1"}:
        return 1
    if val in {"n", "no", "f", "false", "off", "0"}:
        return 0
    raise ValueError(f"invalid truth value {val!r}")
def is_tensor(x):
    """Test whether `x` is a torch/TensorFlow/jax tensor or a numpy array."""
    if is_torch_fx_proxy(x):
        return True
    if is_torch_available():
        import torch

        if isinstance(x, torch.Tensor):
            return True
    if is_tf_available():
        import tensorflow as tf

        if isinstance(x, tf.Tensor):
            return True

    if is_flax_available():
        import jax.numpy as jnp
        from jax.core import Tracer

        if isinstance(x, (jnp.ndarray, Tracer)):
            return True

    return isinstance(x, np.ndarray)


def _is_numpy(x):
    return isinstance(x, np.ndarray)


def is_numpy_array(x):
    """Tests if `x` is a numpy array or not."""
    return _is_numpy(x)
def _is_torch(x):
    import torch

    return isinstance(x, torch.Tensor)


def is_torch_tensor(x):
    """Tests if `x` is a torch tensor; safe to call even when torch is not installed."""
    return False if not is_torch_available() else _is_torch(x)


def _is_torch_device(x):
    import torch

    return isinstance(x, torch.device)


def is_torch_device(x):
    """Tests if `x` is a torch device; safe to call even when torch is not installed."""
    return False if not is_torch_available() else _is_torch_device(x)


def _is_torch_dtype(x):
    import torch

    if isinstance(x, str):
        if hasattr(torch, x):
            x = getattr(torch, x)
        else:
            return False
    return isinstance(x, torch.dtype)


def is_torch_dtype(x):
    """Tests if `x` is a torch dtype; safe to call even when torch is not installed."""
    return False if not is_torch_available() else _is_torch_dtype(x)


def _is_tensorflow(x):
    import tensorflow as tf

    return isinstance(x, tf.Tensor)


def is_tf_tensor(x):
    """Tests if `x` is a tensorflow tensor; safe to call even when TF is not installed."""
    return False if not is_tf_available() else _is_tensorflow(x)


def _is_tf_symbolic_tensor(x):
    import tensorflow as tf

    # the `is_symbolic_tensor` predicate is only available starting with TF 2.14
    if hasattr(tf, "is_symbolic_tensor"):
        return tf.is_symbolic_tensor(x)
    return type(x) == tf.Tensor


def is_tf_symbolic_tensor(x):
    """Tests if `x` is a tensorflow symbolic tensor (i.e. not eager)."""
    return False if not is_tf_available() else _is_tf_symbolic_tensor(x)


def _is_jax(x):
    import jax.numpy as jnp  # noqa: F811

    return isinstance(x, jnp.ndarray)


def is_jax_tensor(x):
    """Tests if `x` is a jax tensor; safe to call even when jax is not installed."""
    return False if not is_flax_available() else _is_jax(x)
def to_py_obj(obj):
    """Convert a TensorFlow tensor, PyTorch tensor, Numpy array or python list to a python list."""
    if isinstance(obj, (dict, UserDict)):
        return {k: to_py_obj(v) for k, v in obj.items()}
    elif isinstance(obj, (list, tuple)):
        return [to_py_obj(o) for o in obj]
    elif is_tf_tensor(obj):
        return obj.numpy().tolist()
    elif is_torch_tensor(obj):
        return obj.detach().cpu().tolist()
    elif is_jax_tensor(obj):
        return np.asarray(obj).tolist()
    elif isinstance(obj, (np.ndarray, np.number)):  # tolist also works on 0d np arrays
        return obj.tolist()
    else:
        return obj


def to_numpy(obj):
    """Convert a TensorFlow tensor, PyTorch tensor, Numpy array or python list to a Numpy array."""
    if isinstance(obj, (dict, UserDict)):
        return {k: to_numpy(v) for k, v in obj.items()}
    elif isinstance(obj, (list, tuple)):
        return np.array(obj)
    elif is_tf_tensor(obj):
        return obj.numpy()
    elif is_torch_tensor(obj):
        return obj.detach().cpu().numpy()
    elif is_jax_tensor(obj):
        return np.asarray(obj)
    else:
        return obj
class ModelOutput(OrderedDict):
    """
    Base class for all model outputs as dataclass. Has a `__getitem__` that allows indexing by integer or slice
    (like a tuple) or strings (like a dictionary) that will ignore the `None` attributes.
    """

    def __post_init__(self):
        class_fields = fields(self)

        # Safety and consistency checks
        if not len(class_fields):
            raise ValueError(f"{self.__class__.__name__} has no fields.")
        if not all(field.default is None for field in class_fields[1:]):
            raise ValueError(f"{self.__class__.__name__} should not have more than one required field.")

        first_field = getattr(self, class_fields[0].name)
        other_fields_are_none = all(getattr(self, field.name) is None for field in class_fields[1:])

        if other_fields_are_none and not is_tensor(first_field):
            if isinstance(first_field, dict):
                iterator = first_field.items()
                first_field_iterator = True
            else:
                try:
                    iterator = iter(first_field)
                    first_field_iterator = True
                except TypeError:
                    first_field_iterator = False

            # if we provided an iterator as first field and the iterator is a (key, value) iterator
            # set the associated fields
            if first_field_iterator:
                for idx, element in enumerate(iterator):
                    if (
                        not isinstance(element, (list, tuple))
                        or not len(element) == 2
                        or not isinstance(element[0], str)
                    ):
                        if idx == 0:
                            # If we do not have an iterator of key/values, set it as attribute
                            self[class_fields[0].name] = first_field
                        else:
                            # If we have a mixed iterator, raise an error
                            raise ValueError(
                                f"Cannot set key/value for {element}. It needs to be a tuple (key, value)."
                            )
                        break
                    setattr(self, element[0], element[1])
                    if element[1] is not None:
                        self[element[0]] = element[1]
            elif first_field is not None:
                self[class_fields[0].name] = first_field
        else:
            for field in class_fields:
                v = getattr(self, field.name)
                if v is not None:
                    self[field.name] = v

    def __delitem__(self, *args, **kwargs):
        raise Exception(f"You cannot use ``__delitem__`` on a {self.__class__.__name__} instance.")

    def setdefault(self, *args, **kwargs):
        raise Exception(f"You cannot use ``setdefault`` on a {self.__class__.__name__} instance.")

    def pop(self, *args, **kwargs):
        raise Exception(f"You cannot use ``pop`` on a {self.__class__.__name__} instance.")

    def update(self, *args, **kwargs):
        raise Exception(f"You cannot use ``update`` on a {self.__class__.__name__} instance.")

    def __getitem__(self, k):
        if isinstance(k, str):
            inner_dict = dict(self.items())
            return inner_dict[k]
        else:
            return self.to_tuple()[k]

    def __setattr__(self, name, value):
        if name in self.keys() and value is not None:
            # Don't call self.__setitem__ to avoid recursion errors
            super().__setitem__(name, value)
        super().__setattr__(name, value)

    def __setitem__(self, key, value):
        super().__setitem__(key, value)
        # Don't call self.__setattr__ to avoid recursion errors
        super().__setattr__(key, value)

    def to_tuple(self) -> Tuple[Any]:
        """Convert self to a tuple containing all the attributes/keys that are not `None`."""
        return tuple(self[k] for k in self.keys())
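
# A minimal usage sketch (the `DemoOutput` dataclass is hypothetical, defined here only
# to illustrate the dual dict/tuple access that ModelOutput provides; `None` fields are
# skipped in both views):
if __name__ == "__main__":
    from dataclasses import dataclass
    from typing import Optional

    @dataclass
    class DemoOutput(ModelOutput):
        loss: Optional[float] = None
        logits: Optional[Any] = None

    out = DemoOutput(logits=[1.0, 2.0])
    assert out.logits == out["logits"] == out[0]  # `loss` is None, so it is skipped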
class ExplicitEnum(str, Enum):
    """Enum with more explicit error message for missing values."""

    @classmethod
    def _missing_(cls, value):
        raise ValueError(
            f"{value} is not a valid {cls.__name__}, please select one of {list(cls._value2member_map_.keys())}"
        )


class PaddingStrategy(ExplicitEnum):
    """Possible values for the `padding` argument in `PreTrainedTokenizerBase.__call__`."""

    LONGEST = "longest"
    MAX_LENGTH = "max_length"
    DO_NOT_PAD = "do_not_pad"


class TensorType(ExplicitEnum):
    """Possible values for the `return_tensors` argument in `PreTrainedTokenizerBase.__call__`."""

    PYTORCH = "pt"
    TENSORFLOW = "tf"
    NUMPY = "np"
    JAX = "jax"
class ContextManagers:
    """Wrapper for `contextlib.ExitStack` which enters a collection of context managers."""

    def __init__(self, context_managers: List[ContextManager]):
        self.context_managers = context_managers
        self.stack = ExitStack()

    def __enter__(self):
        for context_manager in self.context_managers:
            self.stack.enter_context(context_manager)

    def __exit__(self, *args, **kwargs):
        self.stack.__exit__(*args, **kwargs)
def can_return_loss(model_class):
    """Check whether a given model class can return a loss."""
    framework = infer_framework(model_class)
    if framework == "tf":
        signature = inspect.signature(model_class.call)  # TensorFlow models
    elif framework == "pt":
        signature = inspect.signature(model_class.forward)  # PyTorch models
    else:
        signature = inspect.signature(model_class.__call__)  # Flax models

    for p in signature.parameters:
        if p == "return_loss" and signature.parameters[p].default is True:
            return True

    return False


def find_labels(model_class):
    """Find the label arguments used by a given model class."""
    model_name = model_class.__name__
    framework = infer_framework(model_class)
    if framework == "tf":
        signature = inspect.signature(model_class.call)  # TensorFlow models
    elif framework == "pt":
        signature = inspect.signature(model_class.forward)  # PyTorch models
    else:
        signature = inspect.signature(model_class.__call__)  # Flax models

    if "QuestionAnswering" in model_name:
        return [p for p in signature.parameters if "label" in p or p in ("start_positions", "end_positions")]
    else:
        return [p for p in signature.parameters if "label" in p]
def flatten_dict(d: MutableMapping, parent_key: str = "", delimiter: str = "."):
    """Flatten a nested dict into a single level dict."""

    def _flatten_dict(d, parent_key="", delimiter="."):
        for k, v in d.items():
            key = str(parent_key) + delimiter + str(k) if parent_key else k
            if v and isinstance(v, MutableMapping):
                yield from flatten_dict(v, key, delimiter=delimiter).items()
            else:
                yield key, v

    return dict(_flatten_dict(d, parent_key, delimiter))
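
# For example, nested keys are joined with the delimiter (shown here as a comment to
# keep import of this module side-effect free):
#
#     flatten_dict({"a": 1, "b": {"c": 2, "d": {"e": 3}}})
#     -> {"a": 1, "b.c": 2, "b.d.e": 3}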
@contextmanager
def working_or_temp_dir(working_dir, use_temp_dir: bool = False):
    if use_temp_dir:
        with tempfile.TemporaryDirectory() as tmp_dir:
            yield tmp_dir
    else:
        yield working_dir
def transpose(array, axes=None):
    """Framework-agnostic version of `numpy.transpose` that works on torch/TF/Jax tensors and NumPy arrays."""
    if is_numpy_array(array):
        return np.transpose(array, axes=axes)
    elif is_torch_tensor(array):
        return array.T if axes is None else array.permute(*axes)
    elif is_tf_tensor(array):
        import tensorflow as tf

        return tf.transpose(array, perm=axes)
    elif is_jax_tensor(array):
        return jnp.transpose(array, axes=axes)
    else:
        raise ValueError(f"Type not supported for transpose: {type(array)}.")


def reshape(array, newshape):
    """Framework-agnostic version of `numpy.reshape`."""
    if is_numpy_array(array):
        return np.reshape(array, newshape)
    elif is_torch_tensor(array):
        return array.reshape(*newshape)
    elif is_tf_tensor(array):
        import tensorflow as tf

        return tf.reshape(array, newshape)
    elif is_jax_tensor(array):
        return jnp.reshape(array, newshape)
    else:
        raise ValueError(f"Type not supported for reshape: {type(array)}.")


def squeeze(array, axis=None):
    """Framework-agnostic version of `numpy.squeeze`."""
    if is_numpy_array(array):
        return np.squeeze(array, axis=axis)
    elif is_torch_tensor(array):
        return array.squeeze() if axis is None else array.squeeze(dim=axis)
    elif is_tf_tensor(array):
        import tensorflow as tf

        return tf.squeeze(array, axis=axis)
    elif is_jax_tensor(array):
        return jnp.squeeze(array, axis=axis)
    else:
        raise ValueError(f"Type not supported for squeeze: {type(array)}.")


def expand_dims(array, axis):
    """Framework-agnostic version of `numpy.expand_dims`."""
    if is_numpy_array(array):
        return np.expand_dims(array, axis)
    elif is_torch_tensor(array):
        return array.unsqueeze(dim=axis)
    elif is_tf_tensor(array):
        import tensorflow as tf

        return tf.expand_dims(array, axis=axis)
    elif is_jax_tensor(array):
        return jnp.expand_dims(array, axis=axis)
    else:
        raise ValueError(f"Type not supported for expand_dims: {type(array)}.")


def tensor_size(array):
    """Framework-agnostic version of `numpy.size`."""
    if is_numpy_array(array):
        return np.size(array)
    elif is_torch_tensor(array):
        return array.numel()
    elif is_tf_tensor(array):
        import tensorflow as tf

        return tf.size(array)
    elif is_jax_tensor(array):
        return array.size
    else:
        raise ValueError(f"Type not supported for tensor_size: {type(array)}.")
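
# These helpers dispatch on the input's framework, so one call site serves numpy, torch,
# TF and JAX inputs alike. A numpy-only sanity check of the shape semantics:
if __name__ == "__main__":
    x = np.zeros((2, 3, 1))
    assert transpose(x).shape == (1, 3, 2)
    assert reshape(x, (3, 2)).shape == (3, 2)
    assert squeeze(x).shape == (2, 3)
    assert expand_dims(x, 0).shape == (1, 2, 3, 1)
    assert tensor_size(x) == 6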
def add_model_info_to_auto_map(auto_map, repo_id):
    """Adds the information of the repo_id to a given auto map."""
    for key, value in auto_map.items():
        if isinstance(value, (tuple, list)):
            auto_map[key] = [f"{repo_id}--{v}" if (v is not None and "--" not in v) else v for v in value]
        elif value is not None and "--" not in value:
            auto_map[key] = f"{repo_id}--{value}"

    return auto_map


def infer_framework(model_class):
    """
    Infers the framework of a given model without using isinstance(), because we cannot guarantee that the
    relevant classes are imported or available.
    """
    for base_class in inspect.getmro(model_class):
        module = base_class.__module__
        name = base_class.__name__
        if module.startswith("tensorflow") or module.startswith("keras") or name == "TFPreTrainedModel":
            return "tf"
        elif module.startswith("torch") or name == "PreTrainedModel":
            return "pt"
        elif module.startswith("flax") or module.startswith("jax") or name == "FlaxPreTrainedModel":
            return "flax"
    else:
        raise TypeError(f"Could not infer framework from class {model_class}.")
import warnings
from typing import List

import numpy as np

from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding
from ...utils import is_flax_available, is_tf_available, is_torch_available


class OwlViTProcessor(ProcessorMixin):
    r"""Constructs an OWL-ViT processor which wraps an image processor and a tokenizer into a single processor."""

    attributes = ["image_processor", "tokenizer"]
    image_processor_class = "OwlViTImageProcessor"
    tokenizer_class = ("CLIPTokenizer", "CLIPTokenizerFast")

    def __init__(self, image_processor=None, tokenizer=None, **kwargs):
        feature_extractor = None
        if "feature_extractor" in kwargs:
            warnings.warn(
                "The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`"
                " instead.",
                FutureWarning,
            )
            feature_extractor = kwargs.pop("feature_extractor")

        image_processor = image_processor if image_processor is not None else feature_extractor
        if image_processor is None:
            raise ValueError("You need to specify an `image_processor`.")
        if tokenizer is None:
            raise ValueError("You need to specify a `tokenizer`.")

        super().__init__(image_processor, tokenizer)
    def __call__(self, text=None, images=None, query_images=None, padding="max_length", return_tensors="np", **kwargs):
        """Prepare one or several text queries and image(s) for the model."""
        if text is None and query_images is None and images is None:
            raise ValueError(
                "You have to specify at least one text or query image or image. All three cannot be none."
            )

        if text is not None:
            if isinstance(text, str) or (isinstance(text, List) and not isinstance(text[0], List)):
                encodings = [self.tokenizer(text, padding=padding, return_tensors=return_tensors, **kwargs)]

            elif isinstance(text, List) and isinstance(text[0], List):
                encodings = []

                # Maximum number of queries across batch
                max_num_queries = max([len(t) for t in text])

                # Pad all batch samples to max number of text queries
                for t in text:
                    if len(t) != max_num_queries:
                        t = t + [" "] * (max_num_queries - len(t))

                    encoding = self.tokenizer(t, padding=padding, return_tensors=return_tensors, **kwargs)
                    encodings.append(encoding)
            else:
                raise TypeError("Input text should be a string, a list of strings or a nested list of strings")

            if return_tensors == "np":
                input_ids = np.concatenate([encoding["input_ids"] for encoding in encodings], axis=0)
                attention_mask = np.concatenate([encoding["attention_mask"] for encoding in encodings], axis=0)

            elif return_tensors == "jax" and is_flax_available():
                import jax.numpy as jnp

                input_ids = jnp.concatenate([encoding["input_ids"] for encoding in encodings], axis=0)
                attention_mask = jnp.concatenate([encoding["attention_mask"] for encoding in encodings], axis=0)

            elif return_tensors == "pt" and is_torch_available():
                import torch

                input_ids = torch.cat([encoding["input_ids"] for encoding in encodings], dim=0)
                attention_mask = torch.cat([encoding["attention_mask"] for encoding in encodings], dim=0)

            elif return_tensors == "tf" and is_tf_available():
                import tensorflow as tf

                input_ids = tf.stack([encoding["input_ids"] for encoding in encodings], axis=0)
                attention_mask = tf.stack([encoding["attention_mask"] for encoding in encodings], axis=0)

            else:
                raise ValueError("Target return tensor type could not be returned")

            encoding = BatchEncoding()
            encoding["input_ids"] = input_ids
            encoding["attention_mask"] = attention_mask

        if query_images is not None:
            encoding = BatchEncoding()
            query_pixel_values = self.image_processor(
                query_images, return_tensors=return_tensors, **kwargs
            ).pixel_values
            encoding["query_pixel_values"] = query_pixel_values

        if images is not None:
            image_features = self.image_processor(images, return_tensors=return_tensors, **kwargs)

        if text is not None and images is not None:
            encoding["pixel_values"] = image_features.pixel_values
            return encoding
        elif query_images is not None and images is not None:
            encoding["pixel_values"] = image_features.pixel_values
            return encoding
        elif text is not None or query_images is not None:
            return encoding
        else:
            return BatchEncoding(data=dict(**image_features), tensor_type=return_tensors)
def __lowerCamelCase ( self : int , *_lowerCAmelCase : Tuple , **_lowerCAmelCase : Any):
'''simple docstring'''
return self.image_processor.post_process(*_lowerCAmelCase , **_lowerCAmelCase)
def __lowerCamelCase ( self : List[Any] , *_lowerCAmelCase : Optional[Any] , **_lowerCAmelCase : List[Any]):
'''simple docstring'''
return self.image_processor.post_process_object_detection(*_lowerCAmelCase , **_lowerCAmelCase)
def __lowerCamelCase ( self : Union[str, Any] , *_lowerCAmelCase : List[Any] , **_lowerCAmelCase : Any):
'''simple docstring'''
return self.image_processor.post_process_image_guided_detection(*_lowerCAmelCase , **_lowerCAmelCase)
def __lowerCamelCase ( self : List[str] , *_lowerCAmelCase : Optional[int] , **_lowerCAmelCase : Optional[Any]):
'''simple docstring'''
return self.tokenizer.batch_decode(*_lowerCAmelCase , **_lowerCAmelCase)
def __lowerCamelCase ( self : str , *_lowerCAmelCase : int , **_lowerCAmelCase : Dict):
'''simple docstring'''
return self.tokenizer.decode(*_lowerCAmelCase , **_lowerCAmelCase)
@property
def __lowerCamelCase ( self : Optional[Any]):
'''simple docstring'''
warnings.warn(
'`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.' , _lowerCAmelCase , )
return self.image_processor_class
@property
def __lowerCamelCase ( self : Any):
'''simple docstring'''
warnings.warn(
'`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead.' , _lowerCAmelCase , )
return self.image_processor
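

if __name__ == "__main__":
    # Minimal usage sketch for the processor above. It assumes this is the
    # OWL-ViT-style processor and that the "google/owlvit-base-patch32"
    # checkpoint is available; both are illustrative assumptions, not
    # guaranteed by this file alone.
    from PIL import Image
    from transformers import OwlViTProcessor

    processor = OwlViTProcessor.from_pretrained("google/owlvit-base-patch32")
    image = Image.new("RGB", (768, 768))  # stand-in for a real photo

    # Nested text queries are padded to the longest per-sample query list.
    inputs = processor(text=[["a cat", "a dog"], ["a bird"]], images=image, return_tensors="pt")
    print(inputs["input_ids"].shape)     # (batch * max_num_queries, seq_len)
    print(inputs["pixel_values"].shape)  # (1, 3, 768, 768)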
import numpy as np
from scipy.spatial.distance import cdist
from sklearn.metrics import f1_score
import datasets
_CITATION = '''\
@inproceedings{kakwani2020indicnlpsuite,
title={{IndicNLPSuite: Monolingual Corpora, Evaluation Benchmarks and Pre-trained Multilingual Language Models for Indian Languages}},
author={Divyanshu Kakwani and Anoop Kunchukuttan and Satish Golla and Gokul N.C. and Avik Bhattacharyya and Mitesh M. Khapra and Pratyush Kumar},
year={2020},
booktitle={Findings of EMNLP},
}
'''
_DESCRIPTION = '''\
IndicGLUE is a natural language understanding benchmark for Indian languages. It contains a wide
variety of tasks and covers 11 major Indian languages - as, bn, gu, hi, kn, ml, mr, or, pa, ta, te.
'''
_KWARGS_DESCRIPTION = '''
Compute IndicGLUE evaluation metric associated to each IndicGLUE dataset.
Args:
predictions: list of predictions to score (as int64),
except for \'cvit-mkb-clsr\' where each prediction is a vector (of float32).
references: list of ground truth labels corresponding to the predictions (as int64),
except for \'cvit-mkb-clsr\' where each reference is a vector (of float32).
Returns: depending on the IndicGLUE subset, one or several of:
"accuracy": Accuracy
"f1": F1 score
"precision": Precision@10
Examples:
>>> indic_glue_metric = datasets.load_metric(\'indic_glue\', \'wnli\') # \'wnli\' or any of ["copa", "sna", "csqa", "wstp", "inltkh", "bbca", "iitp-mr", "iitp-pr", "actsa-sc", "md"]
>>> references = [0, 1]
>>> predictions = [0, 1]
>>> results = indic_glue_metric.compute(predictions=predictions, references=references)
>>> print(results)
{\'accuracy\': 1.0}
>>> indic_glue_metric = datasets.load_metric(\'indic_glue\', \'wiki-ner\')
>>> references = [0, 1]
>>> predictions = [0, 1]
>>> results = indic_glue_metric.compute(predictions=predictions, references=references)
>>> print(results)
{\'accuracy\': 1.0, \'f1\': 1.0}
>>> indic_glue_metric = datasets.load_metric(\'indic_glue\', \'cvit-mkb-clsr\')
>>> references = [[0.5, 0.5, 0.5], [0.1, 0.2, 0.3]]
>>> predictions = [[0.5, 0.5, 0.5], [0.1, 0.2, 0.3]]
>>> results = indic_glue_metric.compute(predictions=predictions, references=references)
>>> print(results)
{\'precision@10\': 1.0}
'''
def simple_accuracy(preds, labels):
    return float((preds == labels).mean())
def acc_and_f1(preds, labels):
    acc = simple_accuracy(preds, labels)
    f1 = float(f1_score(y_true=labels, y_pred=preds))
    return {
        "accuracy": acc,
        "f1": f1,
    }
def precision_at_10(en_sentvecs, in_sentvecs):
    en_sentvecs = np.array(en_sentvecs)
    in_sentvecs = np.array(in_sentvecs)
    n = en_sentvecs.shape[0]

    # mean centering
    en_sentvecs = en_sentvecs - np.mean(en_sentvecs, axis=0)
    in_sentvecs = in_sentvecs - np.mean(in_sentvecs, axis=0)

    sim = cdist(en_sentvecs, in_sentvecs, "cosine")
    actual = np.array(range(n))
    preds = sim.argsort(axis=1)[:, :10]
    matches = np.any(preds == actual[:, None], axis=1)
    return float(matches.mean())
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class IndicGlue(datasets.Metric):
    def _info(self):
if self.config_name not in [
"wnli",
"copa",
"sna",
"csqa",
"wstp",
"inltkh",
"bbca",
"cvit-mkb-clsr",
"iitp-mr",
"iitp-pr",
"actsa-sc",
"md",
"wiki-ner",
]:
raise KeyError(
'''You should supply a configuration name selected in '''
'''["wnli", "copa", "sna", "csqa", "wstp", "inltkh", "bbca", '''
'''"cvit-mkb-clsr", "iitp-mr", "iitp-pr", "actsa-sc", "md", '''
'''"wiki-ner"]''' )
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
'''predictions''': datasets.Value('''int64''' )
if self.config_name != '''cvit-mkb-clsr'''
else datasets.Sequence(datasets.Value('''float32''' ) ),
'''references''': datasets.Value('''int64''' )
if self.config_name != '''cvit-mkb-clsr'''
else datasets.Sequence(datasets.Value('''float32''' ) ),
} ) , codebase_urls=[] , reference_urls=[] , format='''numpy''' if self.config_name != '''cvit-mkb-clsr''' else None , )
    def _compute(self, predictions, references):
        if self.config_name == "cvit-mkb-clsr":
            return {"precision@10": precision_at_10(predictions, references)}
        elif self.config_name in ["wiki-ner"]:
            return acc_and_f1(predictions, references)
elif self.config_name in [
"wnli",
"copa",
"sna",
"csqa",
"wstp",
"inltkh",
"bbca",
"iitp-mr",
"iitp-pr",
"actsa-sc",
"md",
]:
return {"accuracy": simple_accuracy(lowerCamelCase__ , lowerCamelCase__ )}
else:
raise KeyError(
'''You should supply a configuration name selected in '''
'''["wnli", "copa", "sna", "csqa", "wstp", "inltkh", "bbca", '''
'''"cvit-mkb-clsr", "iitp-mr", "iitp-pr", "actsa-sc", "md", '''
'''"wiki-ner"]''' )
from __future__ import annotations
import math
def minimax(depth: int, node_index: int, is_max: bool, scores: list[int], height: float) -> int:
    """Return the optimal value for the player at `node_index`, assuming both
    players play optimally over a complete binary game tree of the given height."""
    if depth < 0:
        raise ValueError("Depth cannot be less than 0")
    if not scores:
        raise ValueError("Scores cannot be empty")

    if depth == height:
        return scores[node_index]

    return (
        max(
            minimax(depth + 1, node_index * 2, False, scores, height),
            minimax(depth + 1, node_index * 2 + 1, False, scores, height),
        )
        if is_max
        else min(
            minimax(depth + 1, node_index * 2, True, scores, height),
            minimax(depth + 1, node_index * 2 + 1, True, scores, height),
        )
    )


def main() -> None:
    scores = [90, 23, 6, 33, 21, 65, 123, 34423]
    height = math.log(len(scores), 2)
    print(f"Optimal value : {minimax(0, 0, True, scores, height)}")


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    main()
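

if __name__ == "__main__":
    # Worked example on a 4-leaf tree of height log2(4) = 2:
    #
    #            max
    #          /     \
    #        min     min
    #       /   \   /   \
    #      3     5 2     9
    #
    # The minimizers pick min(3, 5) = 3 and min(2, 9) = 2;
    # the maximizer then takes max(3, 2) = 3.
    example_scores = [3, 5, 2, 9]
    example_height = math.log(len(example_scores), 2)
    assert minimax(0, 0, True, example_scores, example_height) == 3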
import json
import sys
import tempfile
import unittest
from pathlib import Path
import transformers
from transformers import (
CONFIG_MAPPING,
FEATURE_EXTRACTOR_MAPPING,
AutoConfig,
AutoFeatureExtractor,
    Wav2Vec2Config,
    Wav2Vec2FeatureExtractor,
)
from transformers.testing_utils import DUMMY_UNKNOWN_IDENTIFIER, get_tests_dir
sys.path.append(str(Path(__file__).parent.parent.parent.parent / """utils"""))
from test_module.custom_configuration import CustomConfig # noqa E402
from test_module.custom_feature_extraction import CustomFeatureExtractor # noqa E402
SAMPLE_FEATURE_EXTRACTION_CONFIG_DIR = get_tests_dir("fixtures")
SAMPLE_FEATURE_EXTRACTION_CONFIG = get_tests_dir("fixtures/dummy_feature_extractor_config.json")
SAMPLE_CONFIG = get_tests_dir("fixtures/dummy-config.json")
class AutoFeatureExtractorTest(unittest.TestCase):
    def setUp(self):
        transformers.dynamic_module_utils.TIME_OUT_REMOTE_CODE = 0

    def test_feature_extractor_from_model_shortcut(self):
        config = AutoFeatureExtractor.from_pretrained("facebook/wav2vec2-base-960h")
        self.assertIsInstance(config, Wav2Vec2FeatureExtractor)

    def test_feature_extractor_from_local_directory_from_key(self):
        config = AutoFeatureExtractor.from_pretrained(SAMPLE_FEATURE_EXTRACTION_CONFIG_DIR)
        self.assertIsInstance(config, Wav2Vec2FeatureExtractor)
    def test_feature_extractor_from_local_directory_from_config(self):
        with tempfile.TemporaryDirectory() as tmpdirname:
            model_config = Wav2Vec2Config()

            # remove feature_extractor_type to make sure config.json alone is enough to load feature processor locally
            config_dict = AutoFeatureExtractor.from_pretrained(SAMPLE_FEATURE_EXTRACTION_CONFIG_DIR).to_dict()

            config_dict.pop("feature_extractor_type")
            config = Wav2Vec2FeatureExtractor(**config_dict)

            # save in new folder
            model_config.save_pretrained(tmpdirname)
            config.save_pretrained(tmpdirname)

            config = AutoFeatureExtractor.from_pretrained(tmpdirname)

            # make sure private variable is not incorrectly saved
            dict_as_saved = json.loads(config.to_json_string())
            self.assertTrue("_processor_class" not in dict_as_saved)

        self.assertIsInstance(config, Wav2Vec2FeatureExtractor)
    def test_feature_extractor_from_local_file(self):
        config = AutoFeatureExtractor.from_pretrained(SAMPLE_FEATURE_EXTRACTION_CONFIG)
        self.assertIsInstance(config, Wav2Vec2FeatureExtractor)
    def test_repo_not_found(self):
        with self.assertRaisesRegex(
            EnvironmentError, "bert-base is not a local folder and is not a valid model identifier"
        ):
            _ = AutoFeatureExtractor.from_pretrained("bert-base")

    def test_revision_not_found(self):
        with self.assertRaisesRegex(
            EnvironmentError, r"aaaaaa is not a valid git identifier \(branch name, tag name or commit id\)"
        ):
            _ = AutoFeatureExtractor.from_pretrained(DUMMY_UNKNOWN_IDENTIFIER, revision="aaaaaa")

    def test_feature_extractor_not_found(self):
        with self.assertRaisesRegex(
            EnvironmentError,
            "hf-internal-testing/config-no-model does not appear to have a file named preprocessor_config.json.",
        ):
            _ = AutoFeatureExtractor.from_pretrained("hf-internal-testing/config-no-model")
    def test_from_pretrained_dynamic_feature_extractor(self):
        # If remote code is not set, we will time out when asking whether to load the model.
        with self.assertRaises(ValueError):
            feature_extractor = AutoFeatureExtractor.from_pretrained(
                "hf-internal-testing/test_dynamic_feature_extractor"
            )
        # If remote code is disabled, we can't load this config.
        with self.assertRaises(ValueError):
            feature_extractor = AutoFeatureExtractor.from_pretrained(
                "hf-internal-testing/test_dynamic_feature_extractor", trust_remote_code=False
            )

        feature_extractor = AutoFeatureExtractor.from_pretrained(
            "hf-internal-testing/test_dynamic_feature_extractor", trust_remote_code=True
        )
        self.assertEqual(feature_extractor.__class__.__name__, "NewFeatureExtractor")

        # Test feature extractor can be reloaded.
        with tempfile.TemporaryDirectory() as tmp_dir:
            feature_extractor.save_pretrained(tmp_dir)
            reloaded_feature_extractor = AutoFeatureExtractor.from_pretrained(tmp_dir, trust_remote_code=True)
        self.assertEqual(reloaded_feature_extractor.__class__.__name__, "NewFeatureExtractor")
    def test_new_feature_extractor_registration(self):
        try:
            AutoConfig.register("custom", CustomConfig)
            AutoFeatureExtractor.register(CustomConfig, CustomFeatureExtractor)
            # Trying to register something existing in the Transformers library will raise an error
            with self.assertRaises(ValueError):
                AutoFeatureExtractor.register(Wav2Vec2Config, Wav2Vec2FeatureExtractor)

            # Now that the config is registered, it can be used as any other config with the auto-API
            feature_extractor = CustomFeatureExtractor.from_pretrained(SAMPLE_FEATURE_EXTRACTION_CONFIG_DIR)
            with tempfile.TemporaryDirectory() as tmp_dir:
                feature_extractor.save_pretrained(tmp_dir)
                new_feature_extractor = AutoFeatureExtractor.from_pretrained(tmp_dir)
                self.assertIsInstance(new_feature_extractor, CustomFeatureExtractor)
        finally:
            if "custom" in CONFIG_MAPPING._extra_content:
                del CONFIG_MAPPING._extra_content["custom"]
            if CustomConfig in FEATURE_EXTRACTOR_MAPPING._extra_content:
                del FEATURE_EXTRACTOR_MAPPING._extra_content[CustomConfig]
    def test_from_pretrained_dynamic_feature_extractor_conflict(self):
        class NewFeatureExtractor(Wav2Vec2FeatureExtractor):
            is_local = True

        try:
            AutoConfig.register("custom", CustomConfig)
            AutoFeatureExtractor.register(CustomConfig, NewFeatureExtractor)
            # If remote code is not set, the default is to use local
            feature_extractor = AutoFeatureExtractor.from_pretrained(
                "hf-internal-testing/test_dynamic_feature_extractor"
            )
            self.assertEqual(feature_extractor.__class__.__name__, "NewFeatureExtractor")
            self.assertTrue(feature_extractor.is_local)

            # If remote code is disabled, we load the local one.
            feature_extractor = AutoFeatureExtractor.from_pretrained(
                "hf-internal-testing/test_dynamic_feature_extractor", trust_remote_code=False
            )
            self.assertEqual(feature_extractor.__class__.__name__, "NewFeatureExtractor")
            self.assertTrue(feature_extractor.is_local)

            # If remote is enabled, we load from the Hub
            feature_extractor = AutoFeatureExtractor.from_pretrained(
                "hf-internal-testing/test_dynamic_feature_extractor", trust_remote_code=True
            )
            self.assertEqual(feature_extractor.__class__.__name__, "NewFeatureExtractor")
            self.assertTrue(not hasattr(feature_extractor, "is_local"))
        finally:
            if "custom" in CONFIG_MAPPING._extra_content:
                del CONFIG_MAPPING._extra_content["custom"]
            if CustomConfig in FEATURE_EXTRACTOR_MAPPING._extra_content:
                del FEATURE_EXTRACTOR_MAPPING._extra_content[CustomConfig]
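

if __name__ == "__main__":
    # Standalone sketch of the AutoConfig/AutoFeatureExtractor registration
    # pattern exercised by the tests above. `MyConfig` and `MyFeatureExtractor`
    # are illustrative names, not part of the test suite.
    from transformers import PretrainedConfig
    from transformers.feature_extraction_utils import FeatureExtractionMixin

    class MyConfig(PretrainedConfig):
        model_type = "my-model"

    class MyFeatureExtractor(FeatureExtractionMixin):
        pass

    AutoConfig.register("my-model", MyConfig)
    AutoFeatureExtractor.register(MyConfig, MyFeatureExtractor)
    # From here on, AutoFeatureExtractor.from_pretrained(...) can resolve
    # checkpoints whose config.json declares `"model_type": "my-model"`.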
import logging
from transformers.configuration_utils import PretrainedConfig
logger = logging.getLogger(__name__)
class MaskedBertConfig(PretrainedConfig):
    """Replicates `~transformers.BertConfig` with extra parameters for pruning/masking (movement pruning)."""

    model_type = "masked_bert"

    def __init__(
        self, vocab_size=30522, hidden_size=768, num_hidden_layers=12, num_attention_heads=12,
        intermediate_size=3072, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1,
        max_position_embeddings=512, type_vocab_size=2, initializer_range=0.02, layer_norm_eps=1e-12,
        pad_token_id=0, pruning_method="topK", mask_init="constant", mask_scale=0.0, **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, **kwargs)

        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.pruning_method = pruning_method
        self.mask_init = mask_init
        self.mask_scale = mask_scale
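

if __name__ == "__main__":
    # Quick illustration of the config above with non-default pruning settings;
    # the hyperparameter choices are arbitrary and only for demonstration.
    config = MaskedBertConfig(
        num_hidden_layers=6,    # smaller model for quick experiments
        pruning_method="topK",  # keep the top-K scored weights per matrix
        mask_init="constant",
        mask_scale=0.0,
    )
    print(config.pruning_method, config.hidden_size)  # -> topK 768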
"""simple docstring"""
from typing import TYPE_CHECKING
from ...file_utils import _LazyModule, is_tokenizers_available, is_torch_available
from ...utils import OptionalDependencyNotAvailable
_import_structure = {"configuration_gpt_neox": ["GPT_NEOX_PRETRAINED_CONFIG_ARCHIVE_MAP", "GPTNeoXConfig"]}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_gpt_neox_fast"] = ["GPTNeoXTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_gpt_neox"] = [
"""GPT_NEOX_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""GPTNeoXForCausalLM""",
"""GPTNeoXForQuestionAnswering""",
"""GPTNeoXForSequenceClassification""",
"""GPTNeoXForTokenClassification""",
"""GPTNeoXLayer""",
"""GPTNeoXModel""",
"""GPTNeoXPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_gpt_neox import GPT_NEOX_PRETRAINED_CONFIG_ARCHIVE_MAP, GPTNeoXConfig
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_gpt_neox_fast import GPTNeoXTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_gpt_neox import (
GPT_NEOX_PRETRAINED_MODEL_ARCHIVE_LIST,
GPTNeoXForCausalLM,
GPTNeoXForQuestionAnswering,
GPTNeoXForSequenceClassification,
GPTNeoXForTokenClassification,
GPTNeoXLayer,
GPTNeoXModel,
GPTNeoXPreTrainedModel,
)
else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
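

if __name__ == "__main__":
    # Minimal standalone illustration of the lazy-import pattern used above,
    # applied to a made-up package layout: submodules are imported only when an
    # attribute is first accessed, keeping the top-level import cheap.
    import importlib
    import types

    class LazyModule(types.ModuleType):
        def __init__(self, name, import_structure):
            super().__init__(name)
            # map: attribute name -> submodule that defines it
            self._attr_to_module = {
                attr: mod for mod, attrs in import_structure.items() for attr in attrs
            }

        def __getattr__(self, attr):
            module_name = self._attr_to_module.get(attr)
            if module_name is None:
                raise AttributeError(f"module {self.__name__!r} has no attribute {attr!r}")
            module = importlib.import_module(f"{self.__name__}.{module_name}")
            value = getattr(module, attr)
            setattr(self, attr, value)  # cache for subsequent lookups
            return value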
"""simple docstring"""
from __future__ import annotations
from typing import Generic, TypeVar
lowerCamelCase_ : List[Any] = TypeVar("""T""")
class __A ( Generic[T] ):
"""simple docstring"""
def __init__( self , __A ) -> None:
a =data
a =self
a =0
class __A ( Generic[T] ):
"""simple docstring"""
def __init__( self ) -> None:
# map from node name to the node object
a ={}
def SCREAMING_SNAKE_CASE ( self , __A ) -> None:
# create a new set with x as its member
a =DisjointSetTreeNode(__A )
def SCREAMING_SNAKE_CASE ( self , __A ) -> DisjointSetTreeNode[T]:
# find the set x belongs to (with path-compression)
a =self.map[data]
if elem_ref != elem_ref.parent:
a =self.find_set(elem_ref.parent.data )
return elem_ref.parent
def SCREAMING_SNAKE_CASE ( self , __A , __A ) -> None:
# helper function for union operation
if nodea.rank > nodea.rank:
a =nodea
else:
a =nodea
if nodea.rank == nodea.rank:
nodea.rank += 1
def SCREAMING_SNAKE_CASE ( self , __A , __A ) -> None:
# merge 2 disjoint sets
self.link(self.find_set(__A ) , self.find_set(__A ) )
class __A ( Generic[T] ):
"""simple docstring"""
def __init__( self ) -> None:
# connections: map from the node to the neighbouring nodes (with weights)
a ={}
def SCREAMING_SNAKE_CASE ( self , __A ) -> None:
# add a node ONLY if its not present in the graph
if node not in self.connections:
a ={}
def SCREAMING_SNAKE_CASE ( self , __A , __A , __A ) -> None:
# add an edge with the given weight
self.add_node(__A )
self.add_node(__A )
a =weight
a =weight
def SCREAMING_SNAKE_CASE ( self ) -> GraphUndirectedWeighted[T]:
a =[]
a =set()
for start in self.connections:
for end in self.connections[start]:
if (start, end) not in seen:
seen.add((end, start) )
edges.append((start, end, self.connections[start][end]) )
edges.sort(key=lambda __A : x[2] )
# creating the disjoint set
a =DisjointSetTree[T]()
for node in self.connections:
disjoint_set.make_set(__A )
# MST generation
a =0
a =0
a =GraphUndirectedWeighted[T]()
while num_edges < len(self.connections ) - 1:
a , a , a =edges[index]
index += 1
a =disjoint_set.find_set(__A )
a =disjoint_set.find_set(__A )
if parent_u != parent_v:
num_edges += 1
graph.add_edge(__A , __A , __A )
disjoint_set.union(__A , __A )
return graph | 215 | 1 |
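

if __name__ == "__main__":
    # Usage example: the MST of this 4-node graph keeps the three lightest
    # edges ("a"-"b", "b"-"c", "c"-"d") and drops the heavy "a"-"c" edge.
    g = GraphUndirectedWeighted[str]()
    g.add_edge("a", "b", 1)
    g.add_edge("b", "c", 2)
    g.add_edge("a", "c", 10)
    g.add_edge("c", "d", 3)

    mst = g.kruskal()
    # each undirected edge is stored in both directions, hence the // 2
    total = sum(w for nbrs in mst.connections.values() for w in nbrs.values()) // 2
    print(total)  # -> 6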
from __future__ import annotations
import os
import tempfile
import unittest
from transformers import ConvBertConfig, is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import (
TFConvBertForMaskedLM,
TFConvBertForMultipleChoice,
TFConvBertForQuestionAnswering,
TFConvBertForSequenceClassification,
TFConvBertForTokenClassification,
TFConvBertModel,
)
class TFConvBertModelTester:
    def __init__(
        self, parent, batch_size=13, seq_length=7, is_training=True, use_input_mask=True,
        use_token_type_ids=True, use_labels=True, vocab_size=99, hidden_size=32, num_hidden_layers=2,
        num_attention_heads=4, intermediate_size=37, hidden_act="gelu", hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1, max_position_embeddings=512, type_vocab_size=16,
        type_sequence_label_size=2, initializer_range=0.02, num_labels=3, num_choices=4, scope=None,
    ):
        # NOTE: mirroring the original test file, the values below are hardcoded
        # (ConvBERT's reference configuration uses hidden_size=384).
        self.parent = parent
        self.batch_size = 13
        self.seq_length = 7
        self.is_training = True
        self.use_input_mask = True
        self.use_token_type_ids = True
        self.use_labels = True
        self.vocab_size = 99
        self.hidden_size = 384
        self.num_hidden_layers = 2
        self.num_attention_heads = 4
        self.intermediate_size = 37
        self.hidden_act = "gelu"
        self.hidden_dropout_prob = 0.1
        self.attention_probs_dropout_prob = 0.1
        self.max_position_embeddings = 512
        self.type_vocab_size = 16
        self.type_sequence_label_size = 2
        self.initializer_range = 0.02
        self.num_labels = 3
        self.num_choices = 4
        self.embedding_size = 128
        self.head_ratio = 2
        self.conv_kernel_size = 9
        self.num_groups = 1
        self.scope = None
    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)

        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)

        config = ConvBertConfig(
            vocab_size=self.vocab_size, hidden_size=self.hidden_size, num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads, intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act, hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings, type_vocab_size=self.type_vocab_size,
            initializer_range=self.initializer_range, return_dict=True,
        )

        return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    def create_and_check_model(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = TFConvBertModel(config=config)
        inputs = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids}
        result = model(inputs)

        result = model([input_ids, input_mask])
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))
    def create_and_check_for_masked_lm(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = TFConvBertForMaskedLM(config=config)
        inputs = {
            "input_ids": input_ids,
            "attention_mask": input_mask,
            "token_type_ids": token_type_ids,
        }
        result = model(inputs)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))
    def create_and_check_for_sequence_classification(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_labels = self.num_labels
        model = TFConvBertForSequenceClassification(config=config)
        inputs = {
            "input_ids": input_ids,
            "attention_mask": input_mask,
            "token_type_ids": token_type_ids,
        }
        result = model(inputs)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))
    def create_and_check_for_multiple_choice(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_choices = self.num_choices
        model = TFConvBertForMultipleChoice(config=config)
        multiple_choice_inputs_ids = tf.tile(tf.expand_dims(input_ids, 1), (1, self.num_choices, 1))
        multiple_choice_input_mask = tf.tile(tf.expand_dims(input_mask, 1), (1, self.num_choices, 1))
        multiple_choice_token_type_ids = tf.tile(tf.expand_dims(token_type_ids, 1), (1, self.num_choices, 1))
        inputs = {
            "input_ids": multiple_choice_inputs_ids,
            "attention_mask": multiple_choice_input_mask,
            "token_type_ids": multiple_choice_token_type_ids,
        }
        result = model(inputs)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_choices))
    def create_and_check_for_token_classification(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_labels = self.num_labels
        model = TFConvBertForTokenClassification(config=config)
        inputs = {
            "input_ids": input_ids,
            "attention_mask": input_mask,
            "token_type_ids": token_type_ids,
        }
        result = model(inputs)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))
    def create_and_check_for_question_answering(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = TFConvBertForQuestionAnswering(config=config)
        inputs = {
            "input_ids": input_ids,
            "attention_mask": input_mask,
            "token_type_ids": token_type_ids,
        }
        result = model(inputs)
        self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": input_mask}
        return config, inputs_dict
@require_tf
class TFConvBertModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
(
TFConvBertModel,
TFConvBertForMaskedLM,
TFConvBertForQuestionAnswering,
TFConvBertForSequenceClassification,
TFConvBertForTokenClassification,
TFConvBertForMultipleChoice,
)
if is_tf_available()
else ()
)
    pipeline_model_mapping = (
{
'''feature-extraction''': TFConvBertModel,
'''fill-mask''': TFConvBertForMaskedLM,
'''question-answering''': TFConvBertForQuestionAnswering,
'''text-classification''': TFConvBertForSequenceClassification,
'''token-classification''': TFConvBertForTokenClassification,
'''zero-shot''': TFConvBertForSequenceClassification,
}
if is_tf_available()
else {}
)
    test_pruning = False
    test_head_masking = False
    test_onnx = False
    def setUp(self):
        self.model_tester = TFConvBertModelTester(self)
        self.config_tester = ConfigTester(self, config_class=ConvBertConfig, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_for_masked_lm(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_masked_lm(*config_and_inputs)

    def test_for_multiple_choice(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_multiple_choice(*config_and_inputs)

    def test_for_question_answering(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_question_answering(*config_and_inputs)

    def test_for_sequence_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_sequence_classification(*config_and_inputs)

    def test_for_token_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_token_classification(*config_and_inputs)
@slow
    def test_saved_model_creation_extended(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.output_hidden_states = True
        config.output_attentions = True

        if hasattr(config, "use_cache"):
            config.use_cache = True

        encoder_seq_length = getattr(self.model_tester, "encoder_seq_length", self.model_tester.seq_length)
        encoder_key_length = getattr(self.model_tester, "key_length", encoder_seq_length)

        for model_class in self.all_model_classes:
            class_inputs_dict = self._prepare_for_class(inputs_dict, model_class)
            model = model_class(config)
            num_out = len(model(class_inputs_dict))

            with tempfile.TemporaryDirectory() as tmpdirname:
                model.save_pretrained(tmpdirname, saved_model=True)
                saved_model_dir = os.path.join(tmpdirname, "saved_model", "1")
                model = tf.keras.models.load_model(saved_model_dir)
                outputs = model(class_inputs_dict)

                if self.is_encoder_decoder:
                    output_hidden_states = outputs["encoder_hidden_states"]
                    output_attentions = outputs["encoder_attentions"]
                else:
                    output_hidden_states = outputs["hidden_states"]
                    output_attentions = outputs["attentions"]

                self.assertEqual(len(outputs), num_out)

                expected_num_layers = getattr(
                    self.model_tester, "expected_num_hidden_layers", self.model_tester.num_hidden_layers + 1
                )

                self.assertEqual(len(output_hidden_states), expected_num_layers)
                self.assertListEqual(
                    list(output_hidden_states[0].shape[-2:]),
                    [self.model_tester.seq_length, self.model_tester.hidden_size],
                )

                self.assertEqual(len(output_attentions), self.model_tester.num_hidden_layers)
                self.assertListEqual(
                    list(output_attentions[0].shape[-3:]),
                    [self.model_tester.num_attention_heads / 2, encoder_seq_length, encoder_key_length],
                )
@slow
    def test_model_from_pretrained(self):
        model = TFConvBertModel.from_pretrained("YituTech/conv-bert-base")
        self.assertIsNotNone(model)
    def test_attention_outputs(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.return_dict = True
        decoder_seq_length = getattr(self.model_tester, "decoder_seq_length", self.model_tester.seq_length)
        encoder_seq_length = getattr(self.model_tester, "encoder_seq_length", self.model_tester.seq_length)
        decoder_key_length = getattr(self.model_tester, "key_length", decoder_seq_length)
        encoder_key_length = getattr(self.model_tester, "key_length", encoder_seq_length)

        def check_decoder_attentions_output(outputs):
            out_len = len(outputs)
            self.assertEqual(out_len % 2, 0)
            decoder_attentions = outputs.decoder_attentions
            self.assertEqual(len(decoder_attentions), self.model_tester.num_hidden_layers)
            self.assertListEqual(
                list(decoder_attentions[0].shape[-3:]),
                [self.model_tester.num_attention_heads / 2, decoder_seq_length, decoder_key_length],
            )

        def check_encoder_attentions_output(outputs):
            attentions = [
                t.numpy() for t in (outputs.encoder_attentions if config.is_encoder_decoder else outputs.attentions)
            ]
            self.assertEqual(len(attentions), self.model_tester.num_hidden_layers)
            self.assertListEqual(
                list(attentions[0].shape[-3:]),
                [self.model_tester.num_attention_heads / 2, encoder_seq_length, encoder_key_length],
            )

        for model_class in self.all_model_classes:
            inputs_dict["output_attentions"] = True
            config.output_hidden_states = False
            model = model_class(config)
            outputs = model(self._prepare_for_class(inputs_dict, model_class))
            out_len = len(outputs)
            self.assertEqual(config.output_hidden_states, False)
            check_encoder_attentions_output(outputs)

            if self.is_encoder_decoder:
                model = model_class(config)
                outputs = model(self._prepare_for_class(inputs_dict, model_class))
                self.assertEqual(config.output_hidden_states, False)
                check_decoder_attentions_output(outputs)

            # Check that output attentions can also be changed via the config
            del inputs_dict["output_attentions"]
            config.output_attentions = True
            model = model_class(config)
            outputs = model(self._prepare_for_class(inputs_dict, model_class))
            self.assertEqual(config.output_hidden_states, False)
            check_encoder_attentions_output(outputs)

            # Check attention is always last and order is fine
            inputs_dict["output_attentions"] = True
            config.output_hidden_states = True
            model = model_class(config)
            outputs = model(self._prepare_for_class(inputs_dict, model_class))
            self.assertEqual(out_len + (2 if self.is_encoder_decoder else 1), len(outputs))
            self.assertEqual(model.config.output_hidden_states, True)
            check_encoder_attentions_output(outputs)
@require_tf
class TFConvBertModelIntegrationTest(unittest.TestCase):
    @slow
    def test_inference_masked_lm(self):
        model = TFConvBertModel.from_pretrained("YituTech/conv-bert-base")
        input_ids = tf.constant([[0, 1, 2, 3, 4, 5]])
        output = model(input_ids)[0]

        expected_shape = [1, 6, 768]
        self.assertEqual(output.shape, expected_shape)

        expected_slice = tf.constant(
            [
                [
                    [-0.03475493, -0.4686034, -0.30638832],
                    [0.22637248, -0.26988646, -0.7423424],
                    [0.10324868, -0.45013508, -0.58280784],
                ]
            ]
        )
        tf.debugging.assert_near(output[:, :3, :3], expected_slice, atol=1e-4)
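

if __name__ == "__main__":
    # Minimal inference sketch mirroring the integration test above; requires
    # network access to download the "YituTech/conv-bert-base" checkpoint.
    from transformers import ConvBertTokenizer

    tokenizer = ConvBertTokenizer.from_pretrained("YituTech/conv-bert-base")
    model = TFConvBertModel.from_pretrained("YituTech/conv-bert-base")

    encoded = tokenizer("ConvBERT mixes self-attention with span-based convolution.", return_tensors="tf")
    outputs = model(encoded)
    print(outputs.last_hidden_state.shape)  # (1, seq_len, 768)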
"""Generate images with a Stable Diffusion pipeline whose UNet may have been
pruned by Intel Neural Compressor (INC)."""
import argparse
import math
import os

import torch
from neural_compressor.utils.pytorch import load
from PIL import Image
from transformers import CLIPTextModel, CLIPTokenizer

from diffusers import AutoencoderKL, StableDiffusionPipeline, UNet2DConditionModel


def parse_args():
    parser = argparse.ArgumentParser()
    parser.add_argument(
        "-m", "--pretrained_model_name_or_path", type=str, default=None, required=True,
        help="Path to pretrained model or model identifier from huggingface.co/models.",
    )
    parser.add_argument(
        "-c", "--caption", type=str, default="robotic cat with wings",
        help="Text used to generate images.",
    )
    parser.add_argument(
        "-n", "--images_num", type=int, default=4,
        help="How many images to generate.",
    )
    parser.add_argument(
        "-s", "--seed", type=int, default=42,
        help="Seed for random process.",
    )
    parser.add_argument(
        "-ci", "--cuda_id", type=int, default=0,
        help="cuda_id.",
    )
    args = parser.parse_args()
    return args


def image_grid(imgs, rows, cols):
    if not len(imgs) == rows * cols:
        raise ValueError("The specified number of rows and columns are not correct.")

    w, h = imgs[0].size
    grid = Image.new("RGB", size=(cols * w, rows * h))

    for i, img in enumerate(imgs):
        grid.paste(img, box=(i % cols * w, i // cols * h))
    return grid


def generate_images(
    pipeline,
    prompt="robotic cat with wings",
    guidance_scale=7.5,
    num_inference_steps=50,
    num_images_per_prompt=1,
    seed=42,
):
    generator = torch.Generator(pipeline.device).manual_seed(seed)
    images = pipeline(
        prompt,
        guidance_scale=guidance_scale,
        num_inference_steps=num_inference_steps,
        generator=generator,
        num_images_per_prompt=num_images_per_prompt,
    ).images
    _rows = int(math.sqrt(num_images_per_prompt))
    grid = image_grid(images, rows=_rows, cols=num_images_per_prompt // _rows)
    return grid, images


args = parse_args()

# Load models and create wrapper for stable diffusion
tokenizer = CLIPTokenizer.from_pretrained(args.pretrained_model_name_or_path, subfolder="tokenizer")
text_encoder = CLIPTextModel.from_pretrained(args.pretrained_model_name_or_path, subfolder="text_encoder")
vae = AutoencoderKL.from_pretrained(args.pretrained_model_name_or_path, subfolder="vae")
unet = UNet2DConditionModel.from_pretrained(args.pretrained_model_name_or_path, subfolder="unet")
pipeline = StableDiffusionPipeline.from_pretrained(
    args.pretrained_model_name_or_path, text_encoder=text_encoder, vae=vae, unet=unet, tokenizer=tokenizer
)
pipeline.safety_checker = lambda images, clip_input: (images, False)

if os.path.exists(os.path.join(args.pretrained_model_name_or_path, "best_model.pt")):
    unet = load(args.pretrained_model_name_or_path, model=unet)
    unet.eval()
    setattr(pipeline, "unet", unet)
else:
    unet = unet.to(torch.device("cuda", args.cuda_id))
pipeline = pipeline.to(unet.device)

grid, images = generate_images(pipeline, prompt=args.caption, num_images_per_prompt=args.images_num, seed=args.seed)
grid.save(os.path.join(args.pretrained_model_name_or_path, "{}.png".format("_".join(args.caption.split()))))
dirname = os.path.join(args.pretrained_model_name_or_path, "_".join(args.caption.split()))
os.makedirs(dirname, exist_ok=True)
for idx, image in enumerate(images):
    image.save(os.path.join(dirname, "{}.png".format(idx + 1)))
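
# Illustrative invocation of the script above (the script filename and the
# checkpoint directory are placeholders, not taken from this repository):
#
#   python text2images.py \
#       -m ./sd-finetuned-model \
#       -c "robotic cat with wings" \
#       -n 4 -s 42 -ci 0
#
# If Intel Neural Compressor saved a pruned UNet as `best_model.pt` inside the
# checkpoint directory, it is picked up automatically by the branch above.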
"""Testing suite for the PyTorch DeiT model."""
import inspect
import unittest
import warnings
from transformers import DeiTConfig
from transformers.models.auto import get_values
from transformers.testing_utils import (
require_accelerate,
require_torch,
require_torch_gpu,
require_vision,
slow,
torch_device,
)
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import (
MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING,
MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING,
MODEL_MAPPING,
DeiTForImageClassification,
DeiTForImageClassificationWithTeacher,
DeiTForMaskedImageModeling,
DeiTModel,
)
from transformers.models.deit.modeling_deit import DEIT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import DeiTImageProcessor
class DeiTModelTester:
    def __init__(
        self, parent, batch_size=13, image_size=30, patch_size=2, num_channels=3, is_training=True,
        use_labels=True, hidden_size=32, num_hidden_layers=5, num_attention_heads=4, intermediate_size=37,
        hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1,
        type_sequence_label_size=10, initializer_range=0.02, num_labels=3, scope=None, encoder_stride=2,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.scope = scope
        self.encoder_stride = encoder_stride

        # in DeiT, the seq length equals the number of patches + 2 (we add 2 for the [CLS] and distillation tokens)
        num_patches = (image_size // patch_size) ** 2
        self.seq_length = num_patches + 2
    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])

        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.type_sequence_label_size)

        config = self.get_config()

        return config, pixel_values, labels

    def get_config(self):
        return DeiTConfig(
            image_size=self.image_size, patch_size=self.patch_size, num_channels=self.num_channels,
            hidden_size=self.hidden_size, num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads, intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act, hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob, is_decoder=False,
            initializer_range=self.initializer_range, encoder_stride=self.encoder_stride,
        )

    def create_and_check_model(self, config, pixel_values, labels):
        model = DeiTModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_for_masked_image_modeling(self, config, pixel_values, labels):
        model = DeiTForMaskedImageModeling(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        self.parent.assertEqual(
            result.reconstruction.shape, (self.batch_size, self.num_channels, self.image_size, self.image_size)
        )

        # test greyscale images
        config.num_channels = 1
        model = DeiTForMaskedImageModeling(config)
        model.to(torch_device)
        model.eval()

        pixel_values = floats_tensor([self.batch_size, 1, self.image_size, self.image_size])
        result = model(pixel_values)
        self.parent.assertEqual(result.reconstruction.shape, (self.batch_size, 1, self.image_size, self.image_size))

    def create_and_check_for_image_classification(self, config, pixel_values, labels):
        config.num_labels = self.type_sequence_label_size
        model = DeiTForImageClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values, labels=labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.type_sequence_label_size))

        # test greyscale images
        config.num_channels = 1
        model = DeiTForImageClassification(config)
        model.to(torch_device)
        model.eval()

        pixel_values = floats_tensor([self.batch_size, 1, self.image_size, self.image_size])
        result = model(pixel_values, labels=labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.type_sequence_label_size))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict
@require_torch
class DeiTModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            DeiTModel,
            DeiTForImageClassification,
            DeiTForImageClassificationWithTeacher,
            DeiTForMaskedImageModeling,
        )
        if is_torch_available()
        else ()
    )
    pipeline_model_mapping = (
        {
            "feature-extraction": DeiTModel,
            "image-classification": (DeiTForImageClassification, DeiTForImageClassificationWithTeacher),
        }
        if is_torch_available()
        else {}
    )

    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False

    def setUp(self):
        self.model_tester = DeiTModelTester(self)
        self.config_tester = ConfigTester(self, config_class=DeiTConfig, has_text_modality=False, hidden_size=37)
    def test_config(self):
        self.config_tester.run_common_tests()

    @unittest.skip(reason="DeiT does not use inputs_embeds")
    def test_inputs_embeds(self):
        pass

    def test_model_common_attributes(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            self.assertIsInstance(model.get_input_embeddings(), (nn.Module))
            x = model.get_output_embeddings()
            self.assertTrue(x is None or isinstance(x, nn.Linear))

    def test_forward_signature(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]

            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_for_masked_image_modeling(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_masked_image_modeling(*config_and_inputs)

    def test_for_image_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*config_and_inputs)

    # special case for DeiTForImageClassificationWithTeacher model
    def _prepare_for_class(self, inputs_dict, model_class, return_labels=False):
        inputs_dict = super()._prepare_for_class(inputs_dict, model_class, return_labels=return_labels)

        if return_labels:
            if model_class.__name__ == "DeiTForImageClassificationWithTeacher":
                del inputs_dict["labels"]

        return inputs_dict
    def test_training(self):
        if not self.model_tester.is_training:
            return

        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.return_dict = True

        for model_class in self.all_model_classes:
            # DeiTForImageClassificationWithTeacher supports inference-only
            if (
                model_class in get_values(MODEL_MAPPING)
                or model_class.__name__ == "DeiTForImageClassificationWithTeacher"
            ):
                continue
            model = model_class(config)
            model.to(torch_device)
            model.train()
            inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True)
            loss = model(**inputs).loss
            loss.backward()

    def test_training_gradient_checkpointing(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        if not self.model_tester.is_training:
            return

        config.use_cache = False
        config.return_dict = True

        for model_class in self.all_model_classes:
            if model_class in get_values(MODEL_MAPPING) or not model_class.supports_gradient_checkpointing:
                continue
            # DeiTForImageClassificationWithTeacher supports inference-only
            if model_class.__name__ == "DeiTForImageClassificationWithTeacher":
                continue
            model = model_class(config)
            model.gradient_checkpointing_enable()
            model.to(torch_device)
            model.train()
            inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True)
            loss = model(**inputs).loss
            loss.backward()
    def test_problem_types(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        problem_types = [
            {"title": "multi_label_classification", "num_labels": 2, "dtype": torch.float},
            {"title": "single_label_classification", "num_labels": 1, "dtype": torch.long},
            {"title": "regression", "num_labels": 1, "dtype": torch.float},
        ]

        for model_class in self.all_model_classes:
            if (
                model_class
                not in [
                    *get_values(MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING),
                    *get_values(MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING),
                ]
                or model_class.__name__ == "DeiTForImageClassificationWithTeacher"
            ):
                continue

            for problem_type in problem_types:
                with self.subTest(msg=f"Testing {model_class} with {problem_type['title']}"):
                    config.problem_type = problem_type["title"]
                    config.num_labels = problem_type["num_labels"]

                    model = model_class(config)
                    model.to(torch_device)
                    model.train()

                    inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True)

                    if problem_type["num_labels"] > 1:
                        inputs["labels"] = inputs["labels"].unsqueeze(1).repeat(1, problem_type["num_labels"])

                    inputs["labels"] = inputs["labels"].to(problem_type["dtype"])

                    # This tests that we do not trigger the warning from PyTorch "Using a target size that is different
                    # to the input size. This will likely lead to incorrect results due to broadcasting. Please ensure
                    # they have the same size." which is a symptom that something is wrong for the regression problem.
                    # See https://github.com/huggingface/transformers/issues/11780
                    with warnings.catch_warnings(record=True) as warning_list:
                        loss = model(**inputs).loss
                    for w in warning_list:
                        if "Using a target size that is different to the input size" in str(w.message):
                            raise ValueError(
                                f"Something is going wrong in the regression problem: intercepted {w.message}"
                            )

                    loss.backward()

    @slow
    def test_model_from_pretrained(self):
        for model_name in DEIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = DeiTModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
def prepare_img():
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image
@require_torch
@require_vision
class DeiTModelIntegrationTest(unittest.TestCase):
    @cached_property
    def default_image_processor(self):
        return (
            DeiTImageProcessor.from_pretrained("facebook/deit-base-distilled-patch16-224")
            if is_vision_available()
            else None
        )

    @slow
    def test_inference_image_classification_head(self):
        model = DeiTForImageClassificationWithTeacher.from_pretrained("facebook/deit-base-distilled-patch16-224").to(
            torch_device
        )

        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="pt").to(torch_device)

        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)

        # verify the logits
        expected_shape = torch.Size((1, 1000))
        self.assertEqual(outputs.logits.shape, expected_shape)

        expected_slice = torch.tensor([-1.0266, 0.1912, -1.2861]).to(torch_device)
        self.assertTrue(torch.allclose(outputs.logits[0, :3], expected_slice, atol=1e-4))

    @slow
    @require_accelerate
    @require_torch_gpu
    def test_inference_fp16(self):
        """A small test to make sure that inference works in half precision without any problem."""
        model = DeiTModel.from_pretrained(
            "facebook/deit-base-distilled-patch16-224", torch_dtype=torch.float16, device_map="auto"
        )
        image_processor = self.default_image_processor

        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="pt")
        pixel_values = inputs.pixel_values.to(torch_device)

        # forward pass to make sure inference works in fp16
        with torch.no_grad():
            _ = model(pixel_values)
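

if __name__ == "__main__":
    # Standalone sketch of the inference path exercised by the integration test
    # above; "cat.jpg" is an illustrative image path.
    processor = DeiTImageProcessor.from_pretrained("facebook/deit-base-distilled-patch16-224")
    deit = DeiTForImageClassificationWithTeacher.from_pretrained("facebook/deit-base-distilled-patch16-224")

    img = Image.open("cat.jpg")
    batch = processor(images=img, return_tensors="pt")
    with torch.no_grad():
        logits = deit(**batch).logits
    print(deit.config.id2label[logits.argmax(-1).item()])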
"""Masked BERT (movement pruning) research project exports."""
from .configuration_bert_masked import MaskedBertConfig
from .modeling_bert_masked import (
MaskedBertForMultipleChoice,
MaskedBertForQuestionAnswering,
MaskedBertForSequenceClassification,
MaskedBertForTokenClassification,
MaskedBertModel,
)
from .modules import *
"""Convert SpeechT5 HiFi-GAN vocoder checkpoints to the Transformers format."""
import argparse
import numpy as np
import torch
from transformers import SpeechT5HifiGan, SpeechT5HifiGanConfig, logging
logging.set_verbosity_info()
logger = logging.get_logger("transformers.models.speecht5")
def load_weights(checkpoint, hf_model, config):
    hf_model.apply_weight_norm()

    hf_model.conv_pre.weight_g.data = checkpoint["input_conv.weight_g"]
    hf_model.conv_pre.weight_v.data = checkpoint["input_conv.weight_v"]
    hf_model.conv_pre.bias.data = checkpoint["input_conv.bias"]

    for i in range(len(config.upsample_rates)):
        hf_model.upsampler[i].weight_g.data = checkpoint[f"upsamples.{i}.1.weight_g"]
        hf_model.upsampler[i].weight_v.data = checkpoint[f"upsamples.{i}.1.weight_v"]
        hf_model.upsampler[i].bias.data = checkpoint[f"upsamples.{i}.1.bias"]

    for i in range(len(config.upsample_rates) * len(config.resblock_kernel_sizes)):
        for j in range(len(config.resblock_dilation_sizes)):
            hf_model.resblocks[i].convs1[j].weight_g.data = checkpoint[f"blocks.{i}.convs1.{j}.1.weight_g"]
            hf_model.resblocks[i].convs1[j].weight_v.data = checkpoint[f"blocks.{i}.convs1.{j}.1.weight_v"]
            hf_model.resblocks[i].convs1[j].bias.data = checkpoint[f"blocks.{i}.convs1.{j}.1.bias"]

            hf_model.resblocks[i].convs2[j].weight_g.data = checkpoint[f"blocks.{i}.convs2.{j}.1.weight_g"]
            hf_model.resblocks[i].convs2[j].weight_v.data = checkpoint[f"blocks.{i}.convs2.{j}.1.weight_v"]
            hf_model.resblocks[i].convs2[j].bias.data = checkpoint[f"blocks.{i}.convs2.{j}.1.bias"]

    hf_model.conv_post.weight_g.data = checkpoint["output_conv.1.weight_g"]
    hf_model.conv_post.weight_v.data = checkpoint["output_conv.1.weight_v"]
    hf_model.conv_post.bias.data = checkpoint["output_conv.1.bias"]

    hf_model.remove_weight_norm()


@torch.no_grad()
def convert_hifigan_checkpoint(
    checkpoint_path,
    stats_path,
    pytorch_dump_folder_path,
    config_path=None,
    repo_id=None,
):
    if config_path is not None:
        config = SpeechT5HifiGanConfig.from_pretrained(config_path)
    else:
        config = SpeechT5HifiGanConfig()

    model = SpeechT5HifiGan(config)

    orig_checkpoint = torch.load(checkpoint_path)
    load_weights(orig_checkpoint["model"]["generator"], model, config)

    stats = np.load(stats_path)
    mean = stats[0].reshape(-1)
    scale = stats[1].reshape(-1)
    model.mean = torch.from_numpy(mean).float()
    model.scale = torch.from_numpy(scale).float()

    model.save_pretrained(pytorch_dump_folder_path)

    if repo_id:
        print("Pushing to the hub...")
        model.push_to_hub(repo_id)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument("--checkpoint_path", required=True, default=None, type=str, help="Path to original checkpoint")
parser.add_argument("--stats_path", required=True, default=None, type=str, help="Path to stats.npy file")
parser.add_argument("--config_path", default=None, type=str, help="Path to hf config.json of model to convert")
parser.add_argument(
"--pytorch_dump_folder_path", required=True, default=None, type=str, help="Path to the output PyTorch model."
)
parser.add_argument(
"--push_to_hub", default=None, type=str, help="Where to upload the converted model on the 🤗 hub."
)
    args = parser.parse_args()
convert_hifigan_checkpoint(
args.checkpoint_path,
args.stats_path,
args.pytorch_dump_folder_path,
args.config_path,
args.push_to_hub,
)
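
# Illustrative invocation (all paths are placeholders for artifacts produced by
# the original SpeechT5 HiFi-GAN training run):
#
#   python convert_hifigan.py \
#       --checkpoint_path hifigan_generator.ckpt \
#       --stats_path stats.npy \
#       --pytorch_dump_folder_path ./speecht5_hifigan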
"""simple docstring"""
import argparse
import os
from pathlib import Path
from typing import Dict
import tensorflow as tf
import torch
from tqdm import tqdm
from transformers import PegasusConfig, PegasusForConditionalGeneration, PegasusTokenizer
from transformers.models.pegasus.configuration_pegasus import DEFAULTS, task_specific_params
PATTERNS = [
# replace left string with right string to get the relevant state_dict key (identical state dict to bart)
["memory_attention", "encoder_attn"],
["attention", "attn"],
["/", "."],
[".LayerNorm.gamma", "_layer_norm.weight"],
[".LayerNorm.beta", "_layer_norm.bias"],
["r.layer_", "r.layers."],
["output_proj", "out_proj"],
["ffn.dense_1.", "fc2."],
["ffn.dense.", "fc1."],
["ffn_layer_norm", "final_layer_norm"],
["kernel", "weight"],
["encoder_layer_norm.", "encoder.layer_norm."],
["decoder_layer_norm.", "decoder.layer_norm."],
["embeddings.weights", "shared.weight"],
]
def rename_state_dict_key(k: str) -> str:
    for pegasus_name, hf_name in PATTERNS:
        k = k.replace(pegasus_name, hf_name)
    return k
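# For example (illustrative key, not taken from a real checkpoint):
# "encoder/memory_attention/kernel" becomes "encoder.encoder_attn.weight" after
# the substitutions in PATTERNS are applied in order.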
def convert_pegasus(tf_weights: dict, cfg_updates: dict) -> PegasusForConditionalGeneration:
    cfg_kwargs = DEFAULTS.copy()
    cfg_kwargs.update(cfg_updates)
    cfg = PegasusConfig(**cfg_kwargs)
    torch_model = PegasusForConditionalGeneration(cfg)
    sd = torch_model.model.state_dict()
    mapping = {}
    for k, v in tf_weights.items():
        new_k = rename_state_dict_key(k)
        if new_k not in sd:
            raise ValueError(f"could not find new key {new_k} in state dict. (converted from {k})")
        if "dense" in k or "proj" in new_k:
            v = v.T
        mapping[new_k] = torch.tensor(v, dtype=sd[new_k].dtype)
        assert v.shape == sd[new_k].shape, f"{new_k}, {k}, {v.shape}, {sd[new_k].shape}"
    # make sure embedding.padding_idx is respected
    mapping["shared.weight"][cfg.pad_token_id] = torch.zeros_like(mapping["shared.weight"][cfg.pad_token_id + 1])
    mapping["encoder.embed_tokens.weight"] = mapping["shared.weight"]
    mapping["decoder.embed_tokens.weight"] = mapping["shared.weight"]
    empty_biases = {k: torch.zeros_like(v) for k, v in sd.items() if k.endswith("bias") and k not in mapping}
    mapping.update(**empty_biases)
    missing, extra = torch_model.model.load_state_dict(mapping, strict=False)
    unexpected_missing = [
        k for k in missing if k not in ["encoder.embed_positions.weight", "decoder.embed_positions.weight"]
    ]
    assert unexpected_missing == [], f"no matches found for the following torch keys {unexpected_missing}"
    assert extra == [], f"no matches found for the following tf keys {extra}"
    return torch_model
def _lowercase ( __lowerCAmelCase="./ckpt/aeslc/model.ckpt-32000" ) -> Dict:
SCREAMING_SNAKE_CASE__ : List[Any] = tf.train.list_variables(__lowerCAmelCase )
SCREAMING_SNAKE_CASE__ : Dict = {}
SCREAMING_SNAKE_CASE__ : Any = ["""Adafactor""", """global_step"""]
for name, shape in tqdm(__lowerCAmelCase , desc="""converting tf checkpoint to dict""" ):
SCREAMING_SNAKE_CASE__ : Tuple = any(pat in name for pat in ignore_name )
if skip_key:
continue
SCREAMING_SNAKE_CASE__ : str = tf.train.load_variable(__lowerCAmelCase , __lowerCAmelCase )
SCREAMING_SNAKE_CASE__ : Dict = array
return tf_weights
def convert_pegasus_ckpt_to_pytorch(ckpt_path, save_dir) -> None:
    # save tokenizer first
    dataset = Path(ckpt_path).parent.name
    desired_max_model_length = task_specific_params[f"summarization_{dataset}"]["max_position_embeddings"]
    tok = PegasusTokenizer.from_pretrained("sshleifer/pegasus", model_max_length=desired_max_model_length)
    assert tok.model_max_length == desired_max_model_length
    tok.save_pretrained(save_dir)

    # convert model
    tf_weights = get_tf_weights_as_numpy(ckpt_path)
    cfg_updates = task_specific_params[f"summarization_{dataset}"]
    if dataset == "large":
        cfg_updates["task_specific_params"] = task_specific_params
    torch_model = convert_pegasus(tf_weights, cfg_updates)
    torch_model.save_pretrained(save_dir)
    sd = torch_model.state_dict()
    sd.pop("model.decoder.embed_positions.weight")
    sd.pop("model.encoder.embed_positions.weight")
    torch.save(sd, Path(save_dir) / "pytorch_model.bin")
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument("tf_ckpt_path", type=str, help="passed to tf.train.list_variables")
    parser.add_argument("save_dir", nargs="?", default=None, type=str, help="Path to the output PyTorch model.")
    args = parser.parse_args()
    if args.save_dir is None:
        dataset = Path(args.tf_ckpt_path).parent.name
        args.save_dir = os.path.join("pegasus", dataset)
    convert_pegasus_ckpt_to_pytorch(args.tf_ckpt_path, args.save_dir)
| 132 | 0 |
'''simple docstring'''
from math import atan, cos, radians, sin, tan
from .haversine_distance import haversine_distance
AXIS_A = 6378137.0
AXIS_B = 6356752.314245
EQUATORIAL_RADIUS = 6378137


def lamberts_ellipsoidal_distance(lat1: float, lon1: float, lat2: float, lon2: float) -> float:
    # equation parameters
    flattening = (AXIS_A - AXIS_B) / AXIS_A
    # Parametric latitudes
    # https://en.wikipedia.org/wiki/Latitude#Parametric_(or_reduced)_latitude
    b_lat1 = atan((1 - flattening) * tan(radians(lat1)))
    b_lat2 = atan((1 - flattening) * tan(radians(lat2)))

    # Compute central angle between two points
    # using haversine theta. sigma = haversine_distance / equatorial radius
    sigma = haversine_distance(lat1, lon1, lat2, lon2) / EQUATORIAL_RADIUS

    # Intermediate P and Q values
    p_value = (b_lat1 + b_lat2) / 2
    q_value = (b_lat2 - b_lat1) / 2

    # Intermediate X value
    # X = (sigma - sin(sigma)) * sin^2 P * cos^2 Q / cos^2(sigma/2)
    x_numerator = (sin(p_value) ** 2) * (cos(q_value) ** 2)
    x_denominator = cos(sigma / 2) ** 2
    x_value = (sigma - sin(sigma)) * (x_numerator / x_denominator)

    # Intermediate Y value
    # Y = (sigma + sin(sigma)) * cos^2 P * sin^2 Q / sin^2(sigma/2)
    y_numerator = (cos(p_value) ** 2) * (sin(q_value) ** 2)
    y_denominator = sin(sigma / 2) ** 2
    y_value = (sigma + sin(sigma)) * (y_numerator / y_denominator)

    return EQUATORIAL_RADIUS * (sigma - ((flattening / 2) * (x_value + y_value)))
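# Quick sanity check (illustrative coordinates, result approximate):
#   lamberts_ellipsoidal_distance(37.774856, -122.424227, 40.713019, -74.012647)
# returns roughly 4.14e6 meters for San Francisco -> New York.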
if __name__ == "__main__":
import doctest
doctest.testmod()
| 214 | '''simple docstring'''
from manim import *
class UpperCAmelCase ( Scene ):
'''simple docstring'''
def _lowerCAmelCase( self ) -> List[Any]:
lowercase__ : int = Rectangle(height=0.5 , width=0.5 )
lowercase__ : Optional[int] = Rectangle(height=0.2_5 , width=0.2_5 )
lowercase__ : Tuple = Rectangle(height=0.4_6 , width=0.4_6 ).set_stroke(width=0 )
lowercase__ : str = [mem.copy() for i in range(6 )]
lowercase__ : Dict = [mem.copy() for i in range(6 )]
lowercase__ : Tuple = VGroup(*__lowerCAmelCase ).arrange(__lowerCAmelCase , buff=0 )
lowercase__ : List[str] = VGroup(*__lowerCAmelCase ).arrange(__lowerCAmelCase , buff=0 )
lowercase__ : str = VGroup(__lowerCAmelCase , __lowerCAmelCase ).arrange(__lowerCAmelCase , buff=0 )
lowercase__ : str = Text('''CPU''' , font_size=24 )
lowercase__ : List[Any] = Group(__lowerCAmelCase , __lowerCAmelCase ).arrange(__lowerCAmelCase , buff=0.5 , aligned_edge=__lowerCAmelCase )
cpu.move_to([-2.5, -0.5, 0] )
self.add(__lowerCAmelCase )
lowercase__ : Any = [mem.copy() for i in range(4 )]
lowercase__ : int = VGroup(*__lowerCAmelCase ).arrange(__lowerCAmelCase , buff=0 )
lowercase__ : Optional[int] = Text('''GPU''' , font_size=24 )
lowercase__ : Tuple = Group(__lowerCAmelCase , __lowerCAmelCase ).arrange(__lowerCAmelCase , buff=0.5 , aligned_edge=__lowerCAmelCase )
gpu.move_to([-1, -1, 0] )
self.add(__lowerCAmelCase )
lowercase__ : int = [mem.copy() for i in range(6 )]
lowercase__ : Optional[int] = VGroup(*__lowerCAmelCase ).arrange(__lowerCAmelCase , buff=0 )
lowercase__ : Any = Text('''Model''' , font_size=24 )
lowercase__ : Optional[Any] = Group(__lowerCAmelCase , __lowerCAmelCase ).arrange(__lowerCAmelCase , buff=0.5 , aligned_edge=__lowerCAmelCase )
model.move_to([3, -1.0, 0] )
self.add(__lowerCAmelCase )
lowercase__ : int = []
lowercase__ : int = []
lowercase__ : Any = []
for i, rect in enumerate(__lowerCAmelCase ):
rect.set_stroke(__lowerCAmelCase )
lowercase__ : Optional[int] = Rectangle(height=0.4_6 / 4 , width=0.4_6 / 3 ).set_stroke(width=0.0 ).set_fill(__lowerCAmelCase , opacity=0.7 )
if i == 0:
cpu_target.next_to(cpu_left_col_base[0].get_corner(DOWN + LEFT ) , buff=0.0_2 , direction=__lowerCAmelCase )
cpu_target.set_x(cpu_target.get_x() + 0.1 )
elif i == 3:
cpu_target.next_to(model_cpu_arr[0] , direction=__lowerCAmelCase , buff=0.0 )
else:
cpu_target.next_to(model_cpu_arr[i - 1] , direction=__lowerCAmelCase , buff=0.0 )
self.add(__lowerCAmelCase )
model_cpu_arr.append(__lowerCAmelCase )
self.add(*__lowerCAmelCase , *__lowerCAmelCase , *__lowerCAmelCase )
lowercase__ : Optional[int] = [mem.copy() for i in range(6 )]
lowercase__ : List[Any] = VGroup(*__lowerCAmelCase ).arrange(__lowerCAmelCase , buff=0 )
lowercase__ : Optional[Any] = Text('''Loaded Checkpoint''' , font_size=24 )
lowercase__ : Dict = Group(__lowerCAmelCase , __lowerCAmelCase ).arrange(__lowerCAmelCase , buff=0.5 , aligned_edge=__lowerCAmelCase )
checkpoint.move_to([3, 0.5, 0] )
self.add(__lowerCAmelCase )
lowercase__ : str = []
lowercase__ : List[str] = []
for i, rect in enumerate(__lowerCAmelCase ):
lowercase__ : List[str] = fill.copy().set_fill(__lowerCAmelCase , opacity=0.7 )
target.move_to(__lowerCAmelCase )
ckpt_arr.append(__lowerCAmelCase )
lowercase__ : Any = target.copy()
if i < 5:
cpu_target.move_to(cpu_left_col_base[i + 1] )
else:
cpu_target.move_to(cpu_right_col_base[i - 5] )
ckpt_cpu_arr.append(__lowerCAmelCase )
self.add(*__lowerCAmelCase , *__lowerCAmelCase )
lowercase__ : Optional[int] = Square(side_length=2.2 )
key.move_to([-5, 2, 0] )
lowercase__ : Optional[Any] = MarkupText(
F"""<b>Key:</b>\n\n<span fgcolor='{YELLOW}'>●</span> Empty Model""" , font_size=18 , )
key_text.move_to([-5, 2.4, 0] )
self.add(__lowerCAmelCase , __lowerCAmelCase )
lowercase__ : str = MarkupText(
F"""<span fgcolor='{BLUE}'>●</span> Checkpoint""" , font_size=18 , )
blue_text.next_to(__lowerCAmelCase , DOWN * 2.4 , aligned_edge=key_text.get_left() )
self.add(__lowerCAmelCase )
lowercase__ : Union[str, Any] = MarkupText(
F"""Based on the passed in configuration, weights are stored in\na variety of np.memmaps on disk or to a particular device.""" , font_size=24 , )
step_a.move_to([2, 2, 0] )
lowercase__ : Tuple = [meta_mem.copy() for i in range(6 )]
lowercase__ : Any = [meta_mem.copy() for i in range(6 )]
lowercase__ : str = VGroup(*__lowerCAmelCase ).arrange(__lowerCAmelCase , buff=0 )
lowercase__ : Any = VGroup(*__lowerCAmelCase ).arrange(__lowerCAmelCase , buff=0 )
lowercase__ : Union[str, Any] = VGroup(__lowerCAmelCase , __lowerCAmelCase ).arrange(__lowerCAmelCase , buff=0 )
lowercase__ : Union[str, Any] = Text('''Disk''' , font_size=24 )
lowercase__ : Tuple = Group(__lowerCAmelCase , __lowerCAmelCase ).arrange(__lowerCAmelCase , buff=0.5 , aligned_edge=__lowerCAmelCase )
disk.move_to([-4.0, -1.2_5, 0] )
self.play(Write(__lowerCAmelCase , run_time=3 ) , Write(__lowerCAmelCase , run_time=1 ) , Create(__lowerCAmelCase , run_time=1 ) )
lowercase__ : Tuple = []
for i, rect in enumerate(__lowerCAmelCase ):
lowercase__ : Dict = rect.copy()
target.generate_target()
target.target.move_to(disk_left_col_base[i] ).scale(0.5 )
animations.append(MoveToTarget(__lowerCAmelCase , run_time=1.5 ) )
self.play(*__lowerCAmelCase )
self.play(FadeOut(__lowerCAmelCase ) )
lowercase__ : Dict = MarkupText(F"""Then, the checkpoint is removed from memory\nthrough garbage collection.""" , font_size=24 )
step_a.move_to([2, 2, 0] )
self.play(Write(__lowerCAmelCase , run_time=3 ) )
self.play(
FadeOut(__lowerCAmelCase , __lowerCAmelCase , *__lowerCAmelCase , *__lowerCAmelCase ) , )
self.wait()
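        # Rendering note (illustrative invocation; the module file name is a placeholder):
        #   manim -pql big_model_loading.py UpperCAmelCase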
| 214 | 1 |
'''simple docstring'''
import math
def insertion_sort(array: list, start: int = 0, end: int = 0) -> list:
    end = end or len(array)
    for i in range(start, end):
        temp_index = i
        temp_index_value = array[i]
        while temp_index != start and temp_index_value < array[temp_index - 1]:
            array[temp_index] = array[temp_index - 1]
            temp_index -= 1
        array[temp_index] = temp_index_value
    return array


def heapify(array: list, index: int, heap_size: int) -> None:  # Max Heap
    largest = index
    left_index = 2 * index + 1  # Left Node
    right_index = 2 * index + 2  # Right Node
    if left_index < heap_size and array[largest] < array[left_index]:
        largest = left_index
    if right_index < heap_size and array[largest] < array[right_index]:
        largest = right_index
    if largest != index:
        array[index], array[largest] = array[largest], array[index]
        heapify(array, largest, heap_size)


def heap_sort(array: list) -> list:
    n = len(array)
    for i in range(n // 2, -1, -1):
        heapify(array, i, n)
    for i in range(n - 1, 0, -1):
        array[i], array[0] = array[0], array[i]
        heapify(array, 0, i)
    return array


def median_of_3(array: list, first_index: int, middle_index: int, last_index: int) -> int:
    if (array[first_index] > array[middle_index]) != (
        array[first_index] > array[last_index]
    ):
        return array[first_index]
    elif (array[middle_index] > array[first_index]) != (
        array[middle_index] > array[last_index]
    ):
        return array[middle_index]
    else:
        return array[last_index]


def partition(array: list, low: int, high: int, pivot: int) -> int:
    i = low
    j = high
    while True:
        while array[i] < pivot:
            i += 1
        j -= 1
        while pivot < array[j]:
            j -= 1
        if i >= j:
            return i
        array[i], array[j] = array[j], array[i]
        i += 1


def sort(array: list) -> list:
    if len(array) == 0:
        return array
    max_depth = 2 * math.ceil(math.log2(len(array)))
    size_threshold = 16
    return intro_sort(array, 0, len(array), size_threshold, max_depth)


def intro_sort(array: list, start: int, end: int, size_threshold: int, max_depth: int) -> list:
    while end - start > size_threshold:
        if max_depth == 0:
            return heap_sort(array)
        max_depth -= 1
        pivot = median_of_3(array, start, start + ((end - start) // 2) + 1, end - 1)
        p = partition(array, start, end, pivot)
        intro_sort(array, p, end, size_threshold, max_depth)
        end = p
    return insertion_sort(array, start, end)
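# Example (illustrative): sort([4, 2, 6, 8, 1, 7, 22, 14, 56, 27, 79, 23, 45, 12])
# returns the elements in ascending order; an empty list is returned unchanged.
# Introsort starts with quicksort, falls back to heap sort once the recursion depth
# budget is exhausted, and finishes small slices with insertion sort.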
if __name__ == "__main__":
import doctest
doctest.testmod()
    user_input = input('Enter numbers separated by a comma : ').strip()
    unsorted = [float(item) for item in user_input.split(',')]
    print(sort(unsorted))
'''simple docstring'''
import csv
import tweepy
# Twitter API credentials
consumer_key = ""
consumer_secret = ""
access_key = ""
access_secret = ""


def get_all_tweets(screen_name: str) -> None:
    # authorize twitter, initialize tweepy
    # NOTE: the four credential strings above must be filled in with your own
    # Twitter API keys before this function can run.
    auth = tweepy.OAuthHandler(consumer_key, consumer_secret)
    auth.set_access_token(access_key, access_secret)
    api = tweepy.API(auth)

    # initialize a list to hold all the tweepy Tweets
    alltweets = []

    # make initial request for most recent tweets (200 is the maximum allowed count)
    new_tweets = api.user_timeline(screen_name=screen_name, count=200)

    # save most recent tweets
    alltweets.extend(new_tweets)

    # save the id of the oldest tweet less one
    oldest = alltweets[-1].id - 1

    # keep grabbing tweets until there are no tweets left to grab
    while len(new_tweets) > 0:
        print(f"getting tweets before {oldest}")

        # all subsequent requests use the max_id param to prevent duplicates
        new_tweets = api.user_timeline(screen_name=screen_name, count=200, max_id=oldest)

        # save most recent tweets
        alltweets.extend(new_tweets)

        # update the id of the oldest tweet less one
        oldest = alltweets[-1].id - 1

        print(f"...{len(alltweets)} tweets downloaded so far")

    # transform the tweepy tweets into a 2D array that will populate the csv
    outtweets = [[tweet.id_str, tweet.created_at, tweet.text] for tweet in alltweets]

    # write the csv
    with open(f"new_{screen_name}_tweets.csv", "w") as f:
        writer = csv.writer(f)
        writer.writerow(["id", "created_at", "text"])
        writer.writerows(outtweets)
if __name__ == "__main__":
# pass in the username of the account you want to download
get_all_tweets('''FirePing32''') | 97 | 1 |
'''simple docstring'''
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
_lowerCAmelCase = logging.get_logger(__name__)
_lowerCAmelCase = {
"google/mobilenet_v1_1.0_224": "https://huggingface.co/google/mobilenet_v1_1.0_224/resolve/main/config.json",
"google/mobilenet_v1_0.75_192": "https://huggingface.co/google/mobilenet_v1_0.75_192/resolve/main/config.json",
# See all MobileNetV1 models at https://huggingface.co/models?filter=mobilenet_v1
}
class MobileNetVaConfig(PretrainedConfig):
    model_type = "mobilenet_v1"

    def __init__(
        self,
        num_channels=3,
        image_size=224,
        depth_multiplier=1.0,
        min_depth=8,
        hidden_act="relu6",
        tf_padding=True,
        classifier_dropout_prob=0.999,
        initializer_range=0.02,
        layer_norm_eps=0.001,
        **kwargs,
    ):
        super().__init__(**kwargs)

        if depth_multiplier <= 0:
            raise ValueError('depth_multiplier must be greater than zero.')

        self.num_channels = num_channels
        self.image_size = image_size
        self.depth_multiplier = depth_multiplier
        self.min_depth = min_depth
        self.hidden_act = hidden_act
        self.tf_padding = tf_padding
        self.classifier_dropout_prob = classifier_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
class MobileNetVaOnnxConfig(OnnxConfig):
    torch_onnx_minimum_version = version.parse("1.11")

    @property
    def inputs(self):
        return OrderedDict([('pixel_values', {0: 'batch'})])

    @property
    def outputs(self):
        if self.task == "image-classification":
            return OrderedDict([('logits', {0: 'batch'})])
        else:
            return OrderedDict([('last_hidden_state', {0: 'batch'}), ('pooler_output', {0: 'batch'})])

    @property
    def atol_for_validation(self):
        return 1e-4
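# Minimal usage sketch (illustrative values):
#
#   config = MobileNetVaConfig(depth_multiplier=0.75, image_size=192)
#
# A depth multiplier below 1.0 uniformly narrows every convolution layer, which is
# how the 0.75_192 checkpoint variant referenced above is produced.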
| 363 |
'''simple docstring'''
import json
import sys
import tempfile
import unittest
from pathlib import Path
import transformers
from transformers import (
CONFIG_MAPPING,
FEATURE_EXTRACTOR_MAPPING,
AutoConfig,
AutoFeatureExtractor,
WavaVecaConfig,
WavaVecaFeatureExtractor,
)
from transformers.testing_utils import DUMMY_UNKNOWN_IDENTIFIER, get_tests_dir
sys.path.append(str(Path(__file__).parent.parent.parent.parent / "utils"))
from test_module.custom_configuration import CustomConfig # noqa E402
from test_module.custom_feature_extraction import CustomFeatureExtractor # noqa E402
_lowerCAmelCase = get_tests_dir("fixtures")
_lowerCAmelCase = get_tests_dir("fixtures/dummy_feature_extractor_config.json")
_lowerCAmelCase = get_tests_dir("fixtures/dummy-config.json")
class _SCREAMING_SNAKE_CASE ( unittest.TestCase ):
def snake_case__ ( self : Union[str, Any] ):
__magic_name__ = 0
def snake_case__ ( self : Optional[int] ):
__magic_name__ = AutoFeatureExtractor.from_pretrained('''facebook/wav2vec2-base-960h''' )
self.assertIsInstance(a__ , a__ )
def snake_case__ ( self : Optional[int] ):
__magic_name__ = AutoFeatureExtractor.from_pretrained(a__ )
self.assertIsInstance(a__ , a__ )
def snake_case__ ( self : Optional[Any] ):
with tempfile.TemporaryDirectory() as tmpdirname:
__magic_name__ = WavaVecaConfig()
# remove feature_extractor_type to make sure config.json alone is enough to load feature processor locally
__magic_name__ = AutoFeatureExtractor.from_pretrained(a__ ).to_dict()
config_dict.pop('''feature_extractor_type''' )
__magic_name__ = WavaVecaFeatureExtractor(**a__ )
# save in new folder
model_config.save_pretrained(a__ )
config.save_pretrained(a__ )
__magic_name__ = AutoFeatureExtractor.from_pretrained(a__ )
# make sure private variable is not incorrectly saved
__magic_name__ = json.loads(config.to_json_string() )
self.assertTrue('''_processor_class''' not in dict_as_saved )
self.assertIsInstance(a__ , a__ )
def snake_case__ ( self : Optional[Any] ):
__magic_name__ = AutoFeatureExtractor.from_pretrained(a__ )
self.assertIsInstance(a__ , a__ )
def snake_case__ ( self : str ):
with self.assertRaisesRegex(
a__ , '''bert-base is not a local folder and is not a valid model identifier''' ):
__magic_name__ = AutoFeatureExtractor.from_pretrained('''bert-base''' )
def snake_case__ ( self : str ):
with self.assertRaisesRegex(
a__ , r'''aaaaaa is not a valid git identifier \(branch name, tag name or commit id\)''' ):
__magic_name__ = AutoFeatureExtractor.from_pretrained(a__ , revision='''aaaaaa''' )
def snake_case__ ( self : Union[str, Any] ):
with self.assertRaisesRegex(
a__ , '''hf-internal-testing/config-no-model does not appear to have a file named preprocessor_config.json.''' , ):
__magic_name__ = AutoFeatureExtractor.from_pretrained('''hf-internal-testing/config-no-model''' )
    def test_from_pretrained_dynamic_feature_extractor(self):
        # If remote code is not set, we will time out when asking whether to load the model.
        with self.assertRaises(ValueError):
            feature_extractor = AutoFeatureExtractor.from_pretrained(
                'hf-internal-testing/test_dynamic_feature_extractor')
        # If remote code is disabled, we can't load this config.
        with self.assertRaises(ValueError):
            feature_extractor = AutoFeatureExtractor.from_pretrained(
                'hf-internal-testing/test_dynamic_feature_extractor', trust_remote_code=False)

        feature_extractor = AutoFeatureExtractor.from_pretrained(
            'hf-internal-testing/test_dynamic_feature_extractor', trust_remote_code=True)
        self.assertEqual(feature_extractor.__class__.__name__, 'NewFeatureExtractor')

        # Test feature extractor can be reloaded.
        with tempfile.TemporaryDirectory() as tmp_dir:
            feature_extractor.save_pretrained(tmp_dir)
            reloaded_feature_extractor = AutoFeatureExtractor.from_pretrained(tmp_dir, trust_remote_code=True)
        self.assertEqual(reloaded_feature_extractor.__class__.__name__, 'NewFeatureExtractor')
def snake_case__ ( self : int ):
try:
AutoConfig.register('''custom''' , a__ )
AutoFeatureExtractor.register(a__ , a__ )
# Trying to register something existing in the Transformers library will raise an error
with self.assertRaises(a__ ):
AutoFeatureExtractor.register(a__ , a__ )
# Now that the config is registered, it can be used as any other config with the auto-API
__magic_name__ = CustomFeatureExtractor.from_pretrained(a__ )
with tempfile.TemporaryDirectory() as tmp_dir:
feature_extractor.save_pretrained(a__ )
__magic_name__ = AutoFeatureExtractor.from_pretrained(a__ )
self.assertIsInstance(a__ , a__ )
finally:
if "custom" in CONFIG_MAPPING._extra_content:
del CONFIG_MAPPING._extra_content["custom"]
if CustomConfig in FEATURE_EXTRACTOR_MAPPING._extra_content:
del FEATURE_EXTRACTOR_MAPPING._extra_content[CustomConfig]
def snake_case__ ( self : int ):
class _SCREAMING_SNAKE_CASE ( __a ):
__SCREAMING_SNAKE_CASE :Optional[int] = True
try:
AutoConfig.register('''custom''' , a__ )
AutoFeatureExtractor.register(a__ , a__ )
# If remote code is not set, the default is to use local
__magic_name__ = AutoFeatureExtractor.from_pretrained(
'''hf-internal-testing/test_dynamic_feature_extractor''' )
self.assertEqual(feature_extractor.__class__.__name__ , '''NewFeatureExtractor''' )
self.assertTrue(feature_extractor.is_local )
# If remote code is disabled, we load the local one.
__magic_name__ = AutoFeatureExtractor.from_pretrained(
'''hf-internal-testing/test_dynamic_feature_extractor''' , trust_remote_code=a__ )
self.assertEqual(feature_extractor.__class__.__name__ , '''NewFeatureExtractor''' )
self.assertTrue(feature_extractor.is_local )
# If remote is enabled, we load from the Hub
__magic_name__ = AutoFeatureExtractor.from_pretrained(
'''hf-internal-testing/test_dynamic_feature_extractor''' , trust_remote_code=a__ )
self.assertEqual(feature_extractor.__class__.__name__ , '''NewFeatureExtractor''' )
self.assertTrue(not hasattr(a__ , '''is_local''' ) )
finally:
if "custom" in CONFIG_MAPPING._extra_content:
del CONFIG_MAPPING._extra_content["custom"]
if CustomConfig in FEATURE_EXTRACTOR_MAPPING._extra_content:
del FEATURE_EXTRACTOR_MAPPING._extra_content[CustomConfig]
| 98 | 0 |
import functools
import operator
from ...configuration_utils import PretrainedConfig
from ...utils import logging
UpperCAmelCase__ : Tuple = logging.get_logger(__name__)
UpperCAmelCase__ : int = {
'facebook/wav2vec2-base-960h': 'https://huggingface.co/facebook/wav2vec2-base-960h/resolve/main/config.json',
# See all Wav2Vec2 models at https://huggingface.co/models?filter=wav2vec2
}
class WavaVecaConfig(PretrainedConfig):
    model_type = 'wav2vec2'

    def __init__(
        self,
        vocab_size=32,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout=0.1,
        activation_dropout=0.1,
        attention_dropout=0.1,
        feat_proj_dropout=0.0,
        feat_quantizer_dropout=0.0,
        final_dropout=0.1,
        layerdrop=0.1,
        initializer_range=0.02,
        layer_norm_eps=1e-5,
        feat_extract_norm="group",
        feat_extract_activation="gelu",
        conv_dim=(512, 512, 512, 512, 512, 512, 512),
        conv_stride=(5, 2, 2, 2, 2, 2, 2),
        conv_kernel=(10, 3, 3, 3, 3, 2, 2),
        conv_bias=False,
        num_conv_pos_embeddings=128,
        num_conv_pos_embedding_groups=16,
        do_stable_layer_norm=False,
        apply_spec_augment=True,
        mask_time_prob=0.05,
        mask_time_length=10,
        mask_time_min_masks=2,
        mask_feature_prob=0.0,
        mask_feature_length=10,
        mask_feature_min_masks=0,
        num_codevectors_per_group=320,
        num_codevector_groups=2,
        contrastive_logits_temperature=0.1,
        num_negatives=100,
        codevector_dim=256,
        proj_codevector_dim=256,
        diversity_loss_weight=0.1,
        ctc_loss_reduction="sum",
        ctc_zero_infinity=False,
        use_weighted_layer_sum=False,
        classifier_proj_size=256,
        tdnn_dim=(512, 512, 512, 512, 1500),
        tdnn_kernel=(5, 3, 3, 1, 1),
        tdnn_dilation=(1, 2, 3, 1, 1),
        xvector_output_dim=512,
        pad_token_id=0,
        bos_token_id=1,
        eos_token_id=2,
        add_adapter=False,
        adapter_kernel_size=3,
        adapter_stride=2,
        num_adapter_layers=3,
        output_hidden_size=None,
        adapter_attn_dim=None,
        **kwargs,
    ):
        super().__init__(**kwargs, pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id)
        self.hidden_size = hidden_size
        self.feat_extract_norm = feat_extract_norm
        self.feat_extract_activation = feat_extract_activation
        self.conv_dim = list(conv_dim)
        self.conv_stride = list(conv_stride)
        self.conv_kernel = list(conv_kernel)
        self.conv_bias = conv_bias
        self.num_conv_pos_embeddings = num_conv_pos_embeddings
        self.num_conv_pos_embedding_groups = num_conv_pos_embedding_groups
        self.num_feat_extract_layers = len(self.conv_dim)
        self.num_hidden_layers = num_hidden_layers
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.num_attention_heads = num_attention_heads
        self.hidden_dropout = hidden_dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.feat_proj_dropout = feat_proj_dropout
        self.final_dropout = final_dropout
        self.layerdrop = layerdrop
        self.layer_norm_eps = layer_norm_eps
        self.initializer_range = initializer_range
        self.vocab_size = vocab_size
        self.do_stable_layer_norm = do_stable_layer_norm
        self.use_weighted_layer_sum = use_weighted_layer_sum

        if (
            (len(self.conv_stride) != self.num_feat_extract_layers)
            or (len(self.conv_kernel) != self.num_feat_extract_layers)
            or (len(self.conv_dim) != self.num_feat_extract_layers)
        ):
            raise ValueError(
                'Configuration for convolutional layers is incorrect. It is required that `len(config.conv_dim)` =='
                ' `len(config.conv_stride)` == `len(config.conv_kernel)`, but is `len(config.conv_dim) ='
                f" {len(self.conv_dim)}`, `len(config.conv_stride) = {len(self.conv_stride)}`,"
                f" `len(config.conv_kernel) = {len(self.conv_kernel)}`.")

        # fine-tuning config parameters for SpecAugment: https://arxiv.org/abs/1904.08779
        self.apply_spec_augment = apply_spec_augment
        self.mask_time_prob = mask_time_prob
        self.mask_time_length = mask_time_length
        self.mask_time_min_masks = mask_time_min_masks
        self.mask_feature_prob = mask_feature_prob
        self.mask_feature_length = mask_feature_length
        self.mask_feature_min_masks = mask_feature_min_masks

        # parameters for pretraining with codevector quantized representations
        self.num_codevectors_per_group = num_codevectors_per_group
        self.num_codevector_groups = num_codevector_groups
        self.contrastive_logits_temperature = contrastive_logits_temperature
        self.feat_quantizer_dropout = feat_quantizer_dropout
        self.num_negatives = num_negatives
        self.codevector_dim = codevector_dim
        self.proj_codevector_dim = proj_codevector_dim
        self.diversity_loss_weight = diversity_loss_weight

        # ctc loss
        self.ctc_loss_reduction = ctc_loss_reduction
        self.ctc_zero_infinity = ctc_zero_infinity

        # adapter
        self.add_adapter = add_adapter
        self.adapter_kernel_size = adapter_kernel_size
        self.adapter_stride = adapter_stride
        self.num_adapter_layers = num_adapter_layers
        self.output_hidden_size = output_hidden_size or hidden_size
        self.adapter_attn_dim = adapter_attn_dim

        # SequenceClassification-specific parameter. Feel free to ignore for other classes.
        self.classifier_proj_size = classifier_proj_size

        # XVector-specific parameters. Feel free to ignore for other classes.
        self.tdnn_dim = list(tdnn_dim)
        self.tdnn_kernel = list(tdnn_kernel)
        self.tdnn_dilation = list(tdnn_dilation)
        self.xvector_output_dim = xvector_output_dim

    @property
    def inputs_to_logits_ratio(self):
        return functools.reduce(operator.mul, self.conv_stride, 1)
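# With the default conv_stride of (5, 2, 2, 2, 2, 2, 2) the property above evaluates
# to 5 * 2**6 = 320: each frame produced by the feature encoder covers 320 raw audio
# samples, i.e. 20 ms at a 16 kHz sampling rate.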
| 121 |
'''simple docstring'''
def largest_square_area_in_matrix_top_down_approach(rows: int, cols: int, mat: list[list[int]]) -> int:
    def update_area_of_max_square(row: int, col: int) -> int:
        # BASE CASE
        if row >= rows or col >= cols:
            return 0

        right = update_area_of_max_square(row, col + 1)
        diagonal = update_area_of_max_square(row + 1, col + 1)
        down = update_area_of_max_square(row + 1, col)

        if mat[row][col]:
            sub_problem_sol = 1 + min([right, diagonal, down])
            largest_square_area[0] = max(largest_square_area[0], sub_problem_sol)
            return sub_problem_sol
        else:
            return 0

    largest_square_area = [0]
    update_area_of_max_square(0, 0)
    return largest_square_area[0]


def largest_square_area_in_matrix_top_down_approach_with_dp(rows: int, cols: int, mat: list[list[int]]) -> int:
    def update_area_of_max_square_using_dp_array(
        row: int, col: int, dp_array: list[list[int]]) -> int:
        if row >= rows or col >= cols:
            return 0
        if dp_array[row][col] != -1:
            return dp_array[row][col]

        right = update_area_of_max_square_using_dp_array(row, col + 1, dp_array)
        diagonal = update_area_of_max_square_using_dp_array(row + 1, col + 1, dp_array)
        down = update_area_of_max_square_using_dp_array(row + 1, col, dp_array)

        if mat[row][col]:
            sub_problem_sol = 1 + min([right, diagonal, down])
            largest_square_area[0] = max(largest_square_area[0], sub_problem_sol)
            dp_array[row][col] = sub_problem_sol
            return sub_problem_sol
        else:
            return 0

    largest_square_area = [0]
    dp_array = [[-1] * cols for _ in range(rows)]
    update_area_of_max_square_using_dp_array(0, 0, dp_array)
    return largest_square_area[0]


def largest_square_area_in_matrix_bottom_up(rows: int, cols: int, mat: list[list[int]]) -> int:
    dp_array = [[0] * (cols + 1) for _ in range(rows + 1)]
    largest_square_area = 0
    for row in range(rows - 1, -1, -1):
        for col in range(cols - 1, -1, -1):
            right = dp_array[row][col + 1]
            diagonal = dp_array[row + 1][col + 1]
            bottom = dp_array[row + 1][col]

            if mat[row][col] == 1:
                dp_array[row][col] = 1 + min(right, diagonal, bottom)
                largest_square_area = max(dp_array[row][col], largest_square_area)
            else:
                dp_array[row][col] = 0
    return largest_square_area


def largest_square_area_in_matrix_bottom_up_with_space_optimization(rows: int, cols: int, mat: list[list[int]]) -> int:
    current_row = [0] * (cols + 1)
    next_row = [0] * (cols + 1)
    largest_square_area = 0
    for row in range(rows - 1, -1, -1):
        for col in range(cols - 1, -1, -1):
            right = current_row[col + 1]
            diagonal = next_row[col + 1]
            bottom = next_row[col]

            if mat[row][col] == 1:
                current_row[col] = 1 + min(right, diagonal, bottom)
                largest_square_area = max(current_row[col], largest_square_area)
            else:
                current_row[col] = 0
        next_row = current_row
    return largest_square_area
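# The four variants above trade time for memory: the plain top-down recursion is
# exponential, the memoized and bottom-up versions run in O(rows * cols) time, and
# the space-optimized version keeps only two rows, using O(cols) extra memory.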
if __name__ == "__main__":
import doctest
doctest.testmod()
print(largest_square_area_in_matrix_bottom_up(2, 2, [[1, 1], [1, 1]]))
| 134 | 0 |
from __future__ import annotations
import unittest
import numpy as np
from transformers import LayoutLMConfig, is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers.models.layoutlm.modeling_tf_layoutlm import (
TF_LAYOUTLM_PRETRAINED_MODEL_ARCHIVE_LIST,
TFLayoutLMForMaskedLM,
TFLayoutLMForQuestionAnswering,
TFLayoutLMForSequenceClassification,
TFLayoutLMForTokenClassification,
TFLayoutLMModel,
)
class TFLayoutLMModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_input_mask=True,
        use_token_type_ids=True,
        use_labels=True,
        vocab_size=99,
        hidden_size=32,
        num_hidden_layers=2,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        num_labels=3,
        num_choices=4,
        scope=None,
        range_bbox=1000,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope
        self.range_bbox = range_bbox
    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        # convert bbox to numpy since TF does not support item assignment
        bbox = ids_tensor([self.batch_size, self.seq_length, 4], self.range_bbox).numpy()
        # Ensure that bbox is legal
        for i in range(bbox.shape[0]):
            for j in range(bbox.shape[1]):
                if bbox[i, j, 3] < bbox[i, j, 1]:
                    t = bbox[i, j, 3]
                    bbox[i, j, 3] = bbox[i, j, 1]
                    bbox[i, j, 1] = t
                if bbox[i, j, 2] < bbox[i, j, 0]:
                    t = bbox[i, j, 2]
                    bbox[i, j, 2] = bbox[i, j, 0]
                    bbox[i, j, 0] = t
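        # LayoutLM bounding boxes are (x0, y0, x1, y1) corners in a 0-1000 coordinate
        # space, and the model expects x0 <= x1 and y0 <= y1, hence the swaps above.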
        bbox = tf.convert_to_tensor(bbox)

        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)

        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)

        config = LayoutLMConfig(
            vocab_size=self.vocab_size, hidden_size=self.hidden_size, num_hidden_layers=self.num_hidden_layers, num_attention_heads=self.num_attention_heads, intermediate_size=self.intermediate_size, hidden_act=self.hidden_act, hidden_dropout_prob=self.hidden_dropout_prob, attention_probs_dropout_prob=self.attention_probs_dropout_prob, max_position_embeddings=self.max_position_embeddings, type_vocab_size=self.type_vocab_size, initializer_range=self.initializer_range, )

        return config, input_ids, bbox, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    def create_and_check_model(self, config, input_ids, bbox, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        model = TFLayoutLMModel(config=config)
        result = model(input_ids, bbox, attention_mask=input_mask, token_type_ids=token_type_ids)
        result = model(input_ids, bbox, token_type_ids=token_type_ids)
        result = model(input_ids, bbox)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))
        self.parent.assertEqual(result.pooler_output.shape, (self.batch_size, self.hidden_size))

    def create_and_check_for_masked_lm(self, config, input_ids, bbox, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        model = TFLayoutLMForMaskedLM(config=config)
        result = model(input_ids, bbox, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))

    def create_and_check_for_sequence_classification(self, config, input_ids, bbox, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        config.num_labels = self.num_labels
        model = TFLayoutLMForSequenceClassification(config=config)
        result = model(input_ids, bbox, attention_mask=input_mask, token_type_ids=token_type_ids)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def create_and_check_for_token_classification(self, config, input_ids, bbox, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        config.num_labels = self.num_labels
        model = TFLayoutLMForTokenClassification(config=config)
        result = model(input_ids, bbox, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))

    def create_and_check_for_question_answering(self, config, input_ids, bbox, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        model = TFLayoutLMForQuestionAnswering(config=config)
        result = model(input_ids, bbox, attention_mask=input_mask, token_type_ids=token_type_ids)
        self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            bbox,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {
            "input_ids": input_ids,
            "bbox": bbox,
            "token_type_ids": token_type_ids,
            "attention_mask": input_mask,
        }
        return config, inputs_dict
@require_tf
class TFLayoutLMModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
(
TFLayoutLMModel,
TFLayoutLMForMaskedLM,
TFLayoutLMForTokenClassification,
TFLayoutLMForSequenceClassification,
TFLayoutLMForQuestionAnswering,
)
if is_tf_available()
else ()
)
    pipeline_model_mapping = (
{
'feature-extraction': TFLayoutLMModel,
'fill-mask': TFLayoutLMForMaskedLM,
'text-classification': TFLayoutLMForSequenceClassification,
'token-classification': TFLayoutLMForTokenClassification,
'zero-shot': TFLayoutLMForSequenceClassification,
}
if is_tf_available()
else {}
)
    test_head_masking = False
    test_onnx = True
    onnx_min_opset = 10
    def setUp(self):
        self.model_tester = TFLayoutLMModelTester(self)
        self.config_tester = ConfigTester(self, config_class=LayoutLMConfig, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_for_masked_lm(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_masked_lm(*config_and_inputs)

    def test_for_sequence_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_sequence_classification(*config_and_inputs)

    def test_for_token_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_token_classification(*config_and_inputs)

    def test_for_question_answering(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_question_answering(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in TF_LAYOUTLM_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = TFLayoutLMModel.from_pretrained(model_name)
            self.assertIsNotNone(model)

    @unittest.skip("Onnx compliancy broke with TF 2.10")
    def test_onnx_compliancy(self):
        pass
def A ( ):
'''simple docstring'''
_lowerCAmelCase : Optional[Any] = tf.convert_to_tensor([[101,1_019,1_014,1_016,1_037,12_849,4_747,1_004,14_246,2_278,5_439,4_524,5_002,2_930,2_193,2_930,4_341,3_208,1_005,1_055,2_171,2_848,11_300,3_531,102],[101,4_070,4_034,7_020,1_024,3_058,1_015,1_013,2_861,1_013,6_070,19_274,2_772,6_205,27_814,16_147,16_147,4_343,2_047,10_283,10_969,14_389,1_012,2_338,102]] ) # noqa: E231
_lowerCAmelCase : Optional[int] = tf.convert_to_tensor([[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1],[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1],] ) # noqa: E231
_lowerCAmelCase : str = tf.convert_to_tensor([[[0,0,0,0],[423,237,440,251],[427,272,441,287],[419,115,437,129],[961,885,992,912],[256,38,330,58],[256,38,330,58],[336,42,353,57],[360,39,401,56],[360,39,401,56],[411,39,471,59],[479,41,528,59],[533,39,630,60],[67,113,134,131],[141,115,209,132],[68,149,133,166],[141,149,187,164],[195,148,287,165],[195,148,287,165],[195,148,287,165],[295,148,349,165],[441,149,492,166],[497,149,546,164],[64,201,125,218],[1_000,1_000,1_000,1_000]],[[0,0,0,0],[662,150,754,166],[665,199,742,211],[519,213,554,228],[519,213,554,228],[134,433,187,454],[130,467,204,480],[130,467,204,480],[130,467,204,480],[130,467,204,480],[130,467,204,480],[314,469,376,482],[504,684,582,706],[941,825,973,900],[941,825,973,900],[941,825,973,900],[941,825,973,900],[610,749,652,765],[130,659,168,672],[176,657,237,672],[238,657,312,672],[443,653,628,672],[443,653,628,672],[716,301,825,317],[1_000,1_000,1_000,1_000]]] ) # noqa: E231
_lowerCAmelCase : int = tf.convert_to_tensor([[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0]] ) # noqa: E231
# these are sequence labels (i.e. at the token level)
_lowerCAmelCase : List[Any] = tf.convert_to_tensor([[-100,10,10,10,9,1,-100,7,7,-100,7,7,4,2,5,2,8,8,-100,-100,5,0,3,2,-100],[-100,12,12,12,-100,12,10,-100,-100,-100,-100,10,12,9,-100,-100,-100,10,10,10,9,12,-100,10,-100]] ) # noqa: E231
# fmt: on
return input_ids, attention_mask, bbox, token_type_ids, labels
@require_tf
class UpperCAmelCase_ ( unittest.TestCase):
@slow
def snake_case__ ( self):
'''simple docstring'''
_lowerCAmelCase : Optional[int] = TFLayoutLMModel.from_pretrained("microsoft/layoutlm-base-uncased")
_lowerCAmelCase : Any = prepare_layoutlm_batch_inputs()
# forward pass
_lowerCAmelCase : str = model(input_ids=__a, bbox=__a, attention_mask=__a, token_type_ids=__a)
# test the sequence output on [0, :3, :3]
_lowerCAmelCase : List[Any] = tf.convert_to_tensor(
[[0.1_785, -0.1_947, -0.0_425], [-0.3_254, -0.2_807, 0.2_553], [-0.5_391, -0.3_322, 0.3_364]], )
self.assertTrue(np.allclose(outputs.last_hidden_state[0, :3, :3], __a, atol=1E-3))
# test the pooled output on [1, :3]
_lowerCAmelCase : Dict = tf.convert_to_tensor([-0.6_580, -0.0_214, 0.8_552])
self.assertTrue(np.allclose(outputs.pooler_output[1, :3], __a, atol=1E-3))
@slow
def snake_case__ ( self):
'''simple docstring'''
_lowerCAmelCase : str = TFLayoutLMForSequenceClassification.from_pretrained("microsoft/layoutlm-base-uncased", num_labels=2)
_lowerCAmelCase : int = prepare_layoutlm_batch_inputs()
# forward pass
_lowerCAmelCase : int = model(
input_ids=__a, bbox=__a, attention_mask=__a, token_type_ids=__a, labels=tf.convert_to_tensor([1, 1]), )
# test whether we get a loss as a scalar
_lowerCAmelCase : Dict = outputs.loss
_lowerCAmelCase : str = (2,)
self.assertEqual(loss.shape, __a)
# test the shape of the logits
_lowerCAmelCase : int = outputs.logits
_lowerCAmelCase : int = (2, 2)
self.assertEqual(logits.shape, __a)
@slow
def snake_case__ ( self):
'''simple docstring'''
_lowerCAmelCase : Dict = TFLayoutLMForTokenClassification.from_pretrained("microsoft/layoutlm-base-uncased", num_labels=13)
_lowerCAmelCase : Optional[int] = prepare_layoutlm_batch_inputs()
# forward pass
_lowerCAmelCase : Tuple = model(
input_ids=__a, bbox=__a, attention_mask=__a, token_type_ids=__a, labels=__a)
# test the shape of the logits
_lowerCAmelCase : int = outputs.logits
_lowerCAmelCase : Optional[int] = tf.convert_to_tensor((2, 25, 13))
self.assertEqual(logits.shape, __a)
@slow
def snake_case__ ( self):
'''simple docstring'''
_lowerCAmelCase : Any = TFLayoutLMForQuestionAnswering.from_pretrained("microsoft/layoutlm-base-uncased")
_lowerCAmelCase : List[Any] = prepare_layoutlm_batch_inputs()
# forward pass
_lowerCAmelCase : str = model(input_ids=__a, bbox=__a, attention_mask=__a, token_type_ids=__a)
# test the shape of the logits
_lowerCAmelCase : List[Any] = tf.convert_to_tensor((2, 25))
self.assertEqual(outputs.start_logits.shape, __a)
self.assertEqual(outputs.end_logits.shape, __a)
| 358 |
import logging
import sys
from dataclasses import dataclass, field
from typing import Any, Dict, List, Optional, Union
import librosa
import torch
from datasets import DatasetDict, load_dataset
from packaging import version
from torch import nn
from transformers import (
HfArgumentParser,
Trainer,
TrainingArguments,
WavaVecaConfig,
WavaVecaFeatureExtractor,
WavaVecaForPreTraining,
is_apex_available,
trainer_utils,
)
from transformers.models.wavaveca.modeling_wavaveca import _compute_mask_indices
if is_apex_available():
from apex import amp
if version.parse(version.parse(torch.__version__).base_version) >= version.parse("1.6"):
_snake_case = True
from torch.cuda.amp import autocast
_snake_case = logging.getLogger(__name__)
@dataclass
class ModelArguments:
    model_name_or_path: str = field(
        metadata={"help": "Path to pretrained model or model identifier from huggingface.co/models"})
    cache_dir: Optional[str] = field(
        default=None, metadata={"help": "Where do you want to store the pretrained models downloaded from huggingface.co"}, )
    freeze_feature_extractor: Optional[bool] = field(
        default=True, metadata={"help": "Whether to freeze the feature extractor layers of the model."})
    verbose_logging: Optional[bool] = field(
        default=False, metadata={"help": "Whether to log verbose messages or not."}, )
    max_gumbel_temperature: Optional[float] = field(
        default=2.0, metadata={"help": "Maximum temperature for gumbel softmax."})
    min_gumbel_temperature: Optional[float] = field(
        default=0.5, metadata={"help": "Minimum temperature for gumbel softmax."})
    gumbel_temperature_decay: Optional[float] = field(
        default=0.999995, metadata={"help": "Decay of gumbel temperature during training."})
def configure_logger(model_args, training_args):
    logging.basicConfig(
        format="%(asctime)s - %(levelname)s - %(name)s - %(message)s", datefmt="%m/%d/%Y %H:%M:%S", handlers=[logging.StreamHandler(sys.stdout)], )
    logging_level = logging.WARNING
    if model_args.verbose_logging:
        logging_level = logging.DEBUG
    elif trainer_utils.is_main_process(training_args.local_rank):
        logging_level = logging.INFO
    logger.setLevel(logging_level)
@dataclass
class DataTrainingArguments:
    dataset_name: str = field(
        default=None, metadata={"help": "The name of the dataset to use (via the datasets library)."})
    dataset_config_name: Optional[str] = field(
        default=None, metadata={"help": "The configuration name of the dataset to use (via the datasets library)."})
    train_split_name: Optional[str] = field(
        default="train",
        metadata={
            "help": "The name of the training data set split to use (via the datasets library). Defaults to 'train'"
        },
    )
    validation_split_name: Optional[str] = field(
        default="validation",
        metadata={
            "help": (
                "The name of the validation data set split to use (via the datasets library). Defaults to 'validation'"
            )
        },
    )
    speech_file_column: Optional[str] = field(
        default="file",
        metadata={"help": "Column in the dataset that contains speech file path. Defaults to 'file'"},
    )
    overwrite_cache: bool = field(
        default=False, metadata={"help": "Overwrite the cached preprocessed datasets or not."})
    validation_split_percentage: Optional[int] = field(
        default=1,
        metadata={
            "help": "The percentage of the train set used as validation set in case there's no validation split"
        },
    )
    preprocessing_num_workers: Optional[int] = field(
        default=None, metadata={"help": "The number of processes to use for the preprocessing."})
    max_duration_in_seconds: Optional[float] = field(
        default=20.0, metadata={"help": "Filter audio files that are longer than `max_duration_in_seconds` seconds"})
@dataclass
class DataCollatorForWavaVecaPretraining:
    model: WavaVecaForPreTraining
    feature_extractor: WavaVecaFeatureExtractor
    padding: Union[bool, str] = "longest"
    pad_to_multiple_of: Optional[int] = None
    max_length: Optional[int] = None
    def __call__(self, features):
        # pad and reformat the raw input values into a batch
        batch = self.feature_extractor.pad(
            features, max_length=self.max_length, padding=self.padding, pad_to_multiple_of=self.pad_to_multiple_of, return_tensors="pt", )
        mask_indices_seq_length = self.model._get_feat_extract_output_lengths(batch["input_values"].shape[-1])

        batch_size = batch["input_values"].shape[0]

        # make sure that no loss is computed on padded inputs
        if batch["attention_mask"] is not None:
            # compute real output lengths according to convolution formula
            output_lengths = self.model._get_feat_extract_output_lengths(batch["attention_mask"].sum(-1)).to(
                torch.long)
            attention_mask = torch.zeros(
                (batch_size, mask_indices_seq_length), dtype=torch.long, device=batch["input_values"].device)
            # these two operations makes sure that all values
            # before the output lengths indices are attended to
            attention_mask[
                (torch.arange(attention_mask.shape[0], device=batch["input_values"].device), output_lengths - 1)
            ] = 1
            attention_mask = attention_mask.flip([-1]).cumsum(-1).flip([-1]).bool()

        # sample randomly masked indices
        batch["mask_time_indices"] = _compute_mask_indices(
            (batch_size, mask_indices_seq_length), self.model.config.mask_time_prob, self.model.config.mask_time_length, attention_mask=attention_mask, min_masks=2, )

        return batch
class WavaVecaPreTrainer(Trainer):
    def __init__(self, *args, max_gumbel_temp=1, min_gumbel_temp=0, gumbel_temp_decay=1.0, **kwargs):
        super().__init__(*args, **kwargs)
        self.num_update_step = 0
        self.max_gumbel_temp = max_gumbel_temp
        self.min_gumbel_temp = min_gumbel_temp
        self.gumbel_temp_decay = gumbel_temp_decay
    def training_step(self, model, inputs):
        model.train()
        inputs = self._prepare_inputs(inputs)

        if self.use_amp:
            with autocast():
                loss = self.compute_loss(model, inputs)
        else:
            loss = self.compute_loss(model, inputs)

        if self.args.n_gpu > 1 or self.deepspeed:
            if model.module.config.ctc_loss_reduction == "mean":
                loss = loss.mean()
            elif model.module.config.ctc_loss_reduction == "sum":
                loss = loss.sum() / (inputs["mask_time_indices"]).sum()
            else:
                raise ValueError(f"{model.config.ctc_loss_reduction} is not valid. Choose one of ['mean', 'sum']")

        if self.args.gradient_accumulation_steps > 1:
            loss = loss / self.args.gradient_accumulation_steps

        if self.use_amp:
            self.scaler.scale(loss).backward()
        elif self.use_apex:
            with amp.scale_loss(loss, self.optimizer) as scaled_loss:
                scaled_loss.backward()
        elif self.deepspeed:
            self.deepspeed.backward(loss)
        else:
            loss.backward()

        self.num_update_step += 1
        # make sure gumbel softmax temperature is decayed
        if self.args.n_gpu > 1 or self.deepspeed:
            model.module.set_gumbel_temperature(
                max(self.max_gumbel_temp * self.gumbel_temp_decay**self.num_update_step, self.min_gumbel_temp))
        else:
            model.set_gumbel_temperature(
                max(self.max_gumbel_temp * self.gumbel_temp_decay**self.num_update_step, self.min_gumbel_temp))

        return loss.detach()
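# Note on the schedule above: the gumbel temperature follows
# max(max_gumbel_temp * gumbel_temp_decay**num_update_step, min_gumbel_temp), so with
# the defaults max=2.0, decay=0.999995 and min=0.5 it takes roughly 277k update steps
# (ln(4) / -ln(0.999995)) before the temperature bottoms out at its floor.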
def main():
    parser = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments))
    model_args, data_args, training_args = parser.parse_args_into_dataclasses()
    configure_logger(model_args, training_args)

    # Downloading and loading a dataset from the hub.
    datasets = load_dataset(data_args.dataset_name, data_args.dataset_config_name, cache_dir=model_args.cache_dir)
if "validation" not in datasets.keys():
# make sure only "validation" and "train" keys remain"
_lowerCAmelCase : int = DatasetDict()
_lowerCAmelCase : Optional[int] = load_dataset(
data_args.dataset_name , data_args.dataset_config_name , split=F"{data_args.train_split_name}[:{data_args.validation_split_percentage}%]" , cache_dir=model_args.cache_dir , )
_lowerCAmelCase : List[str] = load_dataset(
data_args.dataset_name , data_args.dataset_config_name , split=F"{data_args.train_split_name}[{data_args.validation_split_percentage}%:]" , cache_dir=model_args.cache_dir , )
else:
# make sure only "validation" and "train" keys remain"
_lowerCAmelCase : List[str] = DatasetDict()
_lowerCAmelCase : List[Any] = load_dataset(
data_args.dataset_name , data_args.dataset_config_name , split="validation" , cache_dir=model_args.cache_dir , )
_lowerCAmelCase : Union[str, Any] = load_dataset(
data_args.dataset_name , data_args.dataset_config_name , split=F"{data_args.train_split_name}" , cache_dir=model_args.cache_dir , )
# only normalized-inputs-training is supported
_lowerCAmelCase : List[Any] = WavaVecaFeatureExtractor.from_pretrained(
model_args.model_name_or_path , cache_dir=model_args.cache_dir , do_normalize=_lowerCamelCase )
def prepare_dataset(_lowerCamelCase ):
# check that all files have the correct sampling rate
_lowerCAmelCase , _lowerCAmelCase : Any = librosa.load(batch[data_args.speech_file_column] , sr=feature_extractor.sampling_rate )
return batch
# load audio files into numpy arrays
_lowerCAmelCase : Dict = datasets.map(
_lowerCamelCase , num_proc=data_args.preprocessing_num_workers , remove_columns=datasets["train"].column_names )
# filter audio files that are too long
_lowerCAmelCase : Tuple = vectorized_datasets.filter(
lambda _lowerCamelCase : len(data["speech"] ) < int(data_args.max_duration_in_seconds * feature_extractor.sampling_rate ) )
def normalize(_lowerCamelCase ):
return feature_extractor(batch["speech"] , sampling_rate=feature_extractor.sampling_rate )
# normalize and transform to `BatchFeatures`
_lowerCAmelCase : Dict = vectorized_datasets.map(
_lowerCamelCase , batched=_lowerCamelCase , num_proc=data_args.preprocessing_num_workers , load_from_cache_file=not data_args.overwrite_cache , remove_columns=vectorized_datasets["train"].column_names , )
# pretraining is only supported for "newer" stable layer norm architecture
# apply_spec_augment has to be True, mask_feature_prob has to be 0.0
_lowerCAmelCase : Tuple = WavaVecaConfig.from_pretrained(
model_args.model_name_or_path , cache_dir=model_args.cache_dir , gradient_checkpointing=training_args.gradient_checkpointing , )
if not config.do_stable_layer_norm or config.feat_extract_norm != "layer":
raise ValueError(
"PreTraining is only supported for ``config.do_stable_layer_norm=True`` and"
" ``config.feat_extract_norm='layer'" )
_lowerCAmelCase : Union[str, Any] = WavaVecaForPreTraining(_lowerCamelCase )
_lowerCAmelCase : int = DataCollatorForWavaVecaPretraining(model=_lowerCamelCase , feature_extractor=_lowerCamelCase )
_lowerCAmelCase : Optional[Any] = WavaVecaPreTrainer(
model=_lowerCamelCase , data_collator=_lowerCamelCase , args=_lowerCamelCase , train_dataset=vectorized_datasets["train"] , eval_dataset=vectorized_datasets["validation"] , tokenizer=_lowerCamelCase , max_gumbel_temp=model_args.max_gumbel_temperature , min_gumbel_temp=model_args.min_gumbel_temperature , gumbel_temp_decay=model_args.gumbel_temperature_decay , )
trainer.train()
if __name__ == "__main__":
main()
| 300 | 0 |
from collections.abc import Generator
from math import sin
def to_little_endian(string_aa: bytes) -> bytes:
    """Convert a 32-character bit string from big endian to little endian."""
    if len(string_aa) != 32:
        raise ValueError("Input must be of length 32")

    little_endian = b""
    for i in [3, 2, 1, 0]:
        little_endian += string_aa[8 * i : 8 * i + 8]
    return little_endian
def reformat_hex(i: int) -> bytes:
    """Convert a non-negative int to a 32-bit hex string, reversed to little endian."""
    if i < 0:
        raise ValueError("Input must be non-negative")

    hex_rep = format(i, "08x")[-8:]
    little_endian_hex = b""
    for i in [3, 2, 1, 0]:
        little_endian_hex += hex_rep[2 * i : 2 * i + 2].encode("utf-8")
    return little_endian_hex
def lowerCAmelCase__( lowercase : str ) -> bytes:
__snake_case : List[Any] = B""
for char in message:
bit_string += format(UpperCamelCase__ , "08b" ).encode("utf-8" )
__snake_case : List[str] = format(len(UpperCamelCase__ ) , "064b" ).encode("utf-8" )
    # MD5 padding: append a single "1" bit, pad with "0" bits until the length
    # is 448 mod 512, then append the 64-bit original length (little-endian)
bit_string += b"1"
while len(UpperCamelCase__ ) % 512 != 448:
bit_string += b"0"
bit_string += to_little_endian(start_len[32:] ) + to_little_endian(start_len[:32] )
return bit_string
def lowerCAmelCase__( lowercase : Union[str, Any] ) -> Generator[list[int], None, None]:
if len(UpperCamelCase__ ) % 512 != 0:
raise ValueError("Input must have length that\'s a multiple of 512" )
for pos in range(0 , len(UpperCamelCase__ ) , 512 ):
__snake_case : int = bit_string[pos : pos + 512]
__snake_case : Tuple = []
for i in range(0 , 512 , 32 ):
block_words.append(int(to_little_endian(block[i : i + 32] ) , 2 ) )
yield block_words
def lowerCAmelCase__( lowercase : Optional[Any] ) -> int:
if i < 0:
raise ValueError("Input must be non-negative" )
__snake_case : Any = format(UpperCamelCase__ , "032b" )
__snake_case : List[str] = ""
for c in i_str:
new_str += "1" if c == "0" else "0"
return int(UpperCamelCase__ , 2 )
def lowerCAmelCase__( lowercase : Any , lowercase : Dict ) -> int:
return (a + b) % 2**32
def lowerCAmelCase__( lowercase : Union[str, Any] , lowercase : List[str] ) -> int:
if i < 0:
raise ValueError("Input must be non-negative" )
if shift < 0:
raise ValueError("Shift must be non-negative" )
return ((i << shift) ^ (i >> (32 - shift))) % 2**32
def lowerCAmelCase__( lowercase : Optional[int] ) -> bytes:
__snake_case : Union[str, Any] = preprocess(UpperCamelCase__ )
__snake_case : Optional[int] = [int(2**32 * abs(sin(i + 1 ) ) ) for i in range(64 )]
# Starting states
__snake_case : Union[str, Any] = 0X67_452_301
__snake_case : List[Any] = 0Xef_cda_b89
__snake_case : Optional[int] = 0X98_bad_cfe
__snake_case : List[Any] = 0X10_325_476
__snake_case : List[str] = [
7,
12,
17,
22,
7,
12,
17,
22,
7,
12,
17,
22,
7,
12,
17,
22,
5,
9,
14,
20,
5,
9,
14,
20,
5,
9,
14,
20,
5,
9,
14,
20,
4,
11,
16,
23,
4,
11,
16,
23,
4,
11,
16,
23,
4,
11,
16,
23,
6,
10,
15,
21,
6,
10,
15,
21,
6,
10,
15,
21,
6,
10,
15,
21,
]
    # Process the bit string in 512-bit chunks, each split into 16 32-bit words
for block_words in get_block_words(UpperCamelCase__ ):
__snake_case : Tuple = aa
__snake_case : str = ba
__snake_case : Dict = ca
__snake_case : Any = da
# Hash current chunk
for i in range(64 ):
if i <= 15:
# f = (b & c) | (not_32(b) & d) # Alternate definition for f
__snake_case : Optional[int] = d ^ (b & (c ^ d))
__snake_case : str = i
elif i <= 31:
# f = (d & b) | (not_32(d) & c) # Alternate definition for f
__snake_case : Optional[int] = c ^ (d & (b ^ c))
__snake_case : Dict = (5 * i + 1) % 16
elif i <= 47:
__snake_case : Tuple = b ^ c ^ d
__snake_case : Optional[int] = (3 * i + 5) % 16
else:
__snake_case : str = c ^ (b | not_aa(UpperCamelCase__ ))
__snake_case : Union[str, Any] = (7 * i) % 16
__snake_case : Any = (f + a + added_consts[i] + block_words[g]) % 2**32
__snake_case : Any = d
__snake_case : int = c
__snake_case : int = b
__snake_case : List[Any] = sum_aa(UpperCamelCase__ , left_rotate_aa(UpperCamelCase__ , shift_amounts[i] ) )
# Add hashed chunk to running total
__snake_case : Dict = sum_aa(UpperCamelCase__ , UpperCamelCase__ )
__snake_case : Optional[int] = sum_aa(UpperCamelCase__ , UpperCamelCase__ )
__snake_case : Optional[int] = sum_aa(UpperCamelCase__ , UpperCamelCase__ )
__snake_case : Optional[Any] = sum_aa(UpperCamelCase__ , UpperCamelCase__ )
__snake_case : Optional[int] = reformat_hex(UpperCamelCase__ ) + reformat_hex(UpperCamelCase__ ) + reformat_hex(UpperCamelCase__ ) + reformat_hex(UpperCamelCase__ )
return digest
if __name__ == "__main__":
import doctest
doctest.testmod()
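    # cross-check sketch against hashlib (hypothetical: assumes the top-level
    # hash routine above is exposed as `md5_me` and returns the hex digest as
    # bytes); hashlib serves only as the reference implementation here
    import hashlib
    assert md5_me(b"hello" ) == hashlib.md5(b"hello" ).hexdigest().encode("utf-8" )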
| 326 |
import json
import os
import subprocess
import unittest
from ast import literal_eval
import pytest
from parameterized import parameterized_class
from . import is_sagemaker_available
if is_sagemaker_available():
from sagemaker import Session, TrainingJobAnalytics
from sagemaker.huggingface import HuggingFace
@pytest.mark.skipif(
literal_eval(os.getenv('''TEST_SAGEMAKER''' , '''False''' ) ) is not True , reason='''Skipping test because should only be run when releasing minor transformers version''' , )
@pytest.mark.usefixtures('''sm_env''' )
@parameterized_class(
[
{
'''framework''': '''pytorch''',
'''script''': '''run_glue.py''',
'''model_name_or_path''': '''distilbert-base-cased''',
'''instance_type''': '''ml.g4dn.xlarge''',
'''results''': {'''train_runtime''': 6_5_0, '''eval_accuracy''': 0.6, '''eval_loss''': 0.9},
},
{
'''framework''': '''tensorflow''',
'''script''': '''run_tf.py''',
'''model_name_or_path''': '''distilbert-base-cased''',
'''instance_type''': '''ml.g4dn.xlarge''',
'''results''': {'''train_runtime''': 6_0_0, '''eval_accuracy''': 0.3, '''eval_loss''': 0.9},
},
] )
class A_ (unittest.TestCase ):
def _lowercase ( self ):
'''simple docstring'''
if self.framework == "pytorch":
subprocess.run(
F"""cp ./examples/pytorch/text-classification/run_glue.py {self.env.test_path}/run_glue.py""".split() , encoding='''utf-8''' , check=_A , )
assert hasattr(self , '''env''' )
def _lowercase ( self , _A=1 ):
'''simple docstring'''
return HuggingFace(
entry_point=self.script , source_dir=self.env.test_path , role=self.env.role , image_uri=self.env.image_uri , base_job_name=F"""{self.env.base_job_name}-single""" , instance_count=_A , instance_type=self.instance_type , debugger_hook_config=_A , hyperparameters={**self.env.hyperparameters, '''model_name_or_path''': self.model_name_or_path} , metric_definitions=self.env.metric_definitions , py_version='''py36''' , )
def _lowercase ( self , _A ):
'''simple docstring'''
TrainingJobAnalytics(_A ).export_csv(F"""{self.env.test_path}/{job_name}_metrics.csv""" )
def _lowercase ( self ):
'''simple docstring'''
UpperCAmelCase = self.create_estimator()
# run training
estimator.fit()
# result dataframe
UpperCAmelCase = TrainingJobAnalytics(estimator.latest_training_job.name ).dataframe()
# extract kpis
UpperCAmelCase = list(result_metrics_df[result_metrics_df.metric_name == '''eval_accuracy''']['''value'''] )
UpperCAmelCase = list(result_metrics_df[result_metrics_df.metric_name == '''eval_loss''']['''value'''] )
        # get train time from the SageMaker job; this includes starting, preprocessing and stopping
UpperCAmelCase = (
Session().describe_training_job(estimator.latest_training_job.name ).get('''TrainingTimeInSeconds''' , 9_9_9_9_9_9 )
)
# assert kpis
assert train_runtime <= self.results["train_runtime"]
assert all(t >= self.results['''eval_accuracy'''] for t in eval_accuracy )
assert all(t <= self.results['''eval_loss'''] for t in eval_loss )
# dump tests result into json file to share in PR
with open(F"""{estimator.latest_training_job.name}.json""" , '''w''' ) as outfile:
json.dump({'''train_time''': train_runtime, '''eval_accuracy''': eval_accuracy, '''eval_loss''': eval_loss} , _A )
| 273 | 0 |
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_funnel import FunnelTokenizer
lowerCAmelCase_ = logging.get_logger(__name__)
lowerCAmelCase_ = {'vocab_file': 'vocab.txt', 'tokenizer_file': 'tokenizer.json'}
lowerCAmelCase_ = [
'small',
'small-base',
'medium',
'medium-base',
'intermediate',
'intermediate-base',
'large',
'large-base',
'xlarge',
'xlarge-base',
]
lowerCAmelCase_ = {
'vocab_file': {
'funnel-transformer/small': 'https://huggingface.co/funnel-transformer/small/resolve/main/vocab.txt',
'funnel-transformer/small-base': 'https://huggingface.co/funnel-transformer/small-base/resolve/main/vocab.txt',
'funnel-transformer/medium': 'https://huggingface.co/funnel-transformer/medium/resolve/main/vocab.txt',
'funnel-transformer/medium-base': (
'https://huggingface.co/funnel-transformer/medium-base/resolve/main/vocab.txt'
),
'funnel-transformer/intermediate': (
'https://huggingface.co/funnel-transformer/intermediate/resolve/main/vocab.txt'
),
'funnel-transformer/intermediate-base': (
'https://huggingface.co/funnel-transformer/intermediate-base/resolve/main/vocab.txt'
),
'funnel-transformer/large': 'https://huggingface.co/funnel-transformer/large/resolve/main/vocab.txt',
'funnel-transformer/large-base': 'https://huggingface.co/funnel-transformer/large-base/resolve/main/vocab.txt',
'funnel-transformer/xlarge': 'https://huggingface.co/funnel-transformer/xlarge/resolve/main/vocab.txt',
'funnel-transformer/xlarge-base': (
'https://huggingface.co/funnel-transformer/xlarge-base/resolve/main/vocab.txt'
),
},
'tokenizer_file': {
'funnel-transformer/small': 'https://huggingface.co/funnel-transformer/small/resolve/main/tokenizer.json',
'funnel-transformer/small-base': (
'https://huggingface.co/funnel-transformer/small-base/resolve/main/tokenizer.json'
),
'funnel-transformer/medium': 'https://huggingface.co/funnel-transformer/medium/resolve/main/tokenizer.json',
'funnel-transformer/medium-base': (
'https://huggingface.co/funnel-transformer/medium-base/resolve/main/tokenizer.json'
),
'funnel-transformer/intermediate': (
'https://huggingface.co/funnel-transformer/intermediate/resolve/main/tokenizer.json'
),
'funnel-transformer/intermediate-base': (
'https://huggingface.co/funnel-transformer/intermediate-base/resolve/main/tokenizer.json'
),
'funnel-transformer/large': 'https://huggingface.co/funnel-transformer/large/resolve/main/tokenizer.json',
'funnel-transformer/large-base': (
'https://huggingface.co/funnel-transformer/large-base/resolve/main/tokenizer.json'
),
'funnel-transformer/xlarge': 'https://huggingface.co/funnel-transformer/xlarge/resolve/main/tokenizer.json',
'funnel-transformer/xlarge-base': (
'https://huggingface.co/funnel-transformer/xlarge-base/resolve/main/tokenizer.json'
),
},
}
lowerCAmelCase_ = {f'''funnel-transformer/{name}''': 5_12 for name in _model_names}
lowerCAmelCase_ = {f'''funnel-transformer/{name}''': {'do_lower_case': True} for name in _model_names}
class _A ( _lowerCamelCase ):
_UpperCamelCase : Any = VOCAB_FILES_NAMES
_UpperCamelCase : str = PRETRAINED_VOCAB_FILES_MAP
_UpperCamelCase : Optional[Any] = PRETRAINED_INIT_CONFIGURATION
_UpperCamelCase : Union[str, Any] = FunnelTokenizer
_UpperCamelCase : Dict = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
_UpperCamelCase : int = 2
def __init__( self : List[Any] , _A : Union[str, Any]=None , _A : List[Any]=None , _A : Optional[int]=True , _A : Dict="<unk>" , _A : Optional[int]="<sep>" , _A : Tuple="<pad>" , _A : Union[str, Any]="<cls>" , _A : Dict="<mask>" , _A : List[Any]="<s>" , _A : Dict="</s>" , _A : Tuple=True , _A : Dict=True , _A : int=None , _A : List[Any]="##" , **_A : int , ) -> Optional[int]:
"""simple docstring"""
super().__init__(
_A , tokenizer_file=_A , do_lower_case=_A , unk_token=_A , sep_token=_A , pad_token=_A , cls_token=_A , mask_token=_A , bos_token=_A , eos_token=_A , clean_text=_A , tokenize_chinese_chars=_A , strip_accents=_A , wordpieces_prefix=_A , **_A , )
lowercase : Tuple = json.loads(self.backend_tokenizer.normalizer.__getstate__() )
if (
normalizer_state.get('''lowercase''' , _A ) != do_lower_case
or normalizer_state.get('''strip_accents''' , _A ) != strip_accents
or normalizer_state.get('''handle_chinese_chars''' , _A ) != tokenize_chinese_chars
):
lowercase : Union[str, Any] = getattr(_A , normalizer_state.pop('''type''' ) )
lowercase : int = do_lower_case
lowercase : Optional[int] = strip_accents
lowercase : str = tokenize_chinese_chars
lowercase : Dict = normalizer_class(**_A )
lowercase : int = do_lower_case
def __a ( self : int , _A : Optional[Any] , _A : Union[str, Any]=None ) -> Optional[int]:
"""simple docstring"""
lowercase : int = [self.cls_token_id] + token_ids_a + [self.sep_token_id]
if token_ids_a:
output += token_ids_a + [self.sep_token_id]
return output
def __a ( self : Any , _A : List[int] , _A : Optional[List[int]] = None ) -> List[int]:
"""simple docstring"""
lowercase : Tuple = [self.sep_token_id]
lowercase : Optional[int] = [self.cls_token_id]
if token_ids_a is None:
return len(cls ) * [self.cls_token_type_id] + len(token_ids_a + sep ) * [0]
return len(cls ) * [self.cls_token_type_id] + len(token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1]
def __a ( self : Dict , _A : str , _A : Optional[str] = None ) -> Tuple[str]:
"""simple docstring"""
lowercase : List[str] = self._tokenizer.model.save(_A , name=_A )
        return tuple(_A )
| 116 |
lowerCAmelCase_ = range(2, 20 + 1)
lowerCAmelCase_ = [10**k for k in range(ks[-1] + 1)]
lowerCAmelCase_ = {}
def snake_case( __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ ) -> Any:
'''simple docstring'''
lowercase : str = sum(a_i[j] for j in range(__magic_name__ , len(__magic_name__ ) ) )
lowercase : Any = sum(a_i[j] * base[j] for j in range(min(len(__magic_name__ ) , __magic_name__ ) ) )
lowercase , lowercase : Optional[int] = 0, 0
lowercase : str = n - i
lowercase : Optional[int] = memo.get(__magic_name__ )
if sub_memo is not None:
lowercase : List[str] = sub_memo.get(__magic_name__ )
if jumps is not None and len(__magic_name__ ) > 0:
# find and make the largest jump without going over
lowercase : Dict = -1
for _k in range(len(__magic_name__ ) - 1 , -1 , -1 ):
if jumps[_k][2] <= k and jumps[_k][1] <= max_dn:
lowercase : Any = _k
break
if max_jump >= 0:
lowercase , lowercase , lowercase : List[str] = jumps[max_jump]
# since the difference between jumps is cached, add c
lowercase : str = diff + c
for j in range(min(__magic_name__ , len(__magic_name__ ) ) ):
lowercase , lowercase : Optional[Any] = divmod(__magic_name__ , 10 )
if new_c > 0:
add(__magic_name__ , __magic_name__ , __magic_name__ )
else:
lowercase : Dict = []
else:
lowercase : Union[str, Any] = {c: []}
lowercase : Optional[Any] = sub_memo
if dn >= max_dn or c + diff >= base[k]:
return diff, dn
if k > ks[0]:
while True:
# keep doing smaller jumps
lowercase , lowercase : str = next_term(__magic_name__ , k - 1 , i + dn , __magic_name__ )
diff += _diff
dn += terms_jumped
if dn >= max_dn or c + diff >= base[k]:
break
else:
# would be too small a jump, just compute sequential terms instead
lowercase , lowercase : Optional[Any] = compute(__magic_name__ , __magic_name__ , i + dn , __magic_name__ )
diff += _diff
dn += terms_jumped
lowercase : Optional[Any] = sub_memo[c]
# keep jumps sorted by # of terms skipped
lowercase : List[Any] = 0
while j < len(__magic_name__ ):
if jumps[j][1] > dn:
break
j += 1
# cache the jump for this value digitsum(b) and c
sub_memo[c].insert(__magic_name__ , (diff, dn, k) )
return (diff, dn)
def snake_case( __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ ) -> Optional[int]:
'''simple docstring'''
if i >= n:
return 0, i
if k > len(__magic_name__ ):
a_i.extend([0 for _ in range(k - len(__magic_name__ ) )] )
# note: a_i -> b * 10^k + c
# ds_b -> digitsum(b)
# ds_c -> digitsum(c)
lowercase : Optional[Any] = i
lowercase , lowercase , lowercase : List[str] = 0, 0, 0
for j in range(len(__magic_name__ ) ):
if j >= k:
ds_b += a_i[j]
else:
ds_c += a_i[j]
while i < n:
i += 1
lowercase : List[str] = ds_c + ds_b
diff += addend
lowercase : Tuple = 0
for j in range(__magic_name__ ):
lowercase : int = a_i[j] + addend
lowercase , lowercase : Any = divmod(__magic_name__ , 10 )
ds_c += a_i[j]
if addend > 0:
break
if addend > 0:
add(__magic_name__ , __magic_name__ , __magic_name__ )
return diff, i - start_i
def snake_case( __magic_name__ , __magic_name__ , __magic_name__ ) -> Tuple:
'''simple docstring'''
for j in range(__magic_name__ , len(__magic_name__ ) ):
lowercase : Any = digits[j] + addend
if s >= 10:
lowercase , lowercase : List[str] = divmod(__magic_name__ , 10 )
lowercase : List[str] = addend // 10 + quotient
else:
lowercase : Optional[Any] = s
lowercase : Tuple = addend // 10
if addend == 0:
break
while addend > 0:
lowercase , lowercase : str = divmod(__magic_name__ , 10 )
digits.append(__magic_name__ )
def snake_case( __magic_name__ = 10**15 ) -> int:
'''simple docstring'''
lowercase : List[Any] = [1]
lowercase : List[Any] = 1
lowercase : str = 0
while True:
lowercase , lowercase : str = next_term(__magic_name__ , 20 , i + dn , __magic_name__ )
dn += terms_jumped
if dn == n - i:
break
lowercase : str = 0
for j in range(len(__magic_name__ ) ):
a_n += digits[j] * 10**j
return a_n
if __name__ == "__main__":
    print(f'''{solution() = }''')
| 116 | 1
'''simple docstring'''
from __future__ import annotations
def __lowerCAmelCase (__lowerCAmelCase , __lowerCAmelCase = None , __lowerCAmelCase = None ):
if start is None:
_UpperCAmelCase : List[Any] = 0
if end is None:
_UpperCAmelCase : Dict = len(__lowerCAmelCase ) - 1
if start >= end:
return
_UpperCAmelCase : List[str] = (start + end) // 2
slowsort(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase )
slowsort(__lowerCAmelCase , mid + 1 , __lowerCAmelCase )
if sequence[end] < sequence[mid]:
_UpperCAmelCase , _UpperCAmelCase : Optional[int] = sequence[mid], sequence[end]
slowsort(__lowerCAmelCase , __lowerCAmelCase , end - 1 )
if __name__ == "__main__":
from doctest import testmod
testmod()
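    # usage sketch: the recursive calls above already refer to the top-level
    # def as `slowsort`, so that name is assumed here as well
    data = [5, 2, 4, 1, 3]
    slowsort(data )
    print(data )  # expected: [1, 2, 3, 4, 5]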
| 234 |
'''simple docstring'''
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_bert import BertTokenizer
lowerCamelCase__ = logging.get_logger(__name__)
lowerCamelCase__ = {'vocab_file': 'vocab.txt', 'tokenizer_file': 'tokenizer.json'}
lowerCamelCase__ = {
'vocab_file': {
'bert-base-uncased': 'https://huggingface.co/bert-base-uncased/resolve/main/vocab.txt',
'bert-large-uncased': 'https://huggingface.co/bert-large-uncased/resolve/main/vocab.txt',
'bert-base-cased': 'https://huggingface.co/bert-base-cased/resolve/main/vocab.txt',
'bert-large-cased': 'https://huggingface.co/bert-large-cased/resolve/main/vocab.txt',
'bert-base-multilingual-uncased': (
'https://huggingface.co/bert-base-multilingual-uncased/resolve/main/vocab.txt'
),
'bert-base-multilingual-cased': 'https://huggingface.co/bert-base-multilingual-cased/resolve/main/vocab.txt',
'bert-base-chinese': 'https://huggingface.co/bert-base-chinese/resolve/main/vocab.txt',
'bert-base-german-cased': 'https://huggingface.co/bert-base-german-cased/resolve/main/vocab.txt',
'bert-large-uncased-whole-word-masking': (
'https://huggingface.co/bert-large-uncased-whole-word-masking/resolve/main/vocab.txt'
),
'bert-large-cased-whole-word-masking': (
'https://huggingface.co/bert-large-cased-whole-word-masking/resolve/main/vocab.txt'
),
'bert-large-uncased-whole-word-masking-finetuned-squad': (
'https://huggingface.co/bert-large-uncased-whole-word-masking-finetuned-squad/resolve/main/vocab.txt'
),
'bert-large-cased-whole-word-masking-finetuned-squad': (
'https://huggingface.co/bert-large-cased-whole-word-masking-finetuned-squad/resolve/main/vocab.txt'
),
'bert-base-cased-finetuned-mrpc': (
'https://huggingface.co/bert-base-cased-finetuned-mrpc/resolve/main/vocab.txt'
),
'bert-base-german-dbmdz-cased': 'https://huggingface.co/bert-base-german-dbmdz-cased/resolve/main/vocab.txt',
'bert-base-german-dbmdz-uncased': (
'https://huggingface.co/bert-base-german-dbmdz-uncased/resolve/main/vocab.txt'
),
'TurkuNLP/bert-base-finnish-cased-v1': (
'https://huggingface.co/TurkuNLP/bert-base-finnish-cased-v1/resolve/main/vocab.txt'
),
'TurkuNLP/bert-base-finnish-uncased-v1': (
'https://huggingface.co/TurkuNLP/bert-base-finnish-uncased-v1/resolve/main/vocab.txt'
),
'wietsedv/bert-base-dutch-cased': (
'https://huggingface.co/wietsedv/bert-base-dutch-cased/resolve/main/vocab.txt'
),
},
'tokenizer_file': {
'bert-base-uncased': 'https://huggingface.co/bert-base-uncased/resolve/main/tokenizer.json',
'bert-large-uncased': 'https://huggingface.co/bert-large-uncased/resolve/main/tokenizer.json',
'bert-base-cased': 'https://huggingface.co/bert-base-cased/resolve/main/tokenizer.json',
'bert-large-cased': 'https://huggingface.co/bert-large-cased/resolve/main/tokenizer.json',
'bert-base-multilingual-uncased': (
'https://huggingface.co/bert-base-multilingual-uncased/resolve/main/tokenizer.json'
),
'bert-base-multilingual-cased': (
'https://huggingface.co/bert-base-multilingual-cased/resolve/main/tokenizer.json'
),
'bert-base-chinese': 'https://huggingface.co/bert-base-chinese/resolve/main/tokenizer.json',
'bert-base-german-cased': 'https://huggingface.co/bert-base-german-cased/resolve/main/tokenizer.json',
'bert-large-uncased-whole-word-masking': (
'https://huggingface.co/bert-large-uncased-whole-word-masking/resolve/main/tokenizer.json'
),
'bert-large-cased-whole-word-masking': (
'https://huggingface.co/bert-large-cased-whole-word-masking/resolve/main/tokenizer.json'
),
'bert-large-uncased-whole-word-masking-finetuned-squad': (
'https://huggingface.co/bert-large-uncased-whole-word-masking-finetuned-squad/resolve/main/tokenizer.json'
),
'bert-large-cased-whole-word-masking-finetuned-squad': (
'https://huggingface.co/bert-large-cased-whole-word-masking-finetuned-squad/resolve/main/tokenizer.json'
),
'bert-base-cased-finetuned-mrpc': (
'https://huggingface.co/bert-base-cased-finetuned-mrpc/resolve/main/tokenizer.json'
),
'bert-base-german-dbmdz-cased': (
'https://huggingface.co/bert-base-german-dbmdz-cased/resolve/main/tokenizer.json'
),
'bert-base-german-dbmdz-uncased': (
'https://huggingface.co/bert-base-german-dbmdz-uncased/resolve/main/tokenizer.json'
),
'TurkuNLP/bert-base-finnish-cased-v1': (
'https://huggingface.co/TurkuNLP/bert-base-finnish-cased-v1/resolve/main/tokenizer.json'
),
'TurkuNLP/bert-base-finnish-uncased-v1': (
'https://huggingface.co/TurkuNLP/bert-base-finnish-uncased-v1/resolve/main/tokenizer.json'
),
'wietsedv/bert-base-dutch-cased': (
'https://huggingface.co/wietsedv/bert-base-dutch-cased/resolve/main/tokenizer.json'
),
},
}
lowerCamelCase__ = {
'bert-base-uncased': 512,
'bert-large-uncased': 512,
'bert-base-cased': 512,
'bert-large-cased': 512,
'bert-base-multilingual-uncased': 512,
'bert-base-multilingual-cased': 512,
'bert-base-chinese': 512,
'bert-base-german-cased': 512,
'bert-large-uncased-whole-word-masking': 512,
'bert-large-cased-whole-word-masking': 512,
'bert-large-uncased-whole-word-masking-finetuned-squad': 512,
'bert-large-cased-whole-word-masking-finetuned-squad': 512,
'bert-base-cased-finetuned-mrpc': 512,
'bert-base-german-dbmdz-cased': 512,
'bert-base-german-dbmdz-uncased': 512,
'TurkuNLP/bert-base-finnish-cased-v1': 512,
'TurkuNLP/bert-base-finnish-uncased-v1': 512,
'wietsedv/bert-base-dutch-cased': 512,
}
lowerCamelCase__ = {
'bert-base-uncased': {'do_lower_case': True},
'bert-large-uncased': {'do_lower_case': True},
'bert-base-cased': {'do_lower_case': False},
'bert-large-cased': {'do_lower_case': False},
'bert-base-multilingual-uncased': {'do_lower_case': True},
'bert-base-multilingual-cased': {'do_lower_case': False},
'bert-base-chinese': {'do_lower_case': False},
'bert-base-german-cased': {'do_lower_case': False},
'bert-large-uncased-whole-word-masking': {'do_lower_case': True},
'bert-large-cased-whole-word-masking': {'do_lower_case': False},
'bert-large-uncased-whole-word-masking-finetuned-squad': {'do_lower_case': True},
'bert-large-cased-whole-word-masking-finetuned-squad': {'do_lower_case': False},
'bert-base-cased-finetuned-mrpc': {'do_lower_case': False},
'bert-base-german-dbmdz-cased': {'do_lower_case': False},
'bert-base-german-dbmdz-uncased': {'do_lower_case': True},
'TurkuNLP/bert-base-finnish-cased-v1': {'do_lower_case': False},
'TurkuNLP/bert-base-finnish-uncased-v1': {'do_lower_case': True},
'wietsedv/bert-base-dutch-cased': {'do_lower_case': False},
}
class lowerCAmelCase__ ( UpperCAmelCase__ ):
lowerCAmelCase : Union[str, Any] = VOCAB_FILES_NAMES
lowerCAmelCase : Dict = PRETRAINED_VOCAB_FILES_MAP
lowerCAmelCase : Tuple = PRETRAINED_INIT_CONFIGURATION
lowerCAmelCase : Optional[int] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
lowerCAmelCase : Any = BertTokenizer
def __init__( self : int , lowerCamelCase__ : Optional[Any]=None , lowerCamelCase__ : Optional[int]=None , lowerCamelCase__ : str=True , lowerCamelCase__ : Tuple="[UNK]" , lowerCamelCase__ : str="[SEP]" , lowerCamelCase__ : Optional[Any]="[PAD]" , lowerCamelCase__ : List[str]="[CLS]" , lowerCamelCase__ : Union[str, Any]="[MASK]" , lowerCamelCase__ : str=True , lowerCamelCase__ : Dict=None , **lowerCamelCase__ : Union[str, Any] , ) ->Tuple:
'''simple docstring'''
super().__init__(
lowerCamelCase__ , tokenizer_file=lowerCamelCase__ , do_lower_case=lowerCamelCase__ , unk_token=lowerCamelCase__ , sep_token=lowerCamelCase__ , pad_token=lowerCamelCase__ , cls_token=lowerCamelCase__ , mask_token=lowerCamelCase__ , tokenize_chinese_chars=lowerCamelCase__ , strip_accents=lowerCamelCase__ , **lowerCamelCase__ , )
_UpperCAmelCase : Optional[Any] = json.loads(self.backend_tokenizer.normalizer.__getstate__() )
if (
normalizer_state.get("lowercase" , lowerCamelCase__ ) != do_lower_case
or normalizer_state.get("strip_accents" , lowerCamelCase__ ) != strip_accents
or normalizer_state.get("handle_chinese_chars" , lowerCamelCase__ ) != tokenize_chinese_chars
):
_UpperCAmelCase : str = getattr(lowerCamelCase__ , normalizer_state.pop("type" ) )
_UpperCAmelCase : Optional[Any] = do_lower_case
_UpperCAmelCase : Any = strip_accents
_UpperCAmelCase : List[Any] = tokenize_chinese_chars
_UpperCAmelCase : int = normalizer_class(**lowerCamelCase__ )
_UpperCAmelCase : Union[str, Any] = do_lower_case
def lowerCAmelCase__ ( self : List[str] , lowerCamelCase__ : List[str] , lowerCamelCase__ : str=None ) ->List[Any]:
'''simple docstring'''
_UpperCAmelCase : List[str] = [self.cls_token_id] + token_ids_a + [self.sep_token_id]
if token_ids_a:
output += token_ids_a + [self.sep_token_id]
return output
def lowerCAmelCase__ ( self : List[Any] , lowerCamelCase__ : List[int] , lowerCamelCase__ : Optional[List[int]] = None ) ->List[int]:
'''simple docstring'''
_UpperCAmelCase : Tuple = [self.sep_token_id]
_UpperCAmelCase : int = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1]
def lowerCAmelCase__ ( self : List[Any] , lowerCamelCase__ : str , lowerCamelCase__ : Optional[str] = None ) ->Tuple[str]:
'''simple docstring'''
_UpperCAmelCase : List[str] = self._tokenizer.model.save(lowerCamelCase__ , name=lowerCamelCase__ )
return tuple(lowerCamelCase__ )
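# usage sketch (hypothetical name: assumes this class is exposed as
# BertTokenizerFast; "bert-base-uncased" is the canonical checkpoint):
# tok = BertTokenizerFast.from_pretrained("bert-base-uncased" )
# tok("hello world" )["input_ids"]  # -> [101, 7592, 2088, 102]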
| 234 | 1 |
'''simple docstring'''
import os
from typing import Any, Callable, Dict, List, Optional, Tuple, Union
import torch
from torch import nn
from ...models.controlnet import ControlNetModel, ControlNetOutput
from ...models.modeling_utils import ModelMixin
from ...utils import logging
_A : List[Any] =logging.get_logger(__name__)
class _lowercase ( _lowercase ):
def __init__( self: Dict , UpperCamelCase__: Union[List[ControlNetModel], Tuple[ControlNetModel]] ):
super().__init__()
lowerCamelCase__ : Optional[int] = nn.ModuleList(UpperCamelCase__ )
def lowerCamelCase_ ( self: Any , UpperCamelCase__: torch.FloatTensor , UpperCamelCase__: Union[torch.Tensor, float, int] , UpperCamelCase__: torch.Tensor , UpperCamelCase__: List[torch.tensor] , UpperCamelCase__: List[float] , UpperCamelCase__: Optional[torch.Tensor] = None , UpperCamelCase__: Optional[torch.Tensor] = None , UpperCamelCase__: Optional[torch.Tensor] = None , UpperCamelCase__: Optional[Dict[str, Any]] = None , UpperCamelCase__: bool = False , UpperCamelCase__: bool = True , ):
for i, (image, scale, controlnet) in enumerate(zip(UpperCamelCase__ , UpperCamelCase__ , self.nets ) ):
lowerCamelCase__ : Optional[Any] = controlnet(
UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , )
# merge samples
if i == 0:
lowerCamelCase__ : int = down_samples, mid_sample
else:
lowerCamelCase__ : Tuple = [
samples_prev + samples_curr
for samples_prev, samples_curr in zip(UpperCamelCase__ , UpperCamelCase__ )
]
mid_block_res_sample += mid_sample
return down_block_res_samples, mid_block_res_sample
def lowerCamelCase_ ( self: int , UpperCamelCase__: Union[str, os.PathLike] , UpperCamelCase__: bool = True , UpperCamelCase__: Callable = None , UpperCamelCase__: bool = False , UpperCamelCase__: Optional[str] = None , ):
lowerCamelCase__ : Optional[Any] = 0
lowerCamelCase__ : Any = save_directory
for controlnet in self.nets:
controlnet.save_pretrained(
UpperCamelCase__ , is_main_process=UpperCamelCase__ , save_function=UpperCamelCase__ , safe_serialization=UpperCamelCase__ , variant=UpperCamelCase__ , )
idx += 1
lowerCamelCase__ : int = model_path_to_save + F'''_{idx}'''
@classmethod
def lowerCamelCase_ ( cls: Tuple , UpperCamelCase__: Optional[Union[str, os.PathLike]] , **UpperCamelCase__: int ):
lowerCamelCase__ : str = 0
lowerCamelCase__ : List[str] = []
# load controlnet and append to list until no controlnet directory exists anymore
        # first controlnet has to be saved under `./mydirectory/controlnet` to be compliant with `DiffusionPipeline.from_pretrained`
# second, third, ... controlnets have to be saved under `./mydirectory/controlnet_1`, `./mydirectory/controlnet_2`, ...
lowerCamelCase__ : Optional[int] = pretrained_model_path
while os.path.isdir(UpperCamelCase__ ):
lowerCamelCase__ : List[str] = ControlNetModel.from_pretrained(UpperCamelCase__ , **UpperCamelCase__ )
controlnets.append(UpperCamelCase__ )
idx += 1
lowerCamelCase__ : Optional[int] = pretrained_model_path + F'''_{idx}'''
logger.info(F'''{len(UpperCamelCase__ )} controlnets loaded from {pretrained_model_path}.''' )
if len(UpperCamelCase__ ) == 0:
raise ValueError(
F'''No ControlNets found under {os.path.dirname(UpperCamelCase__ )}. Expected at least {pretrained_model_path + '_0'}.''' )
return cls(UpperCamelCase__ )
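# usage sketch (hypothetical names and paths; assumes the class above is
# diffusers' MultiControlNetModel and `cn_a`, `cn_b` are trained
# ControlNetModel instances):
# multi = MultiControlNetModel([cn_a, cn_b] )
# multi.save_pretrained("./multi/controlnet" )  # writes controlnet and controlnet_1
# restored = MultiControlNetModel.from_pretrained("./multi/controlnet" )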
| 364 |
'''simple docstring'''
from __future__ import annotations
import typing
from collections.abc import Iterable
import numpy as np
_A : Optional[Any] =typing.Union[Iterable[float], Iterable[int], np.ndarray] # noqa: UP007
_A : Optional[int] =typing.Union[np.floataa, int, float] # noqa: UP007
def SCREAMING_SNAKE_CASE_ (UpperCamelCase , UpperCamelCase ) -> VectorOut:
return np.sqrt(np.sum((np.asarray(UpperCamelCase ) - np.asarray(UpperCamelCase )) ** 2 ) )
def SCREAMING_SNAKE_CASE_ (UpperCamelCase , UpperCamelCase ) -> VectorOut:
return sum((va - va) ** 2 for va, va in zip(UpperCamelCase , UpperCamelCase ) ) ** (1 / 2)
if __name__ == "__main__":
def SCREAMING_SNAKE_CASE_ () -> None:
from timeit import timeit
print("""Without Numpy""" )
print(
timeit(
"""euclidean_distance_no_np([1, 2, 3], [4, 5, 6])""" , number=10000 , globals=globals() , ) )
print("""With Numpy""" )
print(
timeit(
"""euclidean_distance([1, 2, 3], [4, 5, 6])""" , number=10000 , globals=globals() , ) )
benchmark()
| 129 | 0 |
def A ( a_ = 1_000 ) -> int:
return sum(e for e in range(3 ,a_ ) if e % 3 == 0 or e % 5 == 0 )
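# constant-time alternative sketch (hypothetical helper, not part of the
# original): inclusion-exclusion over the multiples of 3 and 5, subtracting
# the double-counted multiples of 15
def sum_of_multiples_below(a_ = 1_000 ) -> int:
    def tri(k ) -> int:
        m = (a_ - 1) // k
        return k * m * (m + 1) // 2
    return tri(3 ) + tri(5 ) - tri(15 )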
if __name__ == "__main__":
print(f"{solution() = }")
| 71 |
"""simple docstring"""
import qiskit
def _SCREAMING_SNAKE_CASE ( lowercase_ , lowercase_ ) -> qiskit.result.counts.Counts:
A__ = qiskit.Aer.get_backend("aer_simulator" )
# Create a Quantum Circuit acting on the q register
A__ = qiskit.QuantumCircuit(lowercase_ , lowercase_ )
# Apply X (NOT) Gate to Qubits 0 & 1
circuit.x(0 )
circuit.x(1 )
# Map the quantum measurement to the classical bits
circuit.measure([0, 1] , [0, 1] )
# Execute the circuit on the qasm simulator
A__ = qiskit.execute(lowercase_ , lowercase_ , shots=10_00 )
# Return the histogram data of the results of the experiment.
return job.result().get_counts(lowercase_ )
if __name__ == "__main__":
SCREAMING_SNAKE_CASE = single_qubit_measure(2, 2)
print(f'Total count for various states are: {counts}')
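    # with X applied to both qubits, every shot collapses to |11>, so the
    # histogram is deterministic: {'11': 1000}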
| 247 | 0 |
'''simple docstring'''
import tempfile
import unittest
from transformers import SPIECE_UNDERLINE, BatchEncoding, PLBartTokenizer, is_torch_available
from transformers.testing_utils import (
get_tests_dir,
nested_simplify,
require_sentencepiece,
require_tokenizers,
require_torch,
)
from ...test_tokenization_common import TokenizerTesterMixin
lowerCamelCase :int = get_tests_dir('''fixtures/test_sentencepiece.model''')
if is_torch_available():
from transformers.models.plbart.modeling_plbart import shift_tokens_right
lowerCamelCase :Optional[Any] = 5_0_0_0_3
lowerCamelCase :List[Any] = 5_0_0_0_2
@require_sentencepiece
@require_tokenizers
class _lowerCAmelCase ( __UpperCAmelCase , unittest.TestCase ):
__SCREAMING_SNAKE_CASE : Any = PLBartTokenizer
__SCREAMING_SNAKE_CASE : List[Any] = None
__SCREAMING_SNAKE_CASE : str = False
def _a (self ):
super().setUp()
# We have a SentencePiece fixture for testing
A_ : str = PLBartTokenizer(lowercase , language_codes="""base""" , keep_accents=lowercase )
tokenizer.save_pretrained(self.tmpdirname )
def _a (self ):
A_ : Optional[int] = PLBartTokenizer(lowercase , language_codes="""base""" , keep_accents=lowercase )
A_ : List[str] = tokenizer.tokenize("""This is a test""" )
self.assertListEqual(lowercase , ["""▁This""", """▁is""", """▁a""", """▁t""", """est"""] )
self.assertListEqual(
tokenizer.convert_tokens_to_ids(lowercase ) , [value + tokenizer.fairseq_offset for value in [285, 46, 10, 170, 382]] , )
A_ : int = tokenizer.tokenize("""I was born in 92000, and this is falsé.""" )
self.assertListEqual(
lowercase , [
SPIECE_UNDERLINE + """I""",
SPIECE_UNDERLINE + """was""",
SPIECE_UNDERLINE + """b""",
"""or""",
"""n""",
SPIECE_UNDERLINE + """in""",
SPIECE_UNDERLINE + """""",
"""9""",
"""2""",
"""0""",
"""0""",
"""0""",
""",""",
SPIECE_UNDERLINE + """and""",
SPIECE_UNDERLINE + """this""",
SPIECE_UNDERLINE + """is""",
SPIECE_UNDERLINE + """f""",
"""al""",
"""s""",
"""é""",
""".""",
] , )
A_ : Any = tokenizer.convert_tokens_to_ids(lowercase )
self.assertListEqual(
lowercase , [
value + tokenizer.fairseq_offset
for value in [8, 21, 84, 55, 24, 19, 7, 2, 602, 347, 347, 347, 3, 12, 66, 46, 72, 80, 6, 2, 4]
] , )
A_ : Tuple = tokenizer.convert_ids_to_tokens(lowercase )
self.assertListEqual(
lowercase , [
SPIECE_UNDERLINE + """I""",
SPIECE_UNDERLINE + """was""",
SPIECE_UNDERLINE + """b""",
"""or""",
"""n""",
SPIECE_UNDERLINE + """in""",
SPIECE_UNDERLINE + """""",
"""<unk>""",
"""2""",
"""0""",
"""0""",
"""0""",
""",""",
SPIECE_UNDERLINE + """and""",
SPIECE_UNDERLINE + """this""",
SPIECE_UNDERLINE + """is""",
SPIECE_UNDERLINE + """f""",
"""al""",
"""s""",
"""<unk>""",
""".""",
] , )
A_ : int = tokenizer.vocab_size
A_ : Tuple = [tokenizer.convert_ids_to_tokens(lowercase ) for x in range(end - 4 , lowercase )]
self.assertListEqual(lowercase , ["""__java__""", """__python__""", """__en_XX__""", """<mask>"""] )
A_ : Optional[int] = """java.lang.Exception, python.lang.Exception, javascript, php, ruby, go"""
A_ : Any = tokenizer(lowercase ).input_ids
self.assertEqual(
tokenizer.decode(lowercase , skip_special_tokens=lowercase , clean_up_tokenization_spaces=lowercase ) , lowercase , )
def _a (self ):
A_ : Union[str, Any] = PLBartTokenizer(lowercase , language_codes="""multi""" , keep_accents=lowercase )
A_ : List[str] = tokenizer.tokenize("""This is a test""" )
self.assertListEqual(lowercase , ["""▁This""", """▁is""", """▁a""", """▁t""", """est"""] )
self.assertListEqual(
tokenizer.convert_tokens_to_ids(lowercase ) , [value + tokenizer.fairseq_offset for value in [285, 46, 10, 170, 382]] , )
A_ : Optional[Any] = tokenizer.tokenize("""I was born in 92000, and this is falsé.""" )
self.assertListEqual(
lowercase , [
SPIECE_UNDERLINE + """I""",
SPIECE_UNDERLINE + """was""",
SPIECE_UNDERLINE + """b""",
"""or""",
"""n""",
SPIECE_UNDERLINE + """in""",
SPIECE_UNDERLINE + """""",
"""9""",
"""2""",
"""0""",
"""0""",
"""0""",
""",""",
SPIECE_UNDERLINE + """and""",
SPIECE_UNDERLINE + """this""",
SPIECE_UNDERLINE + """is""",
SPIECE_UNDERLINE + """f""",
"""al""",
"""s""",
"""é""",
""".""",
] , )
A_ : Dict = tokenizer.convert_tokens_to_ids(lowercase )
self.assertListEqual(
lowercase , [
value + tokenizer.fairseq_offset
for value in [8, 21, 84, 55, 24, 19, 7, 2, 602, 347, 347, 347, 3, 12, 66, 46, 72, 80, 6, 2, 4]
] , )
A_ : Any = tokenizer.convert_ids_to_tokens(lowercase )
self.assertListEqual(
lowercase , [
SPIECE_UNDERLINE + """I""",
SPIECE_UNDERLINE + """was""",
SPIECE_UNDERLINE + """b""",
"""or""",
"""n""",
SPIECE_UNDERLINE + """in""",
SPIECE_UNDERLINE + """""",
"""<unk>""",
"""2""",
"""0""",
"""0""",
"""0""",
""",""",
SPIECE_UNDERLINE + """and""",
SPIECE_UNDERLINE + """this""",
SPIECE_UNDERLINE + """is""",
SPIECE_UNDERLINE + """f""",
"""al""",
"""s""",
"""<unk>""",
""".""",
] , )
A_ : Optional[int] = tokenizer.vocab_size
A_ : Optional[int] = [tokenizer.convert_ids_to_tokens(lowercase ) for x in range(end - 7 , lowercase )]
self.assertListEqual(
lowercase , ["""__java__""", """__python__""", """__en_XX__""", """__javascript__""", """__php__""", """__ruby__""", """__go__"""] )
A_ : Union[str, Any] = """java.lang.Exception, python.lang.Exception, javascript, php, ruby, go"""
A_ : List[Any] = tokenizer(lowercase ).input_ids
self.assertEqual(
tokenizer.decode(lowercase , skip_special_tokens=lowercase , clean_up_tokenization_spaces=lowercase ) , lowercase , )
@require_torch
@require_sentencepiece
@require_tokenizers
class _lowerCAmelCase ( unittest.TestCase ):
__SCREAMING_SNAKE_CASE : str = 'uclanlp/plbart-python-en_XX'
__SCREAMING_SNAKE_CASE : Any = [
'def maximum(a,b,c):NEW_LINE_INDENTreturn max([a,b,c])',
'def sum(a,b,c):NEW_LINE_INDENTreturn sum([a,b,c])',
]
__SCREAMING_SNAKE_CASE : List[str] = [
'Returns the maximum value of a b c.',
'Sums the values of a b c.',
]
__SCREAMING_SNAKE_CASE : Optional[Any] = [
134,
5_452,
33_460,
33_441,
33_463,
33_465,
33_463,
33_449,
988,
20,
33_456,
19,
33_456,
771,
39,
4_258,
889,
3_318,
33_441,
33_463,
33_465,
33_463,
33_449,
2_471,
2,
PYTHON_CODE,
]
@classmethod
def _a (cls ):
A_ : PLBartTokenizer = PLBartTokenizer.from_pretrained(
cls.checkpoint_name , language_codes="""base""" , src_lang="""python""" , tgt_lang="""en_XX""" )
A_ : int = 1
return cls
def _a (self ):
self.assertEqual(self.tokenizer.fairseq_tokens_to_ids["""__java__"""] , 50001 )
self.assertEqual(self.tokenizer.fairseq_tokens_to_ids["""__python__"""] , 50002 )
self.assertEqual(self.tokenizer.fairseq_tokens_to_ids["""__en_XX__"""] , 50003 )
def _a (self ):
A_ : str = self.tokenizer.batch_encode_plus(self.src_text ).input_ids[0]
self.assertListEqual(self.expected_src_tokens , lowercase )
def _a (self ):
self.assertIn(lowercase , self.tokenizer.all_special_ids )
A_ : Dict = [EN_CODE, 9037, 33442, 57, 752, 153, 14, 56, 18, 9, 2]
A_ : str = self.tokenizer.decode(lowercase , skip_special_tokens=lowercase )
A_ : List[str] = self.tokenizer.decode(generated_ids[1:] , skip_special_tokens=lowercase )
self.assertEqual(lowercase , lowercase )
self.assertNotIn(self.tokenizer.eos_token , lowercase )
def _a (self ):
A_ : List[str] = ["""def sum(a,b,c):NEW_LINE_INDENTreturn sum([a,b,c])""" * 20]
self.assertIsInstance(src_text[0] , lowercase )
A_ : List[str] = 10
A_ : int = self.tokenizer(lowercase , max_length=lowercase , truncation=lowercase ).input_ids[0]
self.assertEqual(ids[-2] , 2 )
self.assertEqual(ids[-1] , lowercase )
self.assertEqual(len(lowercase ) , lowercase )
def _a (self ):
self.assertListEqual(self.tokenizer.convert_tokens_to_ids(["""<mask>""", """__java__"""] ) , [50004, 50001] )
def _a (self ):
A_ : Optional[int] = tempfile.mkdtemp()
A_ : Any = self.tokenizer.fairseq_tokens_to_ids
self.tokenizer.save_pretrained(lowercase )
A_ : Optional[int] = PLBartTokenizer.from_pretrained(lowercase )
self.assertDictEqual(new_tok.fairseq_tokens_to_ids , lowercase )
@require_torch
def _a (self ):
A_ : Dict = self.tokenizer(self.src_text , text_target=self.tgt_text , padding=lowercase , return_tensors="""pt""" )
A_ : Optional[Any] = shift_tokens_right(batch["""labels"""] , self.tokenizer.pad_token_id )
# fairseq batch: https://gist.github.com/sshleifer/cba08bc2109361a74ac3760a7e30e4f4
self.assertEqual(batch.input_ids[1][-2:].tolist() , [2, PYTHON_CODE] )
self.assertEqual(batch.decoder_input_ids[1][0] , lowercase )
self.assertEqual(batch.decoder_input_ids[1][-1] , 2 )
self.assertEqual(batch.labels[1][-2:].tolist() , [2, EN_CODE] )
@require_torch
def _a (self ):
A_ : Any = self.tokenizer(
self.src_text , text_target=self.tgt_text , padding=lowercase , truncation=lowercase , max_length=len(self.expected_src_tokens ) , return_tensors="""pt""" , )
A_ : List[str] = shift_tokens_right(batch["""labels"""] , self.tokenizer.pad_token_id )
self.assertIsInstance(lowercase , lowercase )
self.assertEqual((2, 26) , batch.input_ids.shape )
self.assertEqual((2, 26) , batch.attention_mask.shape )
A_ : Dict = batch.input_ids.tolist()[0]
self.assertListEqual(self.expected_src_tokens , lowercase )
self.assertEqual(2 , batch.decoder_input_ids[0, -1] ) # EOS
# Test that special tokens are reset
self.assertEqual(self.tokenizer.prefix_tokens , [] )
self.assertEqual(self.tokenizer.suffix_tokens , [self.tokenizer.eos_token_id, PYTHON_CODE] )
def _a (self ):
A_ : int = self.tokenizer(self.src_text , padding=lowercase , truncation=lowercase , max_length=3 , return_tensors="""pt""" )
A_ : Optional[int] = self.tokenizer(
text_target=self.tgt_text , padding=lowercase , truncation=lowercase , max_length=10 , return_tensors="""pt""" )
A_ : List[str] = targets["""input_ids"""]
A_ : Any = shift_tokens_right(lowercase , self.tokenizer.pad_token_id )
self.assertEqual(batch.input_ids.shape[1] , 3 )
self.assertEqual(batch.decoder_input_ids.shape[1] , 10 )
@require_torch
def _a (self ):
A_ : Union[str, Any] = self.tokenizer._build_translation_inputs(
"""A test""" , return_tensors="""pt""" , src_lang="""en_XX""" , tgt_lang="""java""" )
self.assertEqual(
nested_simplify(lowercase ) , {
# A, test, EOS, en_XX
"""input_ids""": [[150, 242, 2, 50003]],
"""attention_mask""": [[1, 1, 1, 1]],
# java
"""forced_bos_token_id""": 50001,
            } , )
| 135 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ....utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
lowerCamelCase :Dict = {'''configuration_mmbt''': ['''MMBTConfig''']}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCamelCase :str = ['''MMBTForClassification''', '''MMBTModel''', '''ModalEmbeddings''']
if TYPE_CHECKING:
from .configuration_mmbt import MMBTConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_mmbt import MMBTForClassification, MMBTModel, ModalEmbeddings
else:
import sys
    lowerCamelCase :Tuple = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 135 | 1
"""simple docstring"""
import argparse
import collections
import json
from pathlib import Path
import requests
import torch
import yaml
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import (
MobileViTImageProcessor,
MobileViTVaConfig,
MobileViTVaForImageClassification,
MobileViTVaForSemanticSegmentation,
)
from transformers.utils import logging
logging.set_verbosity_info()
UpperCAmelCase = logging.get_logger(__name__)
def lowercase ( a__ : int ) -> Optional[Any]:
print('''Loading config file...''' )
def flatten_yaml_as_dict(a__ : Dict , a__ : Union[str, Any]="" , a__ : Union[str, Any]="." ):
_UpperCamelCase = []
for k, v in d.items():
_UpperCamelCase = parent_key + sep + k if parent_key else k
if isinstance(a__ , collections.abc.MutableMapping ):
items.extend(flatten_yaml_as_dict(a__ , a__ , sep=a__ ).items() )
else:
items.append((new_key, v) )
return dict(a__ )
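    # e.g. flatten_yaml_as_dict({"model": {"classification": {"name": "x"}}} )
    # returns {"model.classification.name": "x"}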
_UpperCamelCase = argparse.Namespace()
with open(a__ , '''r''' ) as yaml_file:
try:
_UpperCamelCase = yaml.load(a__ , Loader=yaml.FullLoader )
_UpperCamelCase = flatten_yaml_as_dict(a__ )
for k, v in flat_cfg.items():
setattr(a__ , a__ , a__ )
except yaml.YAMLError as exc:
logger.error('''Error while loading config file: {}. Error message: {}'''.format(a__ , str(a__ ) ) )
return config
def lowercase ( a__ : str , a__ : Optional[int] ) -> str:
_UpperCamelCase = MobileViTVaConfig()
_UpperCamelCase = False
# dataset
if task_name.startswith('''imagenet1k_''' ):
_UpperCamelCase = 1000
if int(task_name.strip().split('''_''' )[-1] ) == 384:
_UpperCamelCase = 384
else:
_UpperCamelCase = 256
_UpperCamelCase = '''imagenet-1k-id2label.json'''
elif task_name.startswith('''imagenet21k_to_1k_''' ):
_UpperCamelCase = 21000
if int(task_name.strip().split('''_''' )[-1] ) == 384:
_UpperCamelCase = 384
else:
_UpperCamelCase = 256
_UpperCamelCase = '''imagenet-22k-id2label.json'''
elif task_name.startswith('''ade20k_''' ):
_UpperCamelCase = 151
_UpperCamelCase = 512
_UpperCamelCase = '''ade20k-id2label.json'''
_UpperCamelCase = True
elif task_name.startswith('''voc_''' ):
_UpperCamelCase = 21
_UpperCamelCase = 512
_UpperCamelCase = '''pascal-voc-id2label.json'''
_UpperCamelCase = True
# orig_config
_UpperCamelCase = load_orig_config_file(a__ )
assert getattr(a__ , '''model.classification.name''' , -1 ) == "mobilevit_v2", "Invalid model"
_UpperCamelCase = getattr(a__ , '''model.classification.mitv2.width_multiplier''' , 1.0 )
assert (
getattr(a__ , '''model.classification.mitv2.attn_norm_layer''' , -1 ) == "layer_norm_2d"
), "Norm layers other than layer_norm_2d is not supported"
_UpperCamelCase = getattr(a__ , '''model.classification.activation.name''' , '''swish''' )
# config.image_size == getattr(orig_config, 'sampler.bs.crop_size_width', 256)
if is_segmentation_model:
_UpperCamelCase = getattr(a__ , '''model.segmentation.output_stride''' , 16 )
if "_deeplabv3" in task_name:
_UpperCamelCase = getattr(a__ , '''model.segmentation.deeplabv3.aspp_rates''' , [12, 24, 36] )
_UpperCamelCase = getattr(a__ , '''model.segmentation.deeplabv3.aspp_out_channels''' , 512 )
_UpperCamelCase = getattr(a__ , '''model.segmentation.deeplabv3.aspp_dropout''' , 0.1 )
# id2label
_UpperCamelCase = '''huggingface/label-files'''
_UpperCamelCase = json.load(open(hf_hub_download(a__ , a__ , repo_type='''dataset''' ) , '''r''' ) )
_UpperCamelCase = {int(a__ ): v for k, v in idalabel.items()}
_UpperCamelCase = idalabel
_UpperCamelCase = {v: k for k, v in idalabel.items()}
return config
def lowercase ( a__ : Union[str, Any] , a__ : Any , a__ : str ) -> Union[str, Any]:
_UpperCamelCase = dct.pop(a__ )
_UpperCamelCase = val
def lowercase ( a__ : List[str] , a__ : Optional[Any]=False ) -> Dict:
if base_model:
_UpperCamelCase = ''''''
else:
_UpperCamelCase = '''mobilevitv2.'''
_UpperCamelCase = []
for k in state_dict.keys():
if k[:8] == "encoder.":
_UpperCamelCase = k[8:]
else:
_UpperCamelCase = k
if ".block." in k:
_UpperCamelCase = k_new.replace('''.block.''' , '''.''' )
if ".conv." in k:
_UpperCamelCase = k_new.replace('''.conv.''' , '''.convolution.''' )
if ".norm." in k:
_UpperCamelCase = k_new.replace('''.norm.''' , '''.normalization.''' )
if "conv_1." in k:
_UpperCamelCase = k_new.replace('''conv_1.''' , F'''{model_prefix}conv_stem.''' )
for i in [1, 2]:
if F'''layer_{i}.''' in k:
_UpperCamelCase = k_new.replace(F'''layer_{i}.''' , F'''{model_prefix}encoder.layer.{i-1}.layer.''' )
if ".exp_1x1." in k:
_UpperCamelCase = k_new.replace('''.exp_1x1.''' , '''.expand_1x1.''' )
if ".red_1x1." in k:
_UpperCamelCase = k_new.replace('''.red_1x1.''' , '''.reduce_1x1.''' )
for i in [3, 4, 5]:
if F'''layer_{i}.0.''' in k:
_UpperCamelCase = k_new.replace(F'''layer_{i}.0.''' , F'''{model_prefix}encoder.layer.{i-1}.downsampling_layer.''' )
if F'''layer_{i}.1.local_rep.0.''' in k:
_UpperCamelCase = k_new.replace(F'''layer_{i}.1.local_rep.0.''' , F'''{model_prefix}encoder.layer.{i-1}.conv_kxk.''' )
if F'''layer_{i}.1.local_rep.1.''' in k:
_UpperCamelCase = k_new.replace(F'''layer_{i}.1.local_rep.1.''' , F'''{model_prefix}encoder.layer.{i-1}.conv_1x1.''' )
for i in [3, 4, 5]:
if i == 3:
_UpperCamelCase = [0, 1]
elif i == 4:
_UpperCamelCase = [0, 1, 2, 3]
elif i == 5:
_UpperCamelCase = [0, 1, 2]
for j in j_in:
if F'''layer_{i}.1.global_rep.{j}.''' in k:
_UpperCamelCase = k_new.replace(
F'''layer_{i}.1.global_rep.{j}.''' , F'''{model_prefix}encoder.layer.{i-1}.transformer.layer.{j}.''' )
if F'''layer_{i}.1.global_rep.{j+1}.''' in k:
_UpperCamelCase = k_new.replace(
F'''layer_{i}.1.global_rep.{j+1}.''' , F'''{model_prefix}encoder.layer.{i-1}.layernorm.''' )
if F'''layer_{i}.1.conv_proj.''' in k:
_UpperCamelCase = k_new.replace(F'''layer_{i}.1.conv_proj.''' , F'''{model_prefix}encoder.layer.{i-1}.conv_projection.''' )
if "pre_norm_attn.0." in k:
_UpperCamelCase = k_new.replace('''pre_norm_attn.0.''' , '''layernorm_before.''' )
if "pre_norm_attn.1." in k:
_UpperCamelCase = k_new.replace('''pre_norm_attn.1.''' , '''attention.''' )
if "pre_norm_ffn.0." in k:
_UpperCamelCase = k_new.replace('''pre_norm_ffn.0.''' , '''layernorm_after.''' )
if "pre_norm_ffn.1." in k:
_UpperCamelCase = k_new.replace('''pre_norm_ffn.1.''' , '''ffn.conv1.''' )
if "pre_norm_ffn.3." in k:
_UpperCamelCase = k_new.replace('''pre_norm_ffn.3.''' , '''ffn.conv2.''' )
if "classifier.1." in k:
_UpperCamelCase = k_new.replace('''classifier.1.''' , '''classifier.''' )
if "seg_head." in k:
_UpperCamelCase = k_new.replace('''seg_head.''' , '''segmentation_head.''' )
if ".aspp_layer." in k:
_UpperCamelCase = k_new.replace('''.aspp_layer.''' , '''.''' )
if ".aspp_pool." in k:
_UpperCamelCase = k_new.replace('''.aspp_pool.''' , '''.''' )
rename_keys.append((k, k_new) )
return rename_keys
def lowercase ( a__ : str ) -> Tuple:
_UpperCamelCase = []
for k in state_dict.keys():
if k.startswith('''seg_head.aux_head.''' ):
keys_to_ignore.append(a__ )
for k in keys_to_ignore:
state_dict.pop(a__ , a__ )
def lowercase ( ) -> Union[str, Any]:
_UpperCamelCase = '''http://images.cocodataset.org/val2017/000000039769.jpg'''
# url = "https://cdn.britannica.com/86/141086-050-9D7C75EE/Gulfstream-G450-business-jet-passengers.jpg"
_UpperCamelCase = Image.open(requests.get(a__ , stream=a__ ).raw )
return im
@torch.no_grad()
def lowercase ( a__ : Optional[int] , a__ : str , a__ : int , a__ : List[str] ) -> Optional[Any]:
_UpperCamelCase = get_mobilevitva_config(a__ , a__ )
# load original state_dict
_UpperCamelCase = torch.load(a__ , map_location='''cpu''' )
# load huggingface model
if task_name.startswith('''ade20k_''' ) or task_name.startswith('''voc_''' ):
_UpperCamelCase = MobileViTVaForSemanticSegmentation(a__ ).eval()
_UpperCamelCase = False
else:
_UpperCamelCase = MobileViTVaForImageClassification(a__ ).eval()
_UpperCamelCase = False
# remove and rename some keys of load the original model
_UpperCamelCase = checkpoint
remove_unused_keys(a__ )
_UpperCamelCase = create_rename_keys(a__ , base_model=a__ )
for rename_key_src, rename_key_dest in rename_keys:
rename_key(a__ , a__ , a__ )
# load modified state_dict
model.load_state_dict(a__ )
# Check outputs on an image, prepared by MobileViTImageProcessor
_UpperCamelCase = MobileViTImageProcessor(crop_size=config.image_size , size=config.image_size + 32 )
_UpperCamelCase = image_processor(images=prepare_img() , return_tensors='''pt''' )
_UpperCamelCase = model(**a__ )
# verify classification model
if task_name.startswith('''imagenet''' ):
_UpperCamelCase = outputs.logits
_UpperCamelCase = logits.argmax(-1 ).item()
print('''Predicted class:''' , model.config.idalabel[predicted_class_idx] )
if task_name.startswith('''imagenet1k_256''' ) and config.width_multiplier == 1.0:
# expected_logits for base variant
_UpperCamelCase = torch.tensor([-1.63_36e00, -7.32_04e-02, -5.18_83e-01] )
assert torch.allclose(logits[0, :3] , a__ , atol=1e-4 )
Path(a__ ).mkdir(exist_ok=a__ )
print(F'''Saving model {task_name} to {pytorch_dump_folder_path}''' )
model.save_pretrained(a__ )
print(F'''Saving image processor to {pytorch_dump_folder_path}''' )
image_processor.save_pretrained(a__ )
if __name__ == "__main__":
UpperCAmelCase = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--task""",
default="""imagenet1k_256""",
type=str,
help=(
"""Name of the task for which the MobileViTV2 model you'd like to convert is trained on . """
"""
Classification (ImageNet-1k)
- MobileViTV2 (256x256) : imagenet1k_256
- MobileViTV2 (Trained on 256x256 and Finetuned on 384x384) : imagenet1k_384
- MobileViTV2 (Trained on ImageNet-21k and Finetuned on ImageNet-1k 256x256) :
imagenet21k_to_1k_256
- MobileViTV2 (Trained on ImageNet-21k, Finetuned on ImageNet-1k 256x256, and Finetuned on
ImageNet-1k 384x384) : imagenet21k_to_1k_384
Segmentation
- ADE20K Dataset : ade20k_deeplabv3
- Pascal VOC 2012 Dataset: voc_deeplabv3
"""
),
choices=[
"""imagenet1k_256""",
"""imagenet1k_384""",
"""imagenet21k_to_1k_256""",
"""imagenet21k_to_1k_384""",
"""ade20k_deeplabv3""",
"""voc_deeplabv3""",
],
)
parser.add_argument(
"""--orig_checkpoint_path""", required=True, type=str, help="""Path to the original state dict (.pt file)."""
)
parser.add_argument("""--orig_config_path""", required=True, type=str, help="""Path to the original config file.""")
parser.add_argument(
"""--pytorch_dump_folder_path""", required=True, type=str, help="""Path to the output PyTorch model directory."""
)
UpperCAmelCase = parser.parse_args()
convert_mobilevitva_checkpoint(
args.task, args.orig_checkpoint_path, args.orig_config_path, args.pytorch_dump_folder_path
)
| 256 |
"""simple docstring"""
import inspect
from typing import List, Optional, Tuple, Union
import torch
from ...models import UNetaDModel, VQModel
from ...schedulers import DDIMScheduler
from ...utils import randn_tensor
from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput
class UpperCAmelCase_ ( _lowercase):
def __init__( self : List[Any] , __UpperCamelCase : VQModel , __UpperCamelCase : UNetaDModel , __UpperCamelCase : DDIMScheduler ) -> Optional[Any]:
super().__init__()
self.register_modules(vqvae=__UpperCamelCase , unet=__UpperCamelCase , scheduler=__UpperCamelCase )
@torch.no_grad()
def __call__( self : List[Any] , __UpperCamelCase : int = 1 , __UpperCamelCase : Optional[Union[torch.Generator, List[torch.Generator]]] = None , __UpperCamelCase : float = 0.0 , __UpperCamelCase : int = 50 , __UpperCamelCase : Optional[str] = "pil" , __UpperCamelCase : bool = True , **__UpperCamelCase : Optional[int] , ) -> Union[Tuple, ImagePipelineOutput]:
_UpperCamelCase = randn_tensor(
(batch_size, self.unet.config.in_channels, self.unet.config.sample_size, self.unet.config.sample_size) , generator=__UpperCamelCase , )
_UpperCamelCase = latents.to(self.device )
# scale the initial noise by the standard deviation required by the scheduler
_UpperCamelCase = latents * self.scheduler.init_noise_sigma
self.scheduler.set_timesteps(__UpperCamelCase )
# prepare extra kwargs for the scheduler step, since not all schedulers have the same signature
_UpperCamelCase = '''eta''' in set(inspect.signature(self.scheduler.step ).parameters.keys() )
_UpperCamelCase = {}
if accepts_eta:
_UpperCamelCase = eta
for t in self.progress_bar(self.scheduler.timesteps ):
_UpperCamelCase = self.scheduler.scale_model_input(__UpperCamelCase , __UpperCamelCase )
# predict the noise residual
_UpperCamelCase = self.unet(__UpperCamelCase , __UpperCamelCase ).sample
# compute the previous noisy sample x_t -> x_t-1
_UpperCamelCase = self.scheduler.step(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase , **__UpperCamelCase ).prev_sample
# decode the image latents with the VAE
_UpperCamelCase = self.vqvae.decode(__UpperCamelCase ).sample
_UpperCamelCase = (image / 2 + 0.5).clamp(0 , 1 )
_UpperCamelCase = image.cpu().permute(0 , 2 , 3 , 1 ).numpy()
if output_type == "pil":
_UpperCamelCase = self.numpy_to_pil(__UpperCamelCase )
if not return_dict:
return (image,)
return ImagePipelineOutput(images=__UpperCamelCase )
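# usage sketch (assumes this is diffusers' unconditional LDMPipeline; the
# checkpoint below is the public latent-diffusion example):
# pipe = LDMPipeline.from_pretrained("CompVis/ldm-celebahq-256" )
# image = pipe(num_inference_steps=50 ).images[0]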
| 256 | 1 |
"""simple docstring"""
import json
import os
import tempfile
import transformers
import datasets
from utils import generate_example_dataset, get_duration
A_ = 50_00_00
A_ , A_ = os.path.split(__file__)
A_ = os.path.join(RESULTS_BASEPATH, '''results''', RESULTS_FILENAME.replace('''.py''', '''.json'''))
@get_duration
def UpperCAmelCase__ (snake_case__ : datasets.Dataset , **snake_case__ : Union[str, Any] ):
"""simple docstring"""
_snake_case : List[Any] = dataset.map(**snake_case__ )
@get_duration
def UpperCAmelCase__ (snake_case__ : datasets.Dataset , **snake_case__ : Union[str, Any] ):
"""simple docstring"""
_snake_case : int = dataset.filter(**snake_case__ )
def UpperCAmelCase__ ():
"""simple docstring"""
_snake_case : List[str] = {"""num examples""": SPEED_TEST_N_EXAMPLES}
with tempfile.TemporaryDirectory() as tmp_dir:
_snake_case : Tuple = datasets.Features({"""text""": datasets.Value("""string""" ), """numbers""": datasets.Value("""float32""" )} )
_snake_case : Any = generate_example_dataset(
os.path.join(snake_case__ , """dataset.arrow""" ) , snake_case__ , num_examples=snake_case__ )
_snake_case : List[Any] = transformers.AutoTokenizer.from_pretrained("""bert-base-cased""" , use_fast=snake_case__ )
def tokenize(snake_case__ : Tuple ):
return tokenizer(examples["""text"""] )
_snake_case : List[Any] = map(snake_case__ )
_snake_case : List[Any] = map(snake_case__ , batched=snake_case__ )
_snake_case : Any = map(snake_case__ , function=lambda snake_case__ : None , batched=snake_case__ )
with dataset.formatted_as(type="""numpy""" ):
_snake_case : Dict = map(snake_case__ , function=lambda snake_case__ : None , batched=snake_case__ )
with dataset.formatted_as(type="""pandas""" ):
_snake_case : Any = map(snake_case__ , function=lambda snake_case__ : None , batched=snake_case__ )
with dataset.formatted_as(type="""torch""" , columns="""numbers""" ):
_snake_case : Any = map(snake_case__ , function=lambda snake_case__ : None , batched=snake_case__ )
with dataset.formatted_as(type="""tensorflow""" , columns="""numbers""" ):
_snake_case : Optional[Any] = map(snake_case__ , function=lambda snake_case__ : None , batched=snake_case__ )
_snake_case : str = map(snake_case__ , function=snake_case__ , batched=snake_case__ )
_snake_case : List[Any] = filter(snake_case__ )
# Activate later when tokenizer support batched inputs
# with dataset.formatted_as(type='numpy'):
# times[func.__name__ + " fast-tokenizer batched numpy"] = func(dataset, function=tokenize, batched=True)
with open(snake_case__ , """wb""" ) as f:
f.write(json.dumps(snake_case__ ).encode("""utf-8""" ) )
if __name__ == "__main__": # useful to run the profiler
benchmark_map_filter()
| 132 |
"""simple docstring"""
from __future__ import annotations
from math import gcd
def pollard_rho(num: int, seed: int = 2, step: int = 1, attempts: int = 3) -> int | None:
    """Use Pollard's rho algorithm to return a nontrivial factor of ``num``, or None on failure."""
if num < 2:
raise ValueError("""The input value cannot be less than 2""" )
# Because of the relationship between ``f(f(x))`` and ``f(x)``, this
# algorithm struggles to find factors that are divisible by two.
# As a workaround, we specifically check for two and even inputs.
# See: https://math.stackexchange.com/a/2856214/165820
if num > 2 and num % 2 == 0:
return 2
# Pollard's Rho algorithm requires a function that returns pseudorandom
# values between 0 <= X < ``num``. It doesn't need to be random in the
# sense that the output value is cryptographically secure or difficult
# to calculate, it only needs to be random in the sense that all output
# values should be equally likely to appear.
# For this reason, Pollard suggested using ``f(x) = (x**2 - 1) % num``
# However, the success of Pollard's algorithm isn't guaranteed and is
# determined in part by the initial seed and the chosen random function.
# To make retries easier, we will instead use ``f(x) = (x**2 + C) % num``
# where ``C`` is a value that we can modify between each attempt.
    def rand_fn(value: int, step: int, modulus: int) -> int:
        return (pow(value, 2) + step) % modulus
    for _ in range(attempts):
# These track the position within the cycle detection logic.
        tortoise = seed
        hare = seed
while True:
# At each iteration, the tortoise moves one step and the hare moves two.
            tortoise = rand_fn(tortoise, step, num)
            hare = rand_fn(hare, step, num)
            hare = rand_fn(hare, step, num)
# At some point both the tortoise and the hare will enter a cycle whose
# length ``p`` is a divisor of ``num``. Once in that cycle, at some point
# the tortoise and hare will end up on the same value modulo ``p``.
# We can detect when this happens because the position difference between
# the tortoise and the hare will share a common divisor with ``num``.
            divisor = gcd(hare - tortoise, num)
if divisor == 1:
# No common divisor yet, just keep searching.
continue
else:
# We found a common divisor!
if divisor == num:
# Unfortunately, the divisor is ``num`` itself and is useless.
break
else:
# The divisor is a nontrivial factor of ``num``!
return divisor
# If we made it here, then this attempt failed.
# We need to pick a new starting seed for the tortoise and hare
# in addition to a new step value for the random function.
# To keep this example implementation deterministic, the
# new values will be generated based on currently available
# values instead of using something like ``random.randint``.
# We can use the hare's position as the new seed.
        # This is actually what Richard Brent's "optimized" variant of the algorithm does.
        seed = hare
# The new step value for the random function can just be incremented.
# At first the results will be similar to what the old function would
# have produced, but the value will quickly diverge after a bit.
step += 1
# We haven't found a divisor within the requested number of attempts.
# We were unlucky or ``num`` itself is actually prime.
return None
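

# Worked example (illustrative; the exact result depends on the seed/step values):
#   pollard_rho(8051) returns a nontrivial factor of 8051 = 83 * 97 (one of the two),
#   while pollard_rho(17) returns None because 17 is prime and no divisor exists.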
if __name__ == "__main__":
import argparse
    parser = argparse.ArgumentParser()
parser.add_argument(
'''num''',
type=int,
help='''The value to find a divisor of''',
)
parser.add_argument(
'''--attempts''',
type=int,
default=3,
help='''The number of attempts before giving up''',
)
    args = parser.parse_args()
    divisor = pollard_rho(args.num, attempts=args.attempts)
if divisor is None:
print(F'''{args.num} is probably prime''')
else:
        quotient = args.num // divisor
print(F'''{args.num} = {divisor} * {quotient}''')
| 132 | 1 |
"""simple docstring"""
import os
import string
import sys
ARROW_KEY_FLAG = 1 << 8
KEYMAP = {
"tab": ord("\t"),
"newline": ord("\r"),
"esc": 27,
"up": 65 + ARROW_KEY_FLAG,
"down": 66 + ARROW_KEY_FLAG,
"right": 67 + ARROW_KEY_FLAG,
"left": 68 + ARROW_KEY_FLAG,
"mod_int": 91,
"undefined": sys.maxsize,
"interrupt": 3,
"insert": 50,
"delete": 51,
"pg_up": 53,
"pg_down": 54,
}
a :Union[str, Any] = KEYMAP["up"]
a :int = KEYMAP["left"]
if sys.platform == "win32":
    WIN_CH_BUFFER = []
    WIN_KEYMAP = {
b"\xe0H": KEYMAP["up"] - ARROW_KEY_FLAG,
b"\x00H": KEYMAP["up"] - ARROW_KEY_FLAG,
b"\xe0P": KEYMAP["down"] - ARROW_KEY_FLAG,
b"\x00P": KEYMAP["down"] - ARROW_KEY_FLAG,
b"\xe0M": KEYMAP["right"] - ARROW_KEY_FLAG,
b"\x00M": KEYMAP["right"] - ARROW_KEY_FLAG,
b"\xe0K": KEYMAP["left"] - ARROW_KEY_FLAG,
b"\x00K": KEYMAP["left"] - ARROW_KEY_FLAG,
}
for i in range(10):
    KEYMAP[str(i)] = ord(str(i))
def get_raw_chars():
    """Get raw characters from keyboard input."""
    if os.name == "nt":
        import msvcrt

        encoding = "mbcs"
        # Flush the keyboard buffer
        while msvcrt.kbhit():
            msvcrt.getch()
        if len(WIN_CH_BUFFER) == 0:
            # Read the keystroke
            ch = msvcrt.getch()
            # If it is a prefix char, get second part
            if ch in (b"\x00", b"\xe0"):
                cha = ch + msvcrt.getch()
                # Translate actual Win chars to bullet char types
                try:
                    chx = chr(WIN_KEYMAP[cha])
                    WIN_CH_BUFFER.append(chr(KEYMAP["mod_int"]))
                    WIN_CH_BUFFER.append(chx)
                    if ord(chx) in (
                        KEYMAP["insert"] - 1 << 9,
                        KEYMAP["delete"] - 1 << 9,
                        KEYMAP["pg_up"] - 1 << 9,
                        KEYMAP["pg_down"] - 1 << 9,
                    ):
                        WIN_CH_BUFFER.append(chr(126))
                    ch = chr(KEYMAP["esc"])
                except KeyError:
                    ch = cha[1]
            else:
                ch = ch.decode(encoding)
        else:
            ch = WIN_CH_BUFFER.pop(0)
    elif os.name == "posix":
        import termios
        import tty

        fd = sys.stdin.fileno()
        old_settings = termios.tcgetattr(fd)
        try:
            tty.setraw(fd)
            ch = sys.stdin.read(1)
        finally:
            termios.tcsetattr(fd, termios.TCSADRAIN, old_settings)
    return ch


def get_character():
    """Get a character from the keyboard, translating escape sequences for arrow keys."""
    char = get_raw_chars()
    if ord(char) in [KEYMAP["interrupt"], KEYMAP["newline"]]:
        return char

    elif ord(char) == KEYMAP["esc"]:
        combo = get_raw_chars()
        if ord(combo) == KEYMAP["mod_int"]:
            key = get_raw_chars()
            if ord(key) >= KEYMAP["arrow_begin"] - ARROW_KEY_FLAG and ord(key) <= KEYMAP["arrow_end"] - ARROW_KEY_FLAG:
                return chr(ord(key) + ARROW_KEY_FLAG)
            else:
                return KEYMAP["undefined"]
        else:
            return get_raw_chars()

    else:
        if char in string.printable:
            return char
        else:
            return KEYMAP["undefined"]
| 132 |
"""simple docstring"""
import sys
from collections import defaultdict
class Heap:
    def __init__(self):
        self.node_position = []

    def get_position(self, vertex):
        return self.node_position[vertex]

    def set_position(self, vertex, pos):
        self.node_position[vertex] = pos

    def top_to_bottom(self, heap, start, size, positions):
        """Sift the element at `start` down until the min-heap property holds."""
        if start > size // 2 - 1:
            return
        else:
            if 2 * start + 2 >= size:
                smallest_child = 2 * start + 1
            else:
                if heap[2 * start + 1] < heap[2 * start + 2]:
                    smallest_child = 2 * start + 1
                else:
                    smallest_child = 2 * start + 2
            if heap[smallest_child] < heap[start]:
                temp, temp1 = heap[smallest_child], positions[smallest_child]
                heap[smallest_child], positions[smallest_child] = (
                    heap[start],
                    positions[start],
                )
                heap[start], positions[start] = temp, temp1

                temp = self.get_position(positions[smallest_child])
                self.set_position(
                    positions[smallest_child], self.get_position(positions[start])
                )
                self.set_position(positions[start], temp)

                self.top_to_bottom(heap, smallest_child, size, positions)

    def bottom_to_top(self, val, index, heap, position):
        """Restore the heap property upward after decreasing the value at `index`."""
        temp = position[index]

        while index != 0:
            parent = int((index - 2) / 2) if index % 2 == 0 else int((index - 1) / 2)

            if val < heap[parent]:
                heap[index] = heap[parent]
                position[index] = position[parent]
                self.set_position(position[parent], index)
            else:
                heap[index] = val
                position[index] = temp
                self.set_position(temp, index)
                break
            index = parent
        else:
            heap[0] = val
            position[0] = temp
            self.set_position(temp, 0)

    def heapify(self, heap, positions):
        start = len(heap) // 2 - 1
        for i in range(start, -1, -1):
            self.top_to_bottom(heap, i, len(heap), positions)

    def delete_minimum(self, heap, positions):
        temp = positions[0]
        heap[0] = sys.maxsize
        self.top_to_bottom(heap, 0, len(heap), positions)
        return temp


def prisms_algorithm(adjacency_list):
    """Prim's algorithm: return the edges of a minimum spanning tree of the graph."""
    heap = Heap()

    visited = [0] * len(adjacency_list)
    nbr_tv = [-1] * len(adjacency_list)  # Neighboring Tree Vertex of selected vertex
    # Minimum Distance of explored vertex with neighboring vertex of partial tree
    # formed in graph
    distance_tv = []  # Heap of Distance of vertices from their neighboring vertex
    positions = []

    for vertex in range(len(adjacency_list)):
        distance_tv.append(sys.maxsize)
        positions.append(vertex)
        heap.node_position.append(vertex)

    tree_edges = []
    visited[0] = 1
    distance_tv[0] = sys.maxsize
    for neighbor, distance in adjacency_list[0]:
        nbr_tv[neighbor] = 0
        distance_tv[neighbor] = distance
    heap.heapify(distance_tv, positions)

    for _ in range(1, len(adjacency_list)):
        vertex = heap.delete_minimum(distance_tv, positions)
        if visited[vertex] == 0:
            tree_edges.append((nbr_tv[vertex], vertex))
            visited[vertex] = 1
            for neighbor, distance in adjacency_list[vertex]:
                if (
                    visited[neighbor] == 0
                    and distance < distance_tv[heap.get_position(neighbor)]
                ):
                    distance_tv[heap.get_position(neighbor)] = distance
                    heap.bottom_to_top(
                        distance, heap.get_position(neighbor), distance_tv, positions
                    )
                    nbr_tv[neighbor] = vertex
    return tree_edges
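

# Input format sketch (illustrative): `adjacency_list` maps a vertex index to
# [neighbor, weight] pairs. For a triangle with weights 0-1:1, 1-2:2, 0-2:3:
#
#   adjacency_list = defaultdict(list)
#   adjacency_list[0] = [[1, 1], [2, 3]]
#   adjacency_list[1] = [[0, 1], [2, 2]]
#   adjacency_list[2] = [[0, 3], [1, 2]]
#   prisms_algorithm(adjacency_list)  # -> [(0, 1), (1, 2)]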
if __name__ == "__main__": # pragma: no cover
# < --------- Prims Algorithm --------- >
    edges_number = int(input("Enter number of edges: ").strip())
    adjacency_list = defaultdict(list)
    for _ in range(edges_number):
        edge = [int(x) for x in input().strip().split()]
adjacency_list[edge[0]].append([edge[1], edge[2]])
adjacency_list[edge[1]].append([edge[0], edge[2]])
print(prisms_algorithm(adjacency_list))
| 57 | 0 |
'''simple docstring'''
import shutil
import tempfile
import unittest
import numpy as np
import pytest
from transformers import is_speech_available, is_vision_available
from transformers.testing_utils import require_torch
if is_vision_available():
from transformers import TvltImageProcessor
if is_speech_available():
from transformers import TvltFeatureExtractor
from transformers import TvltProcessor
@require_torch
class TvltProcessorTest(unittest.TestCase):
    def setUp(self):
        self.checkpoint = "ZinengTang/tvlt-base"
        self.tmpdirname = tempfile.mkdtemp()

    def get_image_processor(self, **kwargs):
        return TvltImageProcessor.from_pretrained(self.checkpoint, **kwargs)

    def get_feature_extractor(self, **kwargs):
        return TvltFeatureExtractor.from_pretrained(self.checkpoint, **kwargs)

    def tearDown(self):
        shutil.rmtree(self.tmpdirname)

    def test_save_load_pretrained_default(self):
        image_processor = self.get_image_processor()
        feature_extractor = self.get_feature_extractor()

        processor = TvltProcessor(image_processor=image_processor, feature_extractor=feature_extractor)
        processor.save_pretrained(self.tmpdirname)
        processor = TvltProcessor.from_pretrained(self.tmpdirname)

        self.assertIsInstance(processor.feature_extractor, TvltFeatureExtractor)
        self.assertIsInstance(processor.image_processor, TvltImageProcessor)

    def test_feature_extractor(self):
        image_processor = self.get_image_processor()
        feature_extractor = self.get_feature_extractor()

        processor = TvltProcessor(image_processor=image_processor, feature_extractor=feature_extractor)

        audio = np.ones([12000])

        audio_dict = feature_extractor(audio, return_tensors="np")
        input_processor = processor(audio=audio, return_tensors="np")

        for key in audio_dict.keys():
            self.assertAlmostEqual(audio_dict[key].sum(), input_processor[key].sum(), delta=1e-2)

    def test_image_processor(self):
        image_processor = self.get_image_processor()
        feature_extractor = self.get_feature_extractor()

        processor = TvltProcessor(image_processor=image_processor, feature_extractor=feature_extractor)

        images = np.ones([3, 224, 224])

        image_dict = image_processor(images, return_tensors="np")
        input_processor = processor(images=images, return_tensors="np")

        for key in image_dict.keys():
            self.assertAlmostEqual(image_dict[key].sum(), input_processor[key].sum(), delta=1e-2)

    def test_processor(self):
        image_processor = self.get_image_processor()
        feature_extractor = self.get_feature_extractor()

        processor = TvltProcessor(image_processor=image_processor, feature_extractor=feature_extractor)

        audio = np.ones([12000])
        images = np.ones([3, 224, 224])

        inputs = processor(audio=audio, images=images)

        self.assertListEqual(list(inputs.keys()), ["audio_values", "audio_mask", "pixel_values", "pixel_mask"])

        # test if it raises when no input is passed
        with pytest.raises(ValueError):
            processor()

    def test_model_input_names(self):
        image_processor = self.get_image_processor()
        feature_extractor = self.get_feature_extractor()

        processor = TvltProcessor(image_processor=image_processor, feature_extractor=feature_extractor)

        self.assertListEqual(
            processor.model_input_names,
            image_processor.model_input_names + feature_extractor.model_input_names,
            msg="`processor` and `image_processor`+`feature_extractor` model input names do not match",
        )
| 371 |
import numpy as np
from cv2 import destroyAllWindows, imread, imshow, waitKey
class NearestNeighbour:
    """Simple nearest-neighbour image resizing."""

    def __init__(self, img, dst_width: int, dst_height: int):
        if dst_width < 0 or dst_height < 0:
            raise ValueError("Destination width/height should be > 0")

        self.img = img
        self.src_w = img.shape[1]
        self.src_h = img.shape[0]
        self.dst_w = dst_width
        self.dst_h = dst_height

        self.ratio_x = self.src_w / self.dst_w
        self.ratio_y = self.src_h / self.dst_h

        self.output = self.output_img = (
            np.ones((self.dst_h, self.dst_w, 3), np.uint8) * 255
        )

    def process(self):
        # Map every destination pixel back to its nearest source pixel.
        for i in range(self.dst_h):
            for j in range(self.dst_w):
                self.output[i][j] = self.img[self.get_y(i)][self.get_x(j)]

    def get_x(self, x: int) -> int:
        return int(self.ratio_x * x)

    def get_y(self, y: int) -> int:
        return int(self.ratio_y * y)
if __name__ == "__main__":
    dst_w, dst_h = 800, 600
    im = imread("image_data/lena.jpg", 1)
    n = NearestNeighbour(im, dst_w, dst_h)
n.process()
imshow(
f'''Image resized from: {im.shape[1]}x{im.shape[0]} to {dst_w}x{dst_h}''', n.output
)
waitKey(0)
    destroyAllWindows()
| 270 | 0 |
from packaging import version
from .import_utils import is_accelerate_available
if is_accelerate_available():
import accelerate
def apply_forward_hook(method):
    """
    Decorator that runs accelerate's `pre_forward` hook (when one is attached to the
    module) before calling the wrapped method, so offloaded weights are on-device.
    """
    if not is_accelerate_available():
        return method
    accelerate_version = version.parse(accelerate.__version__).base_version
    if version.parse(accelerate_version) < version.parse("0.17.0"):
        return method

    def wrapper(self, *args, **kwargs):
        if hasattr(self, "_hf_hook") and hasattr(self._hf_hook, "pre_forward"):
            self._hf_hook.pre_forward(self)
        return method(self, *args, **kwargs)

    return wrapper
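

# Usage sketch (illustrative): decorate a module method so accelerate's offloading
# hook, when present, moves weights to the execution device before the call.
#
#   class MyAutoencoder(torch.nn.Module):
#       @apply_forward_hook
#       def encode(self, x):
#           ...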
| 313 |
import importlib
import json
import os
from collections import OrderedDict
from typing import Dict, Optional, Union
# Build the list of all feature extractors
from ...configuration_utils import PretrainedConfig
from ...dynamic_module_utils import get_class_from_dynamic_module, resolve_trust_remote_code
from ...feature_extraction_utils import FeatureExtractionMixin
from ...utils import CONFIG_NAME, FEATURE_EXTRACTOR_NAME, get_file_from_repo, logging
from .auto_factory import _LazyAutoMapping
from .configuration_auto import (
CONFIG_MAPPING_NAMES,
AutoConfig,
model_type_to_module_name,
replace_list_option_in_docstrings,
)
logger = logging.get_logger(__name__)
FEATURE_EXTRACTOR_MAPPING_NAMES = OrderedDict(
[
("audio-spectrogram-transformer", "ASTFeatureExtractor"),
("beit", "BeitFeatureExtractor"),
("chinese_clip", "ChineseCLIPFeatureExtractor"),
("clap", "ClapFeatureExtractor"),
("clip", "CLIPFeatureExtractor"),
("clipseg", "ViTFeatureExtractor"),
("conditional_detr", "ConditionalDetrFeatureExtractor"),
("convnext", "ConvNextFeatureExtractor"),
("cvt", "ConvNextFeatureExtractor"),
("data2vec-audio", "Wav2Vec2FeatureExtractor"),
("data2vec-vision", "BeitFeatureExtractor"),
("deformable_detr", "DeformableDetrFeatureExtractor"),
("deit", "DeiTFeatureExtractor"),
("detr", "DetrFeatureExtractor"),
("dinat", "ViTFeatureExtractor"),
("donut-swin", "DonutFeatureExtractor"),
("dpt", "DPTFeatureExtractor"),
("encodec", "EncodecFeatureExtractor"),
("flava", "FlavaFeatureExtractor"),
("glpn", "GLPNFeatureExtractor"),
("groupvit", "CLIPFeatureExtractor"),
("hubert", "Wav2Vec2FeatureExtractor"),
("imagegpt", "ImageGPTFeatureExtractor"),
("layoutlmv2", "LayoutLMv2FeatureExtractor"),
("layoutlmv3", "LayoutLMv3FeatureExtractor"),
("levit", "LevitFeatureExtractor"),
("maskformer", "MaskFormerFeatureExtractor"),
("mctct", "MCTCTFeatureExtractor"),
("mobilenet_v1", "MobileNetV1FeatureExtractor"),
("mobilenet_v2", "MobileNetV2FeatureExtractor"),
("mobilevit", "MobileViTFeatureExtractor"),
("nat", "ViTFeatureExtractor"),
("owlvit", "OwlViTFeatureExtractor"),
("perceiver", "PerceiverFeatureExtractor"),
("poolformer", "PoolFormerFeatureExtractor"),
("regnet", "ConvNextFeatureExtractor"),
("resnet", "ConvNextFeatureExtractor"),
("segformer", "SegformerFeatureExtractor"),
("sew", "Wav2Vec2FeatureExtractor"),
("sew-d", "Wav2Vec2FeatureExtractor"),
("speech_to_text", "Speech2TextFeatureExtractor"),
("speecht5", "SpeechT5FeatureExtractor"),
("swiftformer", "ViTFeatureExtractor"),
("swin", "ViTFeatureExtractor"),
("swinv2", "ViTFeatureExtractor"),
("table-transformer", "DetrFeatureExtractor"),
("timesformer", "VideoMAEFeatureExtractor"),
("tvlt", "TvltFeatureExtractor"),
("unispeech", "Wav2Vec2FeatureExtractor"),
("unispeech-sat", "Wav2Vec2FeatureExtractor"),
("van", "ConvNextFeatureExtractor"),
("videomae", "VideoMAEFeatureExtractor"),
("vilt", "ViltFeatureExtractor"),
("vit", "ViTFeatureExtractor"),
("vit_mae", "ViTFeatureExtractor"),
("vit_msn", "ViTFeatureExtractor"),
("wav2vec2", "Wav2Vec2FeatureExtractor"),
("wav2vec2-conformer", "Wav2Vec2FeatureExtractor"),
("wavlm", "Wav2Vec2FeatureExtractor"),
("whisper", "WhisperFeatureExtractor"),
("xclip", "CLIPFeatureExtractor"),
("yolos", "YolosFeatureExtractor"),
]
)
FEATURE_EXTRACTOR_MAPPING = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FEATURE_EXTRACTOR_MAPPING_NAMES)
def feature_extractor_class_from_name(class_name: str):
    for module_name, extractors in FEATURE_EXTRACTOR_MAPPING_NAMES.items():
        if class_name in extractors:
            module_name = model_type_to_module_name(module_name)

            module = importlib.import_module(f".{module_name}", "transformers.models")
            try:
                return getattr(module, class_name)
            except AttributeError:
                continue

    for _, extractor in FEATURE_EXTRACTOR_MAPPING._extra_content.items():
        if getattr(extractor, "__name__", None) == class_name:
            return extractor

    # We did not find the class, but maybe it's because a dep is missing. In that case, the class will be in the main
    # init and we return the proper dummy to get an appropriate error message.
    main_module = importlib.import_module("transformers")
    if hasattr(main_module, class_name):
        return getattr(main_module, class_name)

    return None
def get_feature_extractor_config(pretrained_model_name_or_path, cache_dir=None, force_download=False, resume_download=False, proxies=None, use_auth_token=None, revision=None, local_files_only=False, **kwargs):
    """Load the feature extractor configuration as a dict from a local folder or a model repo."""
    resolved_config_file = get_file_from_repo(
        pretrained_model_name_or_path, FEATURE_EXTRACTOR_NAME, cache_dir=cache_dir, force_download=force_download,
        resume_download=resume_download, proxies=proxies, use_auth_token=use_auth_token, revision=revision,
        local_files_only=local_files_only,
    )
    if resolved_config_file is None:
        logger.info(
            "Could not locate the feature extractor configuration file, will try to use the model config instead."
        )
        return {}

    with open(resolved_config_file, encoding="utf-8") as reader:
        return json.load(reader)
class AutoFeatureExtractor:
    def __init__(self):
        raise EnvironmentError(
            "AutoFeatureExtractor is designed to be instantiated "
            "using the `AutoFeatureExtractor.from_pretrained(pretrained_model_name_or_path)` method."
        )

    @classmethod
    @replace_list_option_in_docstrings(FEATURE_EXTRACTOR_MAPPING_NAMES)
    def from_pretrained(cls, pretrained_model_name_or_path, **kwargs):
        config = kwargs.pop("config", None)
        trust_remote_code = kwargs.pop("trust_remote_code", None)
        kwargs["_from_auto"] = True

        config_dict, _ = FeatureExtractionMixin.get_feature_extractor_dict(pretrained_model_name_or_path, **kwargs)
        feature_extractor_class = config_dict.get("feature_extractor_type", None)
        feature_extractor_auto_map = None
        if "AutoFeatureExtractor" in config_dict.get("auto_map", {}):
            feature_extractor_auto_map = config_dict["auto_map"]["AutoFeatureExtractor"]

        # If we don't find the feature extractor class in the feature extractor config, let's try the model config.
        if feature_extractor_class is None and feature_extractor_auto_map is None:
            if not isinstance(config, PretrainedConfig):
                config = AutoConfig.from_pretrained(pretrained_model_name_or_path, **kwargs)
            # It could be in `config.feature_extractor_type`
            feature_extractor_class = getattr(config, "feature_extractor_type", None)
            if hasattr(config, "auto_map") and "AutoFeatureExtractor" in config.auto_map:
                feature_extractor_auto_map = config.auto_map["AutoFeatureExtractor"]

        if feature_extractor_class is not None:
            feature_extractor_class = feature_extractor_class_from_name(feature_extractor_class)

        has_remote_code = feature_extractor_auto_map is not None
        has_local_code = feature_extractor_class is not None or type(config) in FEATURE_EXTRACTOR_MAPPING
        trust_remote_code = resolve_trust_remote_code(
            trust_remote_code, pretrained_model_name_or_path, has_local_code, has_remote_code
        )

        if has_remote_code and trust_remote_code:
            feature_extractor_class = get_class_from_dynamic_module(
                feature_extractor_auto_map, pretrained_model_name_or_path, **kwargs
            )
            _ = kwargs.pop("code_revision", None)
            if os.path.isdir(pretrained_model_name_or_path):
                feature_extractor_class.register_for_auto_class()
            return feature_extractor_class.from_dict(config_dict, **kwargs)
        elif feature_extractor_class is not None:
            return feature_extractor_class.from_dict(config_dict, **kwargs)
        # Last try: we use the FEATURE_EXTRACTOR_MAPPING.
        elif type(config) in FEATURE_EXTRACTOR_MAPPING:
            feature_extractor_class = FEATURE_EXTRACTOR_MAPPING[type(config)]
            return feature_extractor_class.from_dict(config_dict, **kwargs)

        raise ValueError(
            f"Unrecognized feature extractor in {pretrained_model_name_or_path}. Should have a "
            f"`feature_extractor_type` key in its {FEATURE_EXTRACTOR_NAME} of {CONFIG_NAME}, or one of the following "
            f"`model_type` keys in its {CONFIG_NAME}: {', '.join(c for c in FEATURE_EXTRACTOR_MAPPING_NAMES.keys())}"
        )

    @staticmethod
    def register(config_class, feature_extractor_class):
        """Register a new feature extractor for this class."""
        FEATURE_EXTRACTOR_MAPPING.register(config_class, feature_extractor_class)
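

# Usage sketch (repo id shown for illustration):
#
#   feature_extractor = AutoFeatureExtractor.from_pretrained("facebook/wav2vec2-base-960h")
#   inputs = feature_extractor(raw_audio, sampling_rate=16000, return_tensors="pt")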
| 26 | 0 |
"""simple docstring"""
from __future__ import annotations
import unittest
import numpy as np
from transformers import BlipTextConfig
from transformers.testing_utils import require_tf, slow
from transformers.utils import is_tf_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
if is_tf_available():
import tensorflow as tf
from transformers import TFBlipTextModel
from transformers.models.blip.modeling_tf_blip import TF_BLIP_PRETRAINED_MODEL_ARCHIVE_LIST
class BlipTextModelTester:
    def __init__(
        self,
        parent,
        batch_size=12,
        seq_length=7,
        is_training=True,
        use_input_mask=True,
        use_labels=True,
        vocab_size=99,
        hidden_size=32,
        projection_dim=32,
        num_hidden_layers=2,
        num_attention_heads=4,
        intermediate_size=37,
        dropout=0.1,
        attention_dropout=0.1,
        max_position_embeddings=512,
        initializer_range=0.02,
        bos_token_id=0,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.projection_dim = projection_dim
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.max_position_embeddings = max_position_embeddings
        self.initializer_range = initializer_range
        self.scope = scope
        self.bos_token_id = bos_token_id

    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])

        if input_mask is not None:
            input_mask = input_mask.numpy()
            batch_size, seq_length = input_mask.shape
            rnd_start_indices = np.random.randint(1, seq_length - 1, size=(batch_size,))
            for batch_idx, start_index in enumerate(rnd_start_indices):
                input_mask[batch_idx, :start_index] = 1
                input_mask[batch_idx, start_index:] = 0

        config = self.get_config()

        return config, input_ids, tf.convert_to_tensor(input_mask)

    def get_config(self):
        return BlipTextConfig(
            vocab_size=self.vocab_size,
            hidden_size=self.hidden_size,
            projection_dim=self.projection_dim,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            dropout=self.dropout,
            attention_dropout=self.attention_dropout,
            max_position_embeddings=self.max_position_embeddings,
            initializer_range=self.initializer_range,
            bos_token_id=self.bos_token_id,
        )

    def create_and_check_model(self, config, input_ids, input_mask):
        model = TFBlipTextModel(config=config)
        result = model(input_ids, attention_mask=input_mask, training=False)
        result = model(input_ids, training=False)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))
        self.parent.assertEqual(result.pooler_output.shape, (self.batch_size, self.hidden_size))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, input_ids, input_mask = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "attention_mask": input_mask}
        return config, inputs_dict


@require_tf
class BlipTextModelTest(TFModelTesterMixin, unittest.TestCase):
    all_model_classes = (TFBlipTextModel,) if is_tf_available() else ()
    # NOTE: the three boolean flag names below were anonymized in the source;
    # `test_onnx`, `test_pruning`, and `test_head_masking` are assumptions.
    test_onnx = False
    test_pruning = False
    test_head_masking = False

    def setUp(self):
        self.model_tester = BlipTextModelTester(self)
        self.config_tester = ConfigTester(self, config_class=BlipTextConfig, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_training(self):
        pass

    def test_training_gradient_checkpointing(self):
        pass

    @unittest.skip(reason="Blip does not use inputs_embeds")
    def test_inputs_embeds(self):
        pass

    @unittest.skip(reason="BlipTextModel has no base class and is not available in MODEL_MAPPING")
    def test_save_load_fast_init_from_base(self):
        pass

    @unittest.skip(reason="BlipTextModel has no base class and is not available in MODEL_MAPPING")
    def test_save_load_fast_init_to_base(self):
        pass

    @slow
    def test_model_from_pretrained(self):
        for model_name in TF_BLIP_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = TFBlipTextModel.from_pretrained(model_name)
            self.assertIsNotNone(model)

    def test_pt_tf_model_equivalence(self, allow_missing_keys=True):
        super().test_pt_tf_model_equivalence(allow_missing_keys=allow_missing_keys)
| 352 |
"""simple docstring"""
import json
import os
import shutil
import tempfile
import unittest
import numpy as np
import pytest
from transformers import BertTokenizer, BertTokenizerFast
from transformers.models.bert.tokenization_bert import VOCAB_FILES_NAMES
from transformers.testing_utils import require_vision
from transformers.utils import FEATURE_EXTRACTOR_NAME, is_vision_available
if is_vision_available():
from PIL import Image
from transformers import ChineseCLIPImageProcessor, ChineseCLIPProcessor
@require_vision
class ChineseCLIPProcessorTest(unittest.TestCase):
    def setUp(self):
        self.tmpdirname = tempfile.mkdtemp()

        vocab_tokens = [
            "[UNK]",
            "[CLS]",
            "[SEP]",
            "[PAD]",
            "[MASK]",
            "的",
            "价",
            "格",
            "是",
            "15",
            "便",
            "alex",
            "##andra",
            ",",
            "。",
            "-",
            "t",
            "shirt",
        ]
        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as vocab_writer:
            vocab_writer.write("".join([x + "\n" for x in vocab_tokens]))

        image_processor_map = {
            "do_resize": True,
            "size": {"height": 224, "width": 224},
            "do_center_crop": True,
            "crop_size": {"height": 18, "width": 18},
            "do_normalize": True,
            "image_mean": [0.48145466, 0.4578275, 0.40821073],
            "image_std": [0.26862954, 0.26130258, 0.27577711],
            "do_convert_rgb": True,
        }
        self.image_processor_file = os.path.join(self.tmpdirname, FEATURE_EXTRACTOR_NAME)
        with open(self.image_processor_file, "w", encoding="utf-8") as fp:
            json.dump(image_processor_map, fp)

    def get_tokenizer(self, **kwargs):
        return BertTokenizer.from_pretrained(self.tmpdirname, **kwargs)

    def get_rust_tokenizer(self, **kwargs):
        return BertTokenizerFast.from_pretrained(self.tmpdirname, **kwargs)

    def get_image_processor(self, **kwargs):
        return ChineseCLIPImageProcessor.from_pretrained(self.tmpdirname, **kwargs)

    def tearDown(self):
        shutil.rmtree(self.tmpdirname)

    def prepare_image_inputs(self):
        # Prepare a list of PIL images from random uint8 arrays.
        image_inputs = [np.random.randint(255, size=(3, 30, 400), dtype=np.uint8)]
        image_inputs = [Image.fromarray(np.moveaxis(x, 0, -1)) for x in image_inputs]
        return image_inputs

    def test_save_load_pretrained_default(self):
        tokenizer_slow = self.get_tokenizer()
        tokenizer_fast = self.get_rust_tokenizer()
        image_processor = self.get_image_processor()

        processor_slow = ChineseCLIPProcessor(tokenizer=tokenizer_slow, image_processor=image_processor)
        processor_slow.save_pretrained(self.tmpdirname)
        processor_slow = ChineseCLIPProcessor.from_pretrained(self.tmpdirname, use_fast=False)

        processor_fast = ChineseCLIPProcessor(tokenizer=tokenizer_fast, image_processor=image_processor)
        processor_fast.save_pretrained(self.tmpdirname)
        processor_fast = ChineseCLIPProcessor.from_pretrained(self.tmpdirname)

        self.assertEqual(processor_slow.tokenizer.get_vocab(), tokenizer_slow.get_vocab())
        self.assertEqual(processor_fast.tokenizer.get_vocab(), tokenizer_fast.get_vocab())
        self.assertEqual(tokenizer_slow.get_vocab(), tokenizer_fast.get_vocab())
        self.assertIsInstance(processor_slow.tokenizer, BertTokenizer)
        self.assertIsInstance(processor_fast.tokenizer, BertTokenizerFast)

        self.assertEqual(processor_slow.image_processor.to_json_string(), image_processor.to_json_string())
        self.assertEqual(processor_fast.image_processor.to_json_string(), image_processor.to_json_string())
        self.assertIsInstance(processor_slow.image_processor, ChineseCLIPImageProcessor)
        self.assertIsInstance(processor_fast.image_processor, ChineseCLIPImageProcessor)

    def test_save_load_pretrained_additional_features(self):
        processor = ChineseCLIPProcessor(tokenizer=self.get_tokenizer(), image_processor=self.get_image_processor())
        processor.save_pretrained(self.tmpdirname)

        tokenizer_add_kwargs = self.get_tokenizer(cls_token="(CLS)", sep_token="(SEP)")
        image_processor_add_kwargs = self.get_image_processor(do_normalize=False)

        processor = ChineseCLIPProcessor.from_pretrained(
            self.tmpdirname, cls_token="(CLS)", sep_token="(SEP)", do_normalize=False
        )

        self.assertEqual(processor.tokenizer.get_vocab(), tokenizer_add_kwargs.get_vocab())
        self.assertIsInstance(processor.tokenizer, BertTokenizerFast)

        self.assertEqual(processor.image_processor.to_json_string(), image_processor_add_kwargs.to_json_string())
        self.assertIsInstance(processor.image_processor, ChineseCLIPImageProcessor)

    def test_image_processor(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()

        processor = ChineseCLIPProcessor(tokenizer=tokenizer, image_processor=image_processor)

        image_input = self.prepare_image_inputs()

        input_feat_extract = image_processor(image_input, return_tensors="np")
        input_processor = processor(images=image_input, return_tensors="np")

        for key in input_feat_extract.keys():
            self.assertAlmostEqual(input_feat_extract[key].sum(), input_processor[key].sum(), delta=1e-2)

    def test_tokenizer(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()

        processor = ChineseCLIPProcessor(tokenizer=tokenizer, image_processor=image_processor)

        input_str = "Alexandra,T-shirt的价格是15便士。"

        encoded_processor = processor(text=input_str)

        encoded_tok = tokenizer(input_str)

        for key in encoded_tok.keys():
            self.assertListEqual(encoded_tok[key], encoded_processor[key])

    def test_processor(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()

        processor = ChineseCLIPProcessor(tokenizer=tokenizer, image_processor=image_processor)

        input_str = "Alexandra,T-shirt的价格是15便士。"
        image_input = self.prepare_image_inputs()

        inputs = processor(text=input_str, images=image_input)

        self.assertListEqual(list(inputs.keys()), ["input_ids", "token_type_ids", "attention_mask", "pixel_values"])

        # test if it raises when no input is passed
        with pytest.raises(ValueError):
            processor()

    def test_tokenizer_decode(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()

        processor = ChineseCLIPProcessor(tokenizer=tokenizer, image_processor=image_processor)

        predicted_ids = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]

        decoded_processor = processor.batch_decode(predicted_ids)
        decoded_tok = tokenizer.batch_decode(predicted_ids)

        self.assertListEqual(decoded_tok, decoded_processor)

    def test_model_input_names(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()

        processor = ChineseCLIPProcessor(tokenizer=tokenizer, image_processor=image_processor)

        input_str = "Alexandra,T-shirt的价格是15便士。"
        image_input = self.prepare_image_inputs()

        inputs = processor(text=input_str, images=image_input)

        self.assertListEqual(list(inputs.keys()), processor.model_input_names)
| 128 | 0 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices
logger = logging.get_logger(__name__)

FOCALNET_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "microsoft/focalnet-tiny": "https://huggingface.co/microsoft/focalnet-tiny/resolve/main/config.json",
}
class FocalNetConfig(BackboneConfigMixin, PretrainedConfig):
    model_type = "focalnet"

    def __init__(
        self,
        image_size=224,
        patch_size=4,
        num_channels=3,
        embed_dim=96,
        use_conv_embed=False,
        hidden_sizes=[192, 384, 768, 768],
        depths=[2, 2, 6, 2],
        focal_levels=[2, 2, 2, 2],
        focal_windows=[3, 3, 3, 3],
        hidden_act="gelu",
        mlp_ratio=4.0,
        hidden_dropout_prob=0.0,
        drop_path_rate=0.1,
        use_layerscale=False,
        layerscale_value=1e-4,
        use_post_layernorm=False,
        use_post_layernorm_in_modulation=False,
        normalize_modulator=False,
        initializer_range=0.02,
        layer_norm_eps=1e-5,
        encoder_stride=32,
        out_features=None,
        out_indices=None,
        **kwargs,
    ):
        super().__init__(**kwargs)

        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.embed_dim = embed_dim
        self.use_conv_embed = use_conv_embed
        self.hidden_sizes = hidden_sizes
        self.depths = depths
        self.focal_levels = focal_levels
        self.focal_windows = focal_windows
        self.hidden_act = hidden_act
        self.mlp_ratio = mlp_ratio
        self.hidden_dropout_prob = hidden_dropout_prob
        self.drop_path_rate = drop_path_rate
        self.use_layerscale = use_layerscale
        self.layerscale_value = layerscale_value
        self.use_post_layernorm = use_post_layernorm
        self.use_post_layernorm_in_modulation = use_post_layernorm_in_modulation
        self.normalize_modulator = normalize_modulator
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.encoder_stride = encoder_stride
        self.stage_names = ["stem"] + [f"stage{idx}" for idx in range(1, len(self.depths) + 1)]
        self._out_features, self._out_indices = get_aligned_output_features_output_indices(
            out_features=out_features, out_indices=out_indices, stage_names=self.stage_names
        )
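

# Usage sketch (illustrative):
#
#   configuration = FocalNetConfig()        # defaults mirror the focalnet-tiny style
#   # model = FocalNetModel(configuration)  # assumes the matching model class is available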
| 283 |
import gc
import unittest
import numpy as np
import torch
from diffusers import DanceDiffusionPipeline, IPNDMScheduler, UNet2DModel
from diffusers.utils import slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu, skip_mps
from ..pipeline_params import UNCONDITIONAL_AUDIO_GENERATION_BATCH_PARAMS, UNCONDITIONAL_AUDIO_GENERATION_PARAMS
from ..test_pipelines_common import PipelineTesterMixin
enable_full_determinism()
class DanceDiffusionPipelineFastTests(PipelineTesterMixin, unittest.TestCase):
    pipeline_class = DanceDiffusionPipeline
    params = UNCONDITIONAL_AUDIO_GENERATION_PARAMS
    required_optional_params = PipelineTesterMixin.required_optional_params - {
        "callback",
        "latents",
        "callback_steps",
        "output_type",
        "num_images_per_prompt",
    }
    batch_params = UNCONDITIONAL_AUDIO_GENERATION_BATCH_PARAMS
    # NOTE: these two flag names were anonymized in the source; `test_attention_slicing`
    # and `test_cpu_offload` are assumptions based on the skipped tests below.
    test_attention_slicing = False
    test_cpu_offload = False

    def get_dummy_components(self):
        torch.manual_seed(0)
        unet = UNet2DModel(
            block_out_channels=(32, 32, 64),
            extra_in_channels=16,
            sample_size=512,
            sample_rate=16000,
            in_channels=2,
            out_channels=2,
            flip_sin_to_cos=True,
            use_timestep_embedding=False,
            time_embedding_type="fourier",
            mid_block_type="UNetMidBlock1D",
            down_block_types=("DownBlock1DNoSkip", "DownBlock1D", "AttnDownBlock1D"),
            up_block_types=("AttnUpBlock1D", "UpBlock1D", "UpBlock1DNoSkip"),
        )
        scheduler = IPNDMScheduler()

        components = {
            "unet": unet,
            "scheduler": scheduler,
        }
        return components

    def get_dummy_inputs(self, device, seed=0):
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            "batch_size": 1,
            "generator": generator,
            "num_inference_steps": 4,
        }
        return inputs

    def test_dance_diffusion(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components()
        pipe = DanceDiffusionPipeline(**components)
        pipe = pipe.to(device)
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs(device)
        output = pipe(**inputs)
        audio = output.audios

        audio_slice = audio[0, -3:, -3:]

        assert audio.shape == (1, 2, components["unet"].sample_size)
        expected_slice = np.array([-0.7265, 1.0000, -0.8388, 0.1175, 0.9498, -1.0000])
        assert np.abs(audio_slice.flatten() - expected_slice).max() < 1e-2

    @skip_mps
    def test_save_load_local(self):
        return super().test_save_load_local()

    @skip_mps
    def test_dict_tuple_outputs_equivalent(self):
        return super().test_dict_tuple_outputs_equivalent(expected_max_difference=3e-3)

    @skip_mps
    def test_save_load_optional_components(self):
        return super().test_save_load_optional_components()

    @skip_mps
    def test_attention_slicing_forward_pass(self):
        return super().test_attention_slicing_forward_pass()

    def test_inference_batch_single_identical(self):
        super().test_inference_batch_single_identical(expected_max_diff=3e-3)


@slow
@require_torch_gpu
class PipelineIntegrationTests(unittest.TestCase):
    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def test_dance_diffusion(self):
        device = torch_device

        pipe = DanceDiffusionPipeline.from_pretrained("harmonai/maestro-150k")
        pipe = pipe.to(device)
        pipe.set_progress_bar_config(disable=None)

        generator = torch.manual_seed(0)
        output = pipe(generator=generator, num_inference_steps=100, audio_length_in_s=4.096)
        audio = output.audios

        audio_slice = audio[0, -3:, -3:]

        assert audio.shape == (1, 2, pipe.unet.sample_size)
        expected_slice = np.array([-0.0192, -0.0231, -0.0318, -0.0059, 0.0002, -0.0020])
        assert np.abs(audio_slice.flatten() - expected_slice).max() < 1e-2

    def test_dance_diffusion_fp16(self):
        device = torch_device

        pipe = DanceDiffusionPipeline.from_pretrained("harmonai/maestro-150k", torch_dtype=torch.float16)
        pipe = pipe.to(device)
        pipe.set_progress_bar_config(disable=None)

        generator = torch.manual_seed(0)
        output = pipe(generator=generator, num_inference_steps=100, audio_length_in_s=4.096)
        audio = output.audios

        audio_slice = audio[0, -3:, -3:]

        assert audio.shape == (1, 2, pipe.unet.sample_size)
        expected_slice = np.array([-0.0367, -0.0488, -0.0771, -0.0525, -0.0444, -0.0341])
        assert np.abs(audio_slice.flatten() - expected_slice).max() < 1e-2
| 122 | 0 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_speech_available, is_torch_available
_import_structure = {
'''configuration_audio_spectrogram_transformer''': [
'''AUDIO_SPECTROGRAM_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP''',
'''ASTConfig''',
]
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_audio_spectrogram_transformer"] = [
'''AUDIO_SPECTROGRAM_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''ASTForAudioClassification''',
'''ASTModel''',
'''ASTPreTrainedModel''',
]
try:
if not is_speech_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["feature_extraction_audio_spectrogram_transformer"] = ["ASTFeatureExtractor"]
if TYPE_CHECKING:
from .configuration_audio_spectrogram_transformer import (
AUDIO_SPECTROGRAM_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP,
ASTConfig,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_audio_spectrogram_transformer import (
AUDIO_SPECTROGRAM_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
ASTForAudioClassification,
ASTModel,
ASTPreTrainedModel,
)
try:
if not is_speech_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_audio_spectrogram_transformer import ASTFeatureExtractor
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
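
# With `sys.modules[__name__]` replaced by a `_LazyModule`, the submodules declared in
# `_import_structure` are only imported on first attribute access, which keeps
# `import transformers` fast even with many model families registered.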
| 42 |
from __future__ import annotations
import random
# Maximum size of the population. Bigger could be faster but is more memory expensive.
N_POPULATION = 200
# Number of elements selected in every generation of evolution. The selection takes
# place from best to worst of that generation and must be smaller than N_POPULATION.
N_SELECTED = 50
# Probability that an element of a generation can mutate, changing one of its genes.
# This will guarantee that all genes will be used during evolution.
MUTATION_PROBABILITY = 0.4
# Just a seed to improve randomness required by the algorithm.
random.seed(random.randint(0, 10_00))
def evaluate(item: str, main_target: str) -> tuple[str, float]:
    """Evaluate how similar `item` is to the target by counting matching genes."""
    score = len([g for position, g in enumerate(item) if g == main_target[position]])
    return (item, float(score))


def crossover(parent_1: str, parent_2: str) -> tuple[str, str]:
    """Slice both parents at a random point and combine the pieces into two children."""
    random_slice = random.randint(0, len(parent_1) - 1)
    child_1 = parent_1[:random_slice] + parent_2[random_slice:]
    child_2 = parent_2[:random_slice] + parent_1[random_slice:]
    return (child_1, child_2)


def mutate(child: str, genes: list[str]) -> str:
    """Mutate a random gene of `child` with another one from the gene list."""
    child_list = list(child)
    if random.uniform(0, 1) < MUTATION_PROBABILITY:
        child_list[random.randint(0, len(child)) - 1] = random.choice(genes)
    return "".join(child_list)


def select(
    parent_1: tuple[str, float],
    population_score: list[tuple[str, float]],
    genes: list[str],
) -> list[str]:
    """Select a second parent and generate new (mutated) children."""
    pop = []
    # Generate more children proportionally to the fitness score.
    child_n = int(parent_1[1] * 100) + 1
    child_n = 10 if child_n >= 10 else child_n
    for _ in range(child_n):
        parent_2 = population_score[random.randint(0, N_SELECTED)][0]

        child_1, child_2 = crossover(parent_1[0], parent_2)
        # Append new string to the population list.
        pop.append(mutate(child_1, genes))
        pop.append(mutate(child_2, genes))
    return pop


def basic(target: str, genes: list[str], debug: bool = True) -> tuple[int, int, str]:
    """Run the evolution until the target string is reached."""
    # Verify if N_POPULATION is bigger than N_SELECTED
    if N_POPULATION < N_SELECTED:
        msg = f"{N_POPULATION} must be bigger than {N_SELECTED}"
        raise ValueError(msg)
    # Verify that the target contains no genes besides the ones inside genes variable.
    not_in_genes_list = sorted({c for c in target if c not in genes})
    if not_in_genes_list:
        msg = f"{not_in_genes_list} is not in genes list, evolution cannot converge"
        raise ValueError(msg)

    # Generate random starting population.
    population = []
    for _ in range(N_POPULATION):
        population.append("".join([random.choice(genes) for i in range(len(target))]))

    # Just some logs to know what the algorithms is doing.
    generation, total_population = 0, 0

    # This loop will end when we find a perfect match for our target.
    while True:
        generation += 1
        total_population += len(population)

        # Random population created. Now it's time to evaluate.
        # Adding a bit of concurrency can make everything faster,
        #
        # import concurrent.futures
        # population_score: list[tuple[str, float]] = []
        # with concurrent.futures.ThreadPoolExecutor(
        #       max_workers=NUM_WORKERS) as executor:
        #     futures = {executor.submit(evaluate, item) for item in population}
        #     concurrent.futures.wait(futures)
        #     population_score = [item.result() for item in futures]
        #
        # but with a simple algorithm like this, it will probably be slower.
        # We just need to call evaluate for every item inside the population.
        population_score = [evaluate(item, target) for item in population]

        # Check if there is a matching evolution.
        population_score = sorted(population_score, key=lambda x: x[1], reverse=True)
        if population_score[0][0] == target:
            return (generation, total_population, population_score[0][0])

        # Print the best result every 10 generations,
        # just to know that the algorithm is working.
        if debug and generation % 10 == 0:
            print(
                f"\nGeneration: {generation}"
                f"\nTotal Population:{total_population}"
                f"\nBest score: {population_score[0][1]}"
                f"\nBest string: {population_score[0][0]}"
            )

        # Flush the old population, keeping some of the best evolutions.
        # Keeping this avoids regression of evolution.
        population_best = population[: int(N_POPULATION / 3)]
        population.clear()
        population.extend(population_best)

        # Normalize population score to be between 0 and 1.
        population_score = [
            (item, score / len(target)) for item, score in population_score
        ]

        # This is selection
        for i in range(N_SELECTED):
            population.extend(select(population_score[int(i)], population_score, genes))
            # Check if the population has already reached the maximum value and if so,
            # break the cycle. If this check is disabled, the algorithm will take
            # forever to compute large strings, but will also calculate small strings in
            # far fewer generations.
            if len(population) > N_POPULATION:
                break
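

# Worked example (illustrative): crossover("AAAA", "BBBB") with a random slice point
# of 2 yields ("AABB", "BBAA"); mutate() may then swap one character of a child for a
# random gene, which is what keeps the search from stalling on local optima.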
if __name__ == "__main__":
    target_str = (
        "This is a genetic algorithm to evaluate, combine, evolve, and mutate a string!"
    )
    genes_list = list(
        " ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklm"
        "nopqrstuvwxyz.,;!?+-*#@^'èéòà€ù=)(&%$£/\\"
    )
    generation, population, target = basic(target_str, genes_list)
print(
f"""\nGeneration: {generation}\nTotal Population: {population}\nTarget: {target}"""
)
| 42 | 1 |
def gray_code(bit_count: int) -> list:
    """Return the n-bit Gray code sequence as a list of integers."""
    # bit count represents no. of bits in the gray code
    if bit_count < 0:
        raise ValueError("The given input must be positive")

    # get the generated string sequence
    sequence = gray_code_sequence_string(bit_count)
    # convert the binary strings to integers
    for i in range(len(sequence)):
        sequence[i] = int(sequence[i], 2)

    return sequence


def gray_code_sequence_string(bit_count: int) -> list:
    """Build the n-bit Gray code sequence as binary strings, recursively."""
    # The approach is a recursive one.
    # Base case achieved when either n = 0 or n = 1
    if bit_count == 0:
        return ["0"]

    if bit_count == 1:
        return ["0", "1"]

    seq_len = 1 << bit_count  # defines the length of the sequence
    # 1 << n is equivalent to 2^n

    # recursive answer will generate answer for n-1 bits
    smaller_sequence = gray_code_sequence_string(bit_count - 1)

    sequence = []

    # append 0 to first half of the smaller sequence generated
    for i in range(seq_len // 2):
        generated_no = "0" + smaller_sequence[i]
        sequence.append(generated_no)

    # append 1 to second half ... start from the end of the list
    for i in reversed(range(seq_len // 2)):
        generated_no = "1" + smaller_sequence[i]
        sequence.append(generated_no)

    return sequence
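

# Worked example: for bit_count = 2 the reflected sequence is
#   gray_code_sequence_string(2) -> ["00", "01", "11", "10"]
#   gray_code(2)                 -> [0, 1, 3, 2]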
if __name__ == "__main__":
import doctest
doctest.testmod()
| 326 |
import argparse
from collections import OrderedDict
from pathlib import Path
import torch
from transformers import (
VisualBertConfig,
VisualBertForMultipleChoice,
VisualBertForPreTraining,
VisualBertForQuestionAnswering,
VisualBertForVisualReasoning,
)
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)

rename_keys_prefix = [
('''bert.bert''', '''visual_bert'''),
('''bert.cls''', '''cls'''),
('''bert.classifier''', '''cls'''),
('''token_type_embeddings_visual''', '''visual_token_type_embeddings'''),
('''position_embeddings_visual''', '''visual_position_embeddings'''),
('''projection''', '''visual_projection'''),
]
ACCEPTABLE_CHECKPOINTS = [
'''nlvr2_coco_pre_trained.th''',
'''nlvr2_fine_tuned.th''',
'''nlvr2_pre_trained.th''',
'''vcr_coco_pre_train.th''',
'''vcr_fine_tune.th''',
'''vcr_pre_train.th''',
'''vqa_coco_pre_trained.th''',
'''vqa_fine_tuned.th''',
'''vqa_pre_trained.th''',
]
def load_state_dict(checkpoint_path):
    sd = torch.load(checkpoint_path, map_location="cpu")
    return sd


def get_new_dict(d, config, rename_keys_prefix=rename_keys_prefix):
    new_d = OrderedDict()
    # NOTE: the key name below was anonymized in the source; "visual_bert.embeddings.position_ids"
    # is a reconstruction based on the VisualBERT embedding module.
    new_d["visual_bert.embeddings.position_ids"] = torch.arange(config.max_position_embeddings).expand((1, -1))
    # detector_d = OrderedDict()
    for key in d:
        if "detector" in key:
            # detector_d[key.replace('detector.','')] = d[key]
            continue
        new_key = key
        for name_pair in rename_keys_prefix:
            new_key = new_key.replace(name_pair[0], name_pair[1])
        new_d[new_key] = d[key]
        if key == "bert.cls.predictions.decoder.weight":
            # Old bert code didn't have `decoder.bias`, but was added separately
            new_d["cls.predictions.decoder.bias"] = new_d["cls.predictions.bias"]
    return new_d


@torch.no_grad()
def convert_visual_bert_checkpoint(checkpoint_path, pytorch_dump_folder_path):
    """Copy/paste/tweak the checkpoint's weights into our VisualBERT structure."""
    assert (
        checkpoint_path.split("/")[-1] in ACCEPTABLE_CHECKPOINTS
    ), f"The checkpoint provided must be in {ACCEPTABLE_CHECKPOINTS}."

    # Get Config
    if "pre" in checkpoint_path:
        model_type = "pretraining"
        if "vcr" in checkpoint_path:
            config_params = {"visual_embedding_dim": 512}
        elif "vqa_advanced" in checkpoint_path:
            config_params = {"visual_embedding_dim": 2048}
        elif "vqa" in checkpoint_path:
            config_params = {"visual_embedding_dim": 2048}
        elif "nlvr" in checkpoint_path:
            config_params = {"visual_embedding_dim": 1024}
        else:
            raise NotImplementedError(f"No implementation found for `{checkpoint_path}`.")
    else:
        if "vcr" in checkpoint_path:
            config_params = {"visual_embedding_dim": 512}
            model_type = "multichoice"
        elif "vqa_advanced" in checkpoint_path:
            config_params = {"visual_embedding_dim": 2048}
            model_type = "vqa_advanced"
        elif "vqa" in checkpoint_path:
            config_params = {"visual_embedding_dim": 2048, "num_labels": 3129}
            model_type = "vqa"
        elif "nlvr" in checkpoint_path:
            config_params = {
                "visual_embedding_dim": 1024,
                "num_labels": 2,
            }
            model_type = "nlvr"

    config = VisualBertConfig(**config_params)

    # Load State Dict
    state_dict = load_state_dict(checkpoint_path)

    new_state_dict = get_new_dict(state_dict, config)

    if model_type == "pretraining":
        model = VisualBertForPreTraining(config)
    elif model_type == "vqa":
        model = VisualBertForQuestionAnswering(config)
    elif model_type == "nlvr":
        model = VisualBertForVisualReasoning(config)
    elif model_type == "multichoice":
        model = VisualBertForMultipleChoice(config)

    model.load_state_dict(new_state_dict)
    # Save Checkpoints
    Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
    model.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
_UpperCamelCase = argparse.ArgumentParser()
# Required parameters
parser.add_argument('''orig_checkpoint_path''', type=str, help='''A path to .th on local filesystem.''')
parser.add_argument('''pytorch_dump_folder_path''', type=str, help='''Path to the output PyTorch model.''')
_UpperCamelCase = parser.parse_args()
convert_visual_bert_checkpoint(args.orig_checkpoint_path, args.pytorch_dump_folder_path)
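# A minimal, self-contained sketch of the prefix-pair renaming that
# `get_new_dict` above performs: every (old, new) pair is applied in order to
# each checkpoint key. The sample key and rename pair below are illustrative only.
def rename_state_dict_keys(state_dict, rename_pairs):
    renamed = OrderedDict()
    for key, value in state_dict.items():
        new_key = key
        for old, new in rename_pairs:
            new_key = new_key.replace(old, new)  # apply each rename in order
        renamed[new_key] = value
    return renamed

_example = OrderedDict([("bert.bert.embeddings.word_embeddings.weight", 0)])
assert list(rename_state_dict_keys(_example, [("bert.bert", "visual_bert")])) == [
    "visual_bert.embeddings.word_embeddings.weight"
]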
| 326 | 1 |
"""simple docstring"""
import collections
import json
import math
import os
import re
import time
from fnmatch import fnmatch
from typing import Dict
import requests
from slack_sdk import WebClient
a = WebClient(token=os.environ['''CI_SLACK_BOT_TOKEN'''])
def _snake_case ( _snake_case : int ) -> Dict:
'''simple docstring'''
_A = test_results.split(' ' )
_A = 0
_A = 0
# When the output is short enough, the output is surrounded by = signs: "== OUTPUT =="
# When it is too long, those signs are not present.
_A = expressions[-2] if '=' in expressions[-1] else expressions[-1]
for i, expression in enumerate(_snake_case ):
if "failed" in expression:
failed += int(expressions[i - 1] )
if "passed" in expression:
success += int(expressions[i - 1] )
return failed, success, time_spent
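# In the de-obfuscated original, the parser above walks the pytest summary
# token by token: each count sits one token before its "failed"/"passed"
# marker, and the elapsed time is read from the trailing tokens (skipping an
# optional closing "="). Illustrative trace on a sample string (not from the
# source): "= 1 failed, 2 passed in 6.17s =" -> (1, 2, "6.17s").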
def _snake_case ( _snake_case : Union[str, Any] ) -> List[str]:
'''simple docstring'''
_A = {}
_A = None
_A = False
for line in failures_short_lines.split('\n' ):
if re.search(R'_ \[doctest\]' , _snake_case ):
_A = True
_A = line.split(' ' )[2]
elif in_error and not line.split(' ' )[0].isdigit():
_A = line
_A = False
return failures
class lowercase_ :
'''simple docstring'''
def __init__( self : Optional[Any] , _UpperCAmelCase : str , _UpperCAmelCase : Dict ):
_A = title
_A = doc_test_results['time_spent'].split(',' )[0]
_A = doc_test_results['success']
_A = doc_test_results['failures']
_A = self.n_success + self.n_failures
# Failures and success of the modeling tests
_A = doc_test_results
@property
def lowerCAmelCase_ ( self : Tuple ):
_A = [self._time_spent]
_A = 0
for time in time_spent:
_A = time.split(':' )
# Time can be formatted as xx:xx:xx, as .xx, or as x.xx if the time spent was less than a minute.
if len(_UpperCAmelCase ) == 1:
_A = [0, 0, time_parts[0]]
_A , _A , _A = int(time_parts[0] ), int(time_parts[1] ), float(time_parts[2] )
total_secs += hours * 3_600 + minutes * 60 + seconds
_A , _A , _A = total_secs // 3_600, (total_secs % 3_600) // 60, total_secs % 60
return F'''{int(_UpperCAmelCase )}h{int(_UpperCAmelCase )}m{int(_UpperCAmelCase )}s'''
@property
def lowerCAmelCase_ ( self : Any ):
return {"type": "header", "text": {"type": "plain_text", "text": self.title}}
@property
def lowerCAmelCase_ ( self : Dict ):
return {
"type": "section",
"text": {
"type": "plain_text",
"text": F'''🌞 There were no failures: all {self.n_tests} tests passed. The suite ran in {self.time}.''',
"emoji": True,
},
"accessory": {
"type": "button",
"text": {"type": "plain_text", "text": "Check Action results", "emoji": True},
"url": F'''https://github.com/huggingface/transformers/actions/runs/{os.environ["GITHUB_RUN_ID"]}''',
},
}
@property
def lowerCAmelCase_ ( self : List[str] ):
return {
"type": "section",
"text": {
"type": "plain_text",
"text": (
F'''There were {self.n_failures} failures, out of {self.n_tests} tests.\nThe suite ran in'''
F''' {self.time}.'''
),
"emoji": True,
},
"accessory": {
"type": "button",
"text": {"type": "plain_text", "text": "Check Action results", "emoji": True},
"url": F'''https://github.com/huggingface/transformers/actions/runs/{os.environ["GITHUB_RUN_ID"]}''',
},
}
@property
def lowerCAmelCase_ ( self : List[str] ):
_A = 40
_A = {k: v['failed'] for k, v in doc_test_results.items() if isinstance(_UpperCAmelCase , _UpperCAmelCase )}
_A = ''
for category, failures in category_failures.items():
if len(_UpperCAmelCase ) == 0:
continue
if report != "":
report += "\n\n"
report += F'''*{category} failures*:'''.ljust(line_length // 2 ).rjust(line_length // 2 ) + "\n"
report += "`"
report += "`\n`".join(_UpperCAmelCase )
report += "`"
return {
"type": "section",
"text": {
"type": "mrkdwn",
"text": F'''The following examples had failures:\n\n\n{report}\n''',
},
}
@property
def lowerCAmelCase_ ( self : List[Any] ):
_A = [self.header]
if self.n_failures > 0:
blocks.append(self.failures )
if self.n_failures > 0:
blocks.extend([self.category_failures] )
if self.n_failures == 0:
blocks.append(self.no_failures )
return json.dumps(_UpperCAmelCase )
@staticmethod
def lowerCAmelCase_ ( ):
_A = [
{
'type': 'section',
'text': {
'type': 'plain_text',
'text': 'There was an issue running the tests.',
},
'accessory': {
'type': 'button',
'text': {'type': 'plain_text', 'text': 'Check Action results', 'emoji': True},
'url': F'''https://github.com/huggingface/transformers/actions/runs/{os.environ["GITHUB_RUN_ID"]}''',
},
}
]
print('Sending the following payload' )
print(json.dumps({'blocks': json.loads(_UpperCAmelCase )} ) )
client.chat_postMessage(
channel=os.environ['CI_SLACK_CHANNEL_ID_DAILY'] , text='There was an issue running the tests.' , blocks=_UpperCAmelCase , )
def lowerCAmelCase_ ( self : List[str] ):
print('Sending the following payload' )
print(json.dumps({'blocks': json.loads(self.payload )} ) )
_A = F'''{self.n_failures} failures out of {self.n_tests} tests,''' if self.n_failures else 'All tests passed.'
_A = client.chat_postMessage(
channel=os.environ['CI_SLACK_CHANNEL_ID_DAILY'] , blocks=self.payload , text=_UpperCAmelCase , )
def lowerCAmelCase_ ( self : str , _UpperCAmelCase : Optional[int] , _UpperCAmelCase : List[str] , _UpperCAmelCase : str , _UpperCAmelCase : Tuple ):
_A = ''
for key, value in failures.items():
_A = value[:200] + ' [Truncated]' if len(_UpperCAmelCase ) > 250 else value
failures_text += F'''*{key}*\n_{value}_\n\n'''
_A = job_name
_A = {'type': 'section', 'text': {'type': 'mrkdwn', 'text': text}}
if job_link is not None:
_A = {
'type': 'button',
'text': {'type': 'plain_text', 'text': 'GitHub Action job', 'emoji': True},
'url': job_link,
}
return [
{"type": "header", "text": {"type": "plain_text", "text": title.upper(), "emoji": True}},
content,
{"type": "section", "text": {"type": "mrkdwn", "text": failures_text}},
]
def lowerCAmelCase_ ( self : List[str] ):
if self.thread_ts is None:
raise ValueError('Can only post reply if a post has been made.' )
_A = self.doc_test_results.pop('job_link' )
self.doc_test_results.pop('failures' )
self.doc_test_results.pop('success' )
self.doc_test_results.pop('time_spent' )
_A = sorted(self.doc_test_results.items() , key=lambda _UpperCAmelCase : t[0] )
for job, job_result in sorted_dict:
if len(job_result['failures'] ):
_A = F'''*Num failures* :{len(job_result["failed"] )} \n'''
_A = job_result['failures']
_A = self.get_reply_blocks(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , text=_UpperCAmelCase )
print('Sending the following reply' )
print(json.dumps({'blocks': blocks} ) )
client.chat_postMessage(
channel=os.environ['CI_SLACK_CHANNEL_ID_DAILY'] , text=F'''Results for {job}''' , blocks=_UpperCAmelCase , thread_ts=self.thread_ts['ts'] , )
time.sleep(1 )
def _snake_case ( ) -> Union[str, Any]:
'''simple docstring'''
_A = os.environ['GITHUB_RUN_ID']
_A = F'''https://api.github.com/repos/huggingface/transformers/actions/runs/{run_id}/jobs?per_page=100'''
_A = requests.get(_snake_case ).json()
_A = {}
try:
jobs.update({job['name']: job['html_url'] for job in result['jobs']} )
_A = math.ceil((result['total_count'] - 1_00) / 1_00 )
for i in range(_snake_case ):
_A = requests.get(url + F'''&page={i + 2}''' ).json()
jobs.update({job['name']: job['html_url'] for job in result['jobs']} )
return jobs
except Exception as e:
print('Unknown error, could not fetch links.' , _snake_case )
return {}
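# The loop above pages through the GitHub Actions jobs API 100 entries at a
# time: after the first request it fetches ceil((total_count - 100) / 100)
# additional pages via the `&page=` query parameter.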
def _snake_case ( _snake_case : str ) -> int:
'''simple docstring'''
_A = {}
if os.path.exists(_snake_case ):
_A = os.listdir(_snake_case )
for file in files:
try:
with open(os.path.join(_snake_case , _snake_case ) , encoding='utf-8' ) as f:
_A = f.read()
except UnicodeDecodeError as e:
raise ValueError(F'''Could not open {os.path.join(_snake_case , _snake_case )}.''' ) from e
return _artifact
def _snake_case ( ) -> int:
'''simple docstring'''
class lowercase_ :
'''simple docstring'''
def __init__( self : Optional[int] , _UpperCAmelCase : str ):
_A = name
_A = []
def __str__( self : int ):
return self.name
def lowerCAmelCase_ ( self : str , _UpperCAmelCase : str ):
self.paths.append({'name': self.name, 'path': path} )
_A = {}
_A = filter(os.path.isdir , os.listdir() )
for directory in directories:
_A = directory
if artifact_name not in _available_artifacts:
_A = Artifact(_snake_case )
_available_artifacts[artifact_name].add_path(_snake_case )
return _available_artifacts
if __name__ == "__main__":
a = get_job_links()
a = retrieve_available_artifacts()
a = collections.OrderedDict(
[
('''*.py''', '''API Examples'''),
('''*.md''', '''MD Examples'''),
]
)
# This dict will contain all the information relative to each doc test category:
# - failed: list of failed tests
# - failures: dict in the format 'test': 'error_message'
a = {
v: {
'''failed''': [],
'''failures''': {},
}
for v in docs.values()
}
# Link to the GitHub Action job
a = github_actions_job_links.get('''run_doctests''')
a = available_artifacts['''doc_tests_gpu_test_reports'''].paths[0]
a = retrieve_artifact(artifact_path['''name'''])
if "stats" in artifact:
a , a , a = handle_test_results(artifact['''stats'''])
a = failed
a = success
a = time_spent[1:-1] + ''', '''
a = extract_first_line_failure(artifact['''failures_short'''])
for line in artifact["summary_short"].split('''\n'''):
if re.search('''FAILED''', line):
a = line.replace('''FAILED ''', '''''')
a = line.split()[0].replace('''\n''', '''''')
if "::" in line:
a , a = line.split('''::''')
else:
a , a = line, line
for file_regex in docs.keys():
if fnmatch(file_path, file_regex):
a = docs[file_regex]
doc_test_results[category]["failed"].append(test)
a = all_failures[test] if test in all_failures else '''N/A'''
a = failure
break
a = Message('''🤗 Results of the doc tests.''', doc_test_results)
message.post()
message.post_reply()
| 271 |
"""simple docstring"""
import random
import timeit
from functools import wraps
from typing import Callable, Optional
from ..configuration_utils import PretrainedConfig
from ..models.auto.modeling_tf_auto import TF_MODEL_MAPPING, TF_MODEL_WITH_LM_HEAD_MAPPING
from ..utils import is_pyanvml_available, is_tf_available, logging
from .benchmark_utils import (
Benchmark,
Memory,
MemorySummary,
measure_peak_memory_cpu,
start_memory_tracing,
stop_memory_tracing,
)
if is_tf_available():
import tensorflow as tf
from tensorflow.python.framework.errors_impl import ResourceExhaustedError
from .benchmark_args_tf import TensorFlowBenchmarkArguments
if is_pyanvml_available():
import pyanvml.pyanvml as nvml
a = logging.get_logger(__name__)
def _snake_case ( _snake_case : bool , _snake_case : bool ) -> Tuple:
'''simple docstring'''
def run_func(_snake_case : Any ):
@wraps(_snake_case )
def run_in_eager_mode(*_snake_case : List[str] , **_snake_case : Tuple ):
return func(*_snake_case , **_snake_case )
@wraps(_snake_case )
@tf.function(experimental_compile=_snake_case )
def run_in_graph_mode(*_snake_case : Dict , **_snake_case : Tuple ):
return func(*_snake_case , **_snake_case )
if do_eager_mode is True:
if use_xla is not False:
raise ValueError(
'Cannot run model in XLA, if `args.eager_mode` is set to `True`. Please set `args.eager_mode=False`.' )
return run_in_eager_mode
else:
return run_in_graph_mode
return run_func
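# A hedged usage sketch of the decorator factory above (applied later in this
# file as `run_with_tf_optimizations`). Illustrative only; requires TensorFlow:
#
#   @run_with_tf_optimizations(do_eager_mode=False, use_xla=True)
#   def forward():
#       return model(input_ids, training=False)
#
# Passing do_eager_mode=True together with use_xla=True raises a ValueError,
# since XLA compilation only applies to graph-mode tf.functions.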
def _snake_case ( _snake_case : int , _snake_case : int , _snake_case : int ) -> ["tf.Tensor"]:
'''simple docstring'''
_A = random.Random()
_A = [rng.randint(0 , vocab_size - 1 ) for i in range(batch_size * sequence_length )]
return tf.constant(_snake_case , shape=(batch_size, sequence_length) , dtype=tf.intaa )
class lowercase_ ( __lowerCAmelCase ):
'''simple docstring'''
UpperCAmelCase : TensorFlowBenchmarkArguments
UpperCAmelCase : PretrainedConfig
UpperCAmelCase : str = "TensorFlow"
@property
def lowerCAmelCase_ ( self : str ):
return tf.__version__
def lowerCAmelCase_ ( self : List[str] , _UpperCAmelCase : str , _UpperCAmelCase : int , _UpperCAmelCase : int ):
# initialize GPU on separate process
_A = self.args.strategy
if strategy is None:
raise ValueError('A device strategy has to be initialized before using TensorFlow.' )
_A = self._prepare_inference_func(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase )
return self._measure_speed(_inference )
def lowerCAmelCase_ ( self : Optional[Any] , _UpperCAmelCase : str , _UpperCAmelCase : int , _UpperCAmelCase : int ):
_A = self.args.strategy
if strategy is None:
raise ValueError('A device strategy has to be initialized before using TensorFlow.' )
_A = self._prepare_train_func(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase )
return self._measure_speed(_train )
def lowerCAmelCase_ ( self : Dict , _UpperCAmelCase : str , _UpperCAmelCase : int , _UpperCAmelCase : int ):
# initialize GPU on separate process
if self.args.is_gpu:
tf.config.experimental.set_memory_growth(self.args.gpu_list[self.args.device_idx] , _UpperCAmelCase )
_A = self.args.strategy
if strategy is None:
raise ValueError('A device strategy has to be initialized before using TensorFlow.' )
_A = self._prepare_inference_func(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase )
return self._measure_memory(_inference )
def lowerCAmelCase_ ( self : int , _UpperCAmelCase : str , _UpperCAmelCase : int , _UpperCAmelCase : int ):
if self.args.is_gpu:
tf.config.experimental.set_memory_growth(self.args.gpu_list[self.args.device_idx] , _UpperCAmelCase )
_A = self.args.strategy
if strategy is None:
raise ValueError('A device strategy has to be initialized before using TensorFlow.' )
_A = self._prepare_train_func(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase )
return self._measure_memory(_train )
def lowerCAmelCase_ ( self : List[Any] , _UpperCAmelCase : str , _UpperCAmelCase : int , _UpperCAmelCase : int ):
_A = self.config_dict[model_name]
if self.args.fpaa:
raise NotImplementedError('Mixed precision is currently not supported.' )
_A = (
hasattr(_UpperCAmelCase , 'architectures' )
and isinstance(config.architectures , _UpperCAmelCase )
and len(config.architectures ) > 0
)
if not self.args.only_pretrain_model and has_model_class_in_config:
try:
_A = 'TF' + config.architectures[0] # prepend 'TF' for tensorflow model
_A = __import__('transformers' , fromlist=[model_class] )
_A = getattr(_UpperCAmelCase , _UpperCAmelCase )
_A = model_cls(_UpperCAmelCase )
except ImportError:
raise ImportError(
F'''{model_class} does not exist. If you just want to test the pretrained model, you might want to'''
' set `--only_pretrain_model` or `args.only_pretrain_model=True`.' )
else:
_A = TF_MODEL_MAPPING[config.__class__](_UpperCAmelCase )
# encoder-decoder has vocab size saved differently
_A = config.vocab_size if hasattr(_UpperCAmelCase , 'vocab_size' ) else config.encoder.vocab_size
_A = random_input_ids(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase )
@run_with_tf_optimizations(self.args.eager_mode , self.args.use_xla )
def encoder_decoder_forward():
return model(_UpperCAmelCase , decoder_input_ids=_UpperCAmelCase , training=_UpperCAmelCase )
@run_with_tf_optimizations(self.args.eager_mode , self.args.use_xla )
def encoder_forward():
return model(_UpperCAmelCase , training=_UpperCAmelCase )
_A = encoder_decoder_forward if config.is_encoder_decoder else encoder_forward
return _inference
def lowerCAmelCase_ ( self : str , _UpperCAmelCase : str , _UpperCAmelCase : int , _UpperCAmelCase : int ):
_A = self.config_dict[model_name]
if self.args.eager_mode is not False:
raise ValueError('Training cannot be done in eager mode. Please make sure that `args.eager_mode = False`.' )
if self.args.fpaa:
raise NotImplementedError('Mixed precision is currently not supported.' )
_A = (
hasattr(_UpperCAmelCase , 'architectures' )
and isinstance(config.architectures , _UpperCAmelCase )
and len(config.architectures ) > 0
)
if not self.args.only_pretrain_model and has_model_class_in_config:
try:
_A = 'TF' + config.architectures[0] # prepend 'TF' for tensorflow model
_A = __import__('transformers' , fromlist=[model_class] )
_A = getattr(_UpperCAmelCase , _UpperCAmelCase )
_A = model_cls(_UpperCAmelCase )
except ImportError:
raise ImportError(
F'''{model_class} does not exist. If you just want to test the pretrained model, you might want to'''
' set `--only_pretrain_model` or `args.only_pretrain_model=True`.' )
else:
_A = TF_MODEL_WITH_LM_HEAD_MAPPING[config.__class__](_UpperCAmelCase )
# encoder-decoder has vocab size saved differently
_A = config.vocab_size if hasattr(_UpperCAmelCase , 'vocab_size' ) else config.encoder.vocab_size
_A = random_input_ids(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase )
@run_with_tf_optimizations(self.args.eager_mode , self.args.use_xla )
def encoder_decoder_train():
_A = model(_UpperCAmelCase , decoder_input_ids=_UpperCAmelCase , labels=_UpperCAmelCase , training=_UpperCAmelCase )[0]
_A = tf.gradients(_UpperCAmelCase , model.trainable_variables )
return gradients
@run_with_tf_optimizations(self.args.eager_mode , self.args.use_xla )
def encoder_train():
_A = model(_UpperCAmelCase , labels=_UpperCAmelCase , training=_UpperCAmelCase )[0]
_A = tf.gradients(_UpperCAmelCase , model.trainable_variables )
return gradients
_A = encoder_decoder_train if config.is_encoder_decoder else encoder_train
return _train
def lowerCAmelCase_ ( self : Tuple , _UpperCAmelCase : int ):
with self.args.strategy.scope():
try:
if self.args.is_tpu or self.args.use_xla:
                    # run an additional 5 times to stabilize compilation for tpu
logger.info('Do inference on TPU. Running model 5 times to stabilize compilation' )
timeit.repeat(_UpperCAmelCase , repeat=1 , number=5 )
# as written in https://docs.python.org/2/library/timeit.html#timeit.Timer.repeat, min should be taken rather than the average
_A = timeit.repeat(
_UpperCAmelCase , repeat=self.args.repeat , number=10 , )
return min(_UpperCAmelCase ) / 10.0
except ResourceExhaustedError as e:
self.print_fn(F'''Doesn\'t fit on GPU. {e}''' )
def lowerCAmelCase_ ( self : Optional[int] , _UpperCAmelCase : Callable[[], None] ):
logger.info(
'Note that TensorFlow allocates more memory than '
'it might need to speed up computation. '
'The memory reported here corresponds to the memory '
'reported by `nvidia-smi`, which can vary depending '
'on total available memory on the GPU that is used.' )
with self.args.strategy.scope():
try:
if self.args.trace_memory_line_by_line:
if not self.args.eager_mode:
raise ValueError(
'`args.eager_mode` is set to `False`. Make sure to run model in eager mode to measure memory'
' consumption line by line.' )
_A = start_memory_tracing('transformers' )
if self.args.is_tpu:
# tpu
raise NotImplementedError(
'Memory Benchmarking is currently not implemented for TPU. Please disable memory benchmarking'
' with `args.memory=False`' )
elif self.args.is_gpu:
# gpu
if not is_pyanvml_available():
logger.warning(
'py3nvml not installed, we won\'t log GPU memory usage. '
'Install py3nvml (pip install py3nvml) to log information about GPU.' )
_A = 'N/A'
else:
logger.info(
'Measuring total GPU usage on GPU device. Make sure to not have additional processes'
' running on the same GPU.' )
# init nvml
nvml.nvmlInit()
func()
_A = nvml.nvmlDeviceGetHandleByIndex(self.args.device_idx )
_A = nvml.nvmlDeviceGetMemoryInfo(_UpperCAmelCase )
_A = meminfo.used
_A = Memory(_UpperCAmelCase )
# shutdown nvml
nvml.nvmlShutdown()
else:
# cpu
if self.args.trace_memory_line_by_line:
logger.info(
'When enabling line by line tracing, the max peak memory for CPU is inaccurate in'
' TensorFlow.' )
_A = None
else:
_A = measure_peak_memory_cpu(_UpperCAmelCase )
_A = Memory(_UpperCAmelCase ) if isinstance(_UpperCAmelCase , _UpperCAmelCase ) else memory_bytes
if self.args.trace_memory_line_by_line:
_A = stop_memory_tracing(_UpperCAmelCase )
if memory is None:
_A = summary.total
else:
_A = None
return memory, summary
except ResourceExhaustedError as e:
self.print_fn(F'''Doesn\'t fit on GPU. {e}''' )
return "N/A", None
| 271 | 1 |
'''simple docstring'''
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import re
from ..utils import cached_file
# docstyle-ignore
_lowerCamelCase : Tuple = "\nHuman: <<task>>\n\nAssistant: "
_lowerCamelCase : List[Any] = "huggingface-tools/default-prompts"
_lowerCamelCase : Union[str, Any] = {"chat": "chat_prompt_template.txt", "run": "run_prompt_template.txt"}
def __lowerCamelCase ( A__ , A__ , A__="run" ) -> List[Any]:
"""simple docstring"""
if prompt_or_repo_id is None:
UpperCamelCase = DEFAULT_PROMPTS_REPO
# prompt is considered a repo ID when it does not contain any kind of space
if re.search('\\s' , A__ ) is not None:
return prompt_or_repo_id
UpperCamelCase = cached_file(
A__ , PROMPT_FILES[mode] , repo_type='dataset' , user_agent={'agent': agent_name} )
with open(A__ , 'r' , encoding='utf-8' ) as f:
return f.read()
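# Illustrative call of the prompt loader above (named `download_prompt` in the
# de-obfuscated original). A bare template string is returned unchanged, while
# None falls back to the default repo; the values below are the module defaults:
#
#   download_prompt(None, "MyAgent")  # mode="run"
#   # -> contents of run_prompt_template.txt from huggingface-tools/default-prompts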
| 28 |
'''simple docstring'''
from typing import Optional, Tuple
import jax
import jax.numpy as jnp
from flax import linen as nn
from flax.core.frozen_dict import FrozenDict
from transformers import CLIPConfig, FlaxPreTrainedModel
from transformers.models.clip.modeling_flax_clip import FlaxCLIPVisionModule
def __lowerCamelCase ( A__ , A__ , A__=1e-1_2 ) -> Dict:
"""simple docstring"""
UpperCamelCase = jnp.divide(emb_a.T , jnp.clip(jnp.linalg.norm(A__ , axis=1 ) , a_min=A__ ) ).T
UpperCamelCase = jnp.divide(emb_a.T , jnp.clip(jnp.linalg.norm(A__ , axis=1 ) , a_min=A__ ) ).T
return jnp.matmul(A__ , norm_emb_a.T )
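# In the de-obfuscated original, the helper above takes two embedding batches
# (the argument names collapsed during obfuscation), L2-normalizes each row
# with an epsilon floor, and returns their pairwise dot products, i.e. a cosine
# similarity matrix. Illustrative numerical check (requires jax):
#
#   a = jnp.array([[1.0, 0.0]]); b = jnp.array([[0.0, 2.0], [3.0, 0.0]])
#   jax_cosine_distance(a, b)  # -> [[0., 1.]]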
class SCREAMING_SNAKE_CASE ( nn.Module ):
"""simple docstring"""
_SCREAMING_SNAKE_CASE = 42
_SCREAMING_SNAKE_CASE = jnp.floataa
def A ( self : List[Any] ):
"""simple docstring"""
UpperCamelCase = FlaxCLIPVisionModule(self.config.vision_config )
UpperCamelCase = nn.Dense(self.config.projection_dim , use_bias=UpperCamelCase__ , dtype=self.dtype )
UpperCamelCase = self.param('concept_embeds' , jax.nn.initializers.ones , (1_7, self.config.projection_dim) )
UpperCamelCase = self.param(
'special_care_embeds' , jax.nn.initializers.ones , (3, self.config.projection_dim) )
UpperCamelCase = self.param('concept_embeds_weights' , jax.nn.initializers.ones , (1_7,) )
UpperCamelCase = self.param('special_care_embeds_weights' , jax.nn.initializers.ones , (3,) )
def __call__( self : str , UpperCamelCase__ : List[str] ):
"""simple docstring"""
UpperCamelCase = self.vision_model(UpperCamelCase__ )[1]
UpperCamelCase = self.visual_projection(UpperCamelCase__ )
UpperCamelCase = jax_cosine_distance(UpperCamelCase__ , self.special_care_embeds )
UpperCamelCase = jax_cosine_distance(UpperCamelCase__ , self.concept_embeds )
# increase this value to create a stronger `nfsw` filter
# at the cost of increasing the possibility of filtering benign image inputs
UpperCamelCase = 0.0
UpperCamelCase = special_cos_dist - self.special_care_embeds_weights[None, :] + adjustment
UpperCamelCase = jnp.round(UpperCamelCase__ , 3 )
UpperCamelCase = jnp.any(special_scores > 0 , axis=1 , keepdims=UpperCamelCase__ )
# Use a lower threshold if an image has any special care concept
UpperCamelCase = is_special_care * 0.0_1
UpperCamelCase = cos_dist - self.concept_embeds_weights[None, :] + special_adjustment
UpperCamelCase = jnp.round(UpperCamelCase__ , 3 )
UpperCamelCase = jnp.any(concept_scores > 0 , axis=1 )
return has_nsfw_concepts
class SCREAMING_SNAKE_CASE ( _a ):
"""simple docstring"""
_SCREAMING_SNAKE_CASE = CLIPConfig
_SCREAMING_SNAKE_CASE = """clip_input"""
_SCREAMING_SNAKE_CASE = FlaxStableDiffusionSafetyCheckerModule
def __init__( self : Union[str, Any] , UpperCamelCase__ : CLIPConfig , UpperCamelCase__ : Optional[Tuple] = None , UpperCamelCase__ : int = 0 , UpperCamelCase__ : jnp.dtype = jnp.floataa , UpperCamelCase__ : bool = True , **UpperCamelCase__ : List[str] , ):
"""simple docstring"""
if input_shape is None:
UpperCamelCase = (1, 2_2_4, 2_2_4, 3)
UpperCamelCase = self.module_class(config=UpperCamelCase__ , dtype=UpperCamelCase__ , **UpperCamelCase__ )
super().__init__(UpperCamelCase__ , UpperCamelCase__ , input_shape=UpperCamelCase__ , seed=UpperCamelCase__ , dtype=UpperCamelCase__ , _do_init=_do_init )
def A ( self : int , UpperCamelCase__ : jax.random.KeyArray , UpperCamelCase__ : Tuple , UpperCamelCase__ : FrozenDict = None ):
"""simple docstring"""
UpperCamelCase = jax.random.normal(UpperCamelCase__ , UpperCamelCase__ )
UpperCamelCase , UpperCamelCase = jax.random.split(UpperCamelCase__ )
UpperCamelCase = {'params': params_rng, 'dropout': dropout_rng}
UpperCamelCase = self.module.init(UpperCamelCase__ , UpperCamelCase__ )['params']
return random_params
def __call__( self : List[Any] , UpperCamelCase__ : Dict , UpperCamelCase__ : dict = None , ):
"""simple docstring"""
UpperCamelCase = jnp.transpose(UpperCamelCase__ , (0, 2, 3, 1) )
return self.module.apply(
{'params': params or self.params} , jnp.array(UpperCamelCase__ , dtype=jnp.floataa ) , rngs={} , )
| 28 | 1 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_speech_available,
is_tf_available,
is_torch_available,
)
lowercase : str = {
"""configuration_speech_to_text""": ["""SPEECH_TO_TEXT_PRETRAINED_CONFIG_ARCHIVE_MAP""", """Speech2TextConfig"""],
"""processing_speech_to_text""": ["""Speech2TextProcessor"""],
}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowercase : Any = ["""Speech2TextTokenizer"""]
try:
if not is_speech_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowercase : List[Any] = ["""Speech2TextFeatureExtractor"""]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowercase : Any = [
"""TF_SPEECH_TO_TEXT_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""TFSpeech2TextForConditionalGeneration""",
"""TFSpeech2TextModel""",
"""TFSpeech2TextPreTrainedModel""",
]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowercase : Optional[Any] = [
"""SPEECH_TO_TEXT_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""Speech2TextForConditionalGeneration""",
"""Speech2TextModel""",
"""Speech2TextPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_speech_to_text import SPEECH_TO_TEXT_PRETRAINED_CONFIG_ARCHIVE_MAP, SpeechaTextConfig
from .processing_speech_to_text import SpeechaTextProcessor
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_speech_to_text import SpeechaTextTokenizer
try:
if not is_speech_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_speech_to_text import SpeechaTextFeatureExtractor
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_speech_to_text import (
TF_SPEECH_TO_TEXT_PRETRAINED_MODEL_ARCHIVE_LIST,
TFSpeechaTextForConditionalGeneration,
TFSpeechaTextModel,
TFSpeechaTextPreTrainedModel,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_speech_to_text import (
SPEECH_TO_TEXT_PRETRAINED_MODEL_ARCHIVE_LIST,
SpeechaTextForConditionalGeneration,
SpeechaTextModel,
SpeechaTextPreTrainedModel,
)
else:
import sys
lowercase : Optional[Any] = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
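# The module above follows the standard lazy-import pattern: names listed in
# `_import_structure` are only imported on first attribute access through
# `_LazyModule`, while the TYPE_CHECKING branch gives static checkers the real
# imports. A minimal sketch of the idea (illustrative, not the actual
# `_LazyModule` implementation):
import importlib
import types

class MiniLazyModule(types.ModuleType):
    def __init__(self, name, import_structure):
        super().__init__(name)
        # map each exported name to the submodule that defines it
        self._class_to_module = {
            cls: mod for mod, classes in import_structure.items() for cls in classes
        }

    def __getattr__(self, attr):
        submodule = importlib.import_module("." + self._class_to_module[attr], self.__name__)
        return getattr(submodule, attr)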
| 225 |
import os
import socket
from contextlib import contextmanager
import torch
from ..commands.config.default import write_basic_config # noqa: F401
from ..state import PartialState
from .dataclasses import DistributedType
from .imports import is_deepspeed_available, is_tpu_available
from .transformer_engine import convert_model
from .versions import is_torch_version
if is_deepspeed_available():
from deepspeed import DeepSpeedEngine
if is_tpu_available(check_device=False):
import torch_xla.core.xla_model as xm
def A_ ( A__ ) -> Optional[int]:
if is_torch_version('<' , '2.0.0' ) or not hasattr(A__ , '_dynamo' ):
return False
return isinstance(A__ , torch._dynamo.eval_frame.OptimizedModule )
def A_ ( A__ , A__ = True ) -> int:
a__ : Optional[Any] = (torch.nn.parallel.DistributedDataParallel, torch.nn.DataParallel)
a__ : Union[str, Any] = is_compiled_module(A__ )
if is_compiled:
a__ : List[str] = model
a__ : Dict = model._orig_mod
if is_deepspeed_available():
options += (DeepSpeedEngine,)
while isinstance(A__ , A__ ):
a__ : str = model.module
if not keep_fpaa_wrapper:
a__ : Union[str, Any] = getattr(A__ , 'forward' )
a__ : List[Any] = model.__dict__.pop('_original_forward' , A__ )
if original_forward is not None:
while hasattr(A__ , '__wrapped__' ):
a__ : int = forward.__wrapped__
if forward == original_forward:
break
a__ : List[Any] = forward
if getattr(A__ , '_converted_to_transformer_engine' , A__ ):
convert_model(A__ , to_transformer_engine=A__ )
if is_compiled:
a__ : List[str] = model
a__ : Any = compiled_model
return model
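# Illustrative use of the unwrapping helper above (known as
# `extract_model_from_parallel` in accelerate): it peels DistributedDataParallel,
# DataParallel and DeepSpeedEngine wrappers, optionally restores the original
# forward that mixed-precision wrapping replaced, and re-attaches the compiled
# wrapper when the model came from torch.compile:
#
#   wrapped = torch.nn.DataParallel(torch.nn.Linear(2, 2))
#   assert isinstance(extract_model_from_parallel(wrapped), torch.nn.Linear)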
def A_ ( ) -> int:
PartialState().wait_for_everyone()
def A_ ( A__ , A__ ) -> Dict:
if PartialState().distributed_type == DistributedType.TPU:
xm.save(A__ , A__ )
elif PartialState().local_process_index == 0:
torch.save(A__ , A__ )
@contextmanager
def A_ ( **A__ ) -> Any:
for key, value in kwargs.items():
a__ : Optional[int] = str(A__ )
yield
for key in kwargs:
if key.upper() in os.environ:
del os.environ[key.upper()]
def A_ ( A__ ) -> List[str]:
if not hasattr(A__ , '__qualname__' ) and not hasattr(A__ , '__name__' ):
a__ : Dict = getattr(A__ , '__class__' , A__ )
if hasattr(A__ , '__qualname__' ):
return obj.__qualname__
if hasattr(A__ , '__name__' ):
return obj.__name__
return str(A__ )
def A_ ( A__ , A__ ) -> Dict:
for key, value in source.items():
if isinstance(A__ , A__ ):
a__ : Optional[Any] = destination.setdefault(A__ , {} )
merge_dicts(A__ , A__ )
else:
a__ : Optional[int] = value
return destination
def A_ ( A__ = None ) -> bool:
if port is None:
a__ : List[Any] = 2_9500
with socket.socket(socket.AF_INET , socket.SOCK_STREAM ) as s:
return s.connect_ex(('localhost', port) ) == 0
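# Illustrative use of the port check above: connect_ex returning 0 means
# something accepted a TCP connection on localhost, i.e. the port is taken.
# With no argument it probes 29500, the usual torch.distributed rendezvous port:
#
#   if is_port_in_use():
#       print("pick another --main_process_port")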
| 225 | 1 |
import pytest
from datasets.parallel import ParallelBackendConfig, parallel_backend
from datasets.utils.py_utils import map_nested
from .utils import require_dill_gt_0_3_2, require_joblibspark, require_not_windows
def a__ ( A_ ): # picklable for multiprocessing
'''simple docstring'''
    return A_ + 1
@require_dill_gt_0_3_2
@require_joblibspark
@require_not_windows
def a__ ( ):
'''simple docstring'''
with parallel_backend("""spark""" ):
assert ParallelBackendConfig.backend_name == "spark"
__magic_name__ = [1, 2, 3]
with pytest.raises(A_ ):
with parallel_backend("""unsupported backend""" ):
map_nested(A_, A_, num_proc=2 )
with pytest.raises(A_ ):
with parallel_backend("""unsupported backend""" ):
map_nested(A_, A_, num_proc=-1 )
@require_dill_gt_0_3_2
@require_joblibspark
@require_not_windows
@pytest.mark.parametrize("""num_proc""", [2, -1] )
def a__ ( A_ ):
'''simple docstring'''
__magic_name__ = [1, 2]
__magic_name__ = {"""a""": 1, """b""": 2}
__magic_name__ = {"""a""": [1, 2], """b""": [3, 4]}
__magic_name__ = {"""a""": {"""1""": 1}, """b""": 2}
__magic_name__ = {"""a""": 1, """b""": 2, """c""": 3, """d""": 4}
__magic_name__ = [2, 3]
__magic_name__ = {"""a""": 2, """b""": 3}
__magic_name__ = {"""a""": [2, 3], """b""": [4, 5]}
__magic_name__ = {"""a""": {"""1""": 2}, """b""": 3}
__magic_name__ = {"""a""": 2, """b""": 3, """c""": 4, """d""": 5}
with parallel_backend("""spark""" ):
assert map_nested(A_, A_, num_proc=A_ ) == expected_map_nested_sa
assert map_nested(A_, A_, num_proc=A_ ) == expected_map_nested_sa
assert map_nested(A_, A_, num_proc=A_ ) == expected_map_nested_sa
assert map_nested(A_, A_, num_proc=A_ ) == expected_map_nested_sa
assert map_nested(A_, A_, num_proc=A_ ) == expected_map_nested_sa
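# For reference: in the de-obfuscated original these asserts map the
# module-level add-one function over each container, and map_nested applies it
# elementwise while preserving structure, e.g. {"a": [1, 2]} -> {"a": [2, 3]}.
# A plain lambda would not survive multiprocessing pickling, hence the
# module-level helper at the top of this file.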
| 88 |
"""simple docstring"""
from __future__ import annotations
from collections import deque
from collections.abc import Sequence
from dataclasses import dataclass
from typing import Any
@dataclass
class _UpperCAmelCase :
a__ : int
a__ : Node | None = None
a__ : Node | None = None
def lowercase__ ( ):
    # builds the tree 1 -> (2, 3), 2 -> (4, 5)
    __UpperCAmelCase = Node(1 )
    tree.left = Node(2 )
    tree.right = Node(3 )
    tree.left.left = Node(4 )
    tree.left.right = Node(5 )
return tree
def lowercase__ ( snake_case_ :Node | None ):
return [root.data, *preorder(root.left ), *preorder(root.right )] if root else []
def lowercase__ ( snake_case_ :Node | None ):
return postorder(root.left ) + postorder(root.right ) + [root.data] if root else []
def lowercase__ ( snake_case_ :Node | None ):
return [*inorder(root.left ), root.data, *inorder(root.right )] if root else []
def lowercase__ ( snake_case_ :Node | None ):
return (max(height(root.left ) , height(root.right ) ) + 1) if root else 0
def lowercase__ ( snake_case_ :Node | None ):
__UpperCAmelCase = []
if root is None:
return output
__UpperCAmelCase = deque([root] )
while process_queue:
__UpperCAmelCase = process_queue.popleft()
output.append(node.data )
if node.left:
process_queue.append(node.left )
if node.right:
process_queue.append(node.right )
return output
def lowercase__ ( snake_case_ :Node | None , snake_case_ :int ):
__UpperCAmelCase = []
def populate_output(snake_case_ :Node | None , snake_case_ :int ) -> None:
if not root:
return
if level == 1:
output.append(root.data )
elif level > 1:
populate_output(root.left , level - 1 )
populate_output(root.right , level - 1 )
populate_output(snake_case_ , snake_case_ )
return output
def lowercase__ ( snake_case_ :Node | None , snake_case_ :int ):
__UpperCAmelCase = []
def populate_output(snake_case_ :Node | None , snake_case_ :int ) -> None:
if root is None:
return
if level == 1:
output.append(root.data )
elif level > 1:
populate_output(root.right , level - 1 )
populate_output(root.left , level - 1 )
populate_output(snake_case_ , snake_case_ )
return output
def lowercase__ ( snake_case_ :Node | None ):
if root is None:
return []
__UpperCAmelCase = []
__UpperCAmelCase = 0
__UpperCAmelCase = height(snake_case_ )
for h in range(1 , height_tree + 1 ):
if not flag:
output.append(get_nodes_from_left_to_right(snake_case_ , snake_case_ ) )
__UpperCAmelCase = 1
else:
output.append(get_nodes_from_right_to_left(snake_case_ , snake_case_ ) )
__UpperCAmelCase = 0
return output
def lowercase__ ( ): # Main function for testing.
__UpperCAmelCase = make_tree()
print(F'''In-order Traversal: {inorder(snake_case_ )}''' )
print(F'''Pre-order Traversal: {preorder(snake_case_ )}''' )
print(F'''Post-order Traversal: {postorder(snake_case_ )}''' , '''\n''' )
print(F'''Height of Tree: {height(snake_case_ )}''' , '''\n''' )
print('''Complete Level Order Traversal: ''' )
print(level_order(snake_case_ ) , '''\n''' )
print('''Level-wise order Traversal: ''' )
for level in range(1 , height(snake_case_ ) + 1 ):
print(F'''Level {level}:''' , get_nodes_from_left_to_right(snake_case_ , level=snake_case_ ) )
print('''\nZigZag order Traversal: ''' )
print(zigzag(snake_case_ ) )
if __name__ == "__main__":
import doctest
doctest.testmod()
main()
| 332 | 0 |
def UpperCamelCase( lowercase_ ) -> Dict:
'''simple docstring'''
snake_case_ = [0] * len(lowercase_ )
snake_case_ = []
snake_case_ = [1] * len(lowercase_ )
for values in graph.values():
for i in values:
indegree[i] += 1
for i in range(len(lowercase_ ) ):
if indegree[i] == 0:
queue.append(lowercase_ )
while queue:
snake_case_ = queue.pop(0 )
for x in graph[vertex]:
indegree[x] -= 1
if long_dist[vertex] + 1 > long_dist[x]:
snake_case_ = long_dist[vertex] + 1
if indegree[x] == 0:
queue.append(lowercase_ )
print(max(lowercase_ ) )
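# A hedged, runnable sketch of the same technique as above: Kahn's topological
# order over a DAG with a longest-path relaxation. Names are illustrative;
# vertices are assumed to be labeled 0..n-1 as in the adjacency list below.
def longest_path_length(adjacency):
    indegree = [0] * len(adjacency)
    for targets in adjacency.values():
        for t in targets:
            indegree[t] += 1
    queue = [v for v in range(len(adjacency)) if indegree[v] == 0]
    dist = [1] * len(adjacency)  # path length counted in vertices
    while queue:
        v = queue.pop(0)
        for t in adjacency[v]:
            indegree[t] -= 1
            dist[t] = max(dist[t], dist[v] + 1)
            if indegree[t] == 0:
                queue.append(t)
    return max(dist)

assert longest_path_length({0: [2], 1: [2], 2: [3], 3: []}) == 3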
# Adjacency list of Graph
lowerCamelCase_ = {0: [2, 3, 4], 1: [2, 7], 2: [5], 3: [5, 7], 4: [7], 5: [6], 6: [7], 7: []}
longest_distance(graph)
| 34 |
import argparse
from tax import checkpoints
from transformers import AutoConfig, FlaxAutoModelForSeqaSeqLM
def UpperCamelCase( lowercase_ , lowercase_ , lowercase_ ) -> Any:
'''simple docstring'''
snake_case_ = AutoConfig.from_pretrained(lowercase_ )
snake_case_ = FlaxAutoModelForSeqaSeqLM.from_config(config=lowercase_ )
snake_case_ = checkpoints.load_tax_checkpoint(lowercase_ )
snake_case_ = """wi_0""" in tax_model["""target"""]["""encoder"""]["""layers_0"""]["""mlp"""]
if config.model_type == "t5":
snake_case_ = """SelfAttention"""
if config.model_type == "longt5" and config.encoder_attention_type == "local":
snake_case_ = """LocalSelfAttention"""
elif config.model_type == "longt5" and config.encoder_attention_type == "transient-global":
snake_case_ = """TransientGlobalSelfAttention"""
else:
raise ValueError(
"""Given config is expected to have `model_type='t5'`, or `model_type='longt5` with `encoder_attention_type`"""
""" attribute with a value from ['local', 'transient-global].""" )
# Encoder
for layer_index in range(config.num_layers ):
snake_case_ = f'''layers_{str(lowercase_ )}'''
# Self-Attention
snake_case_ = tax_model["""target"""]["""encoder"""][layer_name]["""attention"""]["""key"""]["""kernel"""]
snake_case_ = tax_model["""target"""]["""encoder"""][layer_name]["""attention"""]["""out"""]["""kernel"""]
snake_case_ = tax_model["""target"""]["""encoder"""][layer_name]["""attention"""]["""query"""]["""kernel"""]
snake_case_ = tax_model["""target"""]["""encoder"""][layer_name]["""attention"""]["""value"""]["""kernel"""]
# Global input layer norm
if config.model_type == "longt5" and config.encoder_attention_type == "transient-global":
snake_case_ = tax_model["""target"""]["""encoder"""][layer_name]["""attention"""]["""T5LayerNorm_0"""]["""scale"""]
# Layer Normalization
snake_case_ = tax_model["""target"""]["""encoder"""][layer_name]["""pre_attention_layer_norm"""]["""scale"""]
if split_mlp_wi:
snake_case_ = tax_model["""target"""]["""encoder"""][layer_name]["""mlp"""]["""wi_0"""]["""kernel"""]
snake_case_ = tax_model["""target"""]["""encoder"""][layer_name]["""mlp"""]["""wi_1"""]["""kernel"""]
else:
snake_case_ = tax_model["""target"""]["""encoder"""][layer_name]["""mlp"""]["""wi"""]["""kernel"""]
snake_case_ = tax_model["""target"""]["""encoder"""][layer_name]["""mlp"""]["""wo"""]["""kernel"""]
# Layer Normalization
snake_case_ = tax_model["""target"""]["""encoder"""][layer_name]["""pre_mlp_layer_norm"""]["""scale"""]
# Assigning
snake_case_ = flax_model.params["""encoder"""]["""block"""][str(lowercase_ )]["""layer"""]
snake_case_ = tax_attention_key
snake_case_ = tax_attention_out
snake_case_ = tax_attention_query
snake_case_ = tax_attention_value
snake_case_ = tax_attention_layer_norm
# Global input layer norm
if config.model_type == "longt5" and config.encoder_attention_type == "transient-global":
snake_case_ = tax_global_layer_norm
if split_mlp_wi:
snake_case_ = tax_mlp_wi_a
snake_case_ = tax_mlp_wi_a
else:
snake_case_ = tax_mlp_wi
snake_case_ = tax_mlp_wo
snake_case_ = tax_mlp_layer_norm
snake_case_ = flax_model_encoder_layer_block
# Only for layer 0:
snake_case_ = tax_model["""target"""]["""encoder"""]["""relpos_bias"""]["""rel_embedding"""].T
snake_case_ = tax_encoder_rel_embedding
# Side/global relative position_bias + layer norm
if config.model_type == "longt5" and config.encoder_attention_type == "transient-global":
snake_case_ = tax_model["""target"""]["""encoder"""]["""side_relpos_bias"""]["""rel_embedding"""].T
snake_case_ = tax_encoder_global_rel_embedding
# Assigning
snake_case_ = tax_model["""target"""]["""encoder"""]["""encoder_norm"""]["""scale"""]
snake_case_ = tax_encoder_norm
# Decoder
for layer_index in range(config.num_layers ):
snake_case_ = f'''layers_{str(lowercase_ )}'''
# Self-Attention
snake_case_ = tax_model["""target"""]["""decoder"""][layer_name]["""self_attention"""]["""key"""]["""kernel"""]
snake_case_ = tax_model["""target"""]["""decoder"""][layer_name]["""self_attention"""]["""out"""]["""kernel"""]
snake_case_ = tax_model["""target"""]["""decoder"""][layer_name]["""self_attention"""]["""query"""]["""kernel"""]
snake_case_ = tax_model["""target"""]["""decoder"""][layer_name]["""self_attention"""]["""value"""]["""kernel"""]
# Layer Normalization
snake_case_ = tax_model["""target"""]["""decoder"""][layer_name]["""pre_self_attention_layer_norm"""][
"""scale"""
]
# Encoder-Decoder-Attention
snake_case_ = tax_model["""target"""]["""decoder"""][layer_name]["""encoder_decoder_attention"""]
snake_case_ = tax_enc_dec_attention_module["""key"""]["""kernel"""]
snake_case_ = tax_enc_dec_attention_module["""out"""]["""kernel"""]
snake_case_ = tax_enc_dec_attention_module["""query"""]["""kernel"""]
snake_case_ = tax_enc_dec_attention_module["""value"""]["""kernel"""]
# Layer Normalization
snake_case_ = tax_model["""target"""]["""decoder"""][layer_name]["""pre_cross_attention_layer_norm"""]["""scale"""]
# MLP
if split_mlp_wi:
snake_case_ = tax_model["""target"""]["""decoder"""][layer_name]["""mlp"""]["""wi_0"""]["""kernel"""]
snake_case_ = tax_model["""target"""]["""decoder"""][layer_name]["""mlp"""]["""wi_1"""]["""kernel"""]
else:
snake_case_ = tax_model["""target"""]["""decoder"""][layer_name]["""mlp"""]["""wi"""]["""kernel"""]
snake_case_ = tax_model["""target"""]["""decoder"""][layer_name]["""mlp"""]["""wo"""]["""kernel"""]
# Layer Normalization
snake_case_ = tax_model["""target"""]["""decoder"""][layer_name]["""pre_mlp_layer_norm"""]["""scale"""]
# Assigning
snake_case_ = flax_model.params["""decoder"""]["""block"""][str(lowercase_ )]["""layer"""]
snake_case_ = tax_attention_key
snake_case_ = tax_attention_out
snake_case_ = tax_attention_query
snake_case_ = tax_attention_value
snake_case_ = tax_pre_attention_layer_norm
snake_case_ = tax_enc_dec_attention_key
snake_case_ = tax_enc_dec_attention_out
snake_case_ = tax_enc_dec_attention_query
snake_case_ = tax_enc_dec_attention_value
snake_case_ = tax_cross_layer_norm
if split_mlp_wi:
snake_case_ = tax_mlp_wi_a
snake_case_ = tax_mlp_wi_a
else:
snake_case_ = tax_mlp_wi
snake_case_ = tax_mlp_wo
        snake_case_ = tax_mlp_layer_norm
snake_case_ = flax_model_decoder_layer_block
# Decoder Normalization
snake_case_ = tax_model["""target"""]["""decoder"""]["""decoder_norm"""]["""scale"""]
    snake_case_ = tax_decoder_norm
# Only for layer 0:
snake_case_ = tax_model["""target"""]["""decoder"""]["""relpos_bias"""]["""rel_embedding"""].T
snake_case_ = tax_decoder_rel_embedding
# Token Embeddings
snake_case_ = tax_model["""target"""]["""token_embedder"""]["""embedding"""]
    snake_case_ = tax_token_embeddings
# LM Head (only in v1.1 and LongT5 checkpoints)
if "logits_dense" in tax_model["target"]["decoder"]:
snake_case_ = tax_model["""target"""]["""decoder"""]["""logits_dense"""]["""kernel"""]
flax_model.save_pretrained(lowercase_ )
print("""T5X Model was sucessfully converted!""" )
if __name__ == "__main__":
lowerCamelCase_ = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--t5x_checkpoint_path''', default=None, type=str, required=True, help='''Path the T5X checkpoint.'''
)
parser.add_argument('''--config_name''', default=None, type=str, required=True, help='''Config name of LongT5/T5 model.''')
parser.add_argument(
'''--flax_dump_folder_path''', default=None, type=str, required=True, help='''Path to the output FLAX model.'''
)
lowerCamelCase_ = parser.parse_args()
    convert_tax_checkpoint_to_flax(args.tax_checkpoint_path, args.config_name, args.flax_dump_folder_path)
| 34 | 1 |
def __lowercase ( _UpperCamelCase ) ->List[Any]:
"""simple docstring"""
lowercase : Union[str, Any] = len(A_ )
for i in range(length - 1 ):
lowercase : Tuple = i
        for k in range(i + 1, length ):
if collection[k] < collection[least]:
lowercase : Optional[int] = k
if least != i:
            # swap the found minimum into its sorted position
            collection[least], collection[i] = (collection[i], collection[least])
return collection
if __name__ == "__main__":
__a = input('''Enter numbers separated by a comma:\n''').strip()
__a = [int(item) for item in user_input.split(''',''')]
print(selection_sort(unsorted))
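# For reference, selection sort performs O(n^2) comparisons but at most n - 1
# swaps regardless of input order. Illustrative check:
#
#   selection_sort([64, 25, 12, 22, 11])  # -> [11, 12, 22, 25, 64]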
| 337 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
is_vision_available,
)
__UpperCamelCase : Dict = {
'''configuration_clip''': [
'''CLIP_PRETRAINED_CONFIG_ARCHIVE_MAP''',
'''CLIPConfig''',
'''CLIPOnnxConfig''',
'''CLIPTextConfig''',
'''CLIPVisionConfig''',
],
'''processing_clip''': ['''CLIPProcessor'''],
'''tokenization_clip''': ['''CLIPTokenizer'''],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__UpperCamelCase : int = ['''CLIPTokenizerFast''']
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__UpperCamelCase : Optional[int] = ['''CLIPFeatureExtractor''']
__UpperCamelCase : Optional[Any] = ['''CLIPImageProcessor''']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__UpperCamelCase : Tuple = [
'''CLIP_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''CLIPModel''',
'''CLIPPreTrainedModel''',
'''CLIPTextModel''',
'''CLIPTextModelWithProjection''',
'''CLIPVisionModel''',
'''CLIPVisionModelWithProjection''',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__UpperCamelCase : Optional[int] = [
'''TF_CLIP_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''TFCLIPModel''',
'''TFCLIPPreTrainedModel''',
'''TFCLIPTextModel''',
'''TFCLIPVisionModel''',
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__UpperCamelCase : Optional[Any] = [
'''FlaxCLIPModel''',
'''FlaxCLIPPreTrainedModel''',
'''FlaxCLIPTextModel''',
'''FlaxCLIPTextPreTrainedModel''',
'''FlaxCLIPVisionModel''',
'''FlaxCLIPVisionPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_clip import (
CLIP_PRETRAINED_CONFIG_ARCHIVE_MAP,
CLIPConfig,
CLIPOnnxConfig,
CLIPTextConfig,
CLIPVisionConfig,
)
from .processing_clip import CLIPProcessor
from .tokenization_clip import CLIPTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_clip_fast import CLIPTokenizerFast
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_clip import CLIPFeatureExtractor
from .image_processing_clip import CLIPImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_clip import (
CLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
CLIPModel,
CLIPPreTrainedModel,
CLIPTextModel,
CLIPTextModelWithProjection,
CLIPVisionModel,
CLIPVisionModelWithProjection,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_clip import (
TF_CLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
TFCLIPModel,
TFCLIPPreTrainedModel,
TFCLIPTextModel,
TFCLIPVisionModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_clip import (
FlaxCLIPModel,
FlaxCLIPPreTrainedModel,
FlaxCLIPTextModel,
FlaxCLIPTextPreTrainedModel,
FlaxCLIPVisionModel,
FlaxCLIPVisionPreTrainedModel,
)
else:
import sys
__UpperCamelCase : Tuple = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 106 | 0 |
import random
import torch
from huggingface_hub import HfApi
from diffusers import UNetaDModel
__UpperCAmelCase = HfApi()
__UpperCAmelCase = {}
# fmt: off
__UpperCAmelCase = torch.tensor([
-0.7_515, -1.6_883, 0.2_420, 0.0_300, 0.6_347, 1.3_433, -1.1_743, -3.7_467,
1.2_342, -2.2_485, 0.4_636, 0.8_076, -0.7_991, 0.3_969, 0.8_498, 0.9_189,
-1.8_887, -3.3_522, 0.7_639, 0.2_040, 0.6_271, -2.7_148, -1.6_316, 3.0_839,
0.3_186, 0.2_721, -0.9_759, -1.2_461, 2.6_257, 1.3_557
])
__UpperCAmelCase = torch.tensor([
-2.3_639, -2.5_344, 0.0_054, -0.6_674, 1.5_990, 1.0_158, 0.3_124, -2.1_436,
1.8_795, -2.5_429, -0.1_566, -0.3_973, 1.2_490, 2.6_447, 1.2_283, -0.5_208,
-2.8_154, -3.5_119, 2.3_838, 1.2_033, 1.7_201, -2.1_256, -1.4_576, 2.7_948,
2.4_204, -0.9_752, -1.2_546, 0.8_027, 3.2_758, 3.1_365
])
__UpperCAmelCase = torch.tensor([
-0.6_531, -0.6_891, -0.3_172, -0.5_375, -0.9_140, -0.5_367, -0.1_175, -0.7_869,
-0.3_808, -0.4_513, -0.2_098, -0.0_083, 0.3_183, 0.5_140, 0.2_247, -0.1_304,
-0.1_302, -0.2_802, -0.2_084, -0.2_025, -0.4_967, -0.4_873, -0.0_861, 0.6_925,
0.0_250, 0.1_290, -0.1_543, 0.6_316, 1.0_460, 1.4_943
])
__UpperCAmelCase = torch.tensor([
0.0_911, 0.1_107, 0.0_182, 0.0_435, -0.0_805, -0.0_608, 0.0_381, 0.2_172,
-0.0_280, 0.1_327, -0.0_299, -0.0_255, -0.0_050, -0.1_170, -0.1_046, 0.0_309,
0.1_367, 0.1_728, -0.0_533, -0.0_748, -0.0_534, 0.1_624, 0.0_384, -0.1_805,
-0.0_707, 0.0_642, 0.0_220, -0.0_134, -0.1_333, -0.1_505
])
__UpperCAmelCase = torch.tensor([
0.1_321, 0.1_337, 0.0_440, 0.0_622, -0.0_591, -0.0_370, 0.0_503, 0.2_133,
-0.0_177, 0.1_415, -0.0_116, -0.0_112, 0.0_044, -0.0_980, -0.0_789, 0.0_395,
0.1_502, 0.1_785, -0.0_488, -0.0_514, -0.0_404, 0.1_539, 0.0_454, -0.1_559,
-0.0_665, 0.0_659, 0.0_383, -0.0_005, -0.1_266, -0.1_386
])
__UpperCAmelCase = torch.tensor([
0.1_154, 0.1_218, 0.0_307, 0.0_526, -0.0_711, -0.0_541, 0.0_366, 0.2_078,
-0.0_267, 0.1_317, -0.0_226, -0.0_193, -0.0_014, -0.1_055, -0.0_902, 0.0_330,
0.1_391, 0.1_709, -0.0_562, -0.0_693, -0.0_560, 0.1_482, 0.0_381, -0.1_683,
-0.0_681, 0.0_661, 0.0_331, -0.0_046, -0.1_268, -0.1_431
])
__UpperCAmelCase = torch.tensor([
0.1_192, 0.1_240, 0.0_414, 0.0_606, -0.0_557, -0.0_412, 0.0_430, 0.2_042,
-0.0_200, 0.1_385, -0.0_115, -0.0_132, 0.0_017, -0.0_965, -0.0_802, 0.0_398,
0.1_433, 0.1_747, -0.0_458, -0.0_533, -0.0_407, 0.1_545, 0.0_419, -0.1_574,
-0.0_645, 0.0_626, 0.0_341, -0.0_010, -0.1_199, -0.1_390
])
__UpperCAmelCase = torch.tensor([
0.1_075, 0.1_074, 0.0_205, 0.0_431, -0.0_774, -0.0_607, 0.0_298, 0.2_042,
-0.0_320, 0.1_267, -0.0_281, -0.0_250, -0.0_064, -0.1_091, -0.0_946, 0.0_290,
0.1_328, 0.1_650, -0.0_580, -0.0_738, -0.0_586, 0.1_440, 0.0_337, -0.1_746,
-0.0_712, 0.0_605, 0.0_250, -0.0_099, -0.1_316, -0.1_473
])
__UpperCAmelCase = torch.tensor([
-1.4_572, -2.0_481, -0.0_414, -0.6_005, 1.4_136, 0.5_848, 0.4_028, -2.7_330,
1.2_212, -2.1_228, 0.2_155, 0.4_039, 0.7_662, 2.0_535, 0.7_477, -0.3_243,
-2.1_758, -2.7_648, 1.6_947, 0.7_026, 1.2_338, -1.6_078, -0.8_682, 2.2_810,
1.8_574, -0.5_718, -0.5_586, -0.0_186, 2.3_415, 2.1_251])
__UpperCAmelCase = torch.tensor([
-1.3_690, -1.9_720, -0.4_090, -0.6_966, 1.4_660, 0.9_938, -0.1_385, -2.7_324,
0.7_736, -1.8_917, 0.2_923, 0.4_293, 0.1_693, 1.4_112, 1.1_887, -0.3_181,
-2.2_160, -2.6_381, 1.3_170, 0.8_163, 0.9_240, -1.6_544, -0.6_099, 2.5_259,
1.6_430, -0.9_090, -0.9_392, -0.0_126, 2.4_268, 2.3_266
])
__UpperCAmelCase = torch.tensor([
-1.3_525, -1.9_628, -0.3_956, -0.6_860, 1.4_664, 1.0_014, -0.1_259, -2.7_212,
0.7_772, -1.8_811, 0.2_996, 0.4_388, 0.1_704, 1.4_029, 1.1_701, -0.3_027,
-2.2_053, -2.6_287, 1.3_350, 0.8_131, 0.9_274, -1.6_292, -0.6_098, 2.5_131,
1.6_505, -0.8_958, -0.9_298, -0.0_151, 2.4_257, 2.3_355
])
__UpperCAmelCase = torch.tensor([
-2.0_585, -2.7_897, -0.2_850, -0.8_940, 1.9_052, 0.5_702, 0.6_345, -3.8_959,
1.5_932, -3.2_319, 0.1_974, 0.0_287, 1.7_566, 2.6_543, 0.8_387, -0.5_351,
-3.2_736, -4.3_375, 2.9_029, 1.6_390, 1.4_640, -2.1_701, -1.9_013, 2.9_341,
3.4_981, -0.6_255, -1.1_644, -0.1_591, 3.7_097, 3.2_066
])
__UpperCAmelCase = torch.tensor([
-2.3_139, -2.5_594, -0.0_197, -0.6_785, 1.7_001, 1.1_606, 0.3_075, -2.1_740,
1.8_071, -2.5_630, -0.0_926, -0.3_811, 1.2_116, 2.6_246, 1.2_731, -0.5_398,
-2.8_153, -3.6_140, 2.3_893, 1.3_262, 1.6_258, -2.1_856, -1.3_267, 2.8_395,
2.3_779, -1.0_623, -1.2_468, 0.8_959, 3.3_367, 3.2_243
])
__UpperCAmelCase = torch.tensor([
-2.0_628, -2.7_667, -0.2_089, -0.8_263, 2.0_539, 0.5_992, 0.6_495, -3.8_336,
1.6_025, -3.2_817, 0.1_721, -0.0_633, 1.7_516, 2.7_039, 0.8_100, -0.5_908,
-3.2_113, -4.4_343, 2.9_257, 1.3_632, 1.5_562, -2.1_489, -1.9_894, 3.0_560,
3.3_396, -0.7_328, -1.0_417, 0.0_383, 3.7_093, 3.2_343
])
__UpperCAmelCase = torch.tensor([
-1.4_574, -2.0_569, -0.0_473, -0.6_117, 1.4_018, 0.5_769, 0.4_129, -2.7_344,
1.2_241, -2.1_397, 0.2_000, 0.3_937, 0.7_616, 2.0_453, 0.7_324, -0.3_391,
-2.1_746, -2.7_744, 1.6_963, 0.6_921, 1.2_187, -1.6_172, -0.8_877, 2.2_439,
1.8_471, -0.5_839, -0.5_605, -0.0_464, 2.3_250, 2.1_219
])
# fmt: on
__UpperCAmelCase = api.list_models(filter="""diffusers""")
for mod in models:
if "google" in mod.author or mod.modelId == "CompVis/ldm-celebahq-256":
__UpperCAmelCase = """/home/patrick/google_checkpoints/""" + mod.modelId.split("""/""")[-1]
print(F'Started running {mod.modelId}!!!')
if mod.modelId.startswith("""CompVis"""):
__UpperCAmelCase = UNetaDModel.from_pretrained(local_checkpoint, subfolder="""unet""")
else:
__UpperCAmelCase = UNetaDModel.from_pretrained(local_checkpoint)
torch.manual_seed(0)
random.seed(0)
__UpperCAmelCase = torch.randn(1, model.config.in_channels, model.config.sample_size, model.config.sample_size)
__UpperCAmelCase = torch.tensor([10] * noise.shape[0])
with torch.no_grad():
__UpperCAmelCase = model(noise, time_step).sample
assert torch.allclose(
logits[0, 0, 0, :30], results["""_""".join("""_""".join(mod.modelId.split("""/""")).split("""-"""))], atol=1e-3
)
print(F'{mod.modelId} has passed successfully!!!')
| 139 |
def snake_case_ () -> List[Any]:
for n in range(1 , 1_0_0_0_0_0_0 ):
yield n * (n + 1) // 2
def snake_case_ (__A : Dict ) -> Tuple:
__lowerCAmelCase : Optional[int] = 1
__lowerCAmelCase : Optional[int] = 2
while i * i <= n:
__lowerCAmelCase : Optional[int] = 0
while n % i == 0:
n //= i
multiplicity += 1
divisors_count *= multiplicity + 1
i += 1
if n > 1:
divisors_count *= 2
return divisors_count
def snake_case_ () -> Dict:
return next(i for i in triangle_number_generator() if count_divisors(__A ) > 5_0_0 )
if __name__ == "__main__":
print(solution())
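# For reference, count_divisors uses the standard divisor-function identity
# d(p1**a1 * ... * pk**ak) = (a1 + 1) * ... * (ak + 1) over the prime
# factorization found by trial division. Illustrative check:
#
#   count_divisors(28)  # 28 = 2**2 * 7 -> (2 + 1) * (1 + 1) = 6 divisors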
| 139 | 1 |
'''simple docstring'''
_UpperCamelCase = '''
# How to install Transformers
! pip install transformers datasets
# To install from source instead of the latest release, comment the command above and uncomment the following one.
# ! pip install git+https://github.com/huggingface/transformers.git
'''
_UpperCamelCase = [{'''type''': '''code''', '''content''': INSTALL_CONTENT}]
_UpperCamelCase = {
'''{processor_class}''': '''FakeProcessorClass''',
'''{model_class}''': '''FakeModelClass''',
'''{object_class}''': '''FakeObjectClass''',
}
| 254 |
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available


_import_structure = {
    "configuration_timesformer": ["TIMESFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP", "TimesformerConfig"],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_timesformer"] = [
        "TIMESFORMER_PRETRAINED_MODEL_ARCHIVE_LIST",
        "TimesformerModel",
        "TimesformerForVideoClassification",
        "TimesformerPreTrainedModel",
    ]

if TYPE_CHECKING:
    from .configuration_timesformer import TIMESFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, TimesformerConfig

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_timesformer import (
            TIMESFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
            TimesformerForVideoClassification,
            TimesformerModel,
            TimesformerPreTrainedModel,
        )

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
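
# Note: with this layout, `from transformers.models.timesformer import TimesformerModel`
# resolves through `_LazyModule`, so `modeling_timesformer` (and its torch dependency)
# is only imported when the attribute is first accessed.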
| 254 | 1 |
"""Convert PyTorch checkpoints to Flax state dicts and vice versa."""

import os
from pickle import UnpicklingError
from typing import Dict, Tuple

import jax
import jax.numpy as jnp
import numpy as np
from flax.serialization import from_bytes
from flax.traverse_util import flatten_dict, unflatten_dict

import transformers

from .utils import logging


logger = logging.get_logger(__name__)


def load_pytorch_checkpoint_in_flax_state_dict(
    flax_model, pytorch_checkpoint_path, is_sharded, allow_missing_keys=False
):
    """Load a PyTorch checkpoint into a nested Flax state dict."""
    try:
        import torch  # noqa: F401
    except ImportError:
        logger.error(
            "Loading a PyTorch model in Flax, requires both PyTorch and Flax to be installed. Please see"
            " https://pytorch.org/ and https://flax.readthedocs.io/en/latest/installation.html for installation"
            " instructions."
        )
        raise

    if not is_sharded:
        pt_path = os.path.abspath(pytorch_checkpoint_path)
        logger.info(f"Loading PyTorch weights from {pt_path}")

        pt_state_dict = torch.load(pt_path, map_location="cpu")
        logger.info(f"PyTorch checkpoint contains {sum(t.numel() for t in pt_state_dict.values()):,} parameters.")

        flax_state_dict = convert_pytorch_state_dict_to_flax(pt_state_dict, flax_model)
    else:
        # model is sharded and pytorch_checkpoint_path already contains the list of .pt shard files
        flax_state_dict = convert_pytorch_sharded_state_dict_to_flax(pytorch_checkpoint_path, flax_model)
    return flax_state_dict
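

# A hedged usage sketch (the checkpoint path is illustrative): for a non-sharded
# checkpoint, pass the .bin path directly; for a sharded one, pass the list of
# shard files and set `is_sharded=True`.
#
#   params = load_pytorch_checkpoint_in_flax_state_dict(
#       flax_model, "/path/to/pytorch_model.bin", is_sharded=False
#   )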
def rename_key_and_reshape_tensor(
    pt_tuple_key: Tuple[str],
    pt_tensor: np.ndarray,
    random_flax_state_dict: Dict[str, jnp.ndarray],
    model_prefix: str,
) -> (Tuple[str], np.ndarray):
    """Rename PyTorch weight names to the corresponding Flax names and reshape the tensor if necessary."""

    def is_key_or_prefix_key_in_dict(key: Tuple[str]) -> bool:
        """Checks whether `key` or `(model_prefix,) + key` is in `random_flax_state_dict`."""
        return len(set(random_flax_state_dict) & {key, (model_prefix,) + key}) > 0

    # layer norm
    renamed_pt_tuple_key = pt_tuple_key[:-1] + ("scale",)
    if pt_tuple_key[-1] in ["weight", "gamma"] and is_key_or_prefix_key_in_dict(renamed_pt_tuple_key):
        return renamed_pt_tuple_key, pt_tensor

    # batch norm layer mean
    renamed_pt_tuple_key = pt_tuple_key[:-1] + ("mean",)
    if pt_tuple_key[-1] == "running_mean" and not is_key_or_prefix_key_in_dict(pt_tuple_key):
        return renamed_pt_tuple_key, pt_tensor

    # batch norm layer var
    renamed_pt_tuple_key = pt_tuple_key[:-1] + ("var",)
    if pt_tuple_key[-1] == "running_var" and not is_key_or_prefix_key_in_dict(pt_tuple_key):
        return renamed_pt_tuple_key, pt_tensor

    # embedding
    renamed_pt_tuple_key = pt_tuple_key[:-1] + ("embedding",)
    if pt_tuple_key[-1] == "weight" and is_key_or_prefix_key_in_dict(renamed_pt_tuple_key):
        return renamed_pt_tuple_key, pt_tensor

    # conv layer: PyTorch (out, in, kh, kw) -> Flax (kh, kw, in, out)
    renamed_pt_tuple_key = pt_tuple_key[:-1] + ("kernel",)
    if pt_tuple_key[-1] == "weight" and pt_tensor.ndim == 4 and not is_key_or_prefix_key_in_dict(pt_tuple_key):
        pt_tensor = pt_tensor.transpose(2, 3, 1, 0)
        return renamed_pt_tuple_key, pt_tensor

    # linear layer: PyTorch (out, in) -> Flax (in, out)
    renamed_pt_tuple_key = pt_tuple_key[:-1] + ("kernel",)
    if pt_tuple_key[-1] == "weight" and not is_key_or_prefix_key_in_dict(pt_tuple_key):
        pt_tensor = pt_tensor.T
        return renamed_pt_tuple_key, pt_tensor

    # old PyTorch layer norm weight
    renamed_pt_tuple_key = pt_tuple_key[:-1] + ("weight",)
    if pt_tuple_key[-1] == "gamma":
        return renamed_pt_tuple_key, pt_tensor

    # old PyTorch layer norm bias
    renamed_pt_tuple_key = pt_tuple_key[:-1] + ("bias",)
    if pt_tuple_key[-1] == "beta":
        return renamed_pt_tuple_key, pt_tensor

    # New `weight_norm` from https://github.com/huggingface/transformers/pull/24030
    name = None
    if pt_tuple_key[-3::2] == ("parametrizations", "original0"):
        name = pt_tuple_key[-2] + "_g"
    elif pt_tuple_key[-3::2] == ("parametrizations", "original1"):
        name = pt_tuple_key[-2] + "_v"
    if name is not None:
        renamed_pt_tuple_key = pt_tuple_key[:-3] + (name,)
        return renamed_pt_tuple_key, pt_tensor

    return pt_tuple_key, pt_tensor
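

# A minimal sketch of the linear-layer branch above (names and shapes are
# illustrative, not from a real checkpoint): a PyTorch weight ("dense", "weight")
# that is absent from the Flax params is renamed to ("dense", "kernel") and
# transposed from (out_features, in_features) to (in_features, out_features).
#
#   pt_key = ("dense", "weight")
#   pt_tensor = np.zeros((4, 8))
#   flax_key, flax_tensor = rename_key_and_reshape_tensor(
#       pt_key, pt_tensor, {("dense", "kernel"): np.zeros((8, 4))}, model_prefix="model"
#   )
#   # flax_key == ("dense", "kernel"); flax_tensor.shape == (8, 4)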
def convert_pytorch_state_dict_to_flax(pt_state_dict, flax_model):
    """Convert a (non-sharded) PyTorch state dict into nested Flax params."""
    # convert pytorch tensors to numpy
    pt_state_dict = {k: v.numpy() for k, v in pt_state_dict.items()}

    model_prefix = flax_model.base_model_prefix

    # use params dict if the model contains batch norm layers
    if "params" in flax_model.params:
        flax_model_params = flax_model.params["params"]
    else:
        flax_model_params = flax_model.params
    random_flax_state_dict = flatten_dict(flax_model_params)

    # add batch_stats keys,values to dict
    if "batch_stats" in flax_model.params:
        flax_batch_stats = flatten_dict(flax_model.params["batch_stats"])
        random_flax_state_dict.update(flax_batch_stats)

    flax_state_dict = {}

    load_model_with_head_into_base_model = (model_prefix not in flax_model_params) and (
        model_prefix in {k.split(".")[0] for k in pt_state_dict.keys()}
    )
    load_base_model_into_model_with_head = (model_prefix in flax_model_params) and (
        model_prefix not in {k.split(".")[0] for k in pt_state_dict.keys()}
    )

    # Need to change some parameters name to match Flax names
    for pt_key, pt_tensor in pt_state_dict.items():
        pt_tuple_key = tuple(pt_key.split("."))

        # remove base model prefix if necessary
        has_base_model_prefix = pt_tuple_key[0] == model_prefix
        if load_model_with_head_into_base_model and has_base_model_prefix:
            pt_tuple_key = pt_tuple_key[1:]

        # Correctly rename weight parameters
        flax_key, flax_tensor = rename_key_and_reshape_tensor(
            pt_tuple_key, pt_tensor, random_flax_state_dict, model_prefix
        )

        # add model prefix if necessary
        require_base_model_prefix = (model_prefix,) + flax_key in random_flax_state_dict
        if load_base_model_into_model_with_head and require_base_model_prefix:
            flax_key = (model_prefix,) + flax_key

        if flax_key in random_flax_state_dict:
            if flax_tensor.shape != random_flax_state_dict[flax_key].shape:
                raise ValueError(
                    f"PyTorch checkpoint seems to be incorrect. Weight {pt_key} was expected to be of shape "
                    f"{random_flax_state_dict[flax_key].shape}, but is {flax_tensor.shape}."
                )

        # add batch stats if the model contains batchnorm layers
        if "batch_stats" in flax_model.params:
            if "mean" in flax_key[-1] or "var" in flax_key[-1]:
                flax_state_dict[("batch_stats",) + flax_key] = jnp.asarray(flax_tensor)
                continue
            # remove num_batches_tracked key
            if "num_batches_tracked" in flax_key[-1]:
                flax_state_dict.pop(flax_key, None)
                continue

            # also add unexpected weight so that warning is thrown
            flax_state_dict[("params",) + flax_key] = jnp.asarray(flax_tensor)
        else:
            # also add unexpected weight so that warning is thrown
            flax_state_dict[flax_key] = jnp.asarray(flax_tensor)

    return unflatten_dict(flax_state_dict)
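

# A hedged usage sketch (the checkpoint name is just an example): load the same
# architecture in both frameworks and convert the PyTorch weights to Flax params.
#
#   from transformers import BertModel, FlaxBertModel
#
#   pt_model = BertModel.from_pretrained("bert-base-uncased")
#   flax_model = FlaxBertModel.from_pretrained("bert-base-uncased")
#   flax_params = convert_pytorch_state_dict_to_flax(pt_model.state_dict(), flax_model)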
def convert_pytorch_sharded_state_dict_to_flax(shard_filenames, flax_model):
    """Convert a sharded PyTorch checkpoint (a list of shard files) into nested Flax params."""
    import torch

    # Load the index
    flax_state_dict = {}
    for shard_file in shard_filenames:
        # load using msgpack utils
        pt_state_dict = torch.load(shard_file)
        pt_state_dict = {k: v.numpy() for k, v in pt_state_dict.items()}

        model_prefix = flax_model.base_model_prefix

        # use params dict if the model contains batch norm layers and then add batch_stats keys,values to dict
        if "batch_stats" in flax_model.params:
            flax_model_params = flax_model.params["params"]

            random_flax_state_dict = flatten_dict(flax_model_params)
            random_flax_state_dict.update(flatten_dict(flax_model.params["batch_stats"]))
        else:
            flax_model_params = flax_model.params
            random_flax_state_dict = flatten_dict(flax_model_params)

        load_model_with_head_into_base_model = (model_prefix not in flax_model_params) and (
            model_prefix in {k.split(".")[0] for k in pt_state_dict.keys()}
        )
        load_base_model_into_model_with_head = (model_prefix in flax_model_params) and (
            model_prefix not in {k.split(".")[0] for k in pt_state_dict.keys()}
        )

        # Need to change some parameters name to match Flax names
        for pt_key, pt_tensor in pt_state_dict.items():
            pt_tuple_key = tuple(pt_key.split("."))

            # remove base model prefix if necessary
            has_base_model_prefix = pt_tuple_key[0] == model_prefix
            if load_model_with_head_into_base_model and has_base_model_prefix:
                pt_tuple_key = pt_tuple_key[1:]

            # Correctly rename weight parameters
            flax_key, flax_tensor = rename_key_and_reshape_tensor(
                pt_tuple_key, pt_tensor, random_flax_state_dict, model_prefix
            )
            # add model prefix if necessary
            require_base_model_prefix = (model_prefix,) + flax_key in random_flax_state_dict
            if load_base_model_into_model_with_head and require_base_model_prefix:
                flax_key = (model_prefix,) + flax_key

            if flax_key in random_flax_state_dict:
                if flax_tensor.shape != random_flax_state_dict[flax_key].shape:
                    raise ValueError(
                        f"PyTorch checkpoint seems to be incorrect. Weight {pt_key} was expected to be of shape "
                        f"{random_flax_state_dict[flax_key].shape}, but is {flax_tensor.shape}."
                    )

            # add batch stats if the model contains batchnorm layers
            if "batch_stats" in flax_model.params:
                if "mean" in flax_key[-1]:
                    flax_state_dict[("batch_stats",) + flax_key] = jnp.asarray(flax_tensor)
                    continue
                if "var" in flax_key[-1]:
                    flax_state_dict[("batch_stats",) + flax_key] = jnp.asarray(flax_tensor)
                    continue
                # remove num_batches_tracked key
                if "num_batches_tracked" in flax_key[-1]:
                    flax_state_dict.pop(flax_key, None)
                    continue

                # also add unexpected weight so that warning is thrown
                flax_state_dict[("params",) + flax_key] = jnp.asarray(flax_tensor)
            else:
                # also add unexpected weight so that warning is thrown
                flax_state_dict[flax_key] = jnp.asarray(flax_tensor)
    return unflatten_dict(flax_state_dict)
def load_flax_checkpoint_in_pytorch_model(model, flax_checkpoint_path):
    """Load a msgpack Flax checkpoint into a PyTorch model."""
    flax_checkpoint_path = os.path.abspath(flax_checkpoint_path)
    logger.info(f"Loading Flax weights from {flax_checkpoint_path}")

    # import correct flax class
    flax_cls = getattr(transformers, "Flax" + model.__class__.__name__)

    # load flax weight dict
    with open(flax_checkpoint_path, "rb") as state_f:
        try:
            flax_state_dict = from_bytes(flax_cls, state_f.read())
        except UnpicklingError:
            raise EnvironmentError(f"Unable to convert {flax_checkpoint_path} to Flax deserializable object. ")

    return load_flax_weights_in_pytorch_model(model, flax_state_dict)
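

# A hedged usage sketch (path and model class are illustrative): load Flax weights
# saved with `flax_model.save_pretrained(...)` into the matching PyTorch model.
#
#   from transformers import BertModel
#
#   pt_model = BertModel.from_pretrained("bert-base-uncased")
#   pt_model = load_flax_checkpoint_in_pytorch_model(pt_model, "/path/to/flax_model.msgpack")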
def load_flax_weights_in_pytorch_model(pt_model, flax_state):
    """Load Flax weights into a PyTorch model."""
    try:
        import torch  # noqa: F401
    except ImportError:
        logger.error(
            "Loading Flax weights in PyTorch, requires both PyTorch and Flax to be installed. Please see"
            " https://pytorch.org/ and https://flax.readthedocs.io/en/latest/installation.html for installation"
            " instructions."
        )
        raise

    # check if we have bf16 weights
    is_type_bf16 = flatten_dict(jax.tree_util.tree_map(lambda x: x.dtype == jnp.bfloat16, flax_state)).values()
    if any(is_type_bf16):
        # convert all weights to fp32 if they are bf16 since torch.from_numpy can not handle bf16
        # and bf16 is not fully supported in PT yet.
        logger.warning(
            "Found ``bfloat16`` weights in Flax model. Casting all ``bfloat16`` weights to ``float32`` "
            "before loading those in PyTorch model."
        )
        flax_state = jax.tree_util.tree_map(
            lambda params: params.astype(np.float32) if params.dtype == jnp.bfloat16 else params, flax_state
        )

    flax_state_dict = flatten_dict(flax_state)
    pt_model_dict = pt_model.state_dict()

    load_model_with_head_into_base_model = (pt_model.base_model_prefix in flax_state) and (
        pt_model.base_model_prefix not in {k.split(".")[0] for k in pt_model_dict.keys()}
    )
    load_base_model_into_model_with_head = (pt_model.base_model_prefix not in flax_state) and (
        pt_model.base_model_prefix in {k.split(".")[0] for k in pt_model_dict.keys()}
    )

    # keep track of unexpected & missing keys
    unexpected_keys = []
    missing_keys = set(pt_model_dict.keys())

    for flax_key_tuple, flax_tensor in flax_state_dict.items():
        has_base_model_prefix = flax_key_tuple[0] == pt_model.base_model_prefix
        require_base_model_prefix = ".".join((pt_model.base_model_prefix,) + flax_key_tuple) in pt_model_dict

        # adapt flax_key to prepare for loading from/to base model only
        if load_model_with_head_into_base_model and has_base_model_prefix:
            flax_key_tuple = flax_key_tuple[1:]
        elif load_base_model_into_model_with_head and require_base_model_prefix:
            flax_key_tuple = (pt_model.base_model_prefix,) + flax_key_tuple

        # rename flax weights to PyTorch format
        if flax_key_tuple[-1] == "kernel" and flax_tensor.ndim == 4 and ".".join(flax_key_tuple) not in pt_model_dict:
            # conv layer
            flax_key_tuple = flax_key_tuple[:-1] + ("weight",)
            flax_tensor = jnp.transpose(flax_tensor, (3, 2, 0, 1))
        elif flax_key_tuple[-1] == "kernel" and ".".join(flax_key_tuple) not in pt_model_dict:
            # linear layer
            flax_key_tuple = flax_key_tuple[:-1] + ("weight",)
            flax_tensor = flax_tensor.T
        elif flax_key_tuple[-1] in ["scale", "embedding"]:
            flax_key_tuple = flax_key_tuple[:-1] + ("weight",)

        # adding batch stats from flax batch norm to pt
        elif "mean" in flax_key_tuple[-1]:
            flax_key_tuple = flax_key_tuple[:-1] + ("running_mean",)
        elif "var" in flax_key_tuple[-1]:
            flax_key_tuple = flax_key_tuple[:-1] + ("running_var",)

        if "batch_stats" in flax_state:
            flax_key = ".".join(flax_key_tuple[1:])  # Remove the params/batch_stats header
        else:
            flax_key = ".".join(flax_key_tuple)

        # We also need to look at `pt_model_dict` and see if there are keys requiring further transformation.
        special_pt_names = {}
        # New `weight_norm` from https://github.com/huggingface/transformers/pull/24030
        for key in pt_model_dict:
            key_components = key.split(".")
            name = None
            if key_components[-3::2] == ["parametrizations", "original0"]:
                name = key_components[-2] + "_g"
            elif key_components[-3::2] == ["parametrizations", "original1"]:
                name = key_components[-2] + "_v"
            if name is not None:
                key_components = key_components[:-3] + [name]
                key_to_check = ".".join(key_components)
                special_pt_names[key_to_check] = key

        if flax_key in special_pt_names:
            flax_key = special_pt_names[flax_key]

        if flax_key in pt_model_dict:
            if flax_tensor.shape != pt_model_dict[flax_key].shape:
                raise ValueError(
                    f"Flax checkpoint seems to be incorrect. Weight {flax_key_tuple} was expected "
                    f"to be of shape {pt_model_dict[flax_key].shape}, but is {flax_tensor.shape}."
                )
            else:
                # add weight to pytorch dict
                flax_tensor = np.asarray(flax_tensor) if not isinstance(flax_tensor, np.ndarray) else flax_tensor
                pt_model_dict[flax_key] = torch.from_numpy(flax_tensor)
                # remove from missing keys
                missing_keys.remove(flax_key)
        else:
            # weight is not expected by PyTorch model
            unexpected_keys.append(flax_key)

    pt_model.load_state_dict(pt_model_dict)

    # re-transform missing_keys to list
    missing_keys = list(missing_keys)

    if len(unexpected_keys) > 0:
        logger.warning(
            "Some weights of the Flax model were not used when initializing the PyTorch model"
            f" {pt_model.__class__.__name__}: {unexpected_keys}\n- This IS expected if you are initializing"
            f" {pt_model.__class__.__name__} from a Flax model trained on another task or with another architecture"
            " (e.g. initializing a BertForSequenceClassification model from a FlaxBertForPreTraining model).\n- This"
            f" IS NOT expected if you are initializing {pt_model.__class__.__name__} from a Flax model that you expect"
            " to be exactly identical (e.g. initializing a BertForSequenceClassification model from a"
            " FlaxBertForSequenceClassification model)."
        )
    else:
        logger.warning(f"All Flax model weights were used when initializing {pt_model.__class__.__name__}.\n")

    if len(missing_keys) > 0:
        logger.warning(
            f"Some weights of {pt_model.__class__.__name__} were not initialized from the Flax model and are newly"
            f" initialized: {missing_keys}\nYou should probably TRAIN this model on a down-stream task to be able to"
            " use it for predictions and inference."
        )
    else:
        logger.warning(
            f"All the weights of {pt_model.__class__.__name__} were initialized from the Flax model.\n"
            "If your task is similar to the task the model of the checkpoint was trained on, "
            f"you can already use {pt_model.__class__.__name__} for predictions without further training."
        )

    return pt_model
| 116 |
from sklearn.metrics import mean_squared_error
import datasets
_CITATION = '\\n@article{scikit-learn,\n title={Scikit-learn: Machine Learning in {P}ython},\n author={Pedregosa, F. and Varoquaux, G. and Gramfort, A. and Michel, V.\n and Thirion, B. and Grisel, O. and Blondel, M. and Prettenhofer, P.\n and Weiss, R. and Dubourg, V. and Vanderplas, J. and Passos, A. and\n Cournapeau, D. and Brucher, M. and Perrot, M. and Duchesnay, E.},\n journal={Journal of Machine Learning Research},\n volume={12},\n pages={2825--2830},\n year={2011}\n}\n'
_DESCRIPTION = '\\nMean Squared Error(MSE) is the average of the square of difference between the predicted\nand actual values.\n'
_KWARGS_DESCRIPTION = '\nArgs:\n predictions: array-like of shape (n_samples,) or (n_samples, n_outputs)\n Estimated target values.\n references: array-like of shape (n_samples,) or (n_samples, n_outputs)\n Ground truth (correct) target values.\n sample_weight: array-like of shape (n_samples,), default=None\n Sample weights.\n multioutput: {"raw_values", "uniform_average"} or array-like of shape (n_outputs,), default="uniform_average"\n Defines aggregating of multiple output values. Array-like value defines weights used to average errors.\n\n "raw_values" : Returns a full set of errors in case of multioutput input.\n\n "uniform_average" : Errors of all outputs are averaged with uniform weight.\n\n squared : bool, default=True\n If True returns MSE value, if False returns RMSE (Root Mean Squared Error) value.\n\nReturns:\n mse : mean squared error.\nExamples:\n\n >>> mse_metric = datasets.load_metric("mse")\n >>> predictions = [2.5, 0.0, 2, 8]\n >>> references = [3, -0.5, 2, 7]\n >>> results = mse_metric.compute(predictions=predictions, references=references)\n >>> print(results)\n {\'mse\': 0.375}\n >>> rmse_result = mse_metric.compute(predictions=predictions, references=references, squared=False)\n >>> print(rmse_result)\n {\'mse\': 0.6123724356957945}\n\n If you\'re using multi-dimensional lists, then set the config as follows :\n\n >>> mse_metric = datasets.load_metric("mse", "multilist")\n >>> predictions = [[0.5, 1], [-1, 1], [7, -6]]\n >>> references = [[0, 2], [-1, 2], [8, -5]]\n >>> results = mse_metric.compute(predictions=predictions, references=references)\n >>> print(results)\n {\'mse\': 0.7083333333333334}\n >>> results = mse_metric.compute(predictions=predictions, references=references, multioutput=\'raw_values\')\n >>> print(results) # doctest: +NORMALIZE_WHITESPACE\n {\'mse\': array([0.41666667, 1. ])}\n'
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION)
class Mse(datasets.Metric):
    def _info(self):
        return datasets.MetricInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(self._get_feature_types()),
            reference_urls=[
                "https://scikit-learn.org/stable/modules/generated/sklearn.metrics.mean_squared_error.html"
            ],
        )

    def _get_feature_types(self):
        if self.config_name == "multilist":
            return {
                "predictions": datasets.Sequence(datasets.Value("float")),
                "references": datasets.Sequence(datasets.Value("float")),
            }
        else:
            return {
                "predictions": datasets.Value("float"),
                "references": datasets.Value("float"),
            }

    def _compute(self, predictions, references, sample_weight=None, multioutput="uniform_average", squared=True):
        mse = mean_squared_error(
            references, predictions, sample_weight=sample_weight, multioutput=multioutput, squared=squared
        )

        return {"mse": mse}
| 116 | 1 |