code
stringlengths 86
54.5k
| code_codestyle
int64 0
371
| style_context
stringlengths 87
49.2k
| style_context_codestyle
int64 0
349
| label
int64 0
1
|
---|---|---|---|---|
import logging
import math
from functools import partial
from typing import Any, Callable, Dict, Iterable, List, Optional, Sequence, Tuple, Union
import torch
from .tensor_utils import tensor_tree_map, tree_map
def _SCREAMING_SNAKE_CASE ( SCREAMING_SNAKE_CASE ):
A_ : List[Any] = []
if isinstance(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ):
for v in tree.values():
shapes.extend(_fetch_dims(SCREAMING_SNAKE_CASE ) )
elif isinstance(SCREAMING_SNAKE_CASE , (list, tuple) ):
for t in tree:
shapes.extend(_fetch_dims(SCREAMING_SNAKE_CASE ) )
elif isinstance(SCREAMING_SNAKE_CASE , torch.Tensor ):
shapes.append(tree.shape )
else:
raise ValueError('''Not supported''' )
return shapes
@torch.jit.ignore
def _SCREAMING_SNAKE_CASE ( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ):
A_ : str = []
for d in reversed(SCREAMING_SNAKE_CASE ):
idx.append(flat_idx % d )
A_ : List[str] = flat_idx // d
return tuple(reversed(SCREAMING_SNAKE_CASE ) )
@torch.jit.ignore
def _SCREAMING_SNAKE_CASE ( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = None , SCREAMING_SNAKE_CASE = None , ):
# start_edges and end_edges both indicate whether, starting from any given
# dimension, the start/end index is at the top/bottom edge of the
# corresponding tensor, modeled as a tree
def reduce_edge_list(SCREAMING_SNAKE_CASE ) -> None:
A_ : Dict = True
for i in range(len(SCREAMING_SNAKE_CASE ) ):
A_ : Optional[Any] = -1 * (i + 1)
l[reversed_idx] &= tally
A_ : Any = l[reversed_idx]
if start_edges is None:
A_ : Tuple = [s == 0 for s in start]
reduce_edge_list(SCREAMING_SNAKE_CASE )
if end_edges is None:
A_ : Union[str, Any] = [e == (d - 1) for e, d in zip(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )]
reduce_edge_list(SCREAMING_SNAKE_CASE )
# Base cases. Either start/end are empty and we're done, or the final,
# one-dimensional tensor can be simply sliced
if len(SCREAMING_SNAKE_CASE ) == 0:
return [()]
elif len(SCREAMING_SNAKE_CASE ) == 1:
return [(slice(start[0] , end[0] + 1 ),)]
A_ : List[Tuple[slice, ...]] = []
A_ : List[slice] = []
# Dimensions common to start and end can be selected directly
for s, e in zip(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ):
if s == e:
path_list.append(slice(SCREAMING_SNAKE_CASE , s + 1 ) )
else:
break
A_ : Tuple[slice, ...] = tuple(SCREAMING_SNAKE_CASE )
A_ : Optional[int] = len(SCREAMING_SNAKE_CASE )
# start == end, and we're done
if divergence_idx == len(SCREAMING_SNAKE_CASE ):
return [path]
def upper() -> Tuple[Tuple[slice, ...], ...]:
assert start_edges is not None
assert end_edges is not None
A_ : List[Any] = start[divergence_idx]
return tuple(
path + (slice(SCREAMING_SNAKE_CASE , sdi + 1 ),) + s
for s in _get_minimal_slice_set(
start[divergence_idx + 1 :] , [d - 1 for d in dims[divergence_idx + 1 :]] , dims[divergence_idx + 1 :] , start_edges=start_edges[divergence_idx + 1 :] , end_edges=[True for _ in end_edges[divergence_idx + 1 :]] , ) )
def lower() -> Tuple[Tuple[slice, ...], ...]:
assert start_edges is not None
assert end_edges is not None
A_ : Union[str, Any] = end[divergence_idx]
return tuple(
path + (slice(SCREAMING_SNAKE_CASE , edi + 1 ),) + s
for s in _get_minimal_slice_set(
[0 for _ in start[divergence_idx + 1 :]] , end[divergence_idx + 1 :] , dims[divergence_idx + 1 :] , start_edges=[True for _ in start_edges[divergence_idx + 1 :]] , end_edges=end_edges[divergence_idx + 1 :] , ) )
# If both start and end are at the edges of the subtree rooted at
# divergence_idx, we can just select the whole subtree at once
if start_edges[divergence_idx] and end_edges[divergence_idx]:
slices.append(path + (slice(start[divergence_idx] , end[divergence_idx] + 1 ),) )
# If just start is at the edge, we can grab almost all of the subtree,
# treating only the ragged bottom edge as an edge case
elif start_edges[divergence_idx]:
slices.append(path + (slice(start[divergence_idx] , end[divergence_idx] ),) )
slices.extend(lower() )
# Analogous to the previous case, but the top is ragged this time
elif end_edges[divergence_idx]:
slices.extend(upper() )
slices.append(path + (slice(start[divergence_idx] + 1 , end[divergence_idx] + 1 ),) )
# If both sides of the range are ragged, we need to handle both sides
# separately. If there's contiguous meat in between them, we can index it
# in one big chunk
else:
slices.extend(upper() )
A_ : int = end[divergence_idx] - start[divergence_idx]
if middle_ground > 1:
slices.append(path + (slice(start[divergence_idx] + 1 , end[divergence_idx] ),) )
slices.extend(lower() )
return slices
@torch.jit.ignore
def _SCREAMING_SNAKE_CASE ( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ):
A_ : Tuple = t.shape[:no_batch_dims]
A_ : Tuple = list(_flat_idx_to_idx(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) )
# _get_minimal_slice_set is inclusive
A_ : List[str] = list(_flat_idx_to_idx(flat_end - 1 , SCREAMING_SNAKE_CASE ) )
# Get an ordered list of slices to perform
A_ : List[Any] = _get_minimal_slice_set(
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , )
A_ : Tuple = [t[s] for s in slices]
return torch.cat([s.view((-1,) + t.shape[no_batch_dims:] ) for s in sliced_tensors] )
def _SCREAMING_SNAKE_CASE ( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = False , SCREAMING_SNAKE_CASE = None , SCREAMING_SNAKE_CASE = False , ):
if not (len(SCREAMING_SNAKE_CASE ) > 0):
raise ValueError('''Must provide at least one input''' )
A_ : int = [shape[:no_batch_dims] for shape in _fetch_dims(SCREAMING_SNAKE_CASE )]
A_ : int = tuple([max(SCREAMING_SNAKE_CASE ) for s in zip(*SCREAMING_SNAKE_CASE )] )
def _prep_inputs(SCREAMING_SNAKE_CASE ) -> torch.Tensor:
if not low_mem:
if not sum(t.shape[:no_batch_dims] ) == no_batch_dims:
A_ : Any = t.expand(orig_batch_dims + t.shape[no_batch_dims:] )
A_ : List[Any] = t.reshape(-1 , *t.shape[no_batch_dims:] )
else:
A_ : Optional[int] = t.expand(orig_batch_dims + t.shape[no_batch_dims:] )
return t
A_ : Dict[str, Any] = tensor_tree_map(_prep_inputs , SCREAMING_SNAKE_CASE )
A_ : Optional[Any] = None
if _out is not None:
A_ : Optional[int] = tensor_tree_map(lambda SCREAMING_SNAKE_CASE : t.view([-1] + list(t.shape[no_batch_dims:] ) ) , _out )
A_ : Dict = 1
for d in orig_batch_dims:
flat_batch_dim *= d
A_ : Optional[Any] = flat_batch_dim // chunk_size + (flat_batch_dim % chunk_size != 0)
def _select_chunk(SCREAMING_SNAKE_CASE ) -> torch.Tensor:
return t[i : i + chunk_size] if t.shape[0] != 1 else t
A_ : Union[str, Any] = 0
A_ : Optional[int] = prepped_outputs
for _ in range(SCREAMING_SNAKE_CASE ):
# Chunk the input
if not low_mem:
A_ : Optional[Any] = _select_chunk
else:
A_ : Dict = partial(
_chunk_slice , flat_start=SCREAMING_SNAKE_CASE , flat_end=min(SCREAMING_SNAKE_CASE , i + chunk_size ) , no_batch_dims=len(SCREAMING_SNAKE_CASE ) , )
A_ : Dict[str, Any] = tensor_tree_map(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
# Run the layer on the chunk
A_ : List[str] = layer(**SCREAMING_SNAKE_CASE )
# Allocate space for the output
if out is None:
A_ : Dict = tensor_tree_map(lambda SCREAMING_SNAKE_CASE : t.new_zeros((flat_batch_dim,) + t.shape[1:] ) , SCREAMING_SNAKE_CASE )
# Put the chunk in its pre-allocated space
if isinstance(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ):
def assign(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) -> None:
for k, v in da.items():
if isinstance(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ):
assign(SCREAMING_SNAKE_CASE , da[k] )
else:
if _add_into_out:
v[i : i + chunk_size] += da[k]
else:
A_ : Union[str, Any] = da[k]
assign(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
elif isinstance(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ):
for xa, xa in zip(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ):
if _add_into_out:
xa[i : i + chunk_size] += xa
else:
A_ : Any = xa
elif isinstance(SCREAMING_SNAKE_CASE , torch.Tensor ):
if _add_into_out:
out[i : i + chunk_size] += output_chunk
else:
A_ : Tuple = output_chunk
else:
raise ValueError('''Not supported''' )
i += chunk_size
A_ : Optional[Any] = tensor_tree_map(lambda SCREAMING_SNAKE_CASE : t.view(orig_batch_dims + t.shape[1:] ) , SCREAMING_SNAKE_CASE )
return out
class _lowerCamelCase :
"""simple docstring"""
def __init__( self , _SCREAMING_SNAKE_CASE = 512 , )->Any:
'''simple docstring'''
A_ : Any = max_chunk_size
A_ : Optional[int] = None
A_ : Optional[tuple] = None
def _snake_case ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )->int:
'''simple docstring'''
logging.info('''Tuning chunk size...''' )
if min_chunk_size >= self.max_chunk_size:
return min_chunk_size
A_ : List[int] = [2**l for l in range(int(math.log(self.max_chunk_size , 2 ) ) + 1 )]
A_ : int = [c for c in candidates if c > min_chunk_size]
A_ : Dict = [min_chunk_size] + candidates
candidates[-1] += 4
def test_chunk_size(_SCREAMING_SNAKE_CASE ) -> bool:
try:
with torch.no_grad():
fn(*_SCREAMING_SNAKE_CASE , chunk_size=_SCREAMING_SNAKE_CASE )
return True
except RuntimeError:
return False
A_ : List[str] = 0
A_ : Union[str, Any] = len(_SCREAMING_SNAKE_CASE ) - 1
while i > min_viable_chunk_size_index:
A_ : List[str] = test_chunk_size(candidates[i] )
if not viable:
A_ : Any = (min_viable_chunk_size_index + i) // 2
else:
A_ : Any = i
A_ : int = (i + len(_SCREAMING_SNAKE_CASE ) - 1) // 2
return candidates[min_viable_chunk_size_index]
def _snake_case ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )->bool:
'''simple docstring'''
A_ : List[Any] = True
for aa, aa in zip(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ):
assert type(_SCREAMING_SNAKE_CASE ) == type(_SCREAMING_SNAKE_CASE )
if isinstance(_SCREAMING_SNAKE_CASE , (list, tuple) ):
consistent &= self._compare_arg_caches(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
elif isinstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ):
A_ : List[Any] = [v for _, v in sorted(aa.items() , key=lambda _SCREAMING_SNAKE_CASE : x[0] )]
A_ : int = [v for _, v in sorted(aa.items() , key=lambda _SCREAMING_SNAKE_CASE : x[0] )]
consistent &= self._compare_arg_caches(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
else:
consistent &= aa == aa
return consistent
def _snake_case ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , )->int:
'''simple docstring'''
A_ : List[Any] = True
A_ : tuple = tree_map(lambda _SCREAMING_SNAKE_CASE : a.shape if isinstance(_SCREAMING_SNAKE_CASE , torch.Tensor ) else a , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
if self.cached_arg_data is not None:
# If args have changed shape/value, we need to re-tune
assert len(self.cached_arg_data ) == len(_SCREAMING_SNAKE_CASE )
A_ : Tuple = self._compare_arg_caches(self.cached_arg_data , _SCREAMING_SNAKE_CASE )
else:
# Otherwise, we can reuse the precomputed value
A_ : Union[str, Any] = False
if not consistent:
A_ : List[Any] = self._determine_favorable_chunk_size(
_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , )
A_ : Union[str, Any] = arg_data
assert self.cached_chunk_size is not None
return self.cached_chunk_size
| 186 |
import unittest
import numpy as np
import torch
from diffusers import DDIMPipeline, DDIMScheduler, UNetaDModel
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu, slow, torch_device
from ..pipeline_params import UNCONDITIONAL_IMAGE_GENERATION_BATCH_PARAMS, UNCONDITIONAL_IMAGE_GENERATION_PARAMS
from ..test_pipelines_common import PipelineTesterMixin
enable_full_determinism()
class _lowerCamelCase ( UpperCamelCase , unittest.TestCase ):
"""simple docstring"""
snake_case = DDIMPipeline
snake_case = UNCONDITIONAL_IMAGE_GENERATION_PARAMS
snake_case = PipelineTesterMixin.required_optional_params - {
"num_images_per_prompt",
"latents",
"callback",
"callback_steps",
}
snake_case = UNCONDITIONAL_IMAGE_GENERATION_BATCH_PARAMS
snake_case = False
def _snake_case ( self )->List[str]:
'''simple docstring'''
torch.manual_seed(0 )
A_ : List[str] = UNetaDModel(
block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=3 , out_channels=3 , down_block_types=('''DownBlock2D''', '''AttnDownBlock2D''') , up_block_types=('''AttnUpBlock2D''', '''UpBlock2D''') , )
A_ : Optional[Any] = DDIMScheduler()
A_ : str = {'''unet''': unet, '''scheduler''': scheduler}
return components
def _snake_case ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE=0 )->Optional[Any]:
'''simple docstring'''
if str(_SCREAMING_SNAKE_CASE ).startswith('''mps''' ):
A_ : Any = torch.manual_seed(_SCREAMING_SNAKE_CASE )
else:
A_ : Optional[int] = torch.Generator(device=_SCREAMING_SNAKE_CASE ).manual_seed(_SCREAMING_SNAKE_CASE )
A_ : Any = {
'''batch_size''': 1,
'''generator''': generator,
'''num_inference_steps''': 2,
'''output_type''': '''numpy''',
}
return inputs
def _snake_case ( self )->List[Any]:
'''simple docstring'''
A_ : Optional[int] = '''cpu'''
A_ : Dict = self.get_dummy_components()
A_ : str = self.pipeline_class(**_SCREAMING_SNAKE_CASE )
pipe.to(_SCREAMING_SNAKE_CASE )
pipe.set_progress_bar_config(disable=_SCREAMING_SNAKE_CASE )
A_ : str = self.get_dummy_inputs(_SCREAMING_SNAKE_CASE )
A_ : Any = pipe(**_SCREAMING_SNAKE_CASE ).images
A_ : int = image[0, -3:, -3:, -1]
self.assertEqual(image.shape , (1, 32, 32, 3) )
A_ : List[Any] = np.array(
[1.000e00, 5.717e-01, 4.717e-01, 1.000e00, 0.000e00, 1.000e00, 3.000e-04, 0.000e00, 9.000e-04] )
A_ : str = np.abs(image_slice.flatten() - expected_slice ).max()
self.assertLessEqual(_SCREAMING_SNAKE_CASE , 1e-3 )
def _snake_case ( self )->Union[str, Any]:
'''simple docstring'''
super().test_dict_tuple_outputs_equivalent(expected_max_difference=3e-3 )
def _snake_case ( self )->Optional[int]:
'''simple docstring'''
super().test_save_load_local(expected_max_difference=3e-3 )
def _snake_case ( self )->Optional[int]:
'''simple docstring'''
super().test_save_load_optional_components(expected_max_difference=3e-3 )
def _snake_case ( self )->Any:
'''simple docstring'''
super().test_inference_batch_single_identical(expected_max_diff=3e-3 )
@slow
@require_torch_gpu
class _lowerCamelCase ( unittest.TestCase ):
"""simple docstring"""
def _snake_case ( self )->Union[str, Any]:
'''simple docstring'''
A_ : int = '''google/ddpm-cifar10-32'''
A_ : Tuple = UNetaDModel.from_pretrained(_SCREAMING_SNAKE_CASE )
A_ : str = DDIMScheduler()
A_ : str = DDIMPipeline(unet=_SCREAMING_SNAKE_CASE , scheduler=_SCREAMING_SNAKE_CASE )
ddim.to(_SCREAMING_SNAKE_CASE )
ddim.set_progress_bar_config(disable=_SCREAMING_SNAKE_CASE )
A_ : Optional[int] = torch.manual_seed(0 )
A_ : Any = ddim(generator=_SCREAMING_SNAKE_CASE , eta=0.0 , output_type='''numpy''' ).images
A_ : Any = image[0, -3:, -3:, -1]
assert image.shape == (1, 32, 32, 3)
A_ : Any = np.array([0.1_7_2_3, 0.1_6_1_7, 0.1_6_0_0, 0.1_6_2_6, 0.1_4_9_7, 0.1_5_1_3, 0.1_5_0_5, 0.1_4_4_2, 0.1_4_5_3] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
def _snake_case ( self )->List[str]:
'''simple docstring'''
A_ : Tuple = '''google/ddpm-ema-bedroom-256'''
A_ : int = UNetaDModel.from_pretrained(_SCREAMING_SNAKE_CASE )
A_ : Any = DDIMScheduler.from_pretrained(_SCREAMING_SNAKE_CASE )
A_ : Optional[Any] = DDIMPipeline(unet=_SCREAMING_SNAKE_CASE , scheduler=_SCREAMING_SNAKE_CASE )
ddpm.to(_SCREAMING_SNAKE_CASE )
ddpm.set_progress_bar_config(disable=_SCREAMING_SNAKE_CASE )
A_ : Dict = torch.manual_seed(0 )
A_ : List[str] = ddpm(generator=_SCREAMING_SNAKE_CASE , output_type='''numpy''' ).images
A_ : Any = image[0, -3:, -3:, -1]
assert image.shape == (1, 256, 256, 3)
A_ : Tuple = np.array([0.0_0_6_0, 0.0_2_0_1, 0.0_3_4_4, 0.0_0_2_4, 0.0_0_1_8, 0.0_0_0_2, 0.0_0_2_2, 0.0_0_0_0, 0.0_0_6_9] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
| 186 | 1 |
class _UpperCAmelCase :
'''simple docstring'''
def __init__( self : Optional[Any] , lowercase_ : Dict) -> str:
"""simple docstring"""
_UpperCamelCase = arr.split(",")
def __UpperCAmelCase ( self : Optional[int]) -> List[str]:
"""simple docstring"""
_UpperCamelCase = [int(self.array[0])] * len(self.array)
_UpperCamelCase = [int(self.array[0])] * len(self.array)
for i in range(1 , len(self.array)):
_UpperCamelCase = max(
int(self.array[i]) + sum_value[i - 1] , int(self.array[i]))
_UpperCamelCase = max(sum_value[i] , rear[i - 1])
return rear[len(self.array) - 1]
if __name__ == "__main__":
lowerCamelCase__ = input('''please input some numbers:''')
lowerCamelCase__ = SubArray(whole_array)
lowerCamelCase__ = array.solve_sub_array()
print(('''the results is:''', re))
| 361 | from __future__ import annotations
import random
import unittest
from transformers import TransfoXLConfig, is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import (
TF_TRANSFO_XL_PRETRAINED_MODEL_ARCHIVE_LIST,
TFTransfoXLForSequenceClassification,
TFTransfoXLLMHeadModel,
TFTransfoXLModel,
)
class _UpperCAmelCase :
'''simple docstring'''
def __init__( self : Optional[Any] , lowercase_ : Optional[Any] , ) -> Optional[Any]:
"""simple docstring"""
_UpperCamelCase = parent
_UpperCamelCase = 13
_UpperCamelCase = 7
_UpperCamelCase = 30
_UpperCamelCase = self.seq_length + self.mem_len
_UpperCamelCase = 15
_UpperCamelCase = True
_UpperCamelCase = True
_UpperCamelCase = 99
_UpperCamelCase = [10, 50, 80]
_UpperCamelCase = 32
_UpperCamelCase = 32
_UpperCamelCase = 4
_UpperCamelCase = 8
_UpperCamelCase = 128
_UpperCamelCase = 2
_UpperCamelCase = 2
_UpperCamelCase = None
_UpperCamelCase = 1
_UpperCamelCase = 0
_UpperCamelCase = 3
_UpperCamelCase = self.vocab_size - 1
_UpperCamelCase = 0.01
def __UpperCAmelCase ( self : Dict) -> Optional[int]:
"""simple docstring"""
_UpperCamelCase = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size)
_UpperCamelCase = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size)
_UpperCamelCase = None
if self.use_labels:
_UpperCamelCase = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size)
_UpperCamelCase = TransfoXLConfig(
vocab_size=self.vocab_size , mem_len=self.mem_len , clamp_len=self.clamp_len , cutoffs=self.cutoffs , d_model=self.hidden_size , d_embed=self.d_embed , n_head=self.num_attention_heads , d_head=self.d_head , d_inner=self.d_inner , div_val=self.div_val , n_layer=self.num_hidden_layers , eos_token_id=self.eos_token_id , pad_token_id=self.vocab_size - 1 , init_range=self.init_range , num_labels=self.num_labels , )
return (config, input_ids_a, input_ids_a, lm_labels)
def __UpperCAmelCase ( self : Union[str, Any]) -> Tuple:
"""simple docstring"""
random.seed(self.seed)
tf.random.set_seed(self.seed)
def __UpperCAmelCase ( self : int , lowercase_ : Optional[int] , lowercase_ : Tuple , lowercase_ : Optional[Any] , lowercase_ : Optional[Any]) -> Union[str, Any]:
"""simple docstring"""
_UpperCamelCase = TFTransfoXLModel(lowercase_)
_UpperCamelCase , _UpperCamelCase = model(lowercase_).to_tuple()
_UpperCamelCase = {"input_ids": input_ids_a, "mems": mems_a}
_UpperCamelCase , _UpperCamelCase = model(lowercase_).to_tuple()
self.parent.assertEqual(hidden_states_a.shape , (self.batch_size, self.seq_length, self.hidden_size))
self.parent.assertEqual(hidden_states_a.shape , (self.batch_size, self.seq_length, self.hidden_size))
self.parent.assertListEqual(
[mem.shape for mem in mems_a] , [(self.mem_len, self.batch_size, self.hidden_size)] * self.num_hidden_layers , )
self.parent.assertListEqual(
[mem.shape for mem in mems_a] , [(self.mem_len, self.batch_size, self.hidden_size)] * self.num_hidden_layers , )
def __UpperCAmelCase ( self : Dict , lowercase_ : str , lowercase_ : str , lowercase_ : Dict , lowercase_ : List[Any]) -> Union[str, Any]:
"""simple docstring"""
_UpperCamelCase = TFTransfoXLLMHeadModel(lowercase_)
_UpperCamelCase , _UpperCamelCase = model(lowercase_).to_tuple()
_UpperCamelCase = {"input_ids": input_ids_a, "labels": lm_labels}
_UpperCamelCase , _UpperCamelCase = model(lowercase_).to_tuple()
_UpperCamelCase , _UpperCamelCase = model([input_ids_a, mems_a]).to_tuple()
_UpperCamelCase = {"input_ids": input_ids_a, "mems": mems_a, "labels": lm_labels}
_UpperCamelCase , _UpperCamelCase = model(lowercase_).to_tuple()
self.parent.assertEqual(lm_logits_a.shape , (self.batch_size, self.seq_length, self.vocab_size))
self.parent.assertListEqual(
[mem.shape for mem in mems_a] , [(self.mem_len, self.batch_size, self.hidden_size)] * self.num_hidden_layers , )
self.parent.assertEqual(lm_logits_a.shape , (self.batch_size, self.seq_length, self.vocab_size))
self.parent.assertListEqual(
[mem.shape for mem in mems_a] , [(self.mem_len, self.batch_size, self.hidden_size)] * self.num_hidden_layers , )
def __UpperCAmelCase ( self : Optional[Any] , lowercase_ : List[Any] , lowercase_ : List[Any] , lowercase_ : Optional[Any] , lowercase_ : Dict) -> str:
"""simple docstring"""
_UpperCamelCase = TFTransfoXLForSequenceClassification(lowercase_)
_UpperCamelCase = model(lowercase_)
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels))
def __UpperCAmelCase ( self : Dict) -> List[Any]:
"""simple docstring"""
_UpperCamelCase = self.prepare_config_and_inputs()
((_UpperCamelCase) , (_UpperCamelCase) , (_UpperCamelCase) , (_UpperCamelCase)) = config_and_inputs
_UpperCamelCase = {"input_ids": input_ids_a}
return config, inputs_dict
@require_tf
class _UpperCAmelCase ( lowerCAmelCase, lowerCAmelCase, unittest.TestCase ):
'''simple docstring'''
__A = (
(TFTransfoXLModel, TFTransfoXLLMHeadModel, TFTransfoXLForSequenceClassification) if is_tf_available() else ()
)
__A = () if is_tf_available() else ()
__A = (
{
'''feature-extraction''': TFTransfoXLModel,
'''text-classification''': TFTransfoXLForSequenceClassification,
'''text-generation''': TFTransfoXLLMHeadModel,
'''zero-shot''': TFTransfoXLForSequenceClassification,
}
if is_tf_available()
else {}
)
# TODO: add this test when TFTransfoXLLMHead has a linear output layer implemented
__A = False
__A = False
__A = False
__A = False
def __UpperCAmelCase ( self : List[Any] , lowercase_ : Dict , lowercase_ : Tuple , lowercase_ : Dict , lowercase_ : Any , lowercase_ : List[str]) -> Any:
"""simple docstring"""
if pipeline_test_casse_name == "TextGenerationPipelineTests":
# Get `ValueError: AttributeError: 'NoneType' object has no attribute 'new_ones'` or `AssertionError`.
# `TransfoXLConfig` was never used in pipeline tests: cannot create a simple
# tokenizer.
return True
return False
def __UpperCAmelCase ( self : Optional[Any]) -> int:
"""simple docstring"""
_UpperCamelCase = TFTransfoXLModelTester(self)
_UpperCamelCase = ConfigTester(self , config_class=lowercase_ , d_embed=37)
def __UpperCAmelCase ( self : Dict) -> Optional[int]:
"""simple docstring"""
self.config_tester.run_common_tests()
def __UpperCAmelCase ( self : Union[str, Any]) -> List[str]:
"""simple docstring"""
self.model_tester.set_seed()
_UpperCamelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_transfo_xl_model(*lowercase_)
def __UpperCAmelCase ( self : Optional[Any]) -> List[Any]:
"""simple docstring"""
self.model_tester.set_seed()
_UpperCamelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_transfo_xl_lm_head(*lowercase_)
def __UpperCAmelCase ( self : List[str]) -> List[Any]:
"""simple docstring"""
_UpperCamelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_transfo_xl_for_sequence_classification(*lowercase_)
def __UpperCAmelCase ( self : Dict) -> int:
"""simple docstring"""
_UpperCamelCase , _UpperCamelCase = self.model_tester.prepare_config_and_inputs_for_common()
_UpperCamelCase = [TFTransfoXLForSequenceClassification]
for model_class in self.all_model_classes:
_UpperCamelCase = model_class(lowercase_)
assert isinstance(model.get_input_embeddings() , tf.keras.layers.Layer)
if model_class in list_other_models_with_output_ebd:
_UpperCamelCase = model.get_output_embeddings()
assert isinstance(lowercase_ , tf.keras.layers.Layer)
_UpperCamelCase = model.get_bias()
assert name is None
else:
_UpperCamelCase = model.get_output_embeddings()
assert x is None
_UpperCamelCase = model.get_bias()
assert name is None
def __UpperCAmelCase ( self : Optional[int]) -> Any:
"""simple docstring"""
pass
@slow
def __UpperCAmelCase ( self : List[str]) -> Tuple:
"""simple docstring"""
for model_name in TF_TRANSFO_XL_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
_UpperCamelCase = TFTransfoXLModel.from_pretrained(lowercase_)
self.assertIsNotNone(lowercase_)
@unittest.skip(reason="This model doesn't play well with fit() due to not returning a single loss.")
def __UpperCAmelCase ( self : Union[str, Any]) -> Tuple:
"""simple docstring"""
pass
@require_tf
class _UpperCAmelCase ( unittest.TestCase ):
'''simple docstring'''
@unittest.skip("Skip test until #12651 is resolved.")
@slow
def __UpperCAmelCase ( self : Optional[Any]) -> Dict:
"""simple docstring"""
_UpperCamelCase = TFTransfoXLLMHeadModel.from_pretrained("transfo-xl-wt103")
# fmt: off
_UpperCamelCase = tf.convert_to_tensor([[33,1297,2,1,1009,4,1109,11739,4762,358,5,25,245,22,1706,17,20098,5,3215,21,37,1110,3,13,1041,4,24,603,490,2,71477,20098,104447,2,20961,1,2604,4,1,329,3,6224,831,16002,2,8,603,78967,29546,23,803,20,25,416,5,8,232,4,277,6,1855,4601,3,29546,54,8,3609,5,57211,49,4,1,277,18,8,1755,15691,3,341,25,416,693,42573,71,17,401,94,31,17919,2,29546,7873,18,1,435,23,11011,755,5,5167,3,7983,98,84,2,29546,3267,8,3609,4,1,4865,1075,2,6087,71,6,346,8,5854,3,29546,824,1400,1868,2,19,160,2,311,8,5496,2,20920,17,25,15097,3,24,24,0]] , dtype=tf.intaa) # noqa: E231
# fmt: on
# In 1991 , the remains of Russian Tsar Nicholas II and his family
# ( except for Alexei and Maria ) are discovered .
# The voice of Nicholas's young son , Tsarevich Alexei Nikolaevich , narrates the
# remainder of the story . 1883 Western Siberia ,
# a young Grigori Rasputin is asked by his father and a group of men to perform magic .
# Rasputin has a vision and denounces one of the men as a horse thief . Although his
# father initially slaps him for making such an accusation , Rasputin watches as the
# man is chased outside and beaten . Twenty years later , Rasputin sees a vision of
# the Virgin Mary , prompting him to become a priest . Rasputin quickly becomes famous ,
# with people , even a bishop , begging for his blessing . <eod> </s> <eos>
# fmt: off
_UpperCamelCase = [33,1297,2,1,1009,4,1109,11739,4762,358,5,25,245,22,1706,17,20098,5,3215,21,37,1110,3,13,1041,4,24,603,490,2,71477,20098,104447,2,20961,1,2604,4,1,329,3,6224,831,16002,2,8,603,78967,29546,23,803,20,25,416,5,8,232,4,277,6,1855,4601,3,29546,54,8,3609,5,57211,49,4,1,277,18,8,1755,15691,3,341,25,416,693,42573,71,17,401,94,31,17919,2,29546,7873,18,1,435,23,11011,755,5,5167,3,7983,98,84,2,29546,3267,8,3609,4,1,4865,1075,2,6087,71,6,346,8,5854,3,29546,824,1400,1868,2,19,160,2,311,8,5496,2,20920,17,25,15097,3,24,24,0,33,1,1857,2,1,1009,4,1109,11739,4762,358,5,25,245,28,1110,3,13,1041,4,24,603,490,2,71477,20098,104447,2,20961,1,2604,4,1,329,3,0] # noqa: E231
# fmt: on
# In 1991, the remains of Russian Tsar Nicholas II and his family (
# except for Alexei and Maria ) are discovered. The voice of young son,
# Tsarevich Alexei Nikolaevich, narrates the remainder of the story.
# 1883 Western Siberia, a young Grigori Rasputin is asked by his father
# and a group of men to perform magic. Rasputin has a vision and
# denounces one of the men as a horse thief. Although his father initially
# slaps him for making such an accusation, Rasputin watches as the man
# is chased outside and beaten. Twenty years later, Rasputin sees a vision
# of the Virgin Mary, prompting him to become a priest.
# Rasputin quickly becomes famous, with people, even a bishop, begging for
# his blessing. <unk> <unk> <eos> In the 1990s, the remains of Russian Tsar
# Nicholas II and his family were discovered. The voice of <unk> young son,
# Tsarevich Alexei Nikolaevich, narrates the remainder of the story.<eos>
_UpperCamelCase = model.generate(lowercase_ , max_length=200 , do_sample=lowercase_)
self.assertListEqual(output_ids[0].numpy().tolist() , lowercase_)
| 63 | 0 |
'''simple docstring'''
import unittest
from transformers import (
MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING,
TF_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING,
TextaTextGenerationPipeline,
pipeline,
)
from transformers.testing_utils import is_pipeline_test, require_tf, require_torch
from transformers.utils import is_torch_available
from .test_pipelines_common import ANY
if is_torch_available():
import torch
@is_pipeline_test
class lowercase_ ( unittest.TestCase ):
__UpperCAmelCase = MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING
__UpperCAmelCase = TF_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING
def __a ( self , a , a , a ):
UpperCamelCase__ = TextaTextGenerationPipeline(model=a , tokenizer=a )
return generator, ["Something to write", "Something else"]
def __a ( self , a , a ):
UpperCamelCase__ = generator("Something there" )
self.assertEqual(a , [{"generated_text": ANY(a )}] )
# These are encoder decoder, they don't just append to incoming string
self.assertFalse(outputs[0]["generated_text"].startswith("Something there" ) )
UpperCamelCase__ = generator(["This is great !", "Something else"] , num_return_sequences=2 , do_sample=a )
self.assertEqual(
a , [
[{"generated_text": ANY(a )}, {"generated_text": ANY(a )}],
[{"generated_text": ANY(a )}, {"generated_text": ANY(a )}],
] , )
UpperCamelCase__ = generator(
["This is great !", "Something else"] , num_return_sequences=2 , batch_size=2 , do_sample=a )
self.assertEqual(
a , [
[{"generated_text": ANY(a )}, {"generated_text": ANY(a )}],
[{"generated_text": ANY(a )}, {"generated_text": ANY(a )}],
] , )
with self.assertRaises(a ):
generator(4 )
@require_torch
def __a ( self ):
UpperCamelCase__ = pipeline("text2text-generation" , model="patrickvonplaten/t5-tiny-random" , framework="pt" )
# do_sample=False necessary for reproducibility
UpperCamelCase__ = generator("Something there" , do_sample=a )
self.assertEqual(a , [{"generated_text": ""}] )
UpperCamelCase__ = 3
UpperCamelCase__ = generator(
"Something there" , num_return_sequences=a , num_beams=a , )
UpperCamelCase__ = [
{"generated_text": "Beide Beide Beide Beide Beide Beide Beide Beide Beide"},
{"generated_text": "Beide Beide Beide Beide Beide Beide Beide Beide"},
{"generated_text": ""},
]
self.assertEqual(a , a )
UpperCamelCase__ = generator("This is a test" , do_sample=a , num_return_sequences=2 , return_tensors=a )
self.assertEqual(
a , [
{"generated_token_ids": ANY(torch.Tensor )},
{"generated_token_ids": ANY(torch.Tensor )},
] , )
UpperCamelCase__ = generator.model.config.eos_token_id
UpperCamelCase__ = "<pad>"
UpperCamelCase__ = generator(
["This is a test", "This is a second test"] , do_sample=a , num_return_sequences=2 , batch_size=2 , return_tensors=a , )
self.assertEqual(
a , [
[
{"generated_token_ids": ANY(torch.Tensor )},
{"generated_token_ids": ANY(torch.Tensor )},
],
[
{"generated_token_ids": ANY(torch.Tensor )},
{"generated_token_ids": ANY(torch.Tensor )},
],
] , )
@require_tf
def __a ( self ):
UpperCamelCase__ = pipeline("text2text-generation" , model="patrickvonplaten/t5-tiny-random" , framework="tf" )
# do_sample=False necessary for reproducibility
UpperCamelCase__ = generator("Something there" , do_sample=a )
self.assertEqual(a , [{"generated_text": ""}] )
| 80 |
'''simple docstring'''
import enum
import warnings
from .. import MODEL_FOR_CAUSAL_LM_MAPPING, TF_MODEL_FOR_CAUSAL_LM_MAPPING
from ..utils import add_end_docstrings, is_tf_available
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_tf_available():
import tensorflow as tf
class lowercase_ ( enum.Enum ):
__UpperCAmelCase = 0
__UpperCAmelCase = 1
__UpperCAmelCase = 2
@add_end_docstrings(a__ )
class lowercase_ ( a__ ):
__UpperCAmelCase = '\n In 1991, the remains of Russian Tsar Nicholas II and his family (except for Alexei and Maria) are discovered. The\n voice of Nicholas\'s young son, Tsarevich Alexei Nikolaevich, narrates the remainder of the story. 1883 Western\n Siberia, a young Grigori Rasputin is asked by his father and a group of men to perform magic. Rasputin has a vision\n and denounces one of the men as a horse thief. Although his father initially slaps him for making such an\n accusation, Rasputin watches as the man is chased outside and beaten. Twenty years later, Rasputin sees a vision of\n the Virgin Mary, prompting him to become a priest. Rasputin quickly becomes famous, with people, even a bishop,\n begging for his blessing. <eod> </s> <eos>\n '
def __init__( self , *a , **a ):
super().__init__(*a , **a )
self.check_model_type(
TF_MODEL_FOR_CAUSAL_LM_MAPPING if self.framework == "tf" else MODEL_FOR_CAUSAL_LM_MAPPING )
if "prefix" not in self._preprocess_params:
# This is very specific. The logic is quite complex and needs to be done
# as a "default".
# It also defines both some preprocess_kwargs and generate_kwargs
# which is why we cannot put them in their respective methods.
UpperCamelCase__ = None
if self.model.config.prefix is not None:
UpperCamelCase__ = self.model.config.prefix
if prefix is None and self.model.__class__.__name__ in [
"XLNetLMHeadModel",
"TransfoXLLMHeadModel",
"TFXLNetLMHeadModel",
"TFTransfoXLLMHeadModel",
]:
# For XLNet and TransformerXL we add an article to the prompt to give more state to the model.
UpperCamelCase__ = self.XL_PREFIX
if prefix is not None:
# Recalculate some generate_kwargs linked to prefix.
UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ = self._sanitize_parameters(prefix=a , **self._forward_params )
UpperCamelCase__ = {**self._preprocess_params, **preprocess_params}
UpperCamelCase__ = {**self._forward_params, **forward_params}
def __a ( self , a=None , a=None , a=None , a=None , a=None , a=None , a=None , a=None , **a , ):
UpperCamelCase__ = {}
if prefix is not None:
UpperCamelCase__ = prefix
if prefix:
UpperCamelCase__ = self.tokenizer(
a , padding=a , add_special_tokens=a , return_tensors=self.framework )
UpperCamelCase__ = prefix_inputs["input_ids"].shape[-1]
if handle_long_generation is not None:
if handle_long_generation not in {"hole"}:
raise ValueError(
f'''{handle_long_generation} is not a valid value for `handle_long_generation` parameter expected'''
" [None, 'hole']" )
UpperCamelCase__ = handle_long_generation
preprocess_params.update(a )
UpperCamelCase__ = generate_kwargs
UpperCamelCase__ = {}
if return_full_text is not None and return_type is None:
if return_text is not None:
raise ValueError("`return_text` is mutually exclusive with `return_full_text`" )
if return_tensors is not None:
raise ValueError("`return_full_text` is mutually exclusive with `return_tensors`" )
UpperCamelCase__ = ReturnType.FULL_TEXT if return_full_text else ReturnType.NEW_TEXT
if return_tensors is not None and return_type is None:
if return_text is not None:
raise ValueError("`return_text` is mutually exclusive with `return_tensors`" )
UpperCamelCase__ = ReturnType.TENSORS
if return_type is not None:
UpperCamelCase__ = return_type
if clean_up_tokenization_spaces is not None:
UpperCamelCase__ = clean_up_tokenization_spaces
if stop_sequence is not None:
UpperCamelCase__ = self.tokenizer.encode(a , add_special_tokens=a )
if len(a ) > 1:
warnings.warn(
"Stopping on a multiple token sequence is not yet supported on transformers. The first token of"
" the stop sequence will be used as the stop sequence string in the interim." )
UpperCamelCase__ = stop_sequence_ids[0]
return preprocess_params, forward_params, postprocess_params
def __a ( self , *a , **a ):
# Parse arguments
if self.model.__class__.__name__ in ["TransfoXLLMHeadModel"]:
kwargs.update({"add_space_before_punct_symbol": True} )
return super()._parse_and_tokenize(*a , **a )
def __call__( self , a , **a ):
return super().__call__(a , **a )
def __a ( self , a , a="" , a=None , **a ):
UpperCamelCase__ = self.tokenizer(
prefix + prompt_text , padding=a , add_special_tokens=a , return_tensors=self.framework )
UpperCamelCase__ = prompt_text
if handle_long_generation == "hole":
UpperCamelCase__ = inputs["input_ids"].shape[-1]
if "max_new_tokens" in generate_kwargs:
UpperCamelCase__ = generate_kwargs["max_new_tokens"]
else:
UpperCamelCase__ = generate_kwargs.get("max_length" , self.model.config.max_length ) - cur_len
if new_tokens < 0:
raise ValueError("We cannot infer how many new tokens are expected" )
if cur_len + new_tokens > self.tokenizer.model_max_length:
UpperCamelCase__ = self.tokenizer.model_max_length - new_tokens
if keep_length <= 0:
raise ValueError(
"We cannot use `hole` to handle this generation the number of desired tokens exceeds the"
" models max length" )
UpperCamelCase__ = inputs["input_ids"][:, -keep_length:]
if "attention_mask" in inputs:
UpperCamelCase__ = inputs["attention_mask"][:, -keep_length:]
return inputs
def __a ( self , a , **a ):
UpperCamelCase__ = model_inputs["input_ids"]
UpperCamelCase__ = model_inputs.get("attention_mask" , a )
# Allow empty prompts
if input_ids.shape[1] == 0:
UpperCamelCase__ = None
UpperCamelCase__ = None
UpperCamelCase__ = 1
else:
UpperCamelCase__ = input_ids.shape[0]
UpperCamelCase__ = model_inputs.pop("prompt_text" )
# If there is a prefix, we may need to adjust the generation length. Do so without permanently modifying
# generate_kwargs, as some of the parameterization may come from the initialization of the pipeline.
UpperCamelCase__ = generate_kwargs.pop("prefix_length" , 0 )
if prefix_length > 0:
UpperCamelCase__ = "max_new_tokens" in generate_kwargs or (
"generation_config" in generate_kwargs
and generate_kwargs["generation_config"].max_new_tokens is not None
)
if not has_max_new_tokens:
UpperCamelCase__ = generate_kwargs.get("max_length" ) or self.model.config.max_length
generate_kwargs["max_length"] += prefix_length
UpperCamelCase__ = "min_new_tokens" in generate_kwargs or (
"generation_config" in generate_kwargs
and generate_kwargs["generation_config"].min_new_tokens is not None
)
if not has_min_new_tokens and "min_length" in generate_kwargs:
generate_kwargs["min_length"] += prefix_length
# BS x SL
UpperCamelCase__ = self.model.generate(input_ids=a , attention_mask=a , **a )
UpperCamelCase__ = generated_sequence.shape[0]
if self.framework == "pt":
UpperCamelCase__ = generated_sequence.reshape(a , out_b // in_b , *generated_sequence.shape[1:] )
elif self.framework == "tf":
UpperCamelCase__ = tf.reshape(a , (in_b, out_b // in_b, *generated_sequence.shape[1:]) )
return {"generated_sequence": generated_sequence, "input_ids": input_ids, "prompt_text": prompt_text}
def __a ( self , a , a=ReturnType.FULL_TEXT , a=True ):
UpperCamelCase__ = model_outputs["generated_sequence"][0]
UpperCamelCase__ = model_outputs["input_ids"]
UpperCamelCase__ = model_outputs["prompt_text"]
UpperCamelCase__ = generated_sequence.numpy().tolist()
UpperCamelCase__ = []
for sequence in generated_sequence:
if return_type == ReturnType.TENSORS:
UpperCamelCase__ = {"generated_token_ids": sequence}
elif return_type in {ReturnType.NEW_TEXT, ReturnType.FULL_TEXT}:
# Decode text
UpperCamelCase__ = self.tokenizer.decode(
a , skip_special_tokens=a , clean_up_tokenization_spaces=a , )
# Remove PADDING prompt of the sequence if XLNet or Transfo-XL model is used
if input_ids is None:
UpperCamelCase__ = 0
else:
UpperCamelCase__ = len(
self.tokenizer.decode(
input_ids[0] , skip_special_tokens=a , clean_up_tokenization_spaces=a , ) )
if return_type == ReturnType.FULL_TEXT:
UpperCamelCase__ = prompt_text + text[prompt_length:]
else:
UpperCamelCase__ = text[prompt_length:]
UpperCamelCase__ = {"generated_text": all_text}
records.append(a )
return records
| 80 | 1 |
from multiprocessing import Lock, Pipe, Process
# lock used to ensure that two processes do not access a pipe at the same time
__UpperCAmelCase : Tuple = Lock()
def A__ ( SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__) -> List[Any]:
global process_lock
# we perform n swaps since after n swaps we know we are sorted
# we *could* stop early if we are sorted already, but it takes as long to
# find out we are sorted as it does to sort the list with this algorithm
for i in range(0 , 10):
if (i + position) % 2 == 0 and r_send is not None:
# send your value to your right neighbor
process_lock.acquire()
r_send[1].send(SCREAMING_SNAKE_CASE__)
process_lock.release()
# receive your right neighbor's value
process_lock.acquire()
__snake_case: Union[str, Any] = rr_cv[0].recv()
process_lock.release()
# take the lower value since you are on the left
__snake_case: Optional[Any] = min(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__)
elif (i + position) % 2 != 0 and l_send is not None:
# send your value to your left neighbor
process_lock.acquire()
l_send[1].send(SCREAMING_SNAKE_CASE__)
process_lock.release()
# receive your left neighbor's value
process_lock.acquire()
__snake_case: int = lr_cv[0].recv()
process_lock.release()
# take the higher value since you are on the right
__snake_case: int = max(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__)
# after all swaps are performed, send the values back to main
result_pipe[1].send(SCREAMING_SNAKE_CASE__)
def A__ ( SCREAMING_SNAKE_CASE__) -> Union[str, Any]:
__snake_case: List[Any] = []
__snake_case: List[Any] = []
# initialize the list of pipes where the values will be retrieved
for _ in arr:
result_pipe.append(Pipe())
# creates the processes
# the first and last process only have one neighbor so they are made outside
# of the loop
__snake_case: str = Pipe()
__snake_case: Any = Pipe()
process_array_.append(
Process(
target=SCREAMING_SNAKE_CASE__ , args=(0, arr[0], None, temp_rs, None, temp_rr, result_pipe[0]) , ))
__snake_case: Optional[Any] = temp_rs
__snake_case: Optional[int] = temp_rr
for i in range(1 , len(SCREAMING_SNAKE_CASE__) - 1):
__snake_case: Optional[Any] = Pipe()
__snake_case: int = Pipe()
process_array_.append(
Process(
target=SCREAMING_SNAKE_CASE__ , args=(i, arr[i], temp_ls, temp_rs, temp_lr, temp_rr, result_pipe[i]) , ))
__snake_case: Any = temp_rs
__snake_case: List[Any] = temp_rr
process_array_.append(
Process(
target=SCREAMING_SNAKE_CASE__ , args=(
len(SCREAMING_SNAKE_CASE__) - 1,
arr[len(SCREAMING_SNAKE_CASE__) - 1],
temp_ls,
None,
temp_lr,
None,
result_pipe[len(SCREAMING_SNAKE_CASE__) - 1],
) , ))
# start the processes
for p in process_array_:
p.start()
# wait for the processes to end and write their values to the list
for p in range(0 , len(SCREAMING_SNAKE_CASE__)):
__snake_case: Tuple = result_pipe[p][0].recv()
process_array_[p].join()
return arr
def A__ ( ) -> Union[str, Any]:
__snake_case: List[str] = list(range(10 , 0 , -1))
print("""Initial List""")
print(*SCREAMING_SNAKE_CASE__)
__snake_case: int = odd_even_transposition(SCREAMING_SNAKE_CASE__)
print("""Sorted List\n""")
print(*SCREAMING_SNAKE_CASE__)
if __name__ == "__main__":
main()
| 293 |
from typing import Dict, List, Optional, Union
import numpy as np
from .feature_extraction_utils import BatchFeature, FeatureExtractionMixin
from .utils import PaddingStrategy, TensorType, is_tf_tensor, is_torch_tensor, logging, to_numpy
__UpperCAmelCase : str = logging.get_logger(__name__)
class __snake_case ( __lowerCamelCase ):
'''simple docstring'''
def __init__( self : Any , A : int , A : int , A : float , **A : Optional[int] ):
__snake_case: List[str] = feature_size
__snake_case: Optional[int] = sampling_rate
__snake_case: Any = padding_value
__snake_case: Dict = kwargs.pop("""padding_side""" , """right""" )
__snake_case: Union[str, Any] = kwargs.pop("""return_attention_mask""" , A )
super().__init__(**A )
def UpperCAmelCase__ ( self : Optional[Any] , A : Union[
BatchFeature,
List[BatchFeature],
Dict[str, BatchFeature],
Dict[str, List[BatchFeature]],
List[Dict[str, BatchFeature]],
] , A : Union[bool, str, PaddingStrategy] = True , A : Optional[int] = None , A : bool = False , A : Optional[int] = None , A : Optional[bool] = None , A : Optional[Union[str, TensorType]] = None , ):
# If we have a list of dicts, let's convert it in a dict of lists
# We do this to allow using this method as a collate_fn function in PyTorch Dataloader
if isinstance(A , (list, tuple) ) and isinstance(processed_features[0] , (dict, BatchFeature) ):
__snake_case: Optional[int] = {
key: [example[key] for example in processed_features] for key in processed_features[0].keys()
}
# The model's main input name, usually `input_values`, has be passed for padding
if self.model_input_names[0] not in processed_features:
raise ValueError(
"""You should supply an instance of `transformers.BatchFeature` or list of `transformers.BatchFeature`"""
f''' to this method that includes {self.model_input_names[0]}, but you provided'''
f''' {list(processed_features.keys() )}''' )
__snake_case: List[str] = processed_features[self.model_input_names[0]]
__snake_case: Any = (
return_attention_mask if return_attention_mask is not None else self.return_attention_mask
)
if len(A ) == 0:
if return_attention_mask:
__snake_case: Union[str, Any] = []
return processed_features
# If we have PyTorch/TF tensors or lists as inputs, we cast them as Numpy arrays
# and rebuild them afterwards if no return_tensors is specified
# Note that we lose the specific device the tensor may be on for PyTorch
__snake_case: int = required_input[0]
if isinstance(A , (list, tuple) ):
# first_element might be an empty list/tuple in some edge cases so we grab the first non empty element.
__snake_case: Optional[int] = 0
while len(required_input[index] ) == 0:
index += 1
if index < len(A ):
__snake_case: Optional[int] = required_input[index][0]
if return_tensors is None:
if is_tf_tensor(A ):
__snake_case: str = """tf"""
elif is_torch_tensor(A ):
__snake_case: str = """pt"""
elif isinstance(A , (int, float, list, tuple, np.ndarray) ):
__snake_case: List[str] = """np"""
else:
raise ValueError(
f'''type of {first_element} unknown: {type(A )}. '''
"""Should be one of a python, numpy, pytorch or tensorflow object.""" )
for key, value in processed_features.items():
if isinstance(value[0] , (int, float) ):
__snake_case: List[Any] = to_numpy(A )
else:
__snake_case: Union[str, Any] = [to_numpy(A ) for v in value]
# Convert padding_strategy in PaddingStrategy
__snake_case: Union[str, Any] = self._get_padding_strategies(padding=A , max_length=A )
__snake_case: Any = processed_features[self.model_input_names[0]]
__snake_case: int = len(A )
if not all(len(A ) == batch_size for v in processed_features.values() ):
raise ValueError("""Some items in the output dictionary have a different batch size than others.""" )
__snake_case: Union[str, Any] = []
for i in range(A ):
__snake_case: List[Any] = {k: v[i] for k, v in processed_features.items()}
# truncation
__snake_case: Tuple = self._truncate(
A , max_length=A , pad_to_multiple_of=A , truncation=A , )
truncated_inputs.append(A )
if padding_strategy == PaddingStrategy.LONGEST:
# make sure that `max_length` cannot be longer than the longest truncated length
__snake_case: Optional[Any] = max(len(input_slice[self.model_input_names[0]] ) for input_slice in truncated_inputs )
__snake_case: List[str] = PaddingStrategy.MAX_LENGTH
__snake_case: List[Any] = {}
for i in range(A ):
# padding
__snake_case: Any = self._pad(
truncated_inputs[i] , max_length=A , padding_strategy=A , pad_to_multiple_of=A , return_attention_mask=A , )
for key, value in outputs.items():
if key not in batch_outputs:
__snake_case: Optional[Any] = []
if value.dtype is np.dtype(np.floataa ):
__snake_case: str = value.astype(np.floataa )
batch_outputs[key].append(A )
return BatchFeature(A , tensor_type=A )
def UpperCAmelCase__ ( self : int , A : Union[Dict[str, np.ndarray], BatchFeature] , A : Optional[int] = None , A : PaddingStrategy = PaddingStrategy.DO_NOT_PAD , A : Optional[int] = None , A : Optional[bool] = None , ):
__snake_case: List[Any] = processed_features[self.model_input_names[0]]
if padding_strategy == PaddingStrategy.LONGEST:
__snake_case: List[str] = len(A )
if max_length is not None and pad_to_multiple_of is not None and (max_length % pad_to_multiple_of != 0):
__snake_case: List[Any] = ((max_length // pad_to_multiple_of) + 1) * pad_to_multiple_of
__snake_case: Dict = padding_strategy != PaddingStrategy.DO_NOT_PAD and len(A ) < max_length
if return_attention_mask and "attention_mask" not in processed_features:
__snake_case: List[str] = np.ones(len(A ) , dtype=np.intaa )
if needs_to_be_padded:
__snake_case: Any = max_length - len(A )
if self.padding_side == "right":
if return_attention_mask:
__snake_case: Optional[int] = np.pad(
processed_features["""attention_mask"""] , (0, difference) )
__snake_case: Any = ((0, difference), (0, 0)) if self.feature_size > 1 else (0, difference)
__snake_case: Union[str, Any] = np.pad(
A , A , """constant""" , constant_values=self.padding_value )
elif self.padding_side == "left":
if return_attention_mask:
__snake_case: Dict = np.pad(
processed_features["""attention_mask"""] , (difference, 0) )
__snake_case: Union[str, Any] = ((difference, 0), (0, 0)) if self.feature_size > 1 else (difference, 0)
__snake_case: str = np.pad(
A , A , """constant""" , constant_values=self.padding_value )
else:
raise ValueError("""Invalid padding strategy:""" + str(self.padding_side ) )
return processed_features
def UpperCAmelCase__ ( self : Optional[Any] , A : Union[Dict[str, np.ndarray], BatchFeature] , A : Optional[int] = None , A : Optional[int] = None , A : Optional[bool] = None , ):
if not truncation:
return processed_features
elif truncation and max_length is None:
raise ValueError("""When setting ``truncation=True``, make sure that ``max_length`` is defined.""" )
__snake_case: List[str] = processed_features[self.model_input_names[0]]
# find `max_length` that fits `pad_to_multiple_of`
if max_length is not None and pad_to_multiple_of is not None and (max_length % pad_to_multiple_of != 0):
__snake_case: List[Any] = ((max_length // pad_to_multiple_of) + 1) * pad_to_multiple_of
__snake_case: Tuple = len(A ) > max_length
if needs_to_be_truncated:
__snake_case: List[Any] = processed_features[self.model_input_names[0]][:max_length]
if "attention_mask" in processed_features:
__snake_case: int = processed_features["""attention_mask"""][:max_length]
return processed_features
def UpperCAmelCase__ ( self : int , A : int=False , A : int=None ):
# Get padding strategy
if padding is not False:
if padding is True:
__snake_case: Optional[int] = PaddingStrategy.LONGEST # Default to pad to the longest sequence in the batch
elif not isinstance(A , A ):
__snake_case: Optional[int] = PaddingStrategy(A )
elif isinstance(A , A ):
__snake_case: Any = padding
else:
__snake_case: Any = PaddingStrategy.DO_NOT_PAD
# Set max length if needed
if max_length is None:
if padding_strategy == PaddingStrategy.MAX_LENGTH:
raise ValueError(
f'''When setting ``padding={PaddingStrategy.MAX_LENGTH}``, make sure that max_length is defined''' )
# Test if we have a padding value
if padding_strategy != PaddingStrategy.DO_NOT_PAD and (self.padding_value is None):
raise ValueError(
"""Asking to pad but the feature_extractor does not have a padding value. Please select a value to use"""
""" as `padding_value`. For example: `feature_extractor.padding_value = 0.0`.""" )
return padding_strategy
| 293 | 1 |
"""simple docstring"""
import argparse
from collections import OrderedDict
from pathlib import Path
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from torchvision.transforms import functional as F
from transformers import DetrImageProcessor, TableTransformerConfig, TableTransformerForObjectDetection
from transformers.utils import logging
logging.set_verbosity_info()
lowerCAmelCase__ : Optional[int] = logging.get_logger(__name__)
# here we list all keys to be renamed (original name on the left, our name on the right)
lowerCAmelCase__ : Any = []
for i in range(6):
# encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
rename_keys.append(
(F"""transformer.encoder.layers.{i}.self_attn.out_proj.weight""", F"""encoder.layers.{i}.self_attn.out_proj.weight""")
)
rename_keys.append(
(F"""transformer.encoder.layers.{i}.self_attn.out_proj.bias""", F"""encoder.layers.{i}.self_attn.out_proj.bias""")
)
rename_keys.append((F"""transformer.encoder.layers.{i}.linear1.weight""", F"""encoder.layers.{i}.fc1.weight"""))
rename_keys.append((F"""transformer.encoder.layers.{i}.linear1.bias""", F"""encoder.layers.{i}.fc1.bias"""))
rename_keys.append((F"""transformer.encoder.layers.{i}.linear2.weight""", F"""encoder.layers.{i}.fc2.weight"""))
rename_keys.append((F"""transformer.encoder.layers.{i}.linear2.bias""", F"""encoder.layers.{i}.fc2.bias"""))
rename_keys.append(
(F"""transformer.encoder.layers.{i}.norm1.weight""", F"""encoder.layers.{i}.self_attn_layer_norm.weight""")
)
rename_keys.append((F"""transformer.encoder.layers.{i}.norm1.bias""", F"""encoder.layers.{i}.self_attn_layer_norm.bias"""))
rename_keys.append((F"""transformer.encoder.layers.{i}.norm2.weight""", F"""encoder.layers.{i}.final_layer_norm.weight"""))
rename_keys.append((F"""transformer.encoder.layers.{i}.norm2.bias""", F"""encoder.layers.{i}.final_layer_norm.bias"""))
# decoder layers: 2 times output projection, 2 feedforward neural networks and 3 layernorms
rename_keys.append(
(F"""transformer.decoder.layers.{i}.self_attn.out_proj.weight""", F"""decoder.layers.{i}.self_attn.out_proj.weight""")
)
rename_keys.append(
(F"""transformer.decoder.layers.{i}.self_attn.out_proj.bias""", F"""decoder.layers.{i}.self_attn.out_proj.bias""")
)
rename_keys.append(
(
F"""transformer.decoder.layers.{i}.multihead_attn.out_proj.weight""",
F"""decoder.layers.{i}.encoder_attn.out_proj.weight""",
)
)
rename_keys.append(
(
F"""transformer.decoder.layers.{i}.multihead_attn.out_proj.bias""",
F"""decoder.layers.{i}.encoder_attn.out_proj.bias""",
)
)
rename_keys.append((F"""transformer.decoder.layers.{i}.linear1.weight""", F"""decoder.layers.{i}.fc1.weight"""))
rename_keys.append((F"""transformer.decoder.layers.{i}.linear1.bias""", F"""decoder.layers.{i}.fc1.bias"""))
rename_keys.append((F"""transformer.decoder.layers.{i}.linear2.weight""", F"""decoder.layers.{i}.fc2.weight"""))
rename_keys.append((F"""transformer.decoder.layers.{i}.linear2.bias""", F"""decoder.layers.{i}.fc2.bias"""))
rename_keys.append(
(F"""transformer.decoder.layers.{i}.norm1.weight""", F"""decoder.layers.{i}.self_attn_layer_norm.weight""")
)
rename_keys.append((F"""transformer.decoder.layers.{i}.norm1.bias""", F"""decoder.layers.{i}.self_attn_layer_norm.bias"""))
rename_keys.append(
(F"""transformer.decoder.layers.{i}.norm2.weight""", F"""decoder.layers.{i}.encoder_attn_layer_norm.weight""")
)
rename_keys.append(
(F"""transformer.decoder.layers.{i}.norm2.bias""", F"""decoder.layers.{i}.encoder_attn_layer_norm.bias""")
)
rename_keys.append((F"""transformer.decoder.layers.{i}.norm3.weight""", F"""decoder.layers.{i}.final_layer_norm.weight"""))
rename_keys.append((F"""transformer.decoder.layers.{i}.norm3.bias""", F"""decoder.layers.{i}.final_layer_norm.bias"""))
# convolutional projection + query embeddings + layernorm of encoder + layernorm of decoder + class and bounding box heads
rename_keys.extend(
[
('input_proj.weight', 'input_projection.weight'),
('input_proj.bias', 'input_projection.bias'),
('query_embed.weight', 'query_position_embeddings.weight'),
('transformer.encoder.norm.weight', 'encoder.layernorm.weight'),
('transformer.encoder.norm.bias', 'encoder.layernorm.bias'),
('transformer.decoder.norm.weight', 'decoder.layernorm.weight'),
('transformer.decoder.norm.bias', 'decoder.layernorm.bias'),
('class_embed.weight', 'class_labels_classifier.weight'),
('class_embed.bias', 'class_labels_classifier.bias'),
('bbox_embed.layers.0.weight', 'bbox_predictor.layers.0.weight'),
('bbox_embed.layers.0.bias', 'bbox_predictor.layers.0.bias'),
('bbox_embed.layers.1.weight', 'bbox_predictor.layers.1.weight'),
('bbox_embed.layers.1.bias', 'bbox_predictor.layers.1.bias'),
('bbox_embed.layers.2.weight', 'bbox_predictor.layers.2.weight'),
('bbox_embed.layers.2.bias', 'bbox_predictor.layers.2.bias'),
]
)
def a_ ( lowerCamelCase , lowerCamelCase , lowerCamelCase ):
UpperCAmelCase__ = state_dict.pop(lowerCamelCase )
UpperCAmelCase__ = val
def a_ ( lowerCamelCase ):
UpperCAmelCase__ = OrderedDict()
for key, value in state_dict.items():
if "backbone.0.body" in key:
UpperCAmelCase__ = key.replace('backbone.0.body' , 'backbone.conv_encoder.model' )
UpperCAmelCase__ = value
else:
UpperCAmelCase__ = value
return new_state_dict
def a_ ( lowerCamelCase ):
UpperCAmelCase__ = ''
# first: transformer encoder
for i in range(6 ):
# read in weights + bias of input projection layer (in PyTorch's MultiHeadAttention, this is a single matrix + bias)
UpperCAmelCase__ = state_dict.pop(f'''{prefix}transformer.encoder.layers.{i}.self_attn.in_proj_weight''' )
UpperCAmelCase__ = state_dict.pop(f'''{prefix}transformer.encoder.layers.{i}.self_attn.in_proj_bias''' )
# next, add query, keys and values (in that order) to the state dict
UpperCAmelCase__ = in_proj_weight[:2_5_6, :]
UpperCAmelCase__ = in_proj_bias[:2_5_6]
UpperCAmelCase__ = in_proj_weight[2_5_6:5_1_2, :]
UpperCAmelCase__ = in_proj_bias[2_5_6:5_1_2]
UpperCAmelCase__ = in_proj_weight[-2_5_6:, :]
UpperCAmelCase__ = in_proj_bias[-2_5_6:]
# next: transformer decoder (which is a bit more complex because it also includes cross-attention)
for i in range(6 ):
# read in weights + bias of input projection layer of self-attention
UpperCAmelCase__ = state_dict.pop(f'''{prefix}transformer.decoder.layers.{i}.self_attn.in_proj_weight''' )
UpperCAmelCase__ = state_dict.pop(f'''{prefix}transformer.decoder.layers.{i}.self_attn.in_proj_bias''' )
# next, add query, keys and values (in that order) to the state dict
UpperCAmelCase__ = in_proj_weight[:2_5_6, :]
UpperCAmelCase__ = in_proj_bias[:2_5_6]
UpperCAmelCase__ = in_proj_weight[2_5_6:5_1_2, :]
UpperCAmelCase__ = in_proj_bias[2_5_6:5_1_2]
UpperCAmelCase__ = in_proj_weight[-2_5_6:, :]
UpperCAmelCase__ = in_proj_bias[-2_5_6:]
# read in weights + bias of input projection layer of cross-attention
UpperCAmelCase__ = state_dict.pop(
f'''{prefix}transformer.decoder.layers.{i}.multihead_attn.in_proj_weight''' )
UpperCAmelCase__ = state_dict.pop(f'''{prefix}transformer.decoder.layers.{i}.multihead_attn.in_proj_bias''' )
# next, add query, keys and values (in that order) of cross-attention to the state dict
UpperCAmelCase__ = in_proj_weight_cross_attn[:2_5_6, :]
UpperCAmelCase__ = in_proj_bias_cross_attn[:2_5_6]
UpperCAmelCase__ = in_proj_weight_cross_attn[2_5_6:5_1_2, :]
UpperCAmelCase__ = in_proj_bias_cross_attn[2_5_6:5_1_2]
UpperCAmelCase__ = in_proj_weight_cross_attn[-2_5_6:, :]
UpperCAmelCase__ = in_proj_bias_cross_attn[-2_5_6:]
def a_ ( lowerCamelCase , lowerCamelCase ):
UpperCAmelCase__ , UpperCAmelCase__ = image.size
UpperCAmelCase__ = max(lowerCamelCase , lowerCamelCase )
UpperCAmelCase__ = 8_0_0 if 'detection' in checkpoint_url else 1_0_0_0
UpperCAmelCase__ = target_max_size / current_max_size
UpperCAmelCase__ = image.resize((int(round(scale * width ) ), int(round(scale * height ) )) )
return resized_image
def a_ ( lowerCamelCase ):
UpperCAmelCase__ = F.to_tensor(lowerCamelCase )
UpperCAmelCase__ = F.normalize(lowerCamelCase , mean=[0.485, 0.456, 0.406] , std=[0.229, 0.224, 0.225] )
return image
@torch.no_grad()
def a_ ( lowerCamelCase , lowerCamelCase , lowerCamelCase ):
logger.info('Converting model...' )
# load original state dict
UpperCAmelCase__ = torch.hub.load_state_dict_from_url(lowerCamelCase , map_location='cpu' )
# rename keys
for src, dest in rename_keys:
rename_key(lowerCamelCase , lowerCamelCase , lowerCamelCase )
UpperCAmelCase__ = rename_backbone_keys(lowerCamelCase )
# query, key and value matrices need special treatment
read_in_q_k_v(lowerCamelCase )
# important: we need to prepend a prefix to each of the base model keys as the head models use different attributes for them
UpperCAmelCase__ = 'model.'
for key in state_dict.copy().keys():
if not key.startswith('class_labels_classifier' ) and not key.startswith('bbox_predictor' ):
UpperCAmelCase__ = state_dict.pop(lowerCamelCase )
UpperCAmelCase__ = val
# create HuggingFace model and load state dict
UpperCAmelCase__ = TableTransformerConfig(
backbone='resnet18' , mask_loss_coefficient=1 , dice_loss_coefficient=1 , ce_loss_coefficient=1 , bbox_loss_coefficient=5 , giou_loss_coefficient=2 , eos_coefficient=0.4 , class_cost=1 , bbox_cost=5 , giou_cost=2 , )
if "detection" in checkpoint_url:
UpperCAmelCase__ = 1_5
UpperCAmelCase__ = 2
UpperCAmelCase__ = {0: 'table', 1: 'table rotated'}
UpperCAmelCase__ = idalabel
UpperCAmelCase__ = {v: k for k, v in idalabel.items()}
else:
UpperCAmelCase__ = 1_2_5
UpperCAmelCase__ = 6
UpperCAmelCase__ = {
0: 'table',
1: 'table column',
2: 'table row',
3: 'table column header',
4: 'table projected row header',
5: 'table spanning cell',
}
UpperCAmelCase__ = idalabel
UpperCAmelCase__ = {v: k for k, v in idalabel.items()}
UpperCAmelCase__ = DetrImageProcessor(
format='coco_detection' , max_size=8_0_0 if 'detection' in checkpoint_url else 1_0_0_0 )
UpperCAmelCase__ = TableTransformerForObjectDetection(lowerCamelCase )
model.load_state_dict(lowerCamelCase )
model.eval()
# verify our conversion
UpperCAmelCase__ = 'example_pdf.png' if 'detection' in checkpoint_url else 'example_table.png'
UpperCAmelCase__ = hf_hub_download(repo_id='nielsr/example-pdf' , repo_type='dataset' , filename=lowerCamelCase )
UpperCAmelCase__ = Image.open(lowerCamelCase ).convert('RGB' )
UpperCAmelCase__ = normalize(resize(lowerCamelCase , lowerCamelCase ) ).unsqueeze(0 )
UpperCAmelCase__ = model(lowerCamelCase )
if "detection" in checkpoint_url:
UpperCAmelCase__ = (1, 1_5, 3)
UpperCAmelCase__ = torch.tensor(
[[-6.7897, -16.9985, 6.7937], [-8.0186, -22.2192, 6.9677], [-7.3117, -21.0708, 7.4055]] )
UpperCAmelCase__ = torch.tensor([[0.4867, 0.1767, 0.6732], [0.6718, 0.4479, 0.3830], [0.4716, 0.1760, 0.6364]] )
else:
UpperCAmelCase__ = (1, 1_2_5, 7)
UpperCAmelCase__ = torch.tensor(
[[-18.1430, -8.3214, 4.8274], [-18.4685, -7.1361, -4.2667], [-26.3693, -9.3429, -4.9962]] )
UpperCAmelCase__ = torch.tensor([[0.4983, 0.5595, 0.9440], [0.4916, 0.6315, 0.5954], [0.6108, 0.8637, 0.1135]] )
assert outputs.logits.shape == expected_shape
assert torch.allclose(outputs.logits[0, :3, :3] , lowerCamelCase , atol=1e-4 )
assert torch.allclose(outputs.pred_boxes[0, :3, :3] , lowerCamelCase , atol=1e-4 )
print('Looks ok!' )
if pytorch_dump_folder_path is not None:
# Save model and image processor
logger.info(f'''Saving PyTorch model and image processor to {pytorch_dump_folder_path}...''' )
Path(lowerCamelCase ).mkdir(exist_ok=lowerCamelCase )
model.save_pretrained(lowerCamelCase )
image_processor.save_pretrained(lowerCamelCase )
if push_to_hub:
# Push model to HF hub
logger.info('Pushing model to the hub...' )
UpperCAmelCase__ = (
'microsoft/table-transformer-detection'
if 'detection' in checkpoint_url
else 'microsoft/table-transformer-structure-recognition'
)
model.push_to_hub(lowerCamelCase )
image_processor.push_to_hub(lowerCamelCase )
if __name__ == "__main__":
lowerCAmelCase__ : Dict = argparse.ArgumentParser()
parser.add_argument(
'--checkpoint_url',
default='https://pubtables1m.blob.core.windows.net/model/pubtables1m_detection_detr_r18.pth',
type=str,
choices=[
'https://pubtables1m.blob.core.windows.net/model/pubtables1m_detection_detr_r18.pth',
'https://pubtables1m.blob.core.windows.net/model/pubtables1m_structure_detr_r18.pth',
],
help='URL of the Table Transformer checkpoint you\'d like to convert.',
)
parser.add_argument(
'--pytorch_dump_folder_path', default=None, type=str, help='Path to the folder to output PyTorch model.'
)
parser.add_argument(
'--push_to_hub', action='store_true', help='Whether or not to push the converted model to the 🤗 hub.'
)
lowerCAmelCase__ : Optional[int] = parser.parse_args()
convert_table_transformer_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path, args.push_to_hub)
| 98 |
import unittest
from transformers import SPIECE_UNDERLINE
from transformers.models.speechta import SpeechTaTokenizer
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow
from transformers.tokenization_utils import AddedToken
from ...test_tokenization_common import TokenizerTesterMixin
lowerCAmelCase__ = get_tests_dir("""fixtures/test_sentencepiece_bpe_char.model""")
@require_sentencepiece
@require_tokenizers
class a__ ( snake_case , unittest.TestCase ):
"""simple docstring"""
__lowerCamelCase = SpeechTaTokenizer
__lowerCamelCase = False
__lowerCamelCase = True
def UpperCamelCase ( self ) -> Any:
'''simple docstring'''
super().setUp()
# We have a SentencePiece fixture for testing
A__ = SpeechTaTokenizer(lowercase )
A__ = AddedToken("<mask>" , lstrip=lowercase , rstrip=lowercase )
A__ = mask_token
tokenizer.add_special_tokens({"mask_token": mask_token} )
tokenizer.add_tokens(["<ctc_blank>"] )
tokenizer.save_pretrained(self.tmpdirname )
def UpperCamelCase ( self , lowercase ) -> Union[str, Any]:
'''simple docstring'''
A__ = "this is a test"
A__ = "this is a test"
return input_text, output_text
def UpperCamelCase ( self , lowercase , lowercase=False , lowercase=20 , lowercase=5 ) -> Optional[Any]:
'''simple docstring'''
A__ , A__ = self.get_input_output_texts(lowercase )
A__ = tokenizer.encode(lowercase , add_special_tokens=lowercase )
A__ = tokenizer.decode(lowercase , clean_up_tokenization_spaces=lowercase )
return text, ids
def UpperCamelCase ( self ) -> Union[str, Any]:
'''simple docstring'''
A__ = "<pad>"
A__ = 1
self.assertEqual(self.get_tokenizer()._convert_token_to_id(lowercase ) , lowercase )
self.assertEqual(self.get_tokenizer()._convert_id_to_token(lowercase ) , lowercase )
def UpperCamelCase ( self ) -> List[str]:
'''simple docstring'''
A__ = list(self.get_tokenizer().get_vocab().keys() )
self.assertEqual(vocab_keys[0] , "<s>" )
self.assertEqual(vocab_keys[1] , "<pad>" )
self.assertEqual(vocab_keys[-4] , "œ" )
self.assertEqual(vocab_keys[-2] , "<mask>" )
self.assertEqual(vocab_keys[-1] , "<ctc_blank>" )
self.assertEqual(len(lowercase ) , 81 )
def UpperCamelCase ( self ) -> Dict:
'''simple docstring'''
self.assertEqual(self.get_tokenizer().vocab_size , 79 )
def UpperCamelCase ( self ) -> Optional[int]:
'''simple docstring'''
A__ = self.get_tokenizers(do_lower_case=lowercase )
for tokenizer in tokenizers:
with self.subTest(F'{tokenizer.__class__.__name__}' ):
A__ = tokenizer.vocab_size
A__ = len(lowercase )
self.assertNotEqual(lowercase , 0 )
# We usually have added tokens from the start in tests because our vocab fixtures are
# smaller than the original vocabs - let's not assert this
# self.assertEqual(vocab_size, all_size)
A__ = ["aaaaa bbbbbb", "cccccccccdddddddd"]
A__ = tokenizer.add_tokens(lowercase )
A__ = tokenizer.vocab_size
A__ = len(lowercase )
self.assertNotEqual(lowercase , 0 )
self.assertEqual(lowercase , lowercase )
self.assertEqual(lowercase , len(lowercase ) )
self.assertEqual(lowercase , all_size + len(lowercase ) )
A__ = tokenizer.encode("aaaaa bbbbbb low cccccccccdddddddd l" , add_special_tokens=lowercase )
self.assertGreaterEqual(len(lowercase ) , 4 )
self.assertGreater(tokens[0] , tokenizer.vocab_size - 1 )
self.assertGreater(tokens[-3] , tokenizer.vocab_size - 1 )
A__ = {"eos_token": ">>>>|||<||<<|<<", "pad_token": "<<<<<|||>|>>>>|>"}
A__ = tokenizer.add_special_tokens(lowercase )
A__ = tokenizer.vocab_size
A__ = len(lowercase )
self.assertNotEqual(lowercase , 0 )
self.assertEqual(lowercase , lowercase )
self.assertEqual(lowercase , len(lowercase ) )
self.assertEqual(lowercase , all_size_a + len(lowercase ) )
A__ = tokenizer.encode(
">>>>|||<||<<|<< aaaaabbbbbb low cccccccccdddddddd <<<<<|||>|>>>>|> l" , add_special_tokens=lowercase )
self.assertGreaterEqual(len(lowercase ) , 6 )
self.assertGreater(tokens[0] , tokenizer.vocab_size - 1 )
self.assertGreater(tokens[0] , tokens[1] )
self.assertGreater(tokens[-3] , tokenizer.vocab_size - 1 )
self.assertGreater(tokens[-3] , tokens[-4] )
self.assertEqual(tokens[0] , tokenizer.eos_token_id )
self.assertEqual(tokens[-3] , tokenizer.pad_token_id )
def UpperCamelCase ( self ) -> Tuple:
'''simple docstring'''
pass
def UpperCamelCase ( self ) -> Any:
'''simple docstring'''
pass
def UpperCamelCase ( self ) -> List[Any]:
'''simple docstring'''
A__ = self.get_tokenizer()
A__ = tokenizer.tokenize("This is a test" )
# fmt: off
self.assertListEqual(lowercase , [SPIECE_UNDERLINE, "T", "h", "i", "s", SPIECE_UNDERLINE, "i", "s", SPIECE_UNDERLINE, "a", SPIECE_UNDERLINE, "t", "e", "s", "t"] )
# fmt: on
self.assertListEqual(
tokenizer.convert_tokens_to_ids(lowercase ) , [4, 32, 11, 10, 12, 4, 10, 12, 4, 7, 4, 6, 5, 12, 6] , )
A__ = tokenizer.tokenize("I was born in 92000, and this is falsé." )
self.assertListEqual(
lowercase , [SPIECE_UNDERLINE, "I", SPIECE_UNDERLINE, "w", "a", "s", SPIECE_UNDERLINE, "b", "o", "r", "n", SPIECE_UNDERLINE, "i", "n", SPIECE_UNDERLINE, "92000", ",", SPIECE_UNDERLINE, "a", "n", "d", SPIECE_UNDERLINE, "t", "h", "i", "s", SPIECE_UNDERLINE, "i", "s", SPIECE_UNDERLINE, "f", "a", "l", "s", "é", "."] )
A__ = tokenizer.convert_tokens_to_ids(lowercase )
# fmt: off
self.assertListEqual(lowercase , [4, 30, 4, 20, 7, 12, 4, 25, 8, 13, 9, 4, 10, 9, 4, 3, 23, 4, 7, 9, 14, 4, 6, 11, 10, 12, 4, 10, 12, 4, 19, 7, 15, 12, 73, 26] )
# fmt: on
A__ = tokenizer.convert_ids_to_tokens(lowercase )
self.assertListEqual(
lowercase , [SPIECE_UNDERLINE, "I", SPIECE_UNDERLINE, "w", "a", "s", SPIECE_UNDERLINE, "b", "o", "r", "n", SPIECE_UNDERLINE, "i", "n", SPIECE_UNDERLINE, "<unk>", ",", SPIECE_UNDERLINE, "a", "n", "d", SPIECE_UNDERLINE, "t", "h", "i", "s", SPIECE_UNDERLINE, "i", "s", SPIECE_UNDERLINE, "f", "a", "l", "s", "é", "."] )
@slow
def UpperCamelCase ( self ) -> int:
'''simple docstring'''
A__ = [
"Transformers (formerly known as pytorch-transformers and pytorch-pretrained-bert) provides "
"general-purpose architectures (BERT, GPT, RoBERTa, XLM, DistilBert, XLNet...) for Natural "
"Language Understanding (NLU) and Natural Language Generation (NLG) with over thirty-two pretrained "
"models in one hundred plus languages and deep interoperability between Jax, PyTorch and TensorFlow.",
"BERT is designed to pre-train deep bidirectional representations from unlabeled text by jointly "
"conditioning on both left and right context in all layers.",
"The quick brown fox jumps over the lazy dog.",
]
# fmt: off
A__ = {
"input_ids": [
[4, 32, 13, 7, 9, 12, 19, 8, 13, 18, 5, 13, 12, 4, 64, 19, 8, 13, 18, 5, 13, 15, 22, 4, 28, 9, 8, 20, 9, 4, 7, 12, 4, 24, 22, 6, 8, 13, 17, 11, 39, 6, 13, 7, 9, 12, 19, 8, 13, 18, 5, 13, 12, 4, 7, 9, 14, 4, 24, 22, 6, 8, 13, 17, 11, 39, 24, 13, 5, 6, 13, 7, 10, 9, 5, 14, 39, 25, 5, 13, 6, 63, 4, 24, 13, 8, 27, 10, 14, 5, 12, 4, 21, 5, 9, 5, 13, 7, 15, 39, 24, 16, 13, 24, 8, 12, 5, 4, 7, 13, 17, 11, 10, 6, 5, 17, 6, 16, 13, 5, 12, 4, 64, 40, 47, 54, 32, 23, 4, 53, 49, 32, 23, 4, 54, 8, 40, 47, 54, 32, 7, 23, 4, 69, 52, 43, 23, 4, 51, 10, 12, 6, 10, 15, 40, 5, 13, 6, 23, 4, 69, 52, 48, 5, 6, 26, 26, 26, 63, 4, 19, 8, 13, 4, 48, 7, 6, 16, 13, 7, 15, 4, 52, 7, 9, 21, 16, 7, 21, 5, 4, 61, 9, 14, 5, 13, 12, 6, 7, 9, 14, 10, 9, 21, 4, 64, 48, 52, 61, 63, 4, 7, 9, 14, 4, 48, 7, 6, 16, 13, 7, 15, 4, 52, 7, 9, 21, 16, 7, 21, 5, 4, 53, 5, 9, 5, 13, 7, 6, 10, 8, 9, 4, 64, 48, 52, 53, 63, 4, 20, 10, 6, 11, 4, 8, 27, 5, 13, 4, 6, 11, 10, 13, 6, 22, 39, 6, 20, 8, 4, 24, 13, 5, 6, 13, 7, 10, 9, 5, 14, 4, 18, 8, 14, 5, 15, 12, 4, 10, 9, 4, 8, 9, 5, 4, 11, 16, 9, 14, 13, 5, 14, 4, 24, 15, 16, 12, 4, 15, 7, 9, 21, 16, 7, 21, 5, 12, 4, 7, 9, 14, 4, 14, 5, 5, 24, 4, 10, 9, 6, 5, 13, 8, 24, 5, 13, 7, 25, 10, 15, 10, 6, 22, 4, 25, 5, 6, 20, 5, 5, 9, 4, 58, 7, 37, 23, 4, 49, 22, 32, 8, 13, 17, 11, 4, 7, 9, 14, 4, 32, 5, 9, 12, 8, 13, 55, 15, 8, 20, 26, 2],
[4, 40, 47, 54, 32, 4, 10, 12, 4, 14, 5, 12, 10, 21, 9, 5, 14, 4, 6, 8, 4, 24, 13, 5, 39, 6, 13, 7, 10, 9, 4, 14, 5, 5, 24, 4, 25, 10, 14, 10, 13, 5, 17, 6, 10, 8, 9, 7, 15, 4, 13, 5, 24, 13, 5, 12, 5, 9, 6, 7, 6, 10, 8, 9, 12, 4, 19, 13, 8, 18, 4, 16, 9, 15, 7, 25, 5, 15, 5, 14, 4, 6, 5, 37, 6, 4, 25, 22, 4, 46, 8, 10, 9, 6, 15, 22, 4, 17, 8, 9, 14, 10, 6, 10, 8, 9, 10, 9, 21, 4, 8, 9, 4, 25, 8, 6, 11, 4, 15, 5, 19, 6, 4, 7, 9, 14, 4, 13, 10, 21, 11, 6, 4, 17, 8, 9, 6, 5, 37, 6, 4, 10, 9, 4, 7, 15, 15, 4, 15, 7, 22, 5, 13, 12, 26, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1],
[4, 32, 11, 5, 4, 45, 16, 10, 17, 28, 4, 25, 13, 8, 20, 9, 4, 19, 8, 37, 4, 46, 16, 18, 24, 12, 4, 8, 27, 5, 13, 4, 6, 11, 5, 4, 15, 7, 57, 22, 4, 14, 8, 21, 26, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1],
],
"attention_mask": [
[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1],
[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
]
}
# fmt: on
self.tokenizer_integration_test_util(
expected_encoding=lowercase , model_name="microsoft/speecht5_asr" , revision="c5ef64c71905caeccde0e4462ef3f9077224c524" , sequences=lowercase , )
| 68 | 0 |
'''simple docstring'''
from ...utils import deprecate
from ..controlnet.pipeline_flax_controlnet import FlaxStableDiffusionControlNetPipeline # noqa: F401
deprecate(
"""stable diffusion controlnet""",
"""0.22.0""",
"""Importing `FlaxStableDiffusionControlNetPipeline` from diffusers.pipelines.stable_diffusion.flax_pipeline_stable_diffusion_controlnet is deprecated. Please import `from diffusers import FlaxStableDiffusionControlNetPipeline` instead.""",
standard_warn=False,
stacklevel=3,
)
| 351 |
'''simple docstring'''
import functools
import operator
from ...configuration_utils import PretrainedConfig
from ...utils import logging
lowerCamelCase = logging.get_logger(__name__)
lowerCamelCase = {
"""microsoft/wavlm-base""": """https://huggingface.co/microsoft/wavlm-base/resolve/main/config.json""",
# See all WavLM models at https://huggingface.co/models?filter=wavlm
}
class _UpperCamelCase ( A ):
'''simple docstring'''
lowerCAmelCase__ = """wavlm"""
def __init__( self : List[str] , _lowerCAmelCase : List[Any]=3_2 , _lowerCAmelCase : int=7_6_8 , _lowerCAmelCase : Any=1_2 , _lowerCAmelCase : Union[str, Any]=1_2 , _lowerCAmelCase : List[Any]=3_0_7_2 , _lowerCAmelCase : Dict="gelu" , _lowerCAmelCase : Any=0.1 , _lowerCAmelCase : Any=0.1 , _lowerCAmelCase : Optional[Any]=0.1 , _lowerCAmelCase : List[Any]=0.0 , _lowerCAmelCase : str=0.1 , _lowerCAmelCase : Dict=0.1 , _lowerCAmelCase : List[Any]=0.02 , _lowerCAmelCase : Dict=1e-5 , _lowerCAmelCase : List[Any]="group" , _lowerCAmelCase : Optional[Any]="gelu" , _lowerCAmelCase : Dict=(5_1_2, 5_1_2, 5_1_2, 5_1_2, 5_1_2, 5_1_2, 5_1_2) , _lowerCAmelCase : Any=(5, 2, 2, 2, 2, 2, 2) , _lowerCAmelCase : Optional[Any]=(1_0, 3, 3, 3, 3, 2, 2) , _lowerCAmelCase : Optional[int]=False , _lowerCAmelCase : int=1_2_8 , _lowerCAmelCase : Tuple=1_6 , _lowerCAmelCase : Optional[int]=3_2_0 , _lowerCAmelCase : Union[str, Any]=8_0_0 , _lowerCAmelCase : Optional[Any]=False , _lowerCAmelCase : Union[str, Any]=True , _lowerCAmelCase : Any=0.05 , _lowerCAmelCase : List[Any]=1_0 , _lowerCAmelCase : Any=2 , _lowerCAmelCase : List[Any]=0.0 , _lowerCAmelCase : Union[str, Any]=1_0 , _lowerCAmelCase : List[Any]=3_2_0 , _lowerCAmelCase : int=2 , _lowerCAmelCase : Dict=0.1 , _lowerCAmelCase : Optional[int]=1_0_0 , _lowerCAmelCase : Tuple=2_5_6 , _lowerCAmelCase : Union[str, Any]=2_5_6 , _lowerCAmelCase : Any=0.1 , _lowerCAmelCase : Tuple="mean" , _lowerCAmelCase : Any=False , _lowerCAmelCase : Union[str, Any]=False , _lowerCAmelCase : Any=2_5_6 , _lowerCAmelCase : Tuple=(5_1_2, 5_1_2, 5_1_2, 5_1_2, 1_5_0_0) , _lowerCAmelCase : Dict=(5, 3, 3, 1, 1) , _lowerCAmelCase : Dict=(1, 2, 3, 1, 1) , _lowerCAmelCase : int=5_1_2 , _lowerCAmelCase : Optional[int]=8_0 , _lowerCAmelCase : Any=0 , _lowerCAmelCase : int=1 , _lowerCAmelCase : Tuple=2 , _lowerCAmelCase : List[str]=False , _lowerCAmelCase : Any=3 , _lowerCAmelCase : List[Any]=2 , _lowerCAmelCase : List[Any]=3 , _lowerCAmelCase : List[str]=None , **_lowerCAmelCase : List[str] , ):
'''simple docstring'''
super().__init__(**_lowerCAmelCase , pad_token_id=_lowerCAmelCase , bos_token_id=_lowerCAmelCase , eos_token_id=_lowerCAmelCase)
__lowercase =hidden_size
__lowercase =feat_extract_norm
__lowercase =feat_extract_activation
__lowercase =list(_lowerCAmelCase)
__lowercase =list(_lowerCAmelCase)
__lowercase =list(_lowerCAmelCase)
__lowercase =conv_bias
__lowercase =num_buckets
__lowercase =max_bucket_distance
__lowercase =num_conv_pos_embeddings
__lowercase =num_conv_pos_embedding_groups
__lowercase =len(self.conv_dim)
__lowercase =num_hidden_layers
__lowercase =intermediate_size
__lowercase =hidden_act
__lowercase =num_attention_heads
__lowercase =hidden_dropout
__lowercase =attention_dropout
__lowercase =activation_dropout
__lowercase =feat_proj_dropout
__lowercase =final_dropout
__lowercase =layerdrop
__lowercase =layer_norm_eps
__lowercase =initializer_range
__lowercase =num_ctc_classes
__lowercase =vocab_size
__lowercase =do_stable_layer_norm
__lowercase =use_weighted_layer_sum
__lowercase =classifier_proj_size
if (
(len(self.conv_stride) != self.num_feat_extract_layers)
or (len(self.conv_kernel) != self.num_feat_extract_layers)
or (len(self.conv_dim) != self.num_feat_extract_layers)
):
raise ValueError(
'Configuration for convolutional layers is incorrect. It is required that `len(config.conv_dim)` =='
' `len(config.conv_stride)` == `len(config.conv_kernel)`, but is `len(config.conv_dim) ='
f""" {len(self.conv_dim)}`, `len(config.conv_stride) = {len(self.conv_stride)}`,"""
f""" `len(config.conv_kernel) = {len(self.conv_kernel)}`.""")
# fine-tuning config parameters for SpecAugment: https://arxiv.org/abs/1904.08779
__lowercase =apply_spec_augment
__lowercase =mask_time_prob
__lowercase =mask_time_length
__lowercase =mask_time_min_masks
__lowercase =mask_feature_prob
__lowercase =mask_feature_length
# parameters for pretraining with codevector quantized representations
__lowercase =num_codevectors_per_group
__lowercase =num_codevector_groups
__lowercase =contrastive_logits_temperature
__lowercase =num_negatives
__lowercase =codevector_dim
__lowercase =proj_codevector_dim
__lowercase =diversity_loss_weight
# ctc loss
__lowercase =ctc_loss_reduction
__lowercase =ctc_zero_infinity
# adapter
__lowercase =add_adapter
__lowercase =adapter_kernel_size
__lowercase =adapter_stride
__lowercase =num_adapter_layers
__lowercase =output_hidden_size or hidden_size
# SequenceClassification-specific parameter. Feel free to ignore for other classes.
__lowercase =classifier_proj_size
# XVector-specific parameters. Feel free to ignore for other classes.
__lowercase =list(_lowerCAmelCase)
__lowercase =list(_lowerCAmelCase)
__lowercase =list(_lowerCAmelCase)
__lowercase =xvector_output_dim
@property
def __lowerCamelCase ( self : Optional[int]):
'''simple docstring'''
return functools.reduce(operator.mul , self.conv_stride , 1)
| 48 | 0 |
'''simple docstring'''
import os
lowerCAmelCase__ = {'''I''': 1, '''V''': 5, '''X''': 10, '''L''': 50, '''C''': 100, '''D''': 500, '''M''': 1000}
def _A ( A__ ):
"""simple docstring"""
__lowercase = 0
__lowercase = 0
while index < len(A__ ) - 1:
__lowercase = SYMBOLS[numerals[index]]
__lowercase = SYMBOLS[numerals[index + 1]]
if current_value < next_value:
total_value -= current_value
else:
total_value += current_value
index += 1
total_value += SYMBOLS[numerals[index]]
return total_value
def _A ( A__ ):
"""simple docstring"""
__lowercase = ''''''
__lowercase = num // 1000
numerals += m_count * "M"
num %= 1000
__lowercase = num // 100
if c_count == 9:
numerals += "CM"
c_count -= 9
elif c_count == 4:
numerals += "CD"
c_count -= 4
if c_count >= 5:
numerals += "D"
c_count -= 5
numerals += c_count * "C"
num %= 100
__lowercase = num // 10
if x_count == 9:
numerals += "XC"
x_count -= 9
elif x_count == 4:
numerals += "XL"
x_count -= 4
if x_count >= 5:
numerals += "L"
x_count -= 5
numerals += x_count * "X"
num %= 10
if num == 9:
numerals += "IX"
num -= 9
elif num == 4:
numerals += "IV"
num -= 4
if num >= 5:
numerals += "V"
num -= 5
numerals += num * "I"
return numerals
def _A ( A__ = "/p089_roman.txt" ):
"""simple docstring"""
__lowercase = 0
with open(os.path.dirname(A__ ) + roman_numerals_filename ) as filea:
__lowercase = filea.readlines()
for line in lines:
__lowercase = line.strip()
__lowercase = parse_roman_numerals(A__ )
__lowercase = generate_roman_numerals(A__ )
savings += len(A__ ) - len(A__ )
return savings
if __name__ == "__main__":
print(f'{solution() = }')
| 104 |
'''simple docstring'''
import argparse
import json
import logging
import os
import shutil
import sys
import tempfile
import unittest
from unittest import mock
import torch
from accelerate.utils import write_basic_config
from transformers.testing_utils import TestCasePlus, get_gpu_count, run_command, slow, torch_device
from transformers.utils import is_apex_available
logging.basicConfig(level=logging.DEBUG)
lowerCAmelCase__ = logging.getLogger()
def _A ( ):
"""simple docstring"""
__lowercase = argparse.ArgumentParser()
parser.add_argument('''-f''' )
__lowercase = parser.parse_args()
return args.f
def _A ( A__ ):
"""simple docstring"""
__lowercase = {}
__lowercase = os.path.join(A__ , '''all_results.json''' )
if os.path.exists(A__ ):
with open(A__ , '''r''' ) as f:
__lowercase = json.load(A__ )
else:
raise ValueError(F"can't find {path}" )
return results
def _A ( ):
"""simple docstring"""
__lowercase = torch.cuda.is_available() and torch_device == '''cuda'''
return is_using_cuda and is_apex_available()
lowerCAmelCase__ = logging.StreamHandler(sys.stdout)
logger.addHandler(stream_handler)
class lowercase_ (lowerCamelCase__ ):
"""simple docstring"""
@classmethod
def SCREAMING_SNAKE_CASE ( cls : List[str] ):
# Write Accelerate config, will pick up on CPU, GPU, and multi-GPU
__lowercase = tempfile.mkdtemp()
__lowercase = os.path.join(cls.tmpdir ,'''default_config.yml''' )
write_basic_config(save_location=cls.configPath )
__lowercase = ['''accelerate''', '''launch''', '''--config_file''', cls.configPath]
@classmethod
def SCREAMING_SNAKE_CASE ( cls : str ):
shutil.rmtree(cls.tmpdir )
@mock.patch.dict(os.environ ,{'''WANDB_MODE''': '''offline'''} )
def SCREAMING_SNAKE_CASE ( self : Any ):
__lowercase = self.get_auto_remove_tmp_dir()
__lowercase = F"\n {self.examples_dir}/pytorch/text-classification/run_glue_no_trainer.py\n --model_name_or_path distilbert-base-uncased\n --output_dir {tmp_dir}\n --train_file ./tests/fixtures/tests_samples/MRPC/train.csv\n --validation_file ./tests/fixtures/tests_samples/MRPC/dev.csv\n --per_device_train_batch_size=2\n --per_device_eval_batch_size=1\n --learning_rate=1e-4\n --seed=42\n --checkpointing_steps epoch\n --with_tracking\n ".split()
if is_cuda_and_apex_available():
testargs.append('''--fp16''' )
run_command(self._launch_args + testargs )
__lowercase = get_results(lowercase__ )
self.assertGreaterEqual(result['''eval_accuracy'''] ,0.7_5 )
self.assertTrue(os.path.exists(os.path.join(lowercase__ ,'''epoch_0''' ) ) )
self.assertTrue(os.path.exists(os.path.join(lowercase__ ,'''glue_no_trainer''' ) ) )
@mock.patch.dict(os.environ ,{'''WANDB_MODE''': '''offline'''} )
def SCREAMING_SNAKE_CASE ( self : Optional[int] ):
__lowercase = self.get_auto_remove_tmp_dir()
__lowercase = F"\n {self.examples_dir}/pytorch/language-modeling/run_clm_no_trainer.py\n --model_name_or_path distilgpt2\n --train_file ./tests/fixtures/sample_text.txt\n --validation_file ./tests/fixtures/sample_text.txt\n --block_size 128\n --per_device_train_batch_size 5\n --per_device_eval_batch_size 5\n --num_train_epochs 2\n --output_dir {tmp_dir}\n --checkpointing_steps epoch\n --with_tracking\n ".split()
if torch.cuda.device_count() > 1:
# Skipping because there are not enough batches to train the model + would need a drop_last to work.
return
run_command(self._launch_args + testargs )
__lowercase = get_results(lowercase__ )
self.assertLess(result['''perplexity'''] ,1_0_0 )
self.assertTrue(os.path.exists(os.path.join(lowercase__ ,'''epoch_0''' ) ) )
self.assertTrue(os.path.exists(os.path.join(lowercase__ ,'''clm_no_trainer''' ) ) )
@mock.patch.dict(os.environ ,{'''WANDB_MODE''': '''offline'''} )
def SCREAMING_SNAKE_CASE ( self : Optional[int] ):
__lowercase = self.get_auto_remove_tmp_dir()
__lowercase = F"\n {self.examples_dir}/pytorch/language-modeling/run_mlm_no_trainer.py\n --model_name_or_path distilroberta-base\n --train_file ./tests/fixtures/sample_text.txt\n --validation_file ./tests/fixtures/sample_text.txt\n --output_dir {tmp_dir}\n --num_train_epochs=1\n --checkpointing_steps epoch\n --with_tracking\n ".split()
run_command(self._launch_args + testargs )
__lowercase = get_results(lowercase__ )
self.assertLess(result['''perplexity'''] ,4_2 )
self.assertTrue(os.path.exists(os.path.join(lowercase__ ,'''epoch_0''' ) ) )
self.assertTrue(os.path.exists(os.path.join(lowercase__ ,'''mlm_no_trainer''' ) ) )
@mock.patch.dict(os.environ ,{'''WANDB_MODE''': '''offline'''} )
def SCREAMING_SNAKE_CASE ( self : Tuple ):
# with so little data distributed training needs more epochs to get the score on par with 0/1 gpu
__lowercase = 7 if get_gpu_count() > 1 else 2
__lowercase = self.get_auto_remove_tmp_dir()
__lowercase = F"\n {self.examples_dir}/pytorch/token-classification/run_ner_no_trainer.py\n --model_name_or_path bert-base-uncased\n --train_file tests/fixtures/tests_samples/conll/sample.json\n --validation_file tests/fixtures/tests_samples/conll/sample.json\n --output_dir {tmp_dir}\n --learning_rate=2e-4\n --per_device_train_batch_size=2\n --per_device_eval_batch_size=2\n --num_train_epochs={epochs}\n --seed 7\n --checkpointing_steps epoch\n --with_tracking\n ".split()
run_command(self._launch_args + testargs )
__lowercase = get_results(lowercase__ )
self.assertGreaterEqual(result['''eval_accuracy'''] ,0.7_5 )
self.assertLess(result['''train_loss'''] ,0.5 )
self.assertTrue(os.path.exists(os.path.join(lowercase__ ,'''epoch_0''' ) ) )
self.assertTrue(os.path.exists(os.path.join(lowercase__ ,'''ner_no_trainer''' ) ) )
@unittest.skip(reason='''Fix me @muellerzr''' )
@mock.patch.dict(os.environ ,{'''WANDB_MODE''': '''offline'''} )
def SCREAMING_SNAKE_CASE ( self : Optional[Any] ):
__lowercase = self.get_auto_remove_tmp_dir()
__lowercase = F"\n {self.examples_dir}/pytorch/question-answering/run_qa_no_trainer.py\n --model_name_or_path bert-base-uncased\n --version_2_with_negative\n --train_file tests/fixtures/tests_samples/SQUAD/sample.json\n --validation_file tests/fixtures/tests_samples/SQUAD/sample.json\n --output_dir {tmp_dir}\n --seed=42\n --max_train_steps=10\n --num_warmup_steps=2\n --learning_rate=2e-4\n --per_device_train_batch_size=2\n --per_device_eval_batch_size=1\n --checkpointing_steps epoch\n --with_tracking\n ".split()
run_command(self._launch_args + testargs )
__lowercase = get_results(lowercase__ )
# Because we use --version_2_with_negative the testing script uses SQuAD v2 metrics.
self.assertGreaterEqual(result['''eval_f1'''] ,2_8 )
self.assertGreaterEqual(result['''eval_exact'''] ,2_8 )
self.assertTrue(os.path.exists(os.path.join(lowercase__ ,'''epoch_0''' ) ) )
self.assertTrue(os.path.exists(os.path.join(lowercase__ ,'''qa_no_trainer''' ) ) )
@mock.patch.dict(os.environ ,{'''WANDB_MODE''': '''offline'''} )
def SCREAMING_SNAKE_CASE ( self : Dict ):
__lowercase = self.get_auto_remove_tmp_dir()
__lowercase = F"\n {self.examples_dir}/pytorch/multiple-choice/run_swag_no_trainer.py\n --model_name_or_path bert-base-uncased\n --train_file tests/fixtures/tests_samples/swag/sample.json\n --validation_file tests/fixtures/tests_samples/swag/sample.json\n --output_dir {tmp_dir}\n --max_train_steps=20\n --num_warmup_steps=2\n --learning_rate=2e-4\n --per_device_train_batch_size=2\n --per_device_eval_batch_size=1\n --with_tracking\n ".split()
run_command(self._launch_args + testargs )
__lowercase = get_results(lowercase__ )
self.assertGreaterEqual(result['''eval_accuracy'''] ,0.8 )
self.assertTrue(os.path.exists(os.path.join(lowercase__ ,'''swag_no_trainer''' ) ) )
@slow
@mock.patch.dict(os.environ ,{'''WANDB_MODE''': '''offline'''} )
def SCREAMING_SNAKE_CASE ( self : List[str] ):
__lowercase = self.get_auto_remove_tmp_dir()
__lowercase = F"\n {self.examples_dir}/pytorch/summarization/run_summarization_no_trainer.py\n --model_name_or_path t5-small\n --train_file tests/fixtures/tests_samples/xsum/sample.json\n --validation_file tests/fixtures/tests_samples/xsum/sample.json\n --output_dir {tmp_dir}\n --max_train_steps=50\n --num_warmup_steps=8\n --learning_rate=2e-4\n --per_device_train_batch_size=2\n --per_device_eval_batch_size=1\n --checkpointing_steps epoch\n --with_tracking\n ".split()
run_command(self._launch_args + testargs )
__lowercase = get_results(lowercase__ )
self.assertGreaterEqual(result['''eval_rouge1'''] ,1_0 )
self.assertGreaterEqual(result['''eval_rouge2'''] ,2 )
self.assertGreaterEqual(result['''eval_rougeL'''] ,7 )
self.assertGreaterEqual(result['''eval_rougeLsum'''] ,7 )
self.assertTrue(os.path.exists(os.path.join(lowercase__ ,'''epoch_0''' ) ) )
self.assertTrue(os.path.exists(os.path.join(lowercase__ ,'''summarization_no_trainer''' ) ) )
@slow
@mock.patch.dict(os.environ ,{'''WANDB_MODE''': '''offline'''} )
def SCREAMING_SNAKE_CASE ( self : List[Any] ):
__lowercase = self.get_auto_remove_tmp_dir()
__lowercase = F"\n {self.examples_dir}/pytorch/translation/run_translation_no_trainer.py\n --model_name_or_path sshleifer/student_marian_en_ro_6_1\n --source_lang en\n --target_lang ro\n --train_file tests/fixtures/tests_samples/wmt16/sample.json\n --validation_file tests/fixtures/tests_samples/wmt16/sample.json\n --output_dir {tmp_dir}\n --max_train_steps=50\n --num_warmup_steps=8\n --num_beams=6\n --learning_rate=3e-3\n --per_device_train_batch_size=2\n --per_device_eval_batch_size=1\n --source_lang en_XX\n --target_lang ro_RO\n --checkpointing_steps epoch\n --with_tracking\n ".split()
run_command(self._launch_args + testargs )
__lowercase = get_results(lowercase__ )
self.assertGreaterEqual(result['''eval_bleu'''] ,3_0 )
self.assertTrue(os.path.exists(os.path.join(lowercase__ ,'''epoch_0''' ) ) )
self.assertTrue(os.path.exists(os.path.join(lowercase__ ,'''translation_no_trainer''' ) ) )
@slow
def SCREAMING_SNAKE_CASE ( self : str ):
__lowercase = logging.StreamHandler(sys.stdout )
logger.addHandler(lowercase__ )
__lowercase = self.get_auto_remove_tmp_dir()
__lowercase = F"\n {self.examples_dir}/pytorch/semantic-segmentation/run_semantic_segmentation_no_trainer.py\n --dataset_name huggingface/semantic-segmentation-test-sample\n --output_dir {tmp_dir}\n --max_train_steps=10\n --num_warmup_steps=2\n --learning_rate=2e-4\n --per_device_train_batch_size=2\n --per_device_eval_batch_size=1\n --checkpointing_steps epoch\n ".split()
run_command(self._launch_args + testargs )
__lowercase = get_results(lowercase__ )
self.assertGreaterEqual(result['''eval_overall_accuracy'''] ,0.1_0 )
@mock.patch.dict(os.environ ,{'''WANDB_MODE''': '''offline'''} )
def SCREAMING_SNAKE_CASE ( self : Tuple ):
__lowercase = self.get_auto_remove_tmp_dir()
__lowercase = F"\n {self.examples_dir}/pytorch/image-classification/run_image_classification_no_trainer.py\n --model_name_or_path google/vit-base-patch16-224-in21k\n --dataset_name hf-internal-testing/cats_vs_dogs_sample\n --learning_rate 1e-4\n --per_device_train_batch_size 2\n --per_device_eval_batch_size 1\n --max_train_steps 2\n --train_val_split 0.1\n --seed 42\n --output_dir {tmp_dir}\n --with_tracking\n --checkpointing_steps 1\n ".split()
if is_cuda_and_apex_available():
testargs.append('''--fp16''' )
run_command(self._launch_args + testargs )
__lowercase = get_results(lowercase__ )
# The base model scores a 25%
self.assertGreaterEqual(result['''eval_accuracy'''] ,0.6 )
self.assertTrue(os.path.exists(os.path.join(lowercase__ ,'''step_1''' ) ) )
self.assertTrue(os.path.exists(os.path.join(lowercase__ ,'''image_classification_no_trainer''' ) ) )
| 104 | 1 |
import dataclasses
import re
import string
from typing import Any, Dict, Iterator, List, Mapping, Optional, Sequence, Tuple
import numpy as np
from . import residue_constants
lowercase_ = Mapping[str, np.ndarray]
lowercase_ = Mapping[str, Any] # Is a nested dict.
lowercase_ = 0.01
@dataclasses.dataclass(frozen=UpperCAmelCase )
class SCREAMING_SNAKE_CASE :
_UpperCamelCase : np.ndarray # [num_res, num_atom_type, 3]
# Amino-acid type for each residue represented as an integer between 0 and
# 20, where 20 is 'X'.
_UpperCamelCase : np.ndarray # [num_res]
# Binary float mask to indicate presence of a particular atom. 1.0 if an atom
# is present and 0.0 if not. This should be used for loss masking.
_UpperCamelCase : np.ndarray # [num_res, num_atom_type]
# Residue index as used in PDB. It is not necessarily continuous or 0-indexed.
_UpperCamelCase : np.ndarray # [num_res]
# B-factors, or temperature factors, of each residue (in sq. angstroms units),
# representing the displacement of the residue from its ground truth mean
# value.
_UpperCamelCase : np.ndarray # [num_res, num_atom_type]
# Chain indices for multi-chain predictions
_UpperCamelCase : Optional[np.ndarray] = None
# Optional remark about the protein. Included as a comment in output PDB
# files
_UpperCamelCase : Optional[str] = None
# Templates used to generate this protein (prediction-only)
_UpperCamelCase : Optional[Sequence[str]] = None
# Chain corresponding to each parent
_UpperCamelCase : Optional[Sequence[int]] = None
def __UpperCamelCase (_SCREAMING_SNAKE_CASE ) -> Protein:
lowercase__ = R'(\[[A-Z]+\]\n)'
lowercase__ = [tag.strip() for tag in re.split(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) if len(_SCREAMING_SNAKE_CASE ) > 0]
lowercase__ = zip(tags[0::2] , [l.split('\n' ) for l in tags[1::2]] )
lowercase__ = ["N", "CA", "C"]
lowercase__ = None
lowercase__ = None
lowercase__ = None
for g in groups:
if "[PRIMARY]" == g[0]:
lowercase__ = g[1][0].strip()
for i in range(len(_SCREAMING_SNAKE_CASE ) ):
if seq[i] not in residue_constants.restypes:
lowercase__ = 'X' # FIXME: strings are immutable
lowercase__ = np.array(
[residue_constants.restype_order.get(_SCREAMING_SNAKE_CASE , residue_constants.restype_num ) for res_symbol in seq] )
elif "[TERTIARY]" == g[0]:
lowercase__ = []
for axis in range(3 ):
tertiary.append(list(map(_SCREAMING_SNAKE_CASE , g[1][axis].split() ) ) )
lowercase__ = np.array(_SCREAMING_SNAKE_CASE )
lowercase__ = np.zeros((len(tertiary[0] ) // 3, residue_constants.atom_type_num, 3) ).astype(np.floataa )
for i, atom in enumerate(_SCREAMING_SNAKE_CASE ):
lowercase__ = np.transpose(tertiary_np[:, i::3] )
atom_positions *= PICO_TO_ANGSTROM
elif "[MASK]" == g[0]:
lowercase__ = np.array(list(map({'-': 0, '+': 1}.get , g[1][0].strip() ) ) )
lowercase__ = np.zeros(
(
len(_SCREAMING_SNAKE_CASE ),
residue_constants.atom_type_num,
) ).astype(np.floataa )
for i, atom in enumerate(_SCREAMING_SNAKE_CASE ):
lowercase__ = 1
atom_mask *= mask[..., None]
assert aatype is not None
return Protein(
atom_positions=_SCREAMING_SNAKE_CASE , atom_mask=_SCREAMING_SNAKE_CASE , aatype=_SCREAMING_SNAKE_CASE , residue_index=np.arange(len(_SCREAMING_SNAKE_CASE ) ) , b_factors=_SCREAMING_SNAKE_CASE , )
def __UpperCamelCase (_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE = 0 ) -> List[str]:
lowercase__ = []
lowercase__ = prot.remark
if remark is not None:
pdb_headers.append(F"""REMARK {remark}""" )
lowercase__ = prot.parents
lowercase__ = prot.parents_chain_index
if parents is not None and parents_chain_index is not None:
lowercase__ = [p for i, p in zip(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) if i == chain_id]
if parents is None or len(_SCREAMING_SNAKE_CASE ) == 0:
lowercase__ = ['N/A']
pdb_headers.append(F"""PARENT {" ".join(_SCREAMING_SNAKE_CASE )}""" )
return pdb_headers
def __UpperCamelCase (_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) -> str:
lowercase__ = []
lowercase__ = pdb_str.split('\n' )
lowercase__ = prot.remark
if remark is not None:
out_pdb_lines.append(F"""REMARK {remark}""" )
lowercase__ = 42
if prot.parents is not None and len(prot.parents ) > 0:
lowercase__ = []
if prot.parents_chain_index is not None:
lowercase__ = {}
for p, i in zip(prot.parents , prot.parents_chain_index ):
parent_dict.setdefault(str(_SCREAMING_SNAKE_CASE ) , [] )
parent_dict[str(_SCREAMING_SNAKE_CASE )].append(_SCREAMING_SNAKE_CASE )
lowercase__ = max([int(_SCREAMING_SNAKE_CASE ) for chain_idx in parent_dict] )
for i in range(max_idx + 1 ):
lowercase__ = parent_dict.get(str(_SCREAMING_SNAKE_CASE ) , ['N/A'] )
parents_per_chain.append(_SCREAMING_SNAKE_CASE )
else:
parents_per_chain.append(list(prot.parents ) )
else:
lowercase__ = [['N/A']]
def make_parent_line(_SCREAMING_SNAKE_CASE ) -> str:
return F"""PARENT {" ".join(_SCREAMING_SNAKE_CASE )}"""
out_pdb_lines.append(make_parent_line(parents_per_chain[0] ) )
lowercase__ = 0
for i, l in enumerate(_SCREAMING_SNAKE_CASE ):
if "PARENT" not in l and "REMARK" not in l:
out_pdb_lines.append(_SCREAMING_SNAKE_CASE )
if "TER" in l and "END" not in lines[i + 1]:
chain_counter += 1
if not chain_counter >= len(_SCREAMING_SNAKE_CASE ):
lowercase__ = parents_per_chain[chain_counter]
else:
lowercase__ = ['N/A']
out_pdb_lines.append(make_parent_line(_SCREAMING_SNAKE_CASE ) )
return "\n".join(_SCREAMING_SNAKE_CASE )
def __UpperCamelCase (_SCREAMING_SNAKE_CASE ) -> str:
lowercase__ = residue_constants.restypes + ['X']
def res_atoa(_SCREAMING_SNAKE_CASE ) -> str:
return residue_constants.restype_atoa.get(restypes[r] , 'UNK' )
lowercase__ = residue_constants.atom_types
lowercase__ = []
lowercase__ = prot.atom_mask
lowercase__ = prot.aatype
lowercase__ = prot.atom_positions
lowercase__ = prot.residue_index.astype(np.intaa )
lowercase__ = prot.b_factors
lowercase__ = prot.chain_index
if np.any(aatype > residue_constants.restype_num ):
raise ValueError('Invalid aatypes.' )
lowercase__ = get_pdb_headers(_SCREAMING_SNAKE_CASE )
if len(_SCREAMING_SNAKE_CASE ) > 0:
pdb_lines.extend(_SCREAMING_SNAKE_CASE )
lowercase__ = aatype.shape[0]
lowercase__ = 1
lowercase__ = 0
lowercase__ = string.ascii_uppercase
lowercase__ = None
# Add all atom sites.
for i in range(_SCREAMING_SNAKE_CASE ):
lowercase__ = res_atoa(aatype[i] )
for atom_name, pos, mask, b_factor in zip(_SCREAMING_SNAKE_CASE , atom_positions[i] , atom_mask[i] , b_factors[i] ):
if mask < 0.5:
continue
lowercase__ = 'ATOM'
lowercase__ = atom_name if len(_SCREAMING_SNAKE_CASE ) == 4 else F""" {atom_name}"""
lowercase__ = ''
lowercase__ = ''
lowercase__ = 1.0_0
lowercase__ = atom_name[0] # Protein supports only C, N, O, S, this works.
lowercase__ = ''
lowercase__ = 'A'
if chain_index is not None:
lowercase__ = chain_tags[chain_index[i]]
# PDB is a columnar format, every space matters here!
lowercase__ = (
F"""{record_type:<6}{atom_index:>5} {name:<4}{alt_loc:>1}"""
F"""{res_name_a:>3} {chain_tag:>1}"""
F"""{residue_index[i]:>4}{insertion_code:>1} """
F"""{pos[0]:>8.3f}{pos[1]:>8.3f}{pos[2]:>8.3f}"""
F"""{occupancy:>6.2f}{b_factor:>6.2f} """
F"""{element:>2}{charge:>2}"""
)
pdb_lines.append(_SCREAMING_SNAKE_CASE )
atom_index += 1
lowercase__ = i == n - 1
if chain_index is not None:
if i != n - 1 and chain_index[i + 1] != prev_chain_index:
lowercase__ = True
lowercase__ = chain_index[i + 1]
if should_terminate:
# Close the chain.
lowercase__ = 'TER'
lowercase__ = (
F"""{chain_end:<6}{atom_index:>5} {res_atoa(aatype[i] ):>3} {chain_tag:>1}{residue_index[i]:>4}"""
)
pdb_lines.append(_SCREAMING_SNAKE_CASE )
atom_index += 1
if i != n - 1:
# "prev" is a misnomer here. This happens at the beginning of
# each new chain.
pdb_lines.extend(get_pdb_headers(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) )
pdb_lines.append('END' )
pdb_lines.append('' )
return "\n".join(_SCREAMING_SNAKE_CASE )
def __UpperCamelCase (_SCREAMING_SNAKE_CASE ) -> np.ndarray:
return residue_constants.STANDARD_ATOM_MASK[prot.aatype]
def __UpperCamelCase (_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE = None , _SCREAMING_SNAKE_CASE = None , _SCREAMING_SNAKE_CASE = None , _SCREAMING_SNAKE_CASE = None , _SCREAMING_SNAKE_CASE = None , ) -> Protein:
return Protein(
aatype=features['aatype'] , atom_positions=result['final_atom_positions'] , atom_mask=result['final_atom_mask'] , residue_index=features['residue_index'] + 1 , b_factors=b_factors if b_factors is not None else np.zeros_like(result['final_atom_mask'] ) , chain_index=_SCREAMING_SNAKE_CASE , remark=_SCREAMING_SNAKE_CASE , parents=_SCREAMING_SNAKE_CASE , parents_chain_index=_SCREAMING_SNAKE_CASE , )
| 269 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
lowercase_ = logging.get_logger(__name__)
lowercase_ = {
"""transfo-xl-wt103""": """https://huggingface.co/transfo-xl-wt103/resolve/main/config.json""",
}
class SCREAMING_SNAKE_CASE (UpperCAmelCase ):
_UpperCamelCase : Optional[Any] = 'transfo-xl'
_UpperCamelCase : Any = ['mems']
_UpperCamelCase : Any = {
'n_token': 'vocab_size',
'hidden_size': 'd_model',
'num_attention_heads': 'n_head',
'num_hidden_layers': 'n_layer',
}
def __init__( self : Optional[Any] , a : Optional[int]=267_735 , a : str=[20_000, 40_000, 200_000] , a : str=1_024 , a : str=1_024 , a : int=16 , a : Optional[int]=64 , a : Optional[int]=4_096 , a : int=4 , a : Tuple=False , a : Any=18 , a : Tuple=1_600 , a : Union[str, Any]=1_000 , a : str=True , a : Dict=True , a : Any=0 , a : List[Any]=-1 , a : List[Any]=True , a : Tuple=0.1 , a : List[Any]=0.0 , a : Optional[Any]=True , a : int="normal" , a : Optional[Any]=0.01 , a : str=0.01 , a : List[Any]=0.02 , a : List[Any]=1E-5 , a : Optional[Any]=0 , **a : Optional[int] , )-> Optional[int]:
"""simple docstring"""
lowercase__ = vocab_size
lowercase__ = []
self.cutoffs.extend(a )
if proj_share_all_but_first:
lowercase__ = [False] + [True] * len(self.cutoffs )
else:
lowercase__ = [False] + [False] * len(self.cutoffs )
lowercase__ = d_model
lowercase__ = d_embed
lowercase__ = d_head
lowercase__ = d_inner
lowercase__ = div_val
lowercase__ = pre_lnorm
lowercase__ = n_layer
lowercase__ = n_head
lowercase__ = mem_len
lowercase__ = same_length
lowercase__ = attn_type
lowercase__ = clamp_len
lowercase__ = sample_softmax
lowercase__ = adaptive
lowercase__ = dropout
lowercase__ = dropatt
lowercase__ = untie_r
lowercase__ = init
lowercase__ = init_range
lowercase__ = proj_init_std
lowercase__ = init_std
lowercase__ = layer_norm_epsilon
super().__init__(eos_token_id=a , **a )
@property
def SCREAMING_SNAKE_CASE_ ( self : Optional[Any] )-> Union[str, Any]:
"""simple docstring"""
logger.info(f"""The model {self.model_type} is one of the few models that has no sequence length limit.""" )
return -1
@max_position_embeddings.setter
def SCREAMING_SNAKE_CASE_ ( self : Any , a : Optional[int] )-> Optional[int]:
"""simple docstring"""
raise NotImplementedError(
f"""The model {self.model_type} is one of the few models that has no sequence length limit.""" )
| 269 | 1 |
from typing import Dict, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import rescale, resize, to_channel_dimension_format
from ...image_utils import (
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_vision_available, logging
if is_vision_available():
import PIL
snake_case : Any = logging.get_logger(__name__)
def __lowerCamelCase ( UpperCAmelCase_ : Union[str, Any] , UpperCAmelCase_ : Tuple ):
"""simple docstring"""
a :Any = b.T
a :Tuple = np.sum(np.square(UpperCAmelCase_ ) , axis=1 )
a :Union[str, Any] = np.sum(np.square(UpperCAmelCase_ ) , axis=0 )
a :Tuple = np.matmul(UpperCAmelCase_ , UpperCAmelCase_ )
a :List[str] = aa[:, None] - 2 * ab + ba[None, :]
return d
def __lowerCamelCase ( UpperCAmelCase_ : List[str] , UpperCAmelCase_ : int ):
"""simple docstring"""
a :Union[str, Any] = x.reshape(-1 , 3 )
a :List[Any] = squared_euclidean_distance(UpperCAmelCase_ , UpperCAmelCase_ )
return np.argmin(UpperCAmelCase_ , axis=1 )
class _snake_case ( _snake_case ):
SCREAMING_SNAKE_CASE__ = ['pixel_values']
def __init__( self , _lowerCamelCase = None , _lowerCamelCase = True , _lowerCamelCase = None , _lowerCamelCase = PILImageResampling.BILINEAR , _lowerCamelCase = True , _lowerCamelCase = True , **_lowerCamelCase , ):
super().__init__(**_lowerCamelCase )
a :int = size if size is not None else {'''height''': 256, '''width''': 256}
a :Any = get_size_dict(_lowerCamelCase )
a :List[Any] = np.array(_lowerCamelCase ) if clusters is not None else None
a :Optional[Any] = do_resize
a :Any = size
a :Dict = resample
a :Optional[int] = do_normalize
a :Optional[int] = do_color_quantize
def SCREAMING_SNAKE_CASE__ ( self , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase = PILImageResampling.BILINEAR , _lowerCamelCase = None , **_lowerCamelCase , ):
a :Optional[Any] = get_size_dict(_lowerCamelCase )
if "height" not in size or "width" not in size:
raise ValueError(F'''Size dictionary must contain both height and width keys. Got {size.keys()}''' )
return resize(
_lowerCamelCase , size=(size['''height'''], size['''width''']) , resample=_lowerCamelCase , data_format=_lowerCamelCase , **_lowerCamelCase )
def SCREAMING_SNAKE_CASE__ ( self , _lowerCamelCase , _lowerCamelCase = None , ):
a :int = rescale(image=_lowerCamelCase , scale=1 / 127.5 , data_format=_lowerCamelCase )
a :List[Any] = image - 1
return image
def SCREAMING_SNAKE_CASE__ ( self , _lowerCamelCase , _lowerCamelCase = None , _lowerCamelCase = None , _lowerCamelCase = None , _lowerCamelCase = None , _lowerCamelCase = None , _lowerCamelCase = None , _lowerCamelCase = None , _lowerCamelCase = ChannelDimension.FIRST , **_lowerCamelCase , ):
a :Optional[int] = do_resize if do_resize is not None else self.do_resize
a :Optional[Any] = size if size is not None else self.size
a :Optional[int] = get_size_dict(_lowerCamelCase )
a :Union[str, Any] = resample if resample is not None else self.resample
a :int = do_normalize if do_normalize is not None else self.do_normalize
a :Tuple = do_color_quantize if do_color_quantize is not None else self.do_color_quantize
a :List[str] = clusters if clusters is not None else self.clusters
a :List[str] = np.array(_lowerCamelCase )
a :List[Any] = make_list_of_images(_lowerCamelCase )
if not valid_images(_lowerCamelCase ):
raise ValueError(
'''Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, '''
'''torch.Tensor, tf.Tensor or jax.ndarray.''' )
if do_resize and size is None or resample is None:
raise ValueError('''Size and resample must be specified if do_resize is True.''' )
if do_color_quantize and clusters is None:
raise ValueError('''Clusters must be specified if do_color_quantize is True.''' )
# All transformations expect numpy arrays.
a :Tuple = [to_numpy_array(_lowerCamelCase ) for image in images]
if do_resize:
a :List[str] = [self.resize(image=_lowerCamelCase , size=_lowerCamelCase , resample=_lowerCamelCase ) for image in images]
if do_normalize:
a :List[Any] = [self.normalize(image=_lowerCamelCase ) for image in images]
if do_color_quantize:
a :Union[str, Any] = [to_channel_dimension_format(_lowerCamelCase , ChannelDimension.LAST ) for image in images]
# color quantize from (batch_size, height, width, 3) to (batch_size, height, width)
a :Union[str, Any] = np.array(_lowerCamelCase )
a :List[Any] = color_quantize(_lowerCamelCase , _lowerCamelCase ).reshape(images.shape[:-1] )
# flatten to (batch_size, height*width)
a :List[str] = images.shape[0]
a :Optional[Any] = images.reshape(_lowerCamelCase , -1 )
# We need to convert back to a list of images to keep consistent behaviour across processors.
a :Optional[int] = list(_lowerCamelCase )
else:
a :Tuple = [to_channel_dimension_format(_lowerCamelCase , _lowerCamelCase ) for image in images]
a :Any = {'''input_ids''': images}
return BatchFeature(data=_lowerCamelCase , tensor_type=_lowerCamelCase )
| 94 |
import numpy as np
from transformers import BatchFeature
from transformers.testing_utils import require_tf, require_torch
from .test_feature_extraction_common import FeatureExtractionSavingTestMixin
class __lowerCAmelCase ( lowerCamelCase__ ):
# to overwrite at feature extractactor specific tests
__lowerCamelCase = None
__lowerCamelCase = None
@property
def snake_case ( self ):
"""simple docstring"""
return self.feat_extract_tester.prepare_feat_extract_dict()
def snake_case ( self ):
"""simple docstring"""
_lowerCAmelCase = self.feature_extraction_class(**self.feat_extract_dict )
self.assertTrue(hasattr(_snake_case , """feature_size""" ) )
self.assertTrue(hasattr(_snake_case , """sampling_rate""" ) )
self.assertTrue(hasattr(_snake_case , """padding_value""" ) )
def snake_case ( self ):
"""simple docstring"""
_lowerCAmelCase = self.feat_extract_tester.prepare_inputs_for_common()
_lowerCAmelCase = self.feature_extraction_class(**self.feat_extract_dict )
_lowerCAmelCase = feat_extract.model_input_names[0]
_lowerCAmelCase = BatchFeature({input_name: speech_inputs} )
self.assertTrue(all(len(_snake_case ) == len(_snake_case ) for x, y in zip(_snake_case , processed_features[input_name] ) ) )
_lowerCAmelCase = self.feat_extract_tester.prepare_inputs_for_common(equal_length=_snake_case )
_lowerCAmelCase = BatchFeature({input_name: speech_inputs} , tensor_type="""np""" )
_lowerCAmelCase = processed_features[input_name]
if len(batch_features_input.shape ) < 3:
_lowerCAmelCase = batch_features_input[:, :, None]
self.assertTrue(
batch_features_input.shape
== (self.feat_extract_tester.batch_size, len(speech_inputs[0] ), self.feat_extract_tester.feature_size) )
@require_torch
def snake_case ( self ):
"""simple docstring"""
_lowerCAmelCase = self.feat_extract_tester.prepare_inputs_for_common(equal_length=_snake_case )
_lowerCAmelCase = self.feature_extraction_class(**self.feat_extract_dict )
_lowerCAmelCase = feat_extract.model_input_names[0]
_lowerCAmelCase = BatchFeature({input_name: speech_inputs} , tensor_type="""pt""" )
_lowerCAmelCase = processed_features[input_name]
if len(batch_features_input.shape ) < 3:
_lowerCAmelCase = batch_features_input[:, :, None]
self.assertTrue(
batch_features_input.shape
== (self.feat_extract_tester.batch_size, len(speech_inputs[0] ), self.feat_extract_tester.feature_size) )
@require_tf
def snake_case ( self ):
"""simple docstring"""
_lowerCAmelCase = self.feat_extract_tester.prepare_inputs_for_common(equal_length=_snake_case )
_lowerCAmelCase = self.feature_extraction_class(**self.feat_extract_dict )
_lowerCAmelCase = feat_extract.model_input_names[0]
_lowerCAmelCase = BatchFeature({input_name: speech_inputs} , tensor_type="""tf""" )
_lowerCAmelCase = processed_features[input_name]
if len(batch_features_input.shape ) < 3:
_lowerCAmelCase = batch_features_input[:, :, None]
self.assertTrue(
batch_features_input.shape
== (self.feat_extract_tester.batch_size, len(speech_inputs[0] ), self.feat_extract_tester.feature_size) )
def snake_case ( self , _snake_case=False ):
"""simple docstring"""
def _inputs_have_equal_length(_snake_case ):
_lowerCAmelCase = len(input[0] )
for input_slice in input[1:]:
if len(_snake_case ) != length:
return False
return True
def _inputs_are_equal(_snake_case , _snake_case ):
if len(_snake_case ) != len(_snake_case ):
return False
for input_slice_a, input_slice_a in zip(_snake_case , _snake_case ):
if not np.allclose(np.asarray(_snake_case ) , np.asarray(_snake_case ) , atol=1e-3 ):
return False
return True
_lowerCAmelCase = self.feature_extraction_class(**self.feat_extract_dict )
_lowerCAmelCase = self.feat_extract_tester.prepare_inputs_for_common(numpify=_snake_case )
_lowerCAmelCase = feat_extract.model_input_names[0]
_lowerCAmelCase = BatchFeature({input_name: speech_inputs} )
_lowerCAmelCase = self.feat_extract_tester.seq_length_diff
_lowerCAmelCase = self.feat_extract_tester.max_seq_length + pad_diff
_lowerCAmelCase = self.feat_extract_tester.min_seq_length
_lowerCAmelCase = self.feat_extract_tester.batch_size
_lowerCAmelCase = self.feat_extract_tester.feature_size
# test padding for List[int] + numpy
_lowerCAmelCase = feat_extract.pad(_snake_case , padding=_snake_case )
_lowerCAmelCase = input_a[input_name]
_lowerCAmelCase = feat_extract.pad(_snake_case , padding="""longest""" )
_lowerCAmelCase = input_a[input_name]
_lowerCAmelCase = feat_extract.pad(_snake_case , padding="""max_length""" , max_length=len(speech_inputs[-1] ) )
_lowerCAmelCase = input_a[input_name]
_lowerCAmelCase = feat_extract.pad(_snake_case , padding="""longest""" , return_tensors="""np""" )
_lowerCAmelCase = input_a[input_name]
# max_length parameter has to be provided when setting `padding="max_length"`
with self.assertRaises(_snake_case ):
feat_extract.pad(_snake_case , padding="""max_length""" )[input_name]
_lowerCAmelCase = feat_extract.pad(
_snake_case , padding="""max_length""" , max_length=_snake_case , return_tensors="""np""" )
_lowerCAmelCase = input_a[input_name]
self.assertFalse(_inputs_have_equal_length(_snake_case ) )
self.assertTrue(_inputs_have_equal_length(_snake_case ) )
self.assertTrue(_inputs_have_equal_length(_snake_case ) )
self.assertTrue(_inputs_are_equal(_snake_case , _snake_case ) )
self.assertTrue(len(input_a[0] ) == pad_min_length )
self.assertTrue(len(input_a[1] ) == pad_min_length + pad_diff )
self.assertTrue(input_a.shape[:2] == (batch_size, len(input_a[0] )) )
self.assertTrue(input_a.shape[:2] == (batch_size, pad_max_length) )
if feature_size > 1:
self.assertTrue(input_a.shape[2] == input_a.shape[2] == feature_size )
# test padding for `pad_to_multiple_of` for List[int] + numpy
_lowerCAmelCase = feat_extract.pad(_snake_case , pad_to_multiple_of=10 )
_lowerCAmelCase = input_a[input_name]
_lowerCAmelCase = feat_extract.pad(_snake_case , padding="""longest""" , pad_to_multiple_of=10 )
_lowerCAmelCase = input_a[input_name]
_lowerCAmelCase = feat_extract.pad(
_snake_case , padding="""max_length""" , pad_to_multiple_of=10 , max_length=_snake_case )
_lowerCAmelCase = input_a[input_name]
_lowerCAmelCase = feat_extract.pad(
_snake_case , padding="""max_length""" , pad_to_multiple_of=10 , max_length=_snake_case , return_tensors="""np""" , )
_lowerCAmelCase = input_a[input_name]
self.assertTrue(all(len(_snake_case ) % 10 == 0 for x in input_a ) )
self.assertTrue(_inputs_are_equal(_snake_case , _snake_case ) )
_lowerCAmelCase = pad_max_length if pad_max_length % 10 == 0 else (pad_max_length // 10 + 1) * 10
self.assertTrue(all(len(_snake_case ) == expected_mult_pad_length for x in input_a ) )
self.assertEqual(input_a.shape[:2] , (batch_size, expected_mult_pad_length) )
if feature_size > 1:
self.assertTrue(input_a.shape[2] == feature_size )
# Check padding value is correct
_lowerCAmelCase = (np.ones(self.feat_extract_tester.feature_size ) * feat_extract.padding_value).sum()
self.assertTrue(
abs(np.asarray(input_a[0] )[pad_min_length:].sum() - padding_vector_sum * (pad_max_length - pad_min_length) )
< 1e-3 )
self.assertTrue(
abs(
np.asarray(input_a[1] )[pad_min_length + pad_diff :].sum()
- padding_vector_sum * (pad_max_length - pad_min_length - pad_diff) )
< 1e-3 )
self.assertTrue(
abs(
np.asarray(input_a[2] )[pad_min_length + 2 * pad_diff :].sum()
- padding_vector_sum * (pad_max_length - pad_min_length - 2 * pad_diff) )
< 1e-3 )
self.assertTrue(
abs(input_a[0, pad_min_length:].sum() - padding_vector_sum * (pad_max_length - pad_min_length) ) < 1e-3 )
self.assertTrue(
abs(input_a[0, pad_min_length:].sum() - padding_vector_sum * (expected_mult_pad_length - pad_min_length) )
< 1e-3 )
def snake_case ( self , _snake_case=False ):
"""simple docstring"""
def _inputs_have_equal_length(_snake_case ):
_lowerCAmelCase = len(input[0] )
for input_slice in input[1:]:
if len(_snake_case ) != length:
return False
return True
def _inputs_are_equal(_snake_case , _snake_case ):
if len(_snake_case ) != len(_snake_case ):
return False
for input_slice_a, input_slice_a in zip(_snake_case , _snake_case ):
if not np.allclose(np.asarray(_snake_case ) , np.asarray(_snake_case ) , atol=1e-3 ):
return False
return True
_lowerCAmelCase = self.feature_extraction_class(**self.feat_extract_dict )
_lowerCAmelCase = self.feat_extract_tester.prepare_inputs_for_common(numpify=_snake_case )
_lowerCAmelCase = feat_extract.model_input_names[0]
_lowerCAmelCase = BatchFeature({input_name: speech_inputs} )
# truncate to smallest
_lowerCAmelCase = feat_extract.pad(
_snake_case , padding="""max_length""" , max_length=len(speech_inputs[0] ) , truncation=_snake_case )
_lowerCAmelCase = input_a[input_name]
_lowerCAmelCase = feat_extract.pad(_snake_case , padding="""max_length""" , max_length=len(speech_inputs[0] ) )
_lowerCAmelCase = input_a[input_name]
self.assertTrue(_inputs_have_equal_length(_snake_case ) )
self.assertFalse(_inputs_have_equal_length(_snake_case ) )
# truncate to smallest with np
_lowerCAmelCase = feat_extract.pad(
_snake_case , padding="""max_length""" , max_length=len(speech_inputs[0] ) , return_tensors="""np""" , truncation=_snake_case , )
_lowerCAmelCase = input_a[input_name]
_lowerCAmelCase = feat_extract.pad(
_snake_case , padding="""max_length""" , max_length=len(speech_inputs[0] ) , return_tensors="""np""" )
_lowerCAmelCase = input_a[input_name]
self.assertTrue(_inputs_have_equal_length(_snake_case ) )
self.assertTrue(input_a.shape[1] == len(speech_inputs[0] ) )
# since truncation forces padding to be smaller than longest input
# function can't return `np.ndarray`, but has to return list
self.assertFalse(_inputs_have_equal_length(_snake_case ) )
# truncate to middle
_lowerCAmelCase = feat_extract.pad(
_snake_case , padding="""max_length""" , max_length=len(speech_inputs[1] ) , truncation=_snake_case , return_tensors="""np""" , )
_lowerCAmelCase = input_a[input_name]
_lowerCAmelCase = feat_extract.pad(
_snake_case , padding="""max_length""" , max_length=len(speech_inputs[1] ) , truncation=_snake_case )
_lowerCAmelCase = input_a[input_name]
_lowerCAmelCase = feat_extract.pad(
_snake_case , padding="""max_length""" , max_length=len(speech_inputs[1] ) , return_tensors="""np""" )
_lowerCAmelCase = input_a[input_name]
self.assertTrue(input_a.shape[1] == len(speech_inputs[1] ) )
self.assertTrue(_inputs_have_equal_length(_snake_case ) )
self.assertTrue(_inputs_have_equal_length(_snake_case ) )
self.assertTrue(_inputs_are_equal(_snake_case , _snake_case ) )
# since truncation forces padding to be smaller than longest input
# function can't return `np.ndarray`, but has to return list
self.assertFalse(_inputs_have_equal_length(_snake_case ) )
self.assertTrue(len(input_a[-1] ) == len(speech_inputs[-1] ) )
# padding has to be max_length when setting `truncation=True`
with self.assertRaises(_snake_case ):
feat_extract.pad(_snake_case , truncation=_snake_case )[input_name]
# padding has to be max_length when setting `truncation=True`
with self.assertRaises(_snake_case ):
feat_extract.pad(_snake_case , padding="""longest""" , truncation=_snake_case )[input_name]
# padding has to be max_length when setting `truncation=True`
with self.assertRaises(_snake_case ):
feat_extract.pad(_snake_case , padding="""longest""" , truncation=_snake_case )[input_name]
# max_length parameter has to be provided when setting `truncation=True` and padding="max_length"
with self.assertRaises(_snake_case ):
feat_extract.pad(_snake_case , padding="""max_length""" , truncation=_snake_case )[input_name]
# test truncation for `pad_to_multiple_of` for List[int] + numpy
_lowerCAmelCase = 12
_lowerCAmelCase = feat_extract.pad(
_snake_case , padding="""max_length""" , max_length=len(speech_inputs[0] ) , pad_to_multiple_of=_snake_case , truncation=_snake_case , )
_lowerCAmelCase = input_a[input_name]
_lowerCAmelCase = feat_extract.pad(
_snake_case , padding="""max_length""" , max_length=len(speech_inputs[0] ) , pad_to_multiple_of=_snake_case , )
_lowerCAmelCase = input_a[input_name]
# retrieve expected_length as multiple of pad_to_multiple_of
_lowerCAmelCase = len(speech_inputs[0] )
if expected_length % pad_to_multiple_of != 0:
_lowerCAmelCase = ((len(speech_inputs[0] ) // pad_to_multiple_of) + 1) * pad_to_multiple_of
self.assertTrue(len(input_a[0] ) == expected_length )
self.assertTrue(_inputs_have_equal_length(_snake_case ) )
self.assertFalse(_inputs_have_equal_length(_snake_case ) )
def snake_case ( self ):
"""simple docstring"""
self._check_padding(numpify=_snake_case )
def snake_case ( self ):
"""simple docstring"""
self._check_padding(numpify=_snake_case )
def snake_case ( self ):
"""simple docstring"""
self._check_truncation(numpify=_snake_case )
def snake_case ( self ):
"""simple docstring"""
self._check_truncation(numpify=_snake_case )
@require_torch
def snake_case ( self ):
"""simple docstring"""
_lowerCAmelCase = self.feature_extraction_class(**self.feat_extract_dict )
_lowerCAmelCase = self.feat_extract_tester.prepare_inputs_for_common()
_lowerCAmelCase = feat_extract.model_input_names[0]
_lowerCAmelCase = BatchFeature({input_name: speech_inputs} )
_lowerCAmelCase = feat_extract.pad(_snake_case , padding="""longest""" , return_tensors="""np""" )[input_name]
_lowerCAmelCase = feat_extract.pad(_snake_case , padding="""longest""" , return_tensors="""pt""" )[input_name]
self.assertTrue(abs(input_np.astype(np.floataa ).sum() - input_pt.numpy().astype(np.floataa ).sum() ) < 1e-2 )
@require_tf
def snake_case ( self ):
"""simple docstring"""
_lowerCAmelCase = self.feature_extraction_class(**self.feat_extract_dict )
_lowerCAmelCase = self.feat_extract_tester.prepare_inputs_for_common()
_lowerCAmelCase = feat_extract.model_input_names[0]
_lowerCAmelCase = BatchFeature({input_name: speech_inputs} )
_lowerCAmelCase = feat_extract.pad(_snake_case , padding="""longest""" , return_tensors="""np""" )[input_name]
_lowerCAmelCase = feat_extract.pad(_snake_case , padding="""longest""" , return_tensors="""tf""" )[input_name]
self.assertTrue(abs(input_np.astype(np.floataa ).sum() - input_tf.numpy().astype(np.floataa ).sum() ) < 1e-2 )
def snake_case ( self ):
"""simple docstring"""
_lowerCAmelCase = self.feat_extract_dict
_lowerCAmelCase = True
_lowerCAmelCase = self.feature_extraction_class(**_snake_case )
_lowerCAmelCase = self.feat_extract_tester.prepare_inputs_for_common()
_lowerCAmelCase = [len(_snake_case ) for x in speech_inputs]
_lowerCAmelCase = feat_extract.model_input_names[0]
_lowerCAmelCase = BatchFeature({input_name: speech_inputs} )
_lowerCAmelCase = feat_extract.pad(_snake_case , padding="""longest""" , return_tensors="""np""" )
self.assertIn("""attention_mask""" , _snake_case )
self.assertListEqual(list(processed.attention_mask.shape ) , list(processed[input_name].shape[:2] ) )
self.assertListEqual(processed.attention_mask.sum(-1 ).tolist() , _snake_case )
def snake_case ( self ):
"""simple docstring"""
_lowerCAmelCase = self.feat_extract_dict
_lowerCAmelCase = True
_lowerCAmelCase = self.feature_extraction_class(**_snake_case )
_lowerCAmelCase = self.feat_extract_tester.prepare_inputs_for_common()
_lowerCAmelCase = [len(_snake_case ) for x in speech_inputs]
_lowerCAmelCase = feat_extract.model_input_names[0]
_lowerCAmelCase = BatchFeature({input_name: speech_inputs} )
_lowerCAmelCase = min(_snake_case )
_lowerCAmelCase = feat_extract.pad(
_snake_case , padding="""max_length""" , max_length=_snake_case , truncation=_snake_case , return_tensors="""np""" )
self.assertIn("""attention_mask""" , _snake_case )
self.assertListEqual(
list(processed_pad.attention_mask.shape ) , [processed_pad[input_name].shape[0], max_length] )
self.assertListEqual(
processed_pad.attention_mask[:, :max_length].sum(-1 ).tolist() , [max_length for x in speech_inputs] )
| 82 | 0 |
"import argparse\r\nimport json\r\nfrom pathlib import Path\r\n\r\nimport requests\r\nimport timm\r\(...TRUNCATED) | 35 | "from .dependency_versions_table import deps\r\nfrom .utils.versions import require_version, require(...TRUNCATED) | 35 | 1 |
Dataset Card for "python_codestyles-mixed1-500"
This dataset contains negative and positive examples with python code of compliance with a code style. A positive
example represents compliance with the code style (label is 1). Each example is composed of two components, the first
component consists of a code that either conforms to the code style or violates it and the second component
corresponding to an example code that already conforms to a code style.
The dataset combines both
datasets infinityofspace/python_codestyles-random-500
and infinityofspace/python_codestyles-single-500
by randomly selecting half of the examples from each of the two datasets.
The code styles in the combined dataset differ in at least one and exactly one codestyle rule, which is called a
mixed
codestyle dataset variant. The dataset consists of a training and test group, with none of the code styles
overlapping between groups. In addition, both groups contain completely different underlying codes.
The examples contain source code from the following repositories:
repository | tag or commit |
---|---|
TheAlgorithms/Python | f614ed72170011d2d439f7901e1c8daa7deac8c4 |
huggingface/transformers | v4.31.0 |
huggingface/datasets | 2.13.1 |
huggingface/diffusers | v0.18.2 |
huggingface/accelerate | v0.21.0 |
- Downloads last month
- 49