"""simple docstring"""
A__ : int = {
'A': ['B', 'C', 'E'],
'B': ['A', 'D', 'E'],
'C': ['A', 'F', 'G'],
'D': ['B'],
'E': ['A', 'B', 'D'],
'F': ['C'],
'G': ['C'],
}
def bfs_shortest_path(graph: dict, start: str, goal: str) -> list[str]:
    explored = set()
    # keep track of all the paths to be checked
    queue = [[start]]
    # return path if start is goal
    if start == goal:
        return [start]
    # keeps looping until all possible paths have been checked
    while queue:
        # pop the first path from the queue
        path = queue.pop(0)
        # get the last node from the path
        node = path[-1]
        if node not in explored:
            neighbours = graph[node]
            # go through all neighbour nodes, construct a new path and
            # push it into the queue
            for neighbour in neighbours:
                new_path = list(path)
                new_path.append(neighbour)
                queue.append(new_path)
                # return path if neighbour is goal
                if neighbour == goal:
                    return new_path
            # mark node as explored
            explored.add(node)
    # in case there's no path between the two nodes
    return []
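

# Note: queue.pop(0) on a Python list is O(len(queue)). A sketch of the same
# traversal with collections.deque, whose popleft() is O(1) (behaviour unchanged):
#
#     from collections import deque
#     queue = deque([[start]])
#     path = queue.popleft()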
def bfs_shortest_path_distance(graph: dict, start: str, target: str) -> int:
    if not graph or start not in graph or target not in graph:
        return -1
    if start == target:
        return 0
    queue = [start]
    visited = set(queue)
    # Keep tab on distances from `start` node.
    dist = {start: 0, target: -1}
    while queue:
        node = queue.pop(0)
        if node == target:
            dist[target] = (
                dist[node] if dist[target] == -1 else min(dist[target], dist[node])
            )
        for adjacent in graph[node]:
            if adjacent not in visited:
                visited.add(adjacent)
                queue.append(adjacent)
                dist[adjacent] = dist[node] + 1
    return dist[target]
if __name__ == "__main__":
print(bfs_shortest_path(demo_graph, 'G', 'D')) # returns ['G', 'C', 'A', 'B', 'D']
print(bfs_shortest_path_distance(demo_graph, 'G', 'D')) # returns 4
"""simple docstring"""
import math
def _snake_case ( lowerCamelCase__ : list , lowerCamelCase__ : int ) -> int:
lowerCamelCase_ : int =len(lowerCamelCase__ )
lowerCamelCase_ : List[Any] =int(math.floor(math.sqrt(lowerCamelCase__ ) ) )
lowerCamelCase_ : List[Any] =0
while arr[min(lowerCamelCase__ , lowerCamelCase__ ) - 1] < x:
lowerCamelCase_ : str =step
step += int(math.floor(math.sqrt(lowerCamelCase__ ) ) )
if prev >= n:
return -1
while arr[prev] < x:
lowerCamelCase_ : Dict =prev + 1
if prev == min(lowerCamelCase__ , lowerCamelCase__ ):
return -1
if arr[prev] == x:
return prev
return -1
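

# How it works: jump ahead in blocks of size sqrt(n) until reaching the block that
# could contain x, then scan that block linearly, for O(sqrt(n)) comparisons on a
# sorted array. A quick worked example (illustrative input):
#
#     >>> jump_search([0, 1, 2, 3, 5, 8, 13, 21, 34, 55, 89], 55)
#     9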
if __name__ == "__main__":
    user_input = input('Enter numbers separated by a comma:\n').strip()
    arr = [int(item) for item in user_input.split(',')]
    x = int(input('Enter the number to be searched:\n'))
    res = jump_search(arr, x)
    if res == -1:
        print('Number not found!')
    else:
        print(f'Number {x} is at index {res}')
import unittest
import numpy as np
from transformers.testing_utils import is_flaky, require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import DonutImageProcessor
class DonutImageProcessingTester(unittest.TestCase):
    def __init__(
        self, parent, batch_size=7, num_channels=3, image_size=18, min_resolution=30, max_resolution=400,
        do_resize=True, size=None, do_thumbnail=True, do_align_axis=False, do_pad=True, do_normalize=True,
        image_mean=[0.5, 0.5, 0.5], image_std=[0.5, 0.5, 0.5],
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.image_size = image_size
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.size = size if size is not None else {"height": 18, "width": 20}
        self.do_thumbnail = do_thumbnail
        self.do_align_axis = do_align_axis
        self.do_pad = do_pad
        self.do_normalize = do_normalize
        self.image_mean = image_mean
        self.image_std = image_std
    def prepare_image_processor_dict(self):
return {
"do_resize": self.do_resize,
"size": self.size,
"do_thumbnail": self.do_thumbnail,
"do_align_long_axis": self.do_align_axis,
"do_pad": self.do_pad,
"do_normalize": self.do_normalize,
"image_mean": self.image_mean,
"image_std": self.image_std,
}
@require_torch
@require_vision
class DonutImageProcessingTest(ImageProcessingSavingTestMixin, unittest.TestCase):
    image_processing_class = DonutImageProcessor if is_vision_available() else None

    def setUp(self):
        self.image_processor_tester = DonutImageProcessingTester(self)

    @property
    def image_processor_dict(self):
        return self.image_processor_tester.prepare_image_processor_dict()

    def test_image_processor_properties(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        self.assertTrue(hasattr(image_processing, "do_resize"))
        self.assertTrue(hasattr(image_processing, "size"))
        self.assertTrue(hasattr(image_processing, "do_thumbnail"))
        self.assertTrue(hasattr(image_processing, "do_align_long_axis"))
        self.assertTrue(hasattr(image_processing, "do_pad"))
        self.assertTrue(hasattr(image_processing, "do_normalize"))
        self.assertTrue(hasattr(image_processing, "image_mean"))
        self.assertTrue(hasattr(image_processing, "image_std"))
    def test_image_processor_from_dict_with_kwargs(self):
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict)
        self.assertEqual(image_processor.size, {"height": 18, "width": 20})

        image_processor = self.image_processing_class.from_dict(self.image_processor_dict, size=42)
        self.assertEqual(image_processor.size, {"height": 42, "width": 42})

        # Previous config had dimensions in (width, height) order
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict, size=(42, 84))
        self.assertEqual(image_processor.size, {"height": 84, "width": 42})
    def test_batch_feature(self):
        pass
    @is_flaky()
    def test_call_pil(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PIL images
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False)
        for image in image_inputs:
            self.assertIsInstance(image, Image.Image)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["height"],
                self.image_processor_tester.size["width"],
            ),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["height"],
                self.image_processor_tester.size["width"],
            ),
        )
    @is_flaky()
    def test_call_numpy(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random numpy tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, numpify=True)
        for image in image_inputs:
            self.assertIsInstance(image, np.ndarray)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["height"],
                self.image_processor_tester.size["width"],
            ),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["height"],
                self.image_processor_tester.size["width"],
            ),
        )
    @is_flaky()
    def test_call_pytorch(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PyTorch tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, torchify=True)
        for image in image_inputs:
            self.assertIsInstance(image, torch.Tensor)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["height"],
                self.image_processor_tester.size["width"],
            ),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["height"],
                self.image_processor_tester.size["width"],
            ),
        )
import inspect
import unittest
import torch
import torch.nn as nn
from accelerate.hooks import (
AlignDevicesHook,
ModelHook,
SequentialHook,
add_hook_to_module,
attach_align_device_hook,
remove_hook_from_module,
remove_hook_from_submodules,
)
from accelerate.test_utils import require_multi_gpu
class ModelForTest(nn.Module):
    def __init__(self):
        super().__init__()
        self.linear1 = nn.Linear(3, 4)
        self.batchnorm = nn.BatchNorm1d(4)
        self.linear2 = nn.Linear(4, 5)

    def forward(self, x):
        return self.linear2(self.batchnorm(self.linear1(x)))


class PreForwardHook(ModelHook):
    def pre_forward(self, module, *args, **kwargs):
        return (args[0] + 1,) + args[1:], kwargs


class PostForwardHook(ModelHook):
    def post_forward(self, module, output):
        return output + 1
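

# For orientation (a paraphrase of the behaviour these tests verify, not the
# library source): add_hook_to_module stores the hook on module._hf_hook, keeps
# the original forward as module._old_forward, and dispatches roughly as
#
#     args, kwargs = hook.pre_forward(module, *args, **kwargs)
#     output = module._old_forward(*args, **kwargs)
#     return hook.post_forward(module, output)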
class HooksModelTester(unittest.TestCase):
    def test_add_and_remove_hooks(self):
        test_model = ModelForTest()
        test_hook = ModelHook()
        add_hook_to_module(test_model, test_hook)
        self.assertEqual(test_model._hf_hook, test_hook)
        self.assertTrue(hasattr(test_model, "_old_forward"))

        # Check adding the hook did not change the name or the signature
        self.assertEqual(test_model.forward.__name__, "forward")
        self.assertListEqual(list(inspect.signature(test_model.forward).parameters), ["x"])

        remove_hook_from_module(test_model)
        self.assertFalse(hasattr(test_model, "_hf_hook"))
        self.assertFalse(hasattr(test_model, "_old_forward"))
    def test_append_and_remove_hooks(self):
        test_model = ModelForTest()
        test_hook = ModelHook()
        add_hook_to_module(test_model, test_hook)
        add_hook_to_module(test_model, test_hook, append=True)
        self.assertEqual(isinstance(test_model._hf_hook, SequentialHook), True)
        self.assertEqual(len(test_model._hf_hook.hooks), 2)
        self.assertTrue(hasattr(test_model, "_old_forward"))

        # Check adding the hook did not change the name or the signature
        self.assertEqual(test_model.forward.__name__, "forward")
        self.assertListEqual(list(inspect.signature(test_model.forward).parameters), ["x"])

        remove_hook_from_module(test_model)
        self.assertFalse(hasattr(test_model, "_hf_hook"))
        self.assertFalse(hasattr(test_model, "_old_forward"))
    def test_pre_forward_hook_is_executed(self):
        test_model = ModelForTest()
        x = torch.randn(2, 3)
        expected = test_model(x + 1)
        expected2 = test_model(x + 2)

        test_hook = PreForwardHook()
        add_hook_to_module(test_model, test_hook)
        output1 = test_model(x)
        self.assertTrue(torch.allclose(output1, expected, atol=1e-5))

        # Attaching a hook to a model when it already has one replaces, does not chain
        test_hook = PreForwardHook()
        add_hook_to_module(test_model, test_hook)
        output1 = test_model(x)
        self.assertTrue(torch.allclose(output1, expected, atol=1e-5))

        # You need to use the sequential hook to chain two or more hooks
        test_hook = SequentialHook(PreForwardHook(), PreForwardHook())
        add_hook_to_module(test_model, test_hook)
        output2 = test_model(x)
        assert torch.allclose(output2, expected2, atol=1e-5)
    def test_post_forward_hook_is_executed(self):
        test_model = ModelForTest()
        x = torch.randn(2, 3)
        output = test_model(x)

        test_hook = PostForwardHook()
        add_hook_to_module(test_model, test_hook)
        output1 = test_model(x)
        self.assertTrue(torch.allclose(output1, output + 1, atol=1e-5))

        # Attaching a hook to a model when it already has one replaces, does not chain
        test_hook = PostForwardHook()
        add_hook_to_module(test_model, test_hook)
        output1 = test_model(x)
        self.assertTrue(torch.allclose(output1, output + 1, atol=1e-5))

        # You need to use the sequential hook to chain two or more hooks
        test_hook = SequentialHook(PostForwardHook(), PostForwardHook())
        add_hook_to_module(test_model, test_hook)
        output1 = test_model(x)
        assert torch.allclose(output1, output + 2, atol=1e-5)
    def test_no_grad_in_hook(self):
        test_model = ModelForTest()
        x = torch.randn(2, 3)
        output = test_model(x)

        test_hook = PostForwardHook()
        add_hook_to_module(test_model, test_hook)
        output1 = test_model(x)
        self.assertTrue(torch.allclose(output1, output + 1))
        self.assertTrue(output1.requires_grad)

        test_hook.no_grad = True
        output1 = test_model(x)
        self.assertFalse(output1.requires_grad)
    @require_multi_gpu
    def test_align_devices_as_model_parallelism(self):
        model = ModelForTest()
        # Everything is on CPU
        self.assertEqual(model.linear1.weight.device, torch.device("cpu"))
        self.assertEqual(model.batchnorm.weight.device, torch.device("cpu"))
        self.assertEqual(model.linear2.weight.device, torch.device("cpu"))

        # This will move each submodule on different devices
        add_hook_to_module(model.linear1, AlignDevicesHook(execution_device=0))
        add_hook_to_module(model.batchnorm, AlignDevicesHook(execution_device=0))
        add_hook_to_module(model.linear2, AlignDevicesHook(execution_device=1))

        self.assertEqual(model.linear1.weight.device, torch.device(0))
        self.assertEqual(model.batchnorm.weight.device, torch.device(0))
        self.assertEqual(model.batchnorm.running_mean.device, torch.device(0))
        self.assertEqual(model.linear2.weight.device, torch.device(1))

        # We can still make a forward pass. The input does not need to be on any particular device
        x = torch.randn(2, 3)
        output = model(x)
        self.assertEqual(output.device, torch.device(1))

        # We can add a general hook to put back output on same device as input.
        add_hook_to_module(model, AlignDevicesHook(io_same_device=True))
        x = torch.randn(2, 3).to(0)
        output = model(x)
        self.assertEqual(output.device, torch.device(0))
    def test_align_devices_as_cpu_offload(self):
        model = ModelForTest()
        # Everything is on CPU
        self.assertEqual(model.linear1.weight.device, torch.device("cpu"))
        self.assertEqual(model.batchnorm.weight.device, torch.device("cpu"))
        self.assertEqual(model.linear2.weight.device, torch.device("cpu"))

        # This will move each submodule on different devices
        hook_kwargs = {"execution_device": 0 if torch.cuda.is_available() else "cpu", "offload": True}
        add_hook_to_module(model.linear1, AlignDevicesHook(**hook_kwargs))
        add_hook_to_module(model.batchnorm, AlignDevicesHook(**hook_kwargs))
        add_hook_to_module(model.linear2, AlignDevicesHook(**hook_kwargs))

        # Parameters have been offloaded, so on the meta device
        self.assertEqual(model.linear1.weight.device, torch.device("meta"))
        self.assertEqual(model.batchnorm.weight.device, torch.device("meta"))
        self.assertEqual(model.linear2.weight.device, torch.device("meta"))
        # Buffers are not included in the offload by default, so are on the execution device
        device = torch.device(hook_kwargs["execution_device"])
        self.assertEqual(model.batchnorm.running_mean.device, device)

        x = torch.randn(2, 3)
        output = model(x)
        self.assertEqual(output.device, device)

        # Removing hooks loads back the weights in the model.
        remove_hook_from_module(model.linear1)
        remove_hook_from_module(model.batchnorm)
        remove_hook_from_module(model.linear2)
        self.assertEqual(model.linear1.weight.device, torch.device("cpu"))
        self.assertEqual(model.batchnorm.weight.device, torch.device("cpu"))
        self.assertEqual(model.linear2.weight.device, torch.device("cpu"))

        # Now test with buffers included in the offload
        hook_kwargs = {
            "execution_device": 0 if torch.cuda.is_available() else "cpu",
            "offload": True,
            "offload_buffers": True,
        }
        add_hook_to_module(model.linear1, AlignDevicesHook(**hook_kwargs))
        add_hook_to_module(model.batchnorm, AlignDevicesHook(**hook_kwargs))
        add_hook_to_module(model.linear2, AlignDevicesHook(**hook_kwargs))

        # Parameters have been offloaded, so on the meta device, buffers included
        self.assertEqual(model.linear1.weight.device, torch.device("meta"))
        self.assertEqual(model.batchnorm.weight.device, torch.device("meta"))
        self.assertEqual(model.linear2.weight.device, torch.device("meta"))
        self.assertEqual(model.batchnorm.running_mean.device, torch.device("meta"))

        x = torch.randn(2, 3)
        output = model(x)
        self.assertEqual(output.device, device)

        # Removing hooks loads back the weights in the model.
        remove_hook_from_module(model.linear1)
        remove_hook_from_module(model.batchnorm)
        remove_hook_from_module(model.linear2)
        self.assertEqual(model.linear1.weight.device, torch.device("cpu"))
        self.assertEqual(model.batchnorm.weight.device, torch.device("cpu"))
        self.assertEqual(model.linear2.weight.device, torch.device("cpu"))
    def test_attach_align_device_hook_as_cpu_offload(self):
        model = ModelForTest()
        # Everything is on CPU
        self.assertEqual(model.linear1.weight.device, torch.device("cpu"))
        self.assertEqual(model.batchnorm.weight.device, torch.device("cpu"))
        self.assertEqual(model.linear2.weight.device, torch.device("cpu"))

        # This will move each submodule on different devices
        execution_device = 0 if torch.cuda.is_available() else "cpu"
        attach_align_device_hook(model, execution_device=execution_device, offload=True)

        # Parameters have been offloaded, so on the meta device
        self.assertEqual(model.linear1.weight.device, torch.device("meta"))
        self.assertEqual(model.batchnorm.weight.device, torch.device("meta"))
        self.assertEqual(model.linear2.weight.device, torch.device("meta"))
        # Buffers are not included in the offload by default, so are on the execution device
        device = torch.device(execution_device)
        self.assertEqual(model.batchnorm.running_mean.device, device)

        x = torch.randn(2, 3)
        output = model(x)
        self.assertEqual(output.device, device)

        # Removing hooks loads back the weights in the model.
        remove_hook_from_submodules(model)
        self.assertEqual(model.linear1.weight.device, torch.device("cpu"))
        self.assertEqual(model.batchnorm.weight.device, torch.device("cpu"))
        self.assertEqual(model.linear2.weight.device, torch.device("cpu"))

        # Now test with buffers included in the offload
        attach_align_device_hook(model, execution_device=execution_device, offload=True, offload_buffers=True)

        # Parameters have been offloaded, so on the meta device, buffers included
        self.assertEqual(model.linear1.weight.device, torch.device("meta"))
        self.assertEqual(model.batchnorm.weight.device, torch.device("meta"))
        self.assertEqual(model.linear2.weight.device, torch.device("meta"))
        self.assertEqual(model.batchnorm.running_mean.device, torch.device("meta"))

        x = torch.randn(2, 3)
        output = model(x)
        self.assertEqual(output.device, device)

        # Removing hooks loads back the weights in the model.
        remove_hook_from_submodules(model)
        self.assertEqual(model.linear1.weight.device, torch.device("cpu"))
        self.assertEqual(model.batchnorm.weight.device, torch.device("cpu"))
        self.assertEqual(model.linear2.weight.device, torch.device("cpu"))
    def test_attach_align_device_hook_as_cpu_offload_with_weight_map(self):
        model = ModelForTest()
        # Everything is on CPU
        self.assertEqual(model.linear1.weight.device, torch.device("cpu"))
        self.assertEqual(model.batchnorm.weight.device, torch.device("cpu"))
        self.assertEqual(model.linear2.weight.device, torch.device("cpu"))

        # This will move each submodule on different devices
        execution_device = 0 if torch.cuda.is_available() else "cpu"
        attach_align_device_hook(
            model, execution_device=execution_device, offload=True, weights_map=model.state_dict()
        )

        # Parameters have been offloaded, so on the meta device
        self.assertEqual(model.linear1.weight.device, torch.device("meta"))
        self.assertEqual(model.batchnorm.weight.device, torch.device("meta"))
        self.assertEqual(model.linear2.weight.device, torch.device("meta"))
        # Buffers are not included in the offload by default, so are on the execution device
        device = torch.device(execution_device)
        self.assertEqual(model.batchnorm.running_mean.device, device)

        x = torch.randn(2, 3)
        output = model(x)
        self.assertEqual(output.device, device)

        # Removing hooks loads back the weights in the model.
        remove_hook_from_submodules(model)
        self.assertEqual(model.linear1.weight.device, torch.device("cpu"))
        self.assertEqual(model.batchnorm.weight.device, torch.device("cpu"))
        self.assertEqual(model.linear2.weight.device, torch.device("cpu"))

        # Now test with buffers included in the offload
        attach_align_device_hook(
            model,
            execution_device=execution_device,
            offload=True,
            weights_map=model.state_dict(),
            offload_buffers=True,
        )

        # Parameters have been offloaded, so on the meta device, buffers included
        self.assertEqual(model.linear1.weight.device, torch.device("meta"))
        self.assertEqual(model.batchnorm.weight.device, torch.device("meta"))
        self.assertEqual(model.linear2.weight.device, torch.device("meta"))
        self.assertEqual(model.batchnorm.running_mean.device, torch.device("meta"))

        x = torch.randn(2, 3)
        output = model(x)
        self.assertEqual(output.device, device)

        # Removing hooks loads back the weights in the model.
        remove_hook_from_submodules(model)
        self.assertEqual(model.linear1.weight.device, torch.device("cpu"))
        self.assertEqual(model.batchnorm.weight.device, torch.device("cpu"))
        self.assertEqual(model.linear2.weight.device, torch.device("cpu"))
'''Tokenization classes for BART (GPT-2-style byte-level BPE).'''
import json
import os
from functools import lru_cache
from typing import List, Optional, Tuple
import regex as re
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "vocab.json", "merges_file": "merges.txt"}

# See all BART models at https://huggingface.co/models?filter=bart
PRETRAINED_VOCAB_FILES_MAP = {
"vocab_file": {
"facebook/bart-base": "https://huggingface.co/facebook/bart-base/resolve/main/vocab.json",
"facebook/bart-large": "https://huggingface.co/facebook/bart-large/resolve/main/vocab.json",
"facebook/bart-large-mnli": "https://huggingface.co/facebook/bart-large-mnli/resolve/main/vocab.json",
"facebook/bart-large-cnn": "https://huggingface.co/facebook/bart-large-cnn/resolve/main/vocab.json",
"facebook/bart-large-xsum": "https://huggingface.co/facebook/bart-large-xsum/resolve/main/vocab.json",
"yjernite/bart_eli5": "https://huggingface.co/yjernite/bart_eli5/resolve/main/vocab.json",
},
"merges_file": {
"facebook/bart-base": "https://huggingface.co/facebook/bart-base/resolve/main/merges.txt",
"facebook/bart-large": "https://huggingface.co/facebook/bart-large/resolve/main/merges.txt",
"facebook/bart-large-mnli": "https://huggingface.co/facebook/bart-large-mnli/resolve/main/merges.txt",
"facebook/bart-large-cnn": "https://huggingface.co/facebook/bart-large-cnn/resolve/main/merges.txt",
"facebook/bart-large-xsum": "https://huggingface.co/facebook/bart-large-xsum/resolve/main/merges.txt",
"yjernite/bart_eli5": "https://huggingface.co/yjernite/bart_eli5/resolve/main/merges.txt",
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "facebook/bart-base": 1024,
    "facebook/bart-large": 1024,
    "facebook/bart-large-mnli": 1024,
    "facebook/bart-large-cnn": 1024,
    "facebook/bart-large-xsum": 1024,
    "yjernite/bart_eli5": 1024,
}
@lru_cache()
def bytes_to_unicode():
    bs = (
        list(range(ord("!"), ord("~") + 1)) + list(range(ord("¡"), ord("¬") + 1)) + list(range(ord("®"), ord("ÿ") + 1))
    )
    cs = bs[:]
    n = 0
    for b in range(2**8):
        if b not in bs:
            bs.append(b)
            cs.append(2**8 + n)
            n += 1
    cs = [chr(n) for n in cs]
    return dict(zip(bs, cs))
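

# What bytes_to_unicode builds: printable bytes map to themselves (e.g. b"!" -> "!"),
# while the 68 whitespace/control bytes are shifted into unused code points
# (e.g. byte 0 -> chr(256)), so every byte has a visible, reversible stand-in.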
def get_pairs(word):
    """Return the set of adjacent symbol pairs in a word, given as a tuple of symbols."""
    pairs = set()
    prev_char = word[0]
    for char in word[1:]:
        pairs.add((prev_char, char))
        prev_char = char
    return pairs
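

# Example: get_pairs(("h", "e", "l", "l", "o")) ->
# {("h", "e"), ("e", "l"), ("l", "l"), ("l", "o")}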
class BartTokenizer(PreTrainedTokenizer):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]

    def __init__(
        self, vocab_file, merges_file, errors="replace", bos_token="<s>", eos_token="</s>", sep_token="</s>",
        cls_token="<s>", unk_token="<unk>", pad_token="<pad>", mask_token="<mask>", add_prefix_space=False, **kwargs,
    ):
        bos_token = AddedToken(bos_token, lstrip=False, rstrip=False) if isinstance(bos_token, str) else bos_token
        eos_token = AddedToken(eos_token, lstrip=False, rstrip=False) if isinstance(eos_token, str) else eos_token
        sep_token = AddedToken(sep_token, lstrip=False, rstrip=False) if isinstance(sep_token, str) else sep_token
        cls_token = AddedToken(cls_token, lstrip=False, rstrip=False) if isinstance(cls_token, str) else cls_token
        unk_token = AddedToken(unk_token, lstrip=False, rstrip=False) if isinstance(unk_token, str) else unk_token
        pad_token = AddedToken(pad_token, lstrip=False, rstrip=False) if isinstance(pad_token, str) else pad_token

        # Mask token behave like a normal word, i.e. include the space before it
        mask_token = AddedToken(mask_token, lstrip=True, rstrip=False) if isinstance(mask_token, str) else mask_token

        super().__init__(
            errors=errors, bos_token=bos_token, eos_token=eos_token, unk_token=unk_token, sep_token=sep_token,
            cls_token=cls_token, pad_token=pad_token, mask_token=mask_token, add_prefix_space=add_prefix_space, **kwargs,
        )

        with open(vocab_file, encoding="utf-8") as vocab_handle:
            self.encoder = json.load(vocab_handle)
        self.decoder = {v: k for k, v in self.encoder.items()}
        self.errors = errors  # how to handle errors in decoding
        self.byte_encoder = bytes_to_unicode()
        self.byte_decoder = {v: k for k, v in self.byte_encoder.items()}
        with open(merges_file, encoding="utf-8") as merges_handle:
            bpe_merges = merges_handle.read().split("\n")[1:-1]
        bpe_merges = [tuple(merge.split()) for merge in bpe_merges]
        self.bpe_ranks = dict(zip(bpe_merges, range(len(bpe_merges))))
        self.cache = {}
        self.add_prefix_space = add_prefix_space

        # Should have added re.IGNORECASE so BPE merges can happen for capitalized versions of contractions
        self.pat = re.compile(r"'s|'t|'re|'ve|'m|'ll|'d| ?\p{L}+| ?\p{N}+| ?[^\s\p{L}\p{N}]+|\s+(?!\S)|\s+")
    @property
    def vocab_size(self):
        return len(self.encoder)

    def get_vocab(self):
        return dict(self.encoder, **self.added_tokens_encoder)
    def bpe(self, token):
        if token in self.cache:
            return self.cache[token]
        word = tuple(token)
        pairs = get_pairs(word)

        if not pairs:
            return token

        while True:
            bigram = min(pairs, key=lambda pair: self.bpe_ranks.get(pair, float("inf")))
            if bigram not in self.bpe_ranks:
                break
            first, second = bigram
            new_word = []
            i = 0
            while i < len(word):
                try:
                    j = word.index(first, i)
                except ValueError:
                    new_word.extend(word[i:])
                    break
                else:
                    new_word.extend(word[i:j])
                    i = j

                if word[i] == first and i < len(word) - 1 and word[i + 1] == second:
                    new_word.append(first + second)
                    i += 2
                else:
                    new_word.append(word[i])
                    i += 1
            new_word = tuple(new_word)
            word = new_word
            if len(word) == 1:
                break
            else:
                pairs = get_pairs(word)
        word = " ".join(word)
        self.cache[token] = word
        return word
    def _tokenize(self, text):
        bpe_tokens = []
        for token in re.findall(self.pat, text):
            token = "".join(
                self.byte_encoder[b] for b in token.encode("utf-8")
            )  # Maps all our bytes to unicode strings, avoiding control tokens of the BPE (spaces in our case)
            bpe_tokens.extend(bpe_token for bpe_token in self.bpe(token).split(" "))
        return bpe_tokens

    def _convert_token_to_id(self, token):
        return self.encoder.get(token, self.encoder.get(self.unk_token))

    def _convert_id_to_token(self, index):
        return self.decoder.get(index)

    def convert_tokens_to_string(self, tokens):
        text = "".join(tokens)
        text = bytearray([self.byte_decoder[c] for c in text]).decode("utf-8", errors=self.errors)
        return text
    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None):
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )
        merge_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["merges_file"]
        )

        with open(vocab_file, "w", encoding="utf-8") as f:
            f.write(json.dumps(self.encoder, indent=2, sort_keys=True, ensure_ascii=False) + "\n")

        index = 0
        with open(merge_file, "w", encoding="utf-8") as writer:
            writer.write("#version: 0.2\n")
            for bpe_tokens, token_index in sorted(self.bpe_ranks.items(), key=lambda kv: kv[1]):
                if index != token_index:
                    logger.warning(
                        f"Saving vocabulary to {merge_file}: BPE merge indices are not consecutive."
                        " Please check that the tokenizer is not corrupted!"
                    )
                    index = token_index
                writer.write(" ".join(bpe_tokens) + "\n")
                index += 1

        return vocab_file, merge_file
    def build_inputs_with_special_tokens(self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None):
        if token_ids_1 is None:
            return [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
        cls = [self.cls_token_id]
        sep = [self.sep_token_id]
        return cls + token_ids_0 + sep + sep + token_ids_1 + sep

    def get_special_tokens_mask(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None, already_has_special_tokens: bool = False
    ):
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True
            )
        if token_ids_1 is None:
            return [1] + ([0] * len(token_ids_0)) + [1]
        return [1] + ([0] * len(token_ids_0)) + [1, 1] + ([0] * len(token_ids_1)) + [1]

    def create_token_type_ids_from_sequences(self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None):
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]

        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep) * [0]

    def prepare_for_tokenization(self, text, is_split_into_words=False, **kwargs):
        add_prefix_space = kwargs.pop("add_prefix_space", self.add_prefix_space)
        if (is_split_into_words or add_prefix_space) and (len(text) > 0 and not text[0].isspace()):
            text = " " + text
        return (text, kwargs)
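

# Usage sketch (checkpoint name from the map above):
#
#     tokenizer = BartTokenizer.from_pretrained("facebook/bart-base")
#     tokenizer("Hello world")["input_ids"]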
import logging
import os
from .state import PartialState
class MultiProcessAdapter(logging.LoggerAdapter):
    @staticmethod
    def _should_log(main_process_only):
        state = PartialState()
        return not main_process_only or (main_process_only and state.is_main_process)

    def log(self, level, msg, *args, **kwargs):
        if PartialState._shared_state == {}:
            raise RuntimeError(
                "You must initialize the accelerate state by calling either `PartialState()` or `Accelerator()` before using the logging utility."
            )
        main_process_only = kwargs.pop("main_process_only", True)
        in_order = kwargs.pop("in_order", False)

        if self.isEnabledFor(level):
            if self._should_log(main_process_only):
                msg, kwargs = self.process(msg, kwargs)
                self.logger.log(level, msg, *args, **kwargs)
            elif in_order:
                state = PartialState()
                for i in range(state.num_processes):
                    if i == state.process_index:
                        msg, kwargs = self.process(msg, kwargs)
                        self.logger.log(level, msg, *args, **kwargs)
                    state.wait_for_everyone()
def get_logger(name: str, log_level: str = None):
    if log_level is None:
        log_level = os.environ.get("ACCELERATE_LOG_LEVEL", None)
    logger = logging.getLogger(name)
    if log_level is not None:
        logger.setLevel(log_level.upper())
        logger.root.setLevel(log_level.upper())
    return MultiProcessAdapter(logger, {})
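

# Usage sketch: with accelerate state initialized, only the main process logs by
# default; pass main_process_only=False to log on every process, or in_order=True
# to serialize output across ranks.
#
#     logger = get_logger(__name__)
#     logger.info("main process only")
#     logger.info("all processes", main_process_only=False)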
def solution(max_base: int = 10, max_power: int = 22) -> int:
    bases = range(1, max_base)
    powers = range(1, max_power)
    return sum(
        1 for power in powers for base in bases if len(str(base**power)) == power
    )
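

# This counts n-digit positive integers that are also an nth power (Project Euler
# problem 63, "Powerful digit counts"). For example, 16807 = 7**5 has exactly
# 5 digits, so it is counted. Bases >= 10 can never qualify, since 10**n already
# has n + 1 digits, which is why base only ranges over 1..9.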
if __name__ == "__main__":
    print(f"{solution(10, 22) = }")
from decimal import Decimal, getcontext
from math import ceil, factorial
def pi(precision: int) -> str:
    """Compute pi to the given precision using the Chudnovsky algorithm."""
    if not isinstance(precision, int):
        raise TypeError("Undefined for non-integers")
    elif precision < 1:
        raise ValueError("Undefined for non-natural numbers")

    getcontext().prec = precision
    num_iterations = ceil(precision / 14)
    constant_term = 426880 * Decimal(10005).sqrt()
    exponential_term = 1
    linear_term = 13591409
    partial_sum = Decimal(linear_term)
    for k in range(1, num_iterations):
        multinomial_term = factorial(6 * k) // (factorial(3 * k) * factorial(k) ** 3)
        linear_term += 545140134
        exponential_term *= -262537412640768000
        partial_sum += Decimal(multinomial_term * linear_term) / exponential_term
    return str(constant_term / partial_sum)[:-1]
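

# Each Chudnovsky term contributes roughly 14 additional correct decimal digits,
# which is why the loop runs ceil(precision / 14) times; the Decimal working
# precision is raised to match via getcontext().prec.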
if __name__ == "__main__":
    n = 50
    print(f"The first {n} digits of pi is: {pi(n)}")
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)

SPIECE_UNDERLINE = "▁"

VOCAB_FILES_NAMES = {"vocab_file": "spiece.model"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "google/reformer-crime-and-punishment": (
            "https://huggingface.co/google/reformer-crime-and-punishment/resolve/main/spiece.model"
        )
    }
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "google/reformer-crime-and-punishment": 524288,
}
class ReformerTokenizer(PreTrainedTokenizer):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]

    def __init__(
        self, vocab_file, eos_token="</s>", unk_token="<unk>", additional_special_tokens=[],
        sp_model_kwargs: Optional[Dict[str, Any]] = None, **kwargs,
    ) -> None:
        self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs
        super().__init__(
            eos_token=eos_token, unk_token=unk_token, additional_special_tokens=additional_special_tokens,
            sp_model_kwargs=self.sp_model_kwargs, **kwargs,
        )
        self.vocab_file = vocab_file
        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(vocab_file)
    @property
    def vocab_size(self):
        return self.sp_model.get_piece_size()

    def get_vocab(self) -> Dict[str, int]:
        vocab = {self.convert_ids_to_tokens(i): i for i in range(self.vocab_size)}
        vocab.update(self.added_tokens_encoder)
        return vocab

    def __getstate__(self):
        state = self.__dict__.copy()
        state["sp_model"] = None
        return state

    def __setstate__(self, d):
        self.__dict__ = d

        # for backward compatibility
        if not hasattr(self, "sp_model_kwargs"):
            self.sp_model_kwargs = {}

        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(self.vocab_file)

    def _tokenize(self, text: str) -> List[str]:
        return self.sp_model.encode(text, out_type=str)

    def _convert_token_to_id(self, token):
        return self.sp_model.piece_to_id(token)

    def _convert_id_to_token(self, index):
        if index < self.sp_model.get_piece_size():
            token = self.sp_model.IdToPiece(index)
        return token

    def convert_tokens_to_string(self, tokens):
        current_sub_tokens = []
        out_string = ""
        for token in tokens:
            # make sure that special tokens are not decoded using sentencepiece model
            if token in self.all_special_tokens:
                out_string += self.sp_model.decode(current_sub_tokens) + token
                current_sub_tokens = []
            else:
                current_sub_tokens.append(token)
        out_string += self.sp_model.decode(current_sub_tokens)
        return out_string.strip()

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )

        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file) and os.path.isfile(self.vocab_file):
            copyfile(self.vocab_file, out_vocab_file)
        elif not os.path.isfile(self.vocab_file):
            with open(out_vocab_file, "wb") as fi:
                content_spiece_model = self.sp_model.serialized_model_proto()
                fi.write(content_spiece_model)

        return (out_vocab_file,)
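

# Usage sketch (checkpoint from the map above):
#
#     tokenizer = ReformerTokenizer.from_pretrained("google/reformer-crime-and-punishment")
#     ids = tokenizer("A sentence to encode.")["input_ids"]
#     tokenizer.decode(ids)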
from pathlib import Path
import fire
def minify(src_dir: str, dest_dir: str, n: int):
    """Write the first `n` lines of each file in `src_dir` to `dest_dir`."""
    src_dir = Path(src_dir)
    dest_dir = Path(dest_dir)
    dest_dir.mkdir(exist_ok=True)
    for path in src_dir.iterdir():
        new = [x.rstrip() for x in list(path.open().readlines())][:n]
        dest_path = dest_dir.joinpath(path.name)
        print(dest_path)
        dest_path.open("w").write("\n".join(new))
if __name__ == "__main__":
fire.Fire(minify)
"""simple docstring"""
import flax.linen as nn
import jax
import jax.numpy as jnp
class _lowerCamelCase ( nn.Module ):
UpperCAmelCase_ = 42
UpperCAmelCase_ = jnp.floataa
def snake_case_ (self ) -> Optional[int]:
UpperCamelCase = nn.Conv(
self.out_channels , kernel_size=(3, 3) , strides=(1, 1) , padding=((1, 1), (1, 1)) , dtype=self.dtype , )
def __call__(self , __a ) -> Union[str, Any]:
UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase = hidden_states.shape
UpperCamelCase = jax.image.resize(
__a , shape=(batch, height * 2, width * 2, channels) , method="nearest" , )
UpperCamelCase = self.conv(__a )
return hidden_states
class _lowerCamelCase ( nn.Module ):
UpperCAmelCase_ = 42
UpperCAmelCase_ = jnp.floataa
def snake_case_ (self ) -> Union[str, Any]:
UpperCamelCase = nn.Conv(
self.out_channels , kernel_size=(3, 3) , strides=(2, 2) , padding=((1, 1), (1, 1)) , dtype=self.dtype , )
def __call__(self , __a ) -> List[str]:
# pad = ((0, 0), (0, 1), (0, 1), (0, 0)) # pad height and width dim
# hidden_states = jnp.pad(hidden_states, pad_width=pad)
UpperCamelCase = self.conv(__a )
return hidden_states
class FlaxResnetBlock2D(nn.Module):
    in_channels: int
    out_channels: int = None
    dropout_prob: float = 0.0
    use_nin_shortcut: bool = None
    dtype: jnp.dtype = jnp.float32

    def setup(self):
        out_channels = self.in_channels if self.out_channels is None else self.out_channels

        self.norm1 = nn.GroupNorm(num_groups=32, epsilon=1e-5)
        self.conv1 = nn.Conv(
            out_channels,
            kernel_size=(3, 3),
            strides=(1, 1),
            padding=((1, 1), (1, 1)),
            dtype=self.dtype,
        )

        self.time_emb_proj = nn.Dense(out_channels, dtype=self.dtype)

        self.norm2 = nn.GroupNorm(num_groups=32, epsilon=1e-5)
        self.dropout = nn.Dropout(self.dropout_prob)
        self.conv2 = nn.Conv(
            out_channels,
            kernel_size=(3, 3),
            strides=(1, 1),
            padding=((1, 1), (1, 1)),
            dtype=self.dtype,
        )

        use_nin_shortcut = self.in_channels != out_channels if self.use_nin_shortcut is None else self.use_nin_shortcut

        self.conv_shortcut = None
        if use_nin_shortcut:
            self.conv_shortcut = nn.Conv(
                out_channels,
                kernel_size=(1, 1),
                strides=(1, 1),
                padding="VALID",
                dtype=self.dtype,
            )

    def __call__(self, hidden_states, temb, deterministic=True):
        residual = hidden_states
        hidden_states = self.norm1(hidden_states)
        hidden_states = nn.swish(hidden_states)
        hidden_states = self.conv1(hidden_states)

        temb = self.time_emb_proj(nn.swish(temb))
        temb = jnp.expand_dims(jnp.expand_dims(temb, 1), 1)
        hidden_states = hidden_states + temb

        hidden_states = self.norm2(hidden_states)
        hidden_states = nn.swish(hidden_states)
        hidden_states = self.dropout(hidden_states, deterministic)
        hidden_states = self.conv2(hidden_states)

        if self.conv_shortcut is not None:
            residual = self.conv_shortcut(residual)

        return hidden_states + residual
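

# Minimal usage sketch (shapes assumed, NHWC layout as in __call__ above):
#
#     block = FlaxResnetBlock2D(in_channels=32, out_channels=64)
#     x = jnp.ones((1, 8, 8, 32))    # image features
#     t = jnp.ones((1, 128))         # timestep embedding
#     params = block.init(jax.random.PRNGKey(0), x, t)
#     y = block.apply(params, x, t)  # -> (1, 8, 8, 64)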
"""simple docstring"""
from sklearn.metrics import fa_score, matthews_corrcoef
import datasets
from .record_evaluation import evaluate as evaluate_record
lowerCAmelCase__ = '''\
@article{wang2019superglue,
title={SuperGLUE: A Stickier Benchmark for General-Purpose Language Understanding Systems},
author={Wang, Alex and Pruksachatkun, Yada and Nangia, Nikita and Singh, Amanpreet and Michael, Julian and Hill, Felix and Levy, Omer and Bowman, Samuel R},
journal={arXiv preprint arXiv:1905.00537},
year={2019}
}
'''
_DESCRIPTION = '''\
SuperGLUE (https://super.gluebenchmark.com/) is a new benchmark styled after
GLUE with a new set of more difficult language understanding tasks, improved
resources, and a new public leaderboard.
'''
_KWARGS_DESCRIPTION = '''
Compute SuperGLUE evaluation metric associated to each SuperGLUE dataset.
Args:
predictions: list of predictions to score. Depending on the SuperGlUE subset:
- for \'record\': list of question-answer dictionaries with the following keys:
- \'idx\': index of the question as specified by the dataset
- \'prediction_text\': the predicted answer text
- for \'multirc\': list of question-answer dictionaries with the following keys:
- \'idx\': index of the question-answer pair as specified by the dataset
- \'prediction\': the predicted answer label
- otherwise: list of predicted labels
references: list of reference labels. Depending on the SuperGLUE subset:
- for \'record\': list of question-answers dictionaries with the following keys:
- \'idx\': index of the question as specified by the dataset
- \'answers\': list of possible answers
- otherwise: list of reference labels
Returns: depending on the SuperGLUE subset:
- for \'record\':
- \'exact_match\': Exact match between answer and gold answer
- \'f1\': F1 score
- for \'multirc\':
- \'exact_match\': Exact match between answer and gold answer
- \'f1_m\': Per-question macro-F1 score
- \'f1_a\': Average F1 score over all answers
- for \'axb\':
\'matthews_correlation\': Matthew Correlation
- for \'cb\':
- \'accuracy\': Accuracy
- \'f1\': F1 score
- for all others:
- \'accuracy\': Accuracy
Examples:
>>> super_glue_metric = datasets.load_metric(\'super_glue\', \'copa\') # any of ["copa", "rte", "wic", "wsc", "wsc.fixed", "boolq", "axg"]
>>> predictions = [0, 1]
>>> references = [0, 1]
>>> results = super_glue_metric.compute(predictions=predictions, references=references)
>>> print(results)
{\'accuracy\': 1.0}
>>> super_glue_metric = datasets.load_metric(\'super_glue\', \'cb\')
>>> predictions = [0, 1]
>>> references = [0, 1]
>>> results = super_glue_metric.compute(predictions=predictions, references=references)
>>> print(results)
{\'accuracy\': 1.0, \'f1\': 1.0}
>>> super_glue_metric = datasets.load_metric(\'super_glue\', \'record\')
>>> predictions = [{\'idx\': {\'passage\': 0, \'query\': 0}, \'prediction_text\': \'answer\'}]
>>> references = [{\'idx\': {\'passage\': 0, \'query\': 0}, \'answers\': [\'answer\', \'another_answer\']}]
>>> results = super_glue_metric.compute(predictions=predictions, references=references)
>>> print(results)
{\'exact_match\': 1.0, \'f1\': 1.0}
>>> super_glue_metric = datasets.load_metric(\'super_glue\', \'multirc\')
>>> predictions = [{\'idx\': {\'answer\': 0, \'paragraph\': 0, \'question\': 0}, \'prediction\': 0}, {\'idx\': {\'answer\': 1, \'paragraph\': 2, \'question\': 3}, \'prediction\': 1}]
>>> references = [0, 1]
>>> results = super_glue_metric.compute(predictions=predictions, references=references)
>>> print(results)
{\'exact_match\': 1.0, \'f1_m\': 1.0, \'f1_a\': 1.0}
>>> super_glue_metric = datasets.load_metric(\'super_glue\', \'axb\')
>>> references = [0, 1]
>>> predictions = [0, 1]
>>> results = super_glue_metric.compute(predictions=predictions, references=references)
>>> print(results)
{\'matthews_correlation\': 1.0}
'''
def simple_accuracy(preds, labels):
    return float((preds == labels).mean())


def acc_and_f1(preds, labels, f1_avg="binary"):
    acc = simple_accuracy(preds, labels)
    f1 = float(f1_score(y_true=labels, y_pred=preds, average=f1_avg))
    return {
        "accuracy": acc,
        "f1": f1,
    }
def evaluate_multirc(ids_preds, labels):
    question_map = {}
    for id_pred, label in zip(ids_preds, labels):
        question_id = f"{id_pred['idx']['paragraph']}-{id_pred['idx']['question']}"
        pred = id_pred["prediction"]
        if question_id in question_map:
            question_map[question_id].append((pred, label))
        else:
            question_map[question_id] = [(pred, label)]
    f1s, ems = [], []
    for question, preds_labels in question_map.items():
        question_preds, question_labels = zip(*preds_labels)
        f1 = f1_score(y_true=question_labels, y_pred=question_preds, average="macro")
        f1s.append(f1)
        em = int(sum(pred == label for pred, label in preds_labels) == len(preds_labels))
        ems.append(em)
    f1_m = float(sum(f1s) / len(f1s))
    em = sum(ems) / len(ems)
    f1_a = float(f1_score(y_true=labels, y_pred=[id_pred["prediction"] for id_pred in ids_preds]))
    return {"exact_match": em, "f1_m": f1_m, "f1_a": f1_a}
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class SuperGlue(datasets.Metric):
    def _info(self):
if self.config_name not in [
"boolq",
"cb",
"copa",
"multirc",
"record",
"rte",
"wic",
"wsc",
"wsc.fixed",
"axb",
"axg",
]:
raise KeyError(
"You should supply a configuration name selected in "
"[\"boolq\", \"cb\", \"copa\", \"multirc\", \"record\", \"rte\", \"wic\", \"wsc\", \"wsc.fixed\", \"axb\", \"axg\",]" )
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(self._get_feature_types() ) , codebase_urls=[] , reference_urls=[] , format="numpy" if not self.config_name == "record" and not self.config_name == "multirc" else None , )
    def _get_feature_types(self):
if self.config_name == "record":
return {
"predictions": {
"idx": {
"passage": datasets.Value("int64" ),
"query": datasets.Value("int64" ),
},
"prediction_text": datasets.Value("string" ),
},
"references": {
"idx": {
"passage": datasets.Value("int64" ),
"query": datasets.Value("int64" ),
},
"answers": datasets.Sequence(datasets.Value("string" ) ),
},
}
elif self.config_name == "multirc":
return {
"predictions": {
"idx": {
"answer": datasets.Value("int64" ),
"paragraph": datasets.Value("int64" ),
"question": datasets.Value("int64" ),
},
"prediction": datasets.Value("int64" ),
},
"references": datasets.Value("int64" ),
}
else:
return {
"predictions": datasets.Value("int64" ),
"references": datasets.Value("int64" ),
}
    def _compute(self, predictions, references):
        if self.config_name == "axb":
            return {"matthews_correlation": matthews_corrcoef(references, predictions)}
        elif self.config_name == "cb":
            return acc_and_f1(predictions, references, f1_avg="macro")
        elif self.config_name == "record":
            dataset = [
                {
                    "qas": [
                        {"id": ref["idx"]["query"], "answers": [{"text": ans} for ans in ref["answers"]]}
                        for ref in references
                    ]
                }
            ]
            predictions = {pred["idx"]["query"]: pred["prediction_text"] for pred in predictions}
            return evaluate_record(dataset, predictions)[0]
        elif self.config_name == "multirc":
            return evaluate_multirc(predictions, references)
        elif self.config_name in ["copa", "rte", "wic", "wsc", "wsc.fixed", "boolq", "axg"]:
            return {"accuracy": simple_accuracy(predictions, references)}
        else:
            raise KeyError(
                "You should supply a configuration name selected in "
                '["boolq", "cb", "copa", "multirc", "record", "rte", "wic", "wsc", "wsc.fixed", "axb", "axg",]'
            )
"""simple docstring"""
import gc
import unittest
import numpy as np
import torch
from diffusers import AutoencoderKL, DDIMScheduler, DiTPipeline, DPMSolverMultistepScheduler, Transformer2DModel
from diffusers.utils import is_xformers_available, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..pipeline_params import (
CLASS_CONDITIONED_IMAGE_GENERATION_BATCH_PARAMS,
CLASS_CONDITIONED_IMAGE_GENERATION_PARAMS,
)
from ..test_pipelines_common import PipelineTesterMixin
enable_full_determinism()
class DiTPipelineFastTests(PipelineTesterMixin, unittest.TestCase):
    pipeline_class = DiTPipeline
    params = CLASS_CONDITIONED_IMAGE_GENERATION_PARAMS
    required_optional_params = PipelineTesterMixin.required_optional_params - {
        "latents",
        "num_images_per_prompt",
        "callback",
        "callback_steps",
    }
    batch_params = CLASS_CONDITIONED_IMAGE_GENERATION_BATCH_PARAMS
    test_cpu_offload = False
    def get_dummy_components(self):
        torch.manual_seed(0)
        transformer = Transformer2DModel(
            sample_size=16, num_layers=2, patch_size=4, attention_head_dim=8, num_attention_heads=2,
            in_channels=4, out_channels=8, attention_bias=True, activation_fn="gelu-approximate",
            num_embeds_ada_norm=1000, norm_type="ada_norm_zero", norm_elementwise_affine=False,
        )
        vae = AutoencoderKL()
        scheduler = DDIMScheduler()
        components = {"transformer": transformer.eval(), "vae": vae.eval(), "scheduler": scheduler}
        return components
    def get_dummy_inputs(self, device, seed=0):
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            "class_labels": [1],
            "generator": generator,
            "num_inference_steps": 2,
            "output_type": "numpy",
        }
        return inputs

    def test_inference(self):
        device = "cpu"

        components = self.get_dummy_components()
        pipe = self.pipeline_class(**components)
        pipe.to(device)
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs(device)
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1]

        self.assertEqual(image.shape, (1, 16, 16, 3))
        expected_slice = np.array([0.2946, 0.6601, 0.4329, 0.3296, 0.4144, 0.5319, 0.7273, 0.5013, 0.4457])
        max_diff = np.abs(image_slice.flatten() - expected_slice).max()
        self.assertLessEqual(max_diff, 1e-3)

    def test_inference_batch_single_identical(self):
        self._test_inference_batch_single_identical(relax_max_difference=True, expected_max_diff=1e-3)

    @unittest.skipIf(
        torch_device != "cuda" or not is_xformers_available(),
        reason="XFormers attention is only available with CUDA and `xformers` installed",
    )
    def test_xformers_attention_forwardGenerator_pass(self):
        self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=1e-3)
@require_torch_gpu
@slow
class DiTPipelineIntegrationTests(unittest.TestCase):
    def tearDown(self):
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def test_dit_256(self):
        generator = torch.manual_seed(0)

        pipe = DiTPipeline.from_pretrained("facebook/DiT-XL-2-256")
        pipe.to("cuda")

        words = ["vase", "umbrella", "white shark", "white wolf"]
        ids = pipe.get_label_ids(words)

        images = pipe(ids, generator=generator, num_inference_steps=40, output_type="np").images

        for word, image in zip(words, images):
            expected_image = load_numpy(
                f"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/dit/{word}.npy"
            )
            assert np.abs((expected_image - image).max()) < 1e-2

    def test_dit_512(self):
        pipe = DiTPipeline.from_pretrained("facebook/DiT-XL-2-512")
        pipe.scheduler = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config)
        pipe.to("cuda")

        words = ["vase", "umbrella"]
        ids = pipe.get_label_ids(words)

        generator = torch.manual_seed(0)
        images = pipe(ids, generator=generator, num_inference_steps=25, output_type="np").images

        for word, image in zip(words, images):
            expected_image = load_numpy(
                "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
                f"/dit/{word}_512.npy"
            )
            assert np.abs((expected_image - image).max()) < 1e-1
"""simple docstring"""
import logging
import sys
from dataclasses import dataclass, field
from typing import Any, Dict, List, Optional, Union
import librosa
import torch
from datasets import DatasetDict, load_dataset
from packaging import version
from torch import nn
from transformers import (
HfArgumentParser,
Trainer,
TrainingArguments,
WavaVecaConfig,
WavaVecaFeatureExtractor,
WavaVecaForPreTraining,
is_apex_available,
trainer_utils,
)
from transformers.models.wavaveca.modeling_wavaveca import _compute_mask_indices
if is_apex_available():
from apex import amp
if version.parse(version.parse(torch.__version__).base_version) >= version.parse("1.6"):
SCREAMING_SNAKE_CASE = True
from torch.cuda.amp import autocast
SCREAMING_SNAKE_CASE = logging.getLogger(__name__)
@dataclass
class ModelArguments:
    model_name_or_path: str = field(
        metadata={"help": "Path to pretrained model or model identifier from huggingface.co/models"}
    )
    cache_dir: Optional[str] = field(
        default=None,
        metadata={"help": "Where do you want to store the pretrained models downloaded from huggingface.co"},
    )
    freeze_feature_extractor: Optional[bool] = field(
        default=True, metadata={"help": "Whether to freeze the feature extractor layers of the model."}
    )
    verbose_logging: Optional[bool] = field(
        default=False,
        metadata={"help": "Whether to log verbose messages or not."},
    )
    max_gumbel_temperature: Optional[float] = field(
        default=2.0, metadata={"help": "Maximum temperature for gumbel softmax."}
    )
    min_gumbel_temperature: Optional[float] = field(
        default=0.5, metadata={"help": "Minimum temperature for gumbel softmax."}
    )
    gumbel_temperature_decay: Optional[float] = field(
        default=0.999995, metadata={"help": "Decay of gumbel temperature during training."}
    )
def configure_logger(model_args: ModelArguments, training_args: TrainingArguments):
    logging.basicConfig(
        format="%(asctime)s - %(levelname)s - %(name)s - %(message)s",
        datefmt="%m/%d/%Y %H:%M:%S",
        handlers=[logging.StreamHandler(sys.stdout)],
    )
    logging_level = logging.WARNING
    if model_args.verbose_logging:
        logging_level = logging.DEBUG
    elif trainer_utils.is_main_process(training_args.local_rank):
        logging_level = logging.INFO
    logger.setLevel(logging_level)
@dataclass
class DataTrainingArguments:
    dataset_name: str = field(
        default=None, metadata={"help": "The name of the dataset to use (via the datasets library)."} )
    dataset_config_name: Optional[str] = field(
        default=None, metadata={"help": "The configuration name of the dataset to use (via the datasets library)."} )
    train_split_name: Optional[str] = field(
        default="train", metadata={
            "help": "The name of the training data set split to use (via the datasets library). Defaults to 'train'"
        }, )
    validation_split_name: Optional[str] = field(
        default="validation", metadata={
            "help": (
                "The name of the validation data set split to use (via the datasets library). Defaults to 'validation'"
            )
        }, )
    speech_file_column: Optional[str] = field(
        default="file", metadata={"help": "Column in the dataset that contains speech file path. Defaults to 'file'"}, )
    overwrite_cache: bool = field(
        default=False, metadata={"help": "Overwrite the cached preprocessed datasets or not."} )
    validation_split_percentage: Optional[int] = field(
        default=1, metadata={
            "help": "The percentage of the train set used as validation set in case there's no validation split"
        }, )
    preprocessing_num_workers: Optional[int] = field(
        default=None, metadata={"help": "The number of processes to use for the preprocessing."}, )
    max_duration_in_seconds: Optional[float] = field(
        default=20.0, metadata={"help": "Filter audio files that are longer than `max_duration_in_seconds` seconds"} )
@dataclass
class DataCollatorForWavaVecaPretraining:
    model: WavaVecaForPreTraining
    feature_extractor: WavaVecaFeatureExtractor
    padding: Union[bool, str] = "longest"
    pad_to_multiple_of: Optional[int] = None
    max_length: Optional[int] = None
    def __call__(self, features: List[Dict[str, Union[List[int], torch.Tensor]]]) -> Dict[str, torch.Tensor]:
        # reformat list to dict and pad to pytorch tensors
        batch = self.feature_extractor.pad(
            features, max_length=self.max_length, padding=self.padding, pad_to_multiple_of=self.pad_to_multiple_of, return_tensors="pt", )
        mask_indices_seq_length = self.model._get_feat_extract_output_lengths(batch["input_values"].shape[-1])
        batch_size = batch["input_values"].shape[0]
        # make sure that no loss is computed on padded inputs
        if batch["attention_mask"] is not None:
            # compute real output lengths according to convolution formula
            output_lengths = self.model._get_feat_extract_output_lengths(batch["attention_mask"].sum(-1)).to(
                torch.long )
            attention_mask = torch.zeros(
                (batch_size, mask_indices_seq_length), dtype=torch.long, device=batch["input_values"].device )
            # these two operations make sure that all values
            # before the output lengths indices are attended to
            attention_mask[(torch.arange(attention_mask.shape[0], device=attention_mask.device), output_lengths - 1)] = 1
            attention_mask = attention_mask.flip([-1]).cumsum(-1).flip([-1]).bool()
        # sample randomly masked indices
        batch["mask_time_indices"] = _compute_mask_indices(
            (batch_size, mask_indices_seq_length), self.model.config.mask_time_prob, self.model.config.mask_time_length, attention_mask=attention_mask, min_masks=2, )
        return batch
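# A standalone toy demonstration (separate from the training logic above, shapes
# assumed) of the flip/cumsum/flip trick used in the collator: starting from a
# single 1 at each example's last valid index, it yields True for every position
# up to and including that index.
def _demo_flip_cumsum_mask():
    lengths = torch.tensor([3, 5])                      # hypothetical feature lengths
    mask = torch.zeros((2, 6), dtype=torch.long)
    mask[(torch.arange(2), lengths - 1)] = 1            # mark the last valid index
    mask = mask.flip([-1]).cumsum(-1).flip([-1]).bool()
    return mask  # [[T, T, T, F, F, F], [T, T, T, T, T, F]]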
class WavaVecaPreTrainer(Trainer):
    def __init__(self, *args, max_gumbel_temp=1, min_gumbel_temp=0, gumbel_temp_decay=1.0, **kwargs):
        super().__init__(*args, **kwargs)
        self.num_update_step = 0
        self.max_gumbel_temp = max_gumbel_temp
        self.min_gumbel_temp = min_gumbel_temp
        self.gumbel_temp_decay = gumbel_temp_decay
    def training_step(self, model: nn.Module, inputs: Dict[str, Union[torch.Tensor, Any]]) -> torch.Tensor:
        # Perform a training step on a batch of inputs, then decay the gumbel temperature.
        model.train()
        inputs = self._prepare_inputs(inputs)
        if self.use_amp:
            with autocast():
                loss = self.compute_loss(model, inputs)
        else:
            loss = self.compute_loss(model, inputs)
        if self.args.n_gpu > 1 or self.deepspeed:
            if model.module.config.ctc_loss_reduction == "mean":
                loss = loss.mean()
            elif model.module.config.ctc_loss_reduction == "sum":
                loss = loss.sum() / (inputs["mask_time_indices"]).sum()
            else:
                raise ValueError(f"{model.config.ctc_loss_reduction} is not valid. Choose one of ['mean', 'sum']")
        if self.args.gradient_accumulation_steps > 1:
            loss = loss / self.args.gradient_accumulation_steps
        if self.use_amp:
            self.scaler.scale(loss).backward()
        elif self.use_apex:
            with amp.scale_loss(loss, self.optimizer) as scaled_loss:
                scaled_loss.backward()
        elif self.deepspeed:
            self.deepspeed.backward(loss)
        else:
            loss.backward()
self.num_update_step += 1
# make sure gumbel softmax temperature is decayed
if self.args.n_gpu > 1 or self.deepspeed:
model.module.set_gumbel_temperature(
max(self.max_gumbel_temp * self.gumbel_temp_decay**self.num_update_step , self.min_gumbel_temp ) )
else:
model.set_gumbel_temperature(
max(self.max_gumbel_temp * self.gumbel_temp_decay**self.num_update_step , self.min_gumbel_temp ) )
return loss.detach()
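# A standalone sketch of the temperature schedule applied above: exponential decay
# from the maximum toward the minimum, clamped at the minimum. Values illustrative.
def _demo_gumbel_temperature(step, max_temp=2.0, min_temp=0.5, decay=0.999995):
    return max(max_temp * decay**step, min_temp)  # e.g. step=100_000 -> ~1.213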
def main():
    # See all possible arguments in src/transformers/training_args.py
    # or by passing the --help flag to this script.
    # We now keep distinct sets of args, for a cleaner separation of concerns.
    parser = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments))
    model_args, data_args, training_args = parser.parse_args_into_dataclasses()
    configure_logger(model_args, training_args)
# Downloading and loading a dataset from the hub.
    datasets = load_dataset(data_args.dataset_name, data_args.dataset_config_name, cache_dir=model_args.cache_dir)
    if "validation" not in datasets.keys():
        # make sure only "validation" and "train" keys remain
        datasets = DatasetDict()
        datasets["validation"] = load_dataset(
            data_args.dataset_name, data_args.dataset_config_name, split=f"{data_args.train_split_name}[:{data_args.validation_split_percentage}%]", cache_dir=model_args.cache_dir, )
        datasets["train"] = load_dataset(
            data_args.dataset_name, data_args.dataset_config_name, split=f"{data_args.train_split_name}[{data_args.validation_split_percentage}%:]", cache_dir=model_args.cache_dir, )
    else:
        # make sure only "validation" and "train" keys remain
        datasets = DatasetDict()
        datasets["validation"] = load_dataset(
            data_args.dataset_name, data_args.dataset_config_name, split="validation", cache_dir=model_args.cache_dir, )
        datasets["train"] = load_dataset(
            data_args.dataset_name, data_args.dataset_config_name, split=f"{data_args.train_split_name}", cache_dir=model_args.cache_dir, )
# only normalized-inputs-training is supported
    feature_extractor = WavaVecaFeatureExtractor.from_pretrained(
        model_args.model_name_or_path, cache_dir=model_args.cache_dir, do_normalize=True )
    def prepare_dataset(batch):
        # check that all files have the correct sampling rate
        batch["speech"], batch["sampling_rate"] = librosa.load(batch[data_args.speech_file_column], sr=feature_extractor.sampling_rate)
        return batch
    # load audio files into numpy arrays
    vectorized_datasets = datasets.map(
        prepare_dataset, num_proc=data_args.preprocessing_num_workers, remove_columns=datasets["train"].column_names )
    # filter audio files that are too long
    vectorized_datasets = vectorized_datasets.filter(
        lambda data: len(data["speech"]) < int(data_args.max_duration_in_seconds * feature_extractor.sampling_rate) )
    def normalize(batch):
        return feature_extractor(batch["speech"], sampling_rate=feature_extractor.sampling_rate)
    # normalize and transform to `BatchFeatures`
    vectorized_datasets = vectorized_datasets.map(
        normalize, batched=True, num_proc=data_args.preprocessing_num_workers, load_from_cache_file=not data_args.overwrite_cache, remove_columns=vectorized_datasets["train"].column_names, )
# pretraining is only supported for "newer" stable layer norm architecture
# apply_spec_augment has to be True, mask_feature_prob has to be 0.0
    config = WavaVecaConfig.from_pretrained(
        model_args.model_name_or_path, cache_dir=model_args.cache_dir, gradient_checkpointing=training_args.gradient_checkpointing, )
    if not config.do_stable_layer_norm or config.feat_extract_norm != "layer":
        raise ValueError(
            "PreTraining is only supported for ``config.do_stable_layer_norm=True`` and"
            " ``config.feat_extract_norm='layer'" )
    model = WavaVecaForPreTraining(config)
    data_collator = DataCollatorForWavaVecaPretraining(model=model, feature_extractor=feature_extractor)
    trainer = WavaVecaPreTrainer(
        model=model, data_collator=data_collator, args=training_args, train_dataset=vectorized_datasets["train"], eval_dataset=vectorized_datasets["validation"], tokenizer=feature_extractor, max_gumbel_temp=model_args.max_gumbel_temperature, min_gumbel_temp=model_args.min_gumbel_temperature, gumbel_temp_decay=model_args.gumbel_temperature_decay, )
trainer.train()
if __name__ == "__main__":
main()
| 247 | 1 |
from ..utils import DummyObject, requires_backends
class TFGPTaTokenizer(metaclass=DummyObject):
    _backends = ["keras_nlp"]
def __init__( self , *UpperCAmelCase__ , **UpperCAmelCase__ ):
requires_backends(self , ["keras_nlp"] )
| 365 |
import argparse
import torch
from huggingface_hub import hf_hub_download
from transformers import AutoTokenizer, RobertaPreLayerNormConfig, RobertaPreLayerNormForMaskedLM
from transformers.utils import logging
logging.set_verbosity_info()
UpperCAmelCase_ : Union[str, Any] = logging.get_logger(__name__)
def convert_roberta_prelayernorm_checkpoint_to_pytorch(checkpoint_repo: str, pytorch_dump_folder_path: str):
    """Copy/paste/tweak the checkpoint's weights to our RobertaPreLayerNorm structure."""
    config = RobertaPreLayerNormConfig.from_pretrained(
        checkpoint_repo, architectures=["RobertaPreLayerNormForMaskedLM"] )
    # convert state_dict
    original_state_dict = torch.load(hf_hub_download(repo_id=checkpoint_repo, filename="pytorch_model.bin"))
    state_dict = {}
    for tensor_key, tensor_value in original_state_dict.items():
        # The transformer implementation gives the model a unique name, rather than overwriting 'roberta'
        if tensor_key.startswith("roberta."):
            tensor_key = "roberta_prelayernorm." + tensor_key[len("roberta."):]
        # The original implementation contains weights which are not used, remove them from the state_dict
        if tensor_key.endswith(".self.LayerNorm.weight") or tensor_key.endswith(".self.LayerNorm.bias"):
            continue
        state_dict[tensor_key] = tensor_value
    model = RobertaPreLayerNormForMaskedLM.from_pretrained(
        pretrained_model_name_or_path=None, config=config, state_dict=state_dict )
    model.save_pretrained(pytorch_dump_folder_path)
    # convert tokenizer
    tokenizer = AutoTokenizer.from_pretrained(checkpoint_repo)
    tokenizer.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
UpperCAmelCase_ : str = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--checkpoint-repo",
default=None,
type=str,
required=True,
help="Path the official PyTorch dump, e.g. 'andreasmadsen/efficient_mlm_m0.40'.",
)
parser.add_argument(
"--pytorch_dump_folder_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
)
UpperCAmelCase_ : List[Any] = parser.parse_args()
convert_roberta_prelayernorm_checkpoint_to_pytorch(args.checkpoint_repo, args.pytorch_dump_folder_path)
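# A compact standalone sketch (illustrative names, not part of the converter) of the
# key-remapping pattern above: swap the prefix and drop keys the target model lacks.
def _remap_state_dict(state_dict, old_prefix="roberta.", new_prefix="roberta_prelayernorm."):
    out = {}
    for key, value in state_dict.items():
        if key.endswith(".self.LayerNorm.weight") or key.endswith(".self.LayerNorm.bias"):
            continue  # unused by the target architecture
        if key.startswith(old_prefix):
            key = new_prefix + key[len(old_prefix):]
        out[key] = value
    return out
# _remap_state_dict({"roberta.encoder.w": 1, "x.self.LayerNorm.bias": 2})
# -> {"roberta_prelayernorm.encoder.w": 1}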
| 198 | 0 |
"""simple docstring"""
import argparse
import os
import evaluate
import torch
from datasets import load_dataset
from torch.optim import AdamW
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed
from accelerate import Accelerator, DistributedType
########################################################################
# This is a fully working simple example to use Accelerate,
# specifically showcasing how to properly calculate the metrics on the
# validation dataset when in a distributed system, and builds off the
# `nlp_example.py` script.
#
# This example trains a Bert base model on GLUE MRPC
# in any of the following settings (with the same script):
# - single CPU or single GPU
# - multi GPUS (using PyTorch distributed mode)
# - (multi) TPUs
# - fp16 (mixed-precision) or fp32 (normal precision)
#
# To help focus on the differences in the code, building `DataLoaders`
# was refactored into its own function.
# New additions from the base script can be found quickly by
# looking for the # New Code # tags
#
# To run it in each of these various modes, follow the instructions
# in the readme for examples:
# https://github.com/huggingface/accelerate/tree/main/examples
#
########################################################################
MAX_GPU_BATCH_SIZE = 16
EVAL_BATCH_SIZE = 32
def get_dataloaders(accelerator: Accelerator, batch_size: int = 16):
    """Creates train/eval `DataLoader`s for the GLUE MRPC dataset, using the bert-base-cased tokenizer."""
    tokenizer = AutoTokenizer.from_pretrained("bert-base-cased")
    datasets = load_dataset("glue", "mrpc")
    def tokenize_function(examples):
        # max_length=None => use the model max length (it's actually the default)
        outputs = tokenizer(examples["sentence1"], examples["sentence2"], truncation=True, max_length=None)
        return outputs
    # Apply the method we just defined to all the examples in all the splits of the dataset
    # starting with the main process first:
    with accelerator.main_process_first():
        tokenized_datasets = datasets.map(
            tokenize_function, batched=True, remove_columns=["idx", "sentence1", "sentence2"], )
    # We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the
    # transformers library
    tokenized_datasets = tokenized_datasets.rename_column("label", "labels")
    def collate_fn(examples):
        # On TPU it's best to pad everything to the same length or training will be very slow.
        max_length = 128 if accelerator.distributed_type == DistributedType.TPU else None
        # When using mixed precision we want round multiples of 8/16
        if accelerator.mixed_precision == "fp8":
            pad_to_multiple_of = 16
        elif accelerator.mixed_precision != "no":
            pad_to_multiple_of = 8
        else:
            pad_to_multiple_of = None
        return tokenizer.pad(
            examples, padding="longest", max_length=max_length, pad_to_multiple_of=pad_to_multiple_of, return_tensors="pt", )
    # Instantiate dataloaders.
    train_dataloader = DataLoader(
        tokenized_datasets["train"], shuffle=True, collate_fn=collate_fn, batch_size=batch_size )
    eval_dataloader = DataLoader(
        tokenized_datasets["validation"], shuffle=False, collate_fn=collate_fn, batch_size=EVAL_BATCH_SIZE )
    return train_dataloader, eval_dataloader
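# A standalone sketch of the pad-to-multiple rule in `collate_fn` above: the multiple
# follows the precision mode, and lengths round up to it (helper names illustrative).
def _demo_pad_multiple(mixed_precision):
    if mixed_precision == "fp8":
        return 16
    if mixed_precision != "no":
        return 8  # fp16/bf16 tensor cores prefer multiples of 8
    return None
def _demo_padded_len(length, multiple):
    return -(-length // multiple) * multiple  # ceil division: 23 -> 24 for multiple 8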
# For testing only
if os.environ.get('TESTING_MOCKED_DATALOADERS', None) == "1":
from accelerate.test_utils.training import mocked_dataloaders
    get_dataloaders = mocked_dataloaders  # noqa: F811
def training_function(config, args):
    # For testing only
    if os.environ.get("TESTING_MOCKED_DATALOADERS", None) == "1":
        config["num_epochs"] = 2
    # Initialize accelerator
    accelerator = Accelerator(cpu=args.cpu, mixed_precision=args.mixed_precision)
    # Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
    lr = config["lr"]
    num_epochs = int(config["num_epochs"])
    seed = int(config["seed"])
    batch_size = int(config["batch_size"])
    metric = evaluate.load("glue", "mrpc")
    # If the batch size is too big we use gradient accumulation
    gradient_accumulation_steps = 1
    if batch_size > MAX_GPU_BATCH_SIZE and accelerator.distributed_type != DistributedType.TPU:
        gradient_accumulation_steps = batch_size // MAX_GPU_BATCH_SIZE
        batch_size = MAX_GPU_BATCH_SIZE
    set_seed(seed)
    train_dataloader, eval_dataloader = get_dataloaders(accelerator, batch_size)
# Instantiate the model (we build the model here so that the seed also control new weights initialization)
    model = AutoModelForSequenceClassification.from_pretrained("bert-base-cased", return_dict=True)
    # We could avoid this line since the accelerator is set with `device_placement=True` (default value).
    # Note that if you are placing tensors on devices manually, this line absolutely needs to be before the optimizer
    # creation otherwise training will not work on TPU (`accelerate` will kindly throw an error to make us aware of that).
    model = model.to(accelerator.device)
    # Instantiate optimizer
    optimizer = AdamW(params=model.parameters(), lr=lr)
    # Instantiate scheduler
    lr_scheduler = get_linear_schedule_with_warmup(
        optimizer=optimizer, num_warmup_steps=100, num_training_steps=(len(train_dataloader) * num_epochs) // gradient_accumulation_steps, )
    # Prepare everything
    # There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
    # prepare method.
    model, optimizer, train_dataloader, eval_dataloader, lr_scheduler = accelerator.prepare(
        model, optimizer, train_dataloader, eval_dataloader, lr_scheduler )
    # Now we train the model
    for epoch in range(num_epochs):
        model.train()
        for step, batch in enumerate(train_dataloader):
            # We could avoid this line since we set the accelerator with `device_placement=True`.
            batch.to(accelerator.device)
            outputs = model(**batch)
            loss = outputs.loss
            loss = loss / gradient_accumulation_steps
            accelerator.backward(loss)
            if step % gradient_accumulation_steps == 0:
                optimizer.step()
                lr_scheduler.step()
                optimizer.zero_grad()
        model.eval()
        samples_seen = 0
        for step, batch in enumerate(eval_dataloader):
            # We could avoid this line since we set the accelerator with `device_placement=True`.
            batch.to(accelerator.device)
            with torch.no_grad():
                outputs = model(**batch)
            predictions = outputs.logits.argmax(dim=-1)
            predictions, references = accelerator.gather((predictions, batch["labels"]))
            # New Code #
            # First we check if it's a distributed system
            if accelerator.use_distributed:
                # Then see if we're on the last batch of our eval dataloader
                if step == len(eval_dataloader) - 1:
                    # Last batch needs to be truncated on distributed systems as it contains additional samples
                    predictions = predictions[: len(eval_dataloader.dataset) - samples_seen]
                    references = references[: len(eval_dataloader.dataset) - samples_seen]
                else:
                    # Otherwise we add the number of samples seen
                    samples_seen += references.shape[0]
            # All of this can be avoided if you use `Accelerator.gather_for_metrics` instead of `Accelerator.gather`:
            # accelerator.gather_for_metrics((predictions, batch["labels"]))
            metric.add_batch(
                predictions=predictions, references=references, )
        eval_metric = metric.compute()
        # Use accelerator.print to print only on the main process.
        accelerator.print(f"epoch {epoch}:", eval_metric)
def main():
    parser = argparse.ArgumentParser(description="Simple example of training script.")
    parser.add_argument(
        "--mixed_precision", type=str, default=None, choices=["no", "fp16", "bf16", "fp8"], help="Whether to use mixed precision. Choose"
        "between fp16 and bf16 (bfloat16). Bf16 requires PyTorch >= 1.10."
        "and an Nvidia Ampere GPU.", )
    parser.add_argument("--cpu", action="store_true", help="If passed, will train on the CPU.")
    args = parser.parse_args()
    config = {"lr": 2e-5, "num_epochs": 3, "seed": 42, "batch_size": 16}
    training_function(config, args)
if __name__ == "__main__":
main()
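# A standalone sketch (toy sizes) of the last-batch truncation above: in a distributed
# run `gather` pads the final batch, so the gathered tensors are cut back to the true
# dataset size before being added to the metric. `Accelerator.gather_for_metrics`
# performs the same truncation automatically.
def _demo_truncate_gathered(tensor, samples_seen, dataset_len):
    return tensor[: dataset_len - samples_seen]  # keep only genuine samples
# e.g. _demo_truncate_gathered(torch.arange(8), samples_seen=94, dataset_len=100) keeps the first 6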
| 115 |
"""simple docstring"""
import unittest
from datasets import load_dataset
from transformers import BloomTokenizerFast
from transformers.testing_utils import require_tokenizers
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class BloomTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    slow_tokenizer_class = None
    rust_tokenizer_class = BloomTokenizerFast
    tokenizer_class = BloomTokenizerFast
    test_rust_tokenizer = True
    test_slow_tokenizer = False
    from_pretrained_vocab_key = "tokenizer_file"
    special_tokens_map = {"bos_token": "<s>", "eos_token": "</s>", "unk_token": "<unk>", "pad_token": "<pad>"}
    def setUp(self):
        super().setUp()
        tokenizer = BloomTokenizerFast.from_pretrained("bigscience/tokenizer")
        tokenizer.save_pretrained(self.tmpdirname)
    def get_rust_tokenizer(self, **kwargs):
        kwargs.update(self.special_tokens_map)
        return BloomTokenizerFast.from_pretrained(self.tmpdirname, **kwargs)
    def test_encodings_from_sample_data(self):
        """Assert that the computed tokens match the expected ids and decode back to the input."""
        tokenizer = self.get_rust_tokenizer()
        input_sentences = ["The quick brown fox</s>", "jumps over the lazy dog</s>"]
        target_tokens = [[2_175, 23_714, 73_173, 144_252, 2], [77, 132_619, 3_478, 368, 109_586, 35_433, 2]]
        computed_tokens = tokenizer.batch_encode_plus(input_sentences)["input_ids"]
        self.assertListEqual(target_tokens, computed_tokens)
        decoded_tokens = tokenizer.batch_decode(computed_tokens)
        self.assertListEqual(decoded_tokens, input_sentences)
    def test_padding(self, max_length=6):
        for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
            with self.subTest(f"{tokenizer.__class__.__name__} ({pretrained_name})"):
                tokenizer_r = self.rust_tokenizer_class.from_pretrained(pretrained_name, **kwargs)
                # tokenizer_r.pad_token = None # Hotfixing padding = None
                # Simple input
                s = "This is a simple input"
                s2 = ["This is a simple input 1", "This is a simple input 2"]
                p = ("This is a simple input", "This is a pair")
                p2 = [
                    ("This is a simple input 1", "This is a simple input 2"),
                    ("This is a simple pair 1", "This is a simple pair 2"),
                ]
                # Simple input tests
                try:
                    tokenizer_r.encode(s, max_length=max_length)
                    tokenizer_r.encode_plus(s, max_length=max_length)
                    tokenizer_r.batch_encode_plus(s2, max_length=max_length)
                    tokenizer_r.encode(p, max_length=max_length)
                    tokenizer_r.batch_encode_plus(p2, max_length=max_length)
                except ValueError:
                    self.fail("Bloom Tokenizer should be able to deal with padding")
                tokenizer_r.pad_token = None  # Hotfixing padding = None
                self.assertRaises(ValueError, tokenizer_r.encode, s, max_length=max_length, padding="max_length")
                # Simple input
                self.assertRaises(ValueError, tokenizer_r.encode_plus, s, max_length=max_length, padding="max_length")
                # Simple input
                self.assertRaises(
                    ValueError, tokenizer_r.batch_encode_plus, s2, max_length=max_length, padding="max_length", )
                # Pair input
                self.assertRaises(ValueError, tokenizer_r.encode, p, max_length=max_length, padding="max_length")
                # Pair input
                self.assertRaises(ValueError, tokenizer_r.encode_plus, p, max_length=max_length, padding="max_length")
                # Pair input
                self.assertRaises(
                    ValueError, tokenizer_r.batch_encode_plus, p2, max_length=max_length, padding="max_length", )
    def test_encodings_from_xnli_dataset(self):
        """Encode a multilingual XNLI sample and check decoding reproduces it."""
        tokenizer = self.get_rust_tokenizer()
        ds = load_dataset("xnli", "all_languages", split="test", streaming=True)
        sample_data = next(iter(ds))["premise"]  # pick up one data
        input_text = list(sample_data.values())
        output_tokens = list(map(tokenizer.encode, input_text))
        predicted_text = [tokenizer.decode(x, clean_up_tokenization_spaces=False) for x in output_tokens]
        self.assertListEqual(predicted_text, input_text)
    def test_pretrained_model_lists(self):
        self.assertGreaterEqual(len(self.tokenizer_class.pretrained_vocab_files_map), 1)
        self.assertGreaterEqual(len(list(self.tokenizer_class.pretrained_vocab_files_map.values())[0]), 1)
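# A minimal standalone round-trip sketch of the property the sample-data test above
# asserts (tokenizer download from the Hub assumed to succeed).
def _demo_round_trip():
    tok = BloomTokenizerFast.from_pretrained("bigscience/tokenizer")
    texts = ["The quick brown fox</s>", "jumps over the lazy dog</s>"]
    ids = tok.batch_encode_plus(texts)["input_ids"]
    assert tok.batch_decode(ids) == texts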
| 115 | 1 |
import os
import sys
import tempfile
import unittest
import unittest.mock as mock
from pathlib import Path
from huggingface_hub import HfFolder, delete_repo
from huggingface_hub.file_download import http_get
from requests.exceptions import HTTPError
from transformers import (
AlbertTokenizer,
AutoTokenizer,
BertTokenizer,
BertTokenizerFast,
GPTaTokenizerFast,
is_tokenizers_available,
)
from transformers.testing_utils import TOKEN, USER, is_staging_test, require_tokenizers
from transformers.tokenization_utils import Trie
sys.path.append(str(Path(__file__).parent.parent / '''utils'''))
from test_module.custom_tokenization import CustomTokenizer # noqa E402
if is_tokenizers_available():
from test_module.custom_tokenization_fast import CustomTokenizerFast
class TokenizerUtilTester(unittest.TestCase):
    def test_cached_model_has_minimum_calls_to_head(self):
        # A mock response for an HTTP head request to emulate server down
        response_mock = mock.Mock()
        response_mock.status_code = 500
        response_mock.headers = {}
        response_mock.raise_for_status.side_effect = HTTPError
        response_mock.json.return_value = {}
        # Download this model to make sure it's in the cache.
        _ = BertTokenizer.from_pretrained("hf-internal-testing/tiny-random-bert")
        # Under the mock environment we get a 500 error when trying to reach the tokenizer.
        with mock.patch("requests.Session.request", return_value=response_mock) as mock_head:
            _ = BertTokenizer.from_pretrained("hf-internal-testing/tiny-random-bert")
            # This check we did call the fake head request
            mock_head.assert_called()
@require_tokenizers
    def test_cached_fast_tokenizer_has_minimum_calls_to_head(self):
        # A mock response for an HTTP head request to emulate server down
        response_mock = mock.Mock()
        response_mock.status_code = 500
        response_mock.headers = {}
        response_mock.raise_for_status.side_effect = HTTPError
        response_mock.json.return_value = {}
        # Download this model to make sure it's in the cache.
        _ = GPTaTokenizerFast.from_pretrained("gpt2")
        # Under the mock environment we get a 500 error when trying to reach the tokenizer.
        with mock.patch("requests.Session.request", return_value=response_mock) as mock_head:
            _ = GPTaTokenizerFast.from_pretrained("gpt2")
            # This check we did call the fake head request
            mock_head.assert_called()
    def test_legacy_load_from_one_file(self):
        # This test is for deprecated behavior and can be removed in v5
        try:
            tmp_file = tempfile.mktemp()
            with open(tmp_file, "wb") as f:
                http_get("https://huggingface.co/albert-base-v1/resolve/main/spiece.model", f)
            _ = AlbertTokenizer.from_pretrained(tmp_file)
        finally:
            os.remove(tmp_file)
        # Supporting this legacy load introduced a weird bug where the tokenizer would load local files if they are in
        # the current folder and have the right name.
        if os.path.isfile("tokenizer.json"):
            # We skip the test if the user has a `tokenizer.json` in this folder to avoid deleting it.
            return
        try:
            with open("tokenizer.json", "wb") as f:
                http_get("https://huggingface.co/hf-internal-testing/tiny-random-bert/blob/main/tokenizer.json", f)
            tokenizer = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-random-gpt2")
            # The tiny random BERT has a vocab size of 1024, tiny gpt2 has a vocab size of 1000
            self.assertEqual(tokenizer.vocab_size, 1_000)
            # Tokenizer should depend on the remote checkpoint, not the local tokenizer.json file.
        finally:
            os.remove("tokenizer.json")
    def test_legacy_load_from_url(self):
        # This test is for deprecated behavior and can be removed in v5
        _ = AlbertTokenizer.from_pretrained("https://huggingface.co/albert-base-v1/resolve/main/spiece.model")
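# A compact standalone sketch of the "server down" pattern used above: patch requests
# at the session level so every HTTP call sees the fake response, then assert the
# patched path was exercised (helper name illustrative).
def _demo_session_patch():
    fake = mock.Mock(status_code=500)
    with mock.patch("requests.Session.request", return_value=fake) as patched:
        import requests
        assert requests.Session().request("HEAD", "https://example.com").status_code == 500
        patched.assert_called()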
@is_staging_test
class TokenizerPushToHubTester(unittest.TestCase):
    vocab_tokens = ["[UNK]", "[CLS]", "[SEP]", "[PAD]", "[MASK]", "bla", "blou"]
    @classmethod
    def setUpClass(cls):
        cls._token = TOKEN
        HfFolder.save_token(TOKEN)
    @classmethod
    def tearDownClass(cls):
        try:
            delete_repo(token=cls._token, repo_id="test-tokenizer")
        except HTTPError:
            pass
        try:
            delete_repo(token=cls._token, repo_id="valid_org/test-tokenizer-org")
        except HTTPError:
            pass
        try:
            delete_repo(token=cls._token, repo_id="test-dynamic-tokenizer")
        except HTTPError:
            pass
    def test_push_to_hub(self):
        with tempfile.TemporaryDirectory() as tmp_dir:
            vocab_file = os.path.join(tmp_dir, "vocab.txt")
            with open(vocab_file, "w", encoding="utf-8") as vocab_writer:
                vocab_writer.write("".join([x + "\n" for x in self.vocab_tokens]))
            tokenizer = BertTokenizer(vocab_file)
        tokenizer.push_to_hub("test-tokenizer", use_auth_token=self._token)
        new_tokenizer = BertTokenizer.from_pretrained(f"{USER}/test-tokenizer")
        self.assertDictEqual(new_tokenizer.vocab, tokenizer.vocab)
        # Reset repo
        delete_repo(token=self._token, repo_id="test-tokenizer")
        # Push to hub via save_pretrained
        with tempfile.TemporaryDirectory() as tmp_dir:
            tokenizer.save_pretrained(tmp_dir, repo_id="test-tokenizer", push_to_hub=True, use_auth_token=self._token)
        new_tokenizer = BertTokenizer.from_pretrained(f"{USER}/test-tokenizer")
        self.assertDictEqual(new_tokenizer.vocab, tokenizer.vocab)
    def test_push_to_hub_in_organization(self):
        with tempfile.TemporaryDirectory() as tmp_dir:
            vocab_file = os.path.join(tmp_dir, "vocab.txt")
            with open(vocab_file, "w", encoding="utf-8") as vocab_writer:
                vocab_writer.write("".join([x + "\n" for x in self.vocab_tokens]))
            tokenizer = BertTokenizer(vocab_file)
        tokenizer.push_to_hub("valid_org/test-tokenizer-org", use_auth_token=self._token)
        new_tokenizer = BertTokenizer.from_pretrained("valid_org/test-tokenizer-org")
        self.assertDictEqual(new_tokenizer.vocab, tokenizer.vocab)
        # Reset repo
        delete_repo(token=self._token, repo_id="valid_org/test-tokenizer-org")
        # Push to hub via save_pretrained
        with tempfile.TemporaryDirectory() as tmp_dir:
            tokenizer.save_pretrained(
                tmp_dir, repo_id="valid_org/test-tokenizer-org", push_to_hub=True, use_auth_token=self._token)
        new_tokenizer = BertTokenizer.from_pretrained("valid_org/test-tokenizer-org")
        self.assertDictEqual(new_tokenizer.vocab, tokenizer.vocab)
@require_tokenizers
    def test_push_to_hub_dynamic_tokenizer(self):
        CustomTokenizer.register_for_auto_class()
        with tempfile.TemporaryDirectory() as tmp_dir:
            vocab_file = os.path.join(tmp_dir, "vocab.txt")
            with open(vocab_file, "w", encoding="utf-8") as vocab_writer:
                vocab_writer.write("".join([x + "\n" for x in self.vocab_tokens]))
            tokenizer = CustomTokenizer(vocab_file)
        # No fast custom tokenizer
        tokenizer.push_to_hub("test-dynamic-tokenizer", use_auth_token=self._token)
        tokenizer = AutoTokenizer.from_pretrained(f"{USER}/test-dynamic-tokenizer", trust_remote_code=True)
        # Can't make an isinstance check because the new_model.config is from the CustomTokenizer class of a dynamic module
        self.assertEqual(tokenizer.__class__.__name__, "CustomTokenizer")
        # Fast and slow custom tokenizer
        CustomTokenizerFast.register_for_auto_class()
        with tempfile.TemporaryDirectory() as tmp_dir:
            vocab_file = os.path.join(tmp_dir, "vocab.txt")
            with open(vocab_file, "w", encoding="utf-8") as vocab_writer:
                vocab_writer.write("".join([x + "\n" for x in self.vocab_tokens]))
            bert_tokenizer = BertTokenizerFast.from_pretrained(tmp_dir)
            bert_tokenizer.save_pretrained(tmp_dir)
            tokenizer = CustomTokenizerFast.from_pretrained(tmp_dir)
        tokenizer.push_to_hub("test-dynamic-tokenizer", use_auth_token=self._token)
        tokenizer = AutoTokenizer.from_pretrained(f"{USER}/test-dynamic-tokenizer", trust_remote_code=True)
        # Can't make an isinstance check because the new_model.config is from the FakeConfig class of a dynamic module
        self.assertEqual(tokenizer.__class__.__name__, "CustomTokenizerFast")
        tokenizer = AutoTokenizer.from_pretrained(
            f"{USER}/test-dynamic-tokenizer", use_fast=False, trust_remote_code=True)
        # Can't make an isinstance check because the new_model.config is from the FakeConfig class of a dynamic module
        self.assertEqual(tokenizer.__class__.__name__, "CustomTokenizer")
class TrieTest(unittest.TestCase):
    def test_trie(self):
        trie = Trie()
        trie.add("Hello 友達")
        self.assertEqual(trie.data, {"H": {"e": {"l": {"l": {"o": {" ": {"友": {"達": {"": 1}}}}}}}}})
        trie.add("Hello")
        trie.data
        self.assertEqual(trie.data, {"H": {"e": {"l": {"l": {"o": {"": 1, " ": {"友": {"達": {"": 1}}}}}}}}})
    def test_trie_split(self):
        trie = Trie()
        self.assertEqual(trie.split("[CLS] This is a extra_id_100"), ["[CLS] This is a extra_id_100"])
        trie.add("[CLS]")
        trie.add("extra_id_1")
        trie.add("extra_id_100")
        self.assertEqual(trie.split("[CLS] This is a extra_id_100"), ["[CLS]", " This is a ", "extra_id_100"])
    def test_trie_single(self):
        trie = Trie()
        trie.add("A")
        self.assertEqual(trie.split("ABC"), ["A", "BC"])
        self.assertEqual(trie.split("BCA"), ["BC", "A"])
    def test_trie_final(self):
        trie = Trie()
        trie.add("TOKEN]")
        trie.add("[SPECIAL_TOKEN]")
        self.assertEqual(trie.split("This is something [SPECIAL_TOKEN]"), ["This is something ", "[SPECIAL_TOKEN]"])
    def test_trie_subtokens(self):
        trie = Trie()
        trie.add("A")
        trie.add("P")
        trie.add("[SPECIAL_TOKEN]")
        self.assertEqual(trie.split("This is something [SPECIAL_TOKEN]"), ["This is something ", "[SPECIAL_TOKEN]"])
    def test_trie_suffix_tokens(self):
        trie = Trie()
        trie.add("AB")
        trie.add("B")
        trie.add("C")
        self.assertEqual(trie.split("ABC"), ["AB", "C"])
    def test_trie_skip(self):
        trie = Trie()
        trie.add("ABC")
        trie.add("B")
        trie.add("CD")
        self.assertEqual(trie.split("ABCD"), ["ABC", "D"])
    def test_cut_text_hardening(self):
        # Even if the offsets are wrong, we necessarily output correct string
        # parts.
        trie = Trie()
        parts = trie.cut_text("ABC", [0, 0, 2, 1, 2, 3])
        self.assertEqual(parts, ["AB", "C"])
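# A minimal standalone sketch (simplified, not transformers' implementation) of the
# nested-dict layout the Trie tests above assert: `add` walks characters and an
# empty-string key marks a terminal node.
def _demo_trie_add(data, word):
    node = data
    for ch in word:
        node = node.setdefault(ch, {})
    node[""] = 1  # terminal marker
# d = {}; _demo_trie_add(d, "Hello")  ->  d == {"H": {"e": {"l": {"l": {"o": {"": 1}}}}}}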
| 363 |
from typing import Optional, Union
import torch
from torch import nn
from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss
from ...activations import ACTaFN
from ...modeling_outputs import BaseModelOutputWithPoolingAndNoAttention, ImageClassifierOutputWithNoAttention
from ...modeling_utils import PreTrainedModel
from ...utils import add_code_sample_docstrings, add_start_docstrings, add_start_docstrings_to_model_forward, logging
from .configuration_mobilenet_va import MobileNetVaConfig
logger = logging.get_logger(__name__)
# General docstring
_CONFIG_FOR_DOC = "MobileNetV1Config"
# Base docstring
_CHECKPOINT_FOR_DOC = "google/mobilenet_v1_1.0_224"
_EXPECTED_OUTPUT_SHAPE = [1, 1024, 7, 7]
# Image classification docstring
_IMAGE_CLASS_CHECKPOINT = "google/mobilenet_v1_1.0_224"
_IMAGE_CLASS_EXPECTED_OUTPUT = "tabby, tabby cat"
MOBILENET_V1_PRETRAINED_MODEL_ARCHIVE_LIST = [
    "google/mobilenet_v1_1.0_224",
    "google/mobilenet_v1_0.75_192",
    # See all MobileNetV1 models at https://huggingface.co/models?filter=mobilenet_v1
]
def _build_tf_to_pytorch_map(model, config, tf_weights=None):
    """Build a map of TensorFlow variable names to the matching PyTorch parameters."""
    tf_to_pt_map = {}
    if isinstance(model, MobileNetVaForImageClassification):
        backbone = model.mobilenet_va
    else:
        backbone = model
    prefix = "MobilenetV1/Conv2d_0/"
    tf_to_pt_map[prefix + "weights"] = backbone.conv_stem.convolution.weight
    tf_to_pt_map[prefix + "BatchNorm/beta"] = backbone.conv_stem.normalization.bias
    tf_to_pt_map[prefix + "BatchNorm/gamma"] = backbone.conv_stem.normalization.weight
    tf_to_pt_map[prefix + "BatchNorm/moving_mean"] = backbone.conv_stem.normalization.running_mean
    tf_to_pt_map[prefix + "BatchNorm/moving_variance"] = backbone.conv_stem.normalization.running_var
    for i in range(13):
        tf_index = i + 1
        pt_index = i * 2
        pointer = backbone.layer[pt_index]
        prefix = f"MobilenetV1/Conv2d_{tf_index}_depthwise/"
        tf_to_pt_map[prefix + "depthwise_weights"] = pointer.convolution.weight
        tf_to_pt_map[prefix + "BatchNorm/beta"] = pointer.normalization.bias
        tf_to_pt_map[prefix + "BatchNorm/gamma"] = pointer.normalization.weight
        tf_to_pt_map[prefix + "BatchNorm/moving_mean"] = pointer.normalization.running_mean
        tf_to_pt_map[prefix + "BatchNorm/moving_variance"] = pointer.normalization.running_var
        pointer = backbone.layer[pt_index + 1]
        prefix = f"MobilenetV1/Conv2d_{tf_index}_pointwise/"
        tf_to_pt_map[prefix + "weights"] = pointer.convolution.weight
        tf_to_pt_map[prefix + "BatchNorm/beta"] = pointer.normalization.bias
        tf_to_pt_map[prefix + "BatchNorm/gamma"] = pointer.normalization.weight
        tf_to_pt_map[prefix + "BatchNorm/moving_mean"] = pointer.normalization.running_mean
        tf_to_pt_map[prefix + "BatchNorm/moving_variance"] = pointer.normalization.running_var
    if isinstance(model, MobileNetVaForImageClassification):
        prefix = "MobilenetV1/Logits/Conv2d_1c_1x1/"
        tf_to_pt_map[prefix + "weights"] = model.classifier.weight
        tf_to_pt_map[prefix + "biases"] = model.classifier.bias
    return tf_to_pt_map
def load_tf_weights_in_mobilenet_va(model, config, tf_checkpoint_path):
    """Load TensorFlow checkpoints into a PyTorch model."""
    try:
        import numpy as np
        import tensorflow as tf
    except ImportError:
        logger.error(
            "Loading a TensorFlow models in PyTorch, requires TensorFlow to be installed. Please see "
            "https://www.tensorflow.org/install/ for installation instructions." )
        raise
    # Load weights from TF model
    init_vars = tf.train.list_variables(tf_checkpoint_path)
    tf_weights = {}
    for name, shape in init_vars:
        logger.info(f"Loading TF weight {name} with shape {shape}")
        array = tf.train.load_variable(tf_checkpoint_path, name)
        tf_weights[name] = array
    # Build TF to PyTorch weights loading map
    tf_to_pt_map = _build_tf_to_pytorch_map(model, config, tf_weights)
    for name, pointer in tf_to_pt_map.items():
        logger.info(f"Importing {name}")
        if name not in tf_weights:
            logger.info(f"{name} not in tf pre-trained weights, skipping")
            continue
        array = tf_weights[name]
        if "depthwise_weights" in name:
            logger.info("Transposing depthwise")
            array = np.transpose(array, (2, 3, 0, 1))
        elif "weights" in name:
            logger.info("Transposing")
            if len(pointer.shape) == 2:  # copying into linear layer
                array = array.squeeze().transpose()
            else:
                array = np.transpose(array, (3, 2, 0, 1))
        if pointer.shape != array.shape:
            raise ValueError(f"Pointer shape {pointer.shape} and array shape {array.shape} mismatched")
        logger.info(f"Initialize PyTorch weight {name} {array.shape}")
        pointer.data = torch.from_numpy(array)
        tf_weights.pop(name, None)
        tf_weights.pop(name + "/RMSProp", None)
        tf_weights.pop(name + "/RMSProp_1", None)
        tf_weights.pop(name + "/ExponentialMovingAverage", None)
    logger.info(f"Weights not copied to PyTorch model: {', '.join(tf_weights.keys())}")
    return model
def apply_tf_padding(features, conv_layer):
    """Apply TensorFlow-style "SAME" padding to a convolution layer."""
    in_height, in_width = features.shape[-2:]
    stride_height, stride_width = conv_layer.stride
    kernel_height, kernel_width = conv_layer.kernel_size
    if in_height % stride_height == 0:
        pad_along_height = max(kernel_height - stride_height, 0)
    else:
        pad_along_height = max(kernel_height - (in_height % stride_height), 0)
    if in_width % stride_width == 0:
        pad_along_width = max(kernel_width - stride_width, 0)
    else:
        pad_along_width = max(kernel_width - (in_width % stride_width), 0)
    pad_left = pad_along_width // 2
    pad_right = pad_along_width - pad_left
    pad_top = pad_along_height // 2
    pad_bottom = pad_along_height - pad_top
    padding = (pad_left, pad_right, pad_top, pad_bottom)
    return nn.functional.pad(features, padding, "constant", 0.0)
class MobileNetVaConvLayer(nn.Module):
    def __init__(self, config, in_channels, out_channels, kernel_size, stride=1, groups=1, bias=False, use_normalization=True, use_activation=True, ) -> None:
        super().__init__()
        self.config = config
        if in_channels % groups != 0:
            raise ValueError(f"Input channels ({in_channels}) are not divisible by {groups} groups.")
        if out_channels % groups != 0:
            raise ValueError(f"Output channels ({out_channels}) are not divisible by {groups} groups.")
        padding = 0 if config.tf_padding else int((kernel_size - 1) / 2)
        self.convolution = nn.Conv2d(
            in_channels=in_channels, out_channels=out_channels, kernel_size=kernel_size, stride=stride, padding=padding, groups=groups, bias=bias, padding_mode="zeros", )
        if use_normalization:
            self.normalization = nn.BatchNorm2d(
                num_features=out_channels, eps=config.layer_norm_eps, momentum=0.9997, affine=True, track_running_stats=True, )
        else:
            self.normalization = None
        if use_activation:
            if isinstance(use_activation, str):
                self.activation = ACTaFN[use_activation]
            elif isinstance(config.hidden_act, str):
                self.activation = ACTaFN[config.hidden_act]
            else:
                self.activation = config.hidden_act
        else:
            self.activation = None
    def forward(self, features: torch.Tensor) -> torch.Tensor:
        if self.config.tf_padding:
            features = apply_tf_padding(features, self.convolution)
        features = self.convolution(features)
        if self.normalization is not None:
            features = self.normalization(features)
        if self.activation is not None:
            features = self.activation(features)
        return features
class MobileNetVaPreTrainedModel(PreTrainedModel):
    config_class = MobileNetVaConfig
    load_tf_weights = load_tf_weights_in_mobilenet_va
    base_model_prefix = "mobilenet_v1"
    main_input_name = "pixel_values"
    supports_gradient_checkpointing = False
    def _init_weights(self, module) -> None:
        if isinstance(module, (nn.Linear, nn.Conv2d)):
            module.weight.data.normal_(mean=0.0, std=self.config.initializer_range)
            if module.bias is not None:
                module.bias.data.zero_()
        elif isinstance(module, nn.BatchNorm2d):
            module.bias.data.zero_()
            module.weight.data.fill_(1.0)
MOBILENET_V1_START_DOCSTRING = r'''
This model is a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) subclass. Use it
as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and
behavior.
Parameters:
config ([`MobileNetV1Config`]): Model configuration class with all the parameters of the model.
Initializing with a config file does not load the weights associated with the model, only the
configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights.
'''
MOBILENET_V1_INPUTS_DOCSTRING = r'''
Args:
pixel_values (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)`):
Pixel values. Pixel values can be obtained using [`AutoImageProcessor`]. See
[`MobileNetV1ImageProcessor.__call__`] for details.
output_hidden_states (`bool`, *optional*):
Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for
more detail.
return_dict (`bool`, *optional*):
Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
'''
@add_start_docstrings(
"""The bare MobileNetV1 model outputting raw hidden-states without any specific head on top.""" , UpperCAmelCase_ , )
class MobileNetVaModel(MobileNetVaPreTrainedModel):
    def __init__(self, config, add_pooling_layer: bool = True):
        super().__init__(config)
        self.config = config
        depth = 32
        out_channels = max(int(depth * config.depth_multiplier), config.min_depth)
        self.conv_stem = MobileNetVaConvLayer(
            config, in_channels=config.num_channels, out_channels=out_channels, kernel_size=3, stride=2, )
        strides = [1, 2, 1, 2, 1, 2, 1, 1, 1, 1, 1, 2, 1]
        self.layer = nn.ModuleList()
        for i in range(13):
            in_channels = out_channels
            if strides[i] == 2 or i == 0:
                depth *= 2
                out_channels = max(int(depth * config.depth_multiplier), config.min_depth)
            self.layer.append(
                MobileNetVaConvLayer(
                    config, in_channels=in_channels, out_channels=in_channels, kernel_size=3, stride=strides[i], groups=in_channels, ))
            self.layer.append(
                MobileNetVaConvLayer(
                    config, in_channels=in_channels, out_channels=out_channels, kernel_size=1, ))
        self.pooler = nn.AdaptiveAvgPool2d((1, 1)) if add_pooling_layer else None
        # Initialize weights and apply final processing
        self.post_init()
    def _prune_heads(self, heads_to_prune):
        raise NotImplementedError
    @add_start_docstrings_to_model_forward(MOBILENET_V1_INPUTS_DOCSTRING)
    @add_code_sample_docstrings(
        checkpoint=_CHECKPOINT_FOR_DOC, output_type=BaseModelOutputWithPoolingAndNoAttention, config_class=_CONFIG_FOR_DOC, modality="vision", expected_output=_EXPECTED_OUTPUT_SHAPE, )
    def forward(self, pixel_values: Optional[torch.Tensor] = None, output_hidden_states: Optional[bool] = None, return_dict: Optional[bool] = None, ) -> Union[tuple, BaseModelOutputWithPoolingAndNoAttention]:
        output_hidden_states = (
            output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        )
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict
        if pixel_values is None:
            raise ValueError("You have to specify pixel_values")
        hidden_states = self.conv_stem(pixel_values)
        all_hidden_states = () if output_hidden_states else None
        for i, layer_module in enumerate(self.layer):
            hidden_states = layer_module(hidden_states)
            if output_hidden_states:
                all_hidden_states = all_hidden_states + (hidden_states,)
        last_hidden_state = hidden_states
        if self.pooler is not None:
            pooled_output = torch.flatten(self.pooler(last_hidden_state), start_dim=1)
        else:
            pooled_output = None
        if not return_dict:
            return tuple(v for v in [last_hidden_state, pooled_output, all_hidden_states] if v is not None)
        return BaseModelOutputWithPoolingAndNoAttention(
            last_hidden_state=last_hidden_state, pooler_output=pooled_output, hidden_states=all_hidden_states, )
@add_start_docstrings(
"""
MobileNetV1 model with an image classification head on top (a linear layer on top of the pooled features), e.g. for
ImageNet.
""" , UpperCAmelCase_ , )
class MobileNetVaForImageClassification(MobileNetVaPreTrainedModel):
    def __init__(self, config) -> None:
        super().__init__(config)
        self.num_labels = config.num_labels
        self.mobilenet_va = MobileNetVaModel(config)
        last_hidden_size = self.mobilenet_va.layer[-1].convolution.out_channels
        # Classifier head
        self.dropout = nn.Dropout(config.classifier_dropout_prob, inplace=True)
        self.classifier = nn.Linear(last_hidden_size, config.num_labels) if config.num_labels > 0 else nn.Identity()
        # Initialize weights and apply final processing
        self.post_init()
    @add_start_docstrings_to_model_forward(MOBILENET_V1_INPUTS_DOCSTRING)
    @add_code_sample_docstrings(
        checkpoint=_IMAGE_CLASS_CHECKPOINT, output_type=ImageClassifierOutputWithNoAttention, config_class=_CONFIG_FOR_DOC, expected_output=_IMAGE_CLASS_EXPECTED_OUTPUT, )
    def forward(self, pixel_values: Optional[torch.Tensor] = None, output_hidden_states: Optional[bool] = None, labels: Optional[torch.Tensor] = None, return_dict: Optional[bool] = None, ) -> Union[tuple, ImageClassifierOutputWithNoAttention]:
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict
        outputs = self.mobilenet_va(pixel_values, output_hidden_states=output_hidden_states, return_dict=return_dict)
        pooled_output = outputs.pooler_output if return_dict else outputs[1]
        logits = self.classifier(self.dropout(pooled_output))
        loss = None
        if labels is not None:
            if self.config.problem_type is None:
                if self.num_labels == 1:
                    self.config.problem_type = "regression"
                elif self.num_labels > 1 and (labels.dtype == torch.long or labels.dtype == torch.int):
                    self.config.problem_type = "single_label_classification"
                else:
                    self.config.problem_type = "multi_label_classification"
            if self.config.problem_type == "regression":
                loss_fct = MSELoss()
                if self.num_labels == 1:
                    loss = loss_fct(logits.squeeze(), labels.squeeze())
                else:
                    loss = loss_fct(logits, labels)
            elif self.config.problem_type == "single_label_classification":
                loss_fct = CrossEntropyLoss()
                loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1))
            elif self.config.problem_type == "multi_label_classification":
                loss_fct = BCEWithLogitsLoss()
                loss = loss_fct(logits, labels)
        if not return_dict:
            output = (logits,) + outputs[2:]
            return ((loss,) + output) if loss is not None else output
        return ImageClassifierOutputWithNoAttention(
            loss=loss, logits=logits, hidden_states=outputs.hidden_states, )
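# A standalone sketch of the TensorFlow "SAME" padding computed by `apply_tf_padding`
# above: pad so the output spatial size equals ceil(input / stride), splitting the
# shortfall between both edges with the extra pixel on the trailing edge.
def _demo_same_pad(in_size, kernel, stride):
    if in_size % stride == 0:
        pad = max(kernel - stride, 0)
    else:
        pad = max(kernel - in_size % stride, 0)
    return pad // 2, pad - pad // 2  # (leading, trailing); e.g. (8, 3, 2) -> (0, 1)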
| 105 | 0 |
from __future__ import annotations
solution = []
def is_safe(board: list[list[int]], row: int, column: int) -> bool:
    """
    Return True if no already-placed queen attacks the cell (row, column):
    checks the row, the column and both upward diagonals.
    """
    for i in range(len(board)):
        if board[row][i] == 1:
            return False
    for i in range(len(board)):
        if board[i][column] == 1:
            return False
    for i, j in zip(range(row, -1, -1), range(column, -1, -1)):
        if board[i][j] == 1:
            return False
    for i, j in zip(range(row, -1, -1), range(column, len(board))):
        if board[i][j] == 1:
            return False
    return True
def solve(board: list[list[int]], row: int) -> bool:
    """Place queens row by row, backtracking on conflicts; record and print every full placement."""
    if row >= len(board):
        solution.append([line[:] for line in board])  # store a copy: the board is mutated while backtracking
        printboard(board)
        print()
        return True
    for i in range(len(board)):
        if is_safe(board, row, i):
            board[row][i] = 1
            solve(board, row + 1)
            board[row][i] = 0
    return False
def printboard(board: list[list[int]]) -> None:
    """Print the board with 'Q' for queens and '.' for empty cells."""
    for i in range(len(board)):
        for j in range(len(board)):
            if board[i][j] == 1:
                print("Q", end=" ")
            else:
                print(".", end=" ")
        print()
# n=int(input("The no. of queens"))
n = 8
board = [[0 for i in range(n)] for j in range(n)]
solve(board, 0)
print('''The total no. of solutions are :''', len(solution))
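# An alternative O(1) safety check (a common variant, not the solver above): track the
# used columns and both diagonal indices in sets instead of rescanning the board.
def solve_fast(size: int, row: int = 0, cols=frozenset(), diag1=frozenset(), diag2=frozenset()) -> int:
    if row == size:
        return 1
    total = 0
    for c in range(size):
        if c in cols or (row - c) in diag1 or (row + c) in diag2:
            continue  # attacked by an earlier queen
        total += solve_fast(size, row + 1, cols | {c}, diag1 | {row - c}, diag2 | {row + c})
    return total
print('Solutions counted by the set-based variant:', solve_fast(n))  # 92 for n == 8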
| 138 |
def reverse_long_words(sentence: str) -> str:
    """Reverse every word in the sentence that is longer than four characters."""
    return " ".join(
        "".join(word[::-1]) if len(word) > 4 else word for word in sentence.split() )
if __name__ == "__main__":
import doctest
doctest.testmod()
print(reverse_long_words('''Hey wollef sroirraw'''))
| 138 | 1 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
    'configuration_timesformer': ['TIMESFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP', 'TimesformerConfig'],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_timesformer'] = [
        'TIMESFORMER_PRETRAINED_MODEL_ARCHIVE_LIST',
        'TimesformerModel',
        'TimesformerForVideoClassification',
        'TimesformerPreTrainedModel',
    ]
if TYPE_CHECKING:
from .configuration_timesformer import TIMESFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, TimesformerConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_timesformer import (
TIMESFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
TimesformerForVideoClassification,
TimesformerModel,
TimesformerPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
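# A minimal standalone sketch (not transformers' `_LazyModule`) of the lazy-import
# idea above: the heavy import is deferred until the attribute is first resolved.
import importlib
class _LazyAttr:
    def __init__(self, module_name, attr):
        self._module_name, self._attr, self._obj = module_name, attr, None
    def resolve(self):
        if self._obj is None:  # import happens only on first use
            self._obj = getattr(importlib.import_module(self._module_name), self._attr)
        return self._obj
# _LazyAttr("math", "sqrt").resolve()(9.0) -> 3.0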
| 356 |
import os
from math import log10
def solution(data_file: str = "base_exp.txt") -> int:
    """
    Project Euler 99: each line of the data file holds a "base,exponent" pair.
    log10 is monotonic, so compare exponent * log10(base) instead of computing
    the huge powers; return the 1-based line number of the greatest pair.
    """
    largest = 0.0
    result = 0
    for i, line in enumerate(open(os.path.join(os.path.dirname(__file__), data_file))):
        a, x = list(map(int, line.split(",")))
        if x * log10(a) > largest:
            largest = x * log10(a)
            result = i + 1
    return result
if __name__ == "__main__":
print(solution())
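    # The comparison above avoids materialising x**y: log10 is monotonic, so
    # x**y > a**b exactly when y * log10(x) > b * log10(a). Illustrative check:
    print(519432 * log10(525806) > 632382 * log10(518061))  # False: the second power is larger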
| 29 | 0 |
"""simple docstring"""
INSTALL_CONTENT = '''\n# Installazione di Transformers\n! pip install transformers datasets\n# Per installare dalla fonte invece dell\'ultima versione rilasciata, commenta il comando sopra e\n# rimuovi la modalità commento al comando seguente.\n# ! pip install git+https://github.com/huggingface/transformers.git\n'''
notebook_first_cells = [{'''type''': '''code''', '''content''': INSTALL_CONTENT}]
black_avoid_patterns = {
'''{processor_class}''': '''FakeProcessorClass''',
'''{model_class}''': '''FakeModelClass''',
'''{object_class}''': '''FakeObjectClass''',
}
| 264 |
"""simple docstring"""
def UpperCamelCase ( UpperCAmelCase = "The quick brown fox jumps over the lazy dog" , ) ->bool:
"""simple docstring"""
a_ = set()
# Replace all the whitespace in our sentence
a_ = input_str.replace(" " , "" )
for alpha in input_str:
if "a" <= alpha.lower() <= "z":
frequency.add(alpha.lower() )
return len(UpperCAmelCase ) == 26
def UpperCamelCase ( UpperCAmelCase = "The quick brown fox jumps over the lazy dog" , ) ->bool:
"""simple docstring"""
a_ = [False] * 26
for char in input_str:
if char.islower():
a_ = True
elif char.isupper():
a_ = True
return all(UpperCAmelCase )
def UpperCamelCase ( UpperCAmelCase = "The quick brown fox jumps over the lazy dog" , ) ->bool:
"""simple docstring"""
return len({char for char in input_str.lower() if char.isalpha()} ) == 26
def UpperCamelCase ( ) ->None:
"""simple docstring"""
from timeit import timeit
a_ = "from __main__ import is_pangram, is_pangram_faster, is_pangram_fastest"
print(timeit("is_pangram()" , setup=UpperCAmelCase ) )
print(timeit("is_pangram_faster()" , setup=UpperCAmelCase ) )
print(timeit("is_pangram_fastest()" , setup=UpperCAmelCase ) )
# 5.348480500048026, 2.6477354579837993, 1.8470395830227062
# 5.036091582966037, 2.644472333951853, 1.8869528750656173
if __name__ == "__main__":
import doctest
doctest.testmod()
    benchmark()
| 243 | 0 |
import numpy as np
import torch
import torch.nn as nn
from transformers import CLIPConfig, CLIPVisionModelWithProjection, PreTrainedModel
from ...utils import logging
logger = logging.get_logger(__name__)


class IFSafetyChecker(PreTrainedModel):
    config_class = CLIPConfig

    _no_split_modules = ["CLIPEncoderLayer"]

    def __init__(self, config: CLIPConfig):
        super().__init__(config)

        self.vision_model = CLIPVisionModelWithProjection(config.vision_config)

        self.p_head = nn.Linear(config.vision_config.projection_dim, 1)
        self.w_head = nn.Linear(config.vision_config.projection_dim, 1)

    @torch.no_grad()
    def forward(self, clip_input, images, p_threshold=0.5, w_threshold=0.5):
        image_embeds = self.vision_model(clip_input)[0]

        nsfw_detected = self.p_head(image_embeds)
        nsfw_detected = nsfw_detected.flatten()
        nsfw_detected = nsfw_detected > p_threshold
        nsfw_detected = nsfw_detected.tolist()

        if any(nsfw_detected):
            logger.warning(
                "Potential NSFW content was detected in one or more images. A black image will be returned instead."
                " Try again with a different prompt and/or seed."
            )

        for idx, nsfw_detected_ in enumerate(nsfw_detected):
            if nsfw_detected_:
                images[idx] = np.zeros(images[idx].shape)

        watermark_detected = self.w_head(image_embeds)
        watermark_detected = watermark_detected.flatten()
        watermark_detected = watermark_detected > w_threshold
        watermark_detected = watermark_detected.tolist()

        if any(watermark_detected):
            logger.warning(
                "Potential watermarked content was detected in one or more images. A black image will be returned instead."
                " Try again with a different prompt and/or seed."
            )

        for idx, watermark_detected_ in enumerate(watermark_detected):
            if watermark_detected_:
                images[idx] = np.zeros(images[idx].shape)

        return images, nsfw_detected, watermark_detected
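
# Minimal usage sketch (illustrative only; the checkpoint id and the input
# tensors below are assumptions, not part of this module):
#
#   safety_checker = IFSafetyChecker.from_pretrained(
#       "DeepFloyd/IF-I-XL-v1.0", subfolder="safety_checker"
#   )
#   images, nsfw_flags, watermark_flags = safety_checker(
#       clip_input=clip_pixel_values,  # CLIP-preprocessed batch of images
#       images=numpy_images,           # the decoded images to (possibly) black out
#   )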
| 134 |
import argparse
import json
import pickle
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import MaskFormerConfig, MaskFormerForInstanceSegmentation, MaskFormerImageProcessor, SwinConfig
from transformers.utils import logging
logging.set_verbosity_info()
_snake_case : Union[str, Any] = logging.get_logger(__name__)
def get_maskformer_config(model_name: str):
    backbone_config = SwinConfig.from_pretrained(
        "microsoft/swin-tiny-patch4-window7-224", out_features=["stage1", "stage2", "stage3", "stage4"]
    )
    config = MaskFormerConfig(backbone_config=backbone_config)

    repo_id = "huggingface/label-files"
    if "ade20k-full" in model_name:
        # this should be ok
        config.num_labels = 847
        filename = "maskformer-ade20k-full-id2label.json"
    elif "ade" in model_name:
        # this should be ok
        config.num_labels = 150
        filename = "ade20k-id2label.json"
    elif "coco-stuff" in model_name:
        # this should be ok
        config.num_labels = 171
        filename = "maskformer-coco-stuff-id2label.json"
    elif "coco" in model_name:
        # TODO
        config.num_labels = 133
        filename = "coco-panoptic-id2label.json"
    elif "cityscapes" in model_name:
        # this should be ok
        config.num_labels = 19
        filename = "cityscapes-id2label.json"
    elif "vistas" in model_name:
        # this should be ok
        config.num_labels = 65
        filename = "mapillary-vistas-id2label.json"

    id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
    id2label = {int(k): v for k, v in id2label.items()}
return config
def create_rename_keys(config):
    rename_keys = []
# stem
# fmt: off
rename_keys.append(("backbone.patch_embed.proj.weight", "model.pixel_level_module.encoder.model.embeddings.patch_embeddings.projection.weight") )
rename_keys.append(("backbone.patch_embed.proj.bias", "model.pixel_level_module.encoder.model.embeddings.patch_embeddings.projection.bias") )
rename_keys.append(("backbone.patch_embed.norm.weight", "model.pixel_level_module.encoder.model.embeddings.norm.weight") )
rename_keys.append(("backbone.patch_embed.norm.bias", "model.pixel_level_module.encoder.model.embeddings.norm.bias") )
# stages
for i in range(len(config.backbone_config.depths ) ):
for j in range(config.backbone_config.depths[i] ):
rename_keys.append((F'backbone.layers.{i}.blocks.{j}.norm1.weight', F'model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.layernorm_before.weight') )
rename_keys.append((F'backbone.layers.{i}.blocks.{j}.norm1.bias', F'model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.layernorm_before.bias') )
rename_keys.append((F'backbone.layers.{i}.blocks.{j}.attn.relative_position_bias_table', F'model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.attention.self.relative_position_bias_table') )
rename_keys.append((F'backbone.layers.{i}.blocks.{j}.attn.relative_position_index', F'model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.attention.self.relative_position_index') )
rename_keys.append((F'backbone.layers.{i}.blocks.{j}.attn.proj.weight', F'model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.attention.output.dense.weight') )
rename_keys.append((F'backbone.layers.{i}.blocks.{j}.attn.proj.bias', F'model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.attention.output.dense.bias') )
rename_keys.append((F'backbone.layers.{i}.blocks.{j}.norm2.weight', F'model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.layernorm_after.weight') )
rename_keys.append((F'backbone.layers.{i}.blocks.{j}.norm2.bias', F'model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.layernorm_after.bias') )
rename_keys.append((F'backbone.layers.{i}.blocks.{j}.mlp.fc1.weight', F'model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.intermediate.dense.weight') )
rename_keys.append((F'backbone.layers.{i}.blocks.{j}.mlp.fc1.bias', F'model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.intermediate.dense.bias') )
rename_keys.append((F'backbone.layers.{i}.blocks.{j}.mlp.fc2.weight', F'model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.output.dense.weight') )
rename_keys.append((F'backbone.layers.{i}.blocks.{j}.mlp.fc2.bias', F'model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.output.dense.bias') )
if i < 3:
rename_keys.append((F'backbone.layers.{i}.downsample.reduction.weight', F'model.pixel_level_module.encoder.model.encoder.layers.{i}.downsample.reduction.weight') )
rename_keys.append((F'backbone.layers.{i}.downsample.norm.weight', F'model.pixel_level_module.encoder.model.encoder.layers.{i}.downsample.norm.weight') )
rename_keys.append((F'backbone.layers.{i}.downsample.norm.bias', F'model.pixel_level_module.encoder.model.encoder.layers.{i}.downsample.norm.bias') )
rename_keys.append((F'backbone.norm{i}.weight', F'model.pixel_level_module.encoder.hidden_states_norms.{i}.weight') )
rename_keys.append((F'backbone.norm{i}.bias', F'model.pixel_level_module.encoder.hidden_states_norms.{i}.bias') )
# FPN
rename_keys.append(("sem_seg_head.layer_4.weight", "model.pixel_level_module.decoder.fpn.stem.0.weight") )
rename_keys.append(("sem_seg_head.layer_4.norm.weight", "model.pixel_level_module.decoder.fpn.stem.1.weight") )
rename_keys.append(("sem_seg_head.layer_4.norm.bias", "model.pixel_level_module.decoder.fpn.stem.1.bias") )
for source_index, target_index in zip(range(3 , 0 , -1 ) , range(0 , 3 ) ):
rename_keys.append((F'sem_seg_head.adapter_{source_index}.weight', F'model.pixel_level_module.decoder.fpn.layers.{target_index}.proj.0.weight') )
rename_keys.append((F'sem_seg_head.adapter_{source_index}.norm.weight', F'model.pixel_level_module.decoder.fpn.layers.{target_index}.proj.1.weight') )
rename_keys.append((F'sem_seg_head.adapter_{source_index}.norm.bias', F'model.pixel_level_module.decoder.fpn.layers.{target_index}.proj.1.bias') )
rename_keys.append((F'sem_seg_head.layer_{source_index}.weight', F'model.pixel_level_module.decoder.fpn.layers.{target_index}.block.0.weight') )
rename_keys.append((F'sem_seg_head.layer_{source_index}.norm.weight', F'model.pixel_level_module.decoder.fpn.layers.{target_index}.block.1.weight') )
rename_keys.append((F'sem_seg_head.layer_{source_index}.norm.bias', F'model.pixel_level_module.decoder.fpn.layers.{target_index}.block.1.bias') )
rename_keys.append(("sem_seg_head.mask_features.weight", "model.pixel_level_module.decoder.mask_projection.weight") )
rename_keys.append(("sem_seg_head.mask_features.bias", "model.pixel_level_module.decoder.mask_projection.bias") )
# Transformer decoder
for idx in range(config.decoder_config.decoder_layers ):
# self-attention out projection
rename_keys.append((F'sem_seg_head.predictor.transformer.decoder.layers.{idx}.self_attn.out_proj.weight', F'model.transformer_module.decoder.layers.{idx}.self_attn.out_proj.weight') )
rename_keys.append((F'sem_seg_head.predictor.transformer.decoder.layers.{idx}.self_attn.out_proj.bias', F'model.transformer_module.decoder.layers.{idx}.self_attn.out_proj.bias') )
# cross-attention out projection
rename_keys.append((F'sem_seg_head.predictor.transformer.decoder.layers.{idx}.multihead_attn.out_proj.weight', F'model.transformer_module.decoder.layers.{idx}.encoder_attn.out_proj.weight') )
rename_keys.append((F'sem_seg_head.predictor.transformer.decoder.layers.{idx}.multihead_attn.out_proj.bias', F'model.transformer_module.decoder.layers.{idx}.encoder_attn.out_proj.bias') )
# MLP 1
rename_keys.append((F'sem_seg_head.predictor.transformer.decoder.layers.{idx}.linear1.weight', F'model.transformer_module.decoder.layers.{idx}.fc1.weight') )
rename_keys.append((F'sem_seg_head.predictor.transformer.decoder.layers.{idx}.linear1.bias', F'model.transformer_module.decoder.layers.{idx}.fc1.bias') )
# MLP 2
rename_keys.append((F'sem_seg_head.predictor.transformer.decoder.layers.{idx}.linear2.weight', F'model.transformer_module.decoder.layers.{idx}.fc2.weight') )
rename_keys.append((F'sem_seg_head.predictor.transformer.decoder.layers.{idx}.linear2.bias', F'model.transformer_module.decoder.layers.{idx}.fc2.bias') )
# layernorm 1 (self-attention layernorm)
rename_keys.append((F'sem_seg_head.predictor.transformer.decoder.layers.{idx}.norm1.weight', F'model.transformer_module.decoder.layers.{idx}.self_attn_layer_norm.weight') )
rename_keys.append((F'sem_seg_head.predictor.transformer.decoder.layers.{idx}.norm1.bias', F'model.transformer_module.decoder.layers.{idx}.self_attn_layer_norm.bias') )
# layernorm 2 (cross-attention layernorm)
rename_keys.append((F'sem_seg_head.predictor.transformer.decoder.layers.{idx}.norm2.weight', F'model.transformer_module.decoder.layers.{idx}.encoder_attn_layer_norm.weight') )
rename_keys.append((F'sem_seg_head.predictor.transformer.decoder.layers.{idx}.norm2.bias', F'model.transformer_module.decoder.layers.{idx}.encoder_attn_layer_norm.bias') )
# layernorm 3 (final layernorm)
rename_keys.append((F'sem_seg_head.predictor.transformer.decoder.layers.{idx}.norm3.weight', F'model.transformer_module.decoder.layers.{idx}.final_layer_norm.weight') )
rename_keys.append((F'sem_seg_head.predictor.transformer.decoder.layers.{idx}.norm3.bias', F'model.transformer_module.decoder.layers.{idx}.final_layer_norm.bias') )
rename_keys.append(("sem_seg_head.predictor.transformer.decoder.norm.weight", "model.transformer_module.decoder.layernorm.weight") )
rename_keys.append(("sem_seg_head.predictor.transformer.decoder.norm.bias", "model.transformer_module.decoder.layernorm.bias") )
# heads on top
rename_keys.append(("sem_seg_head.predictor.query_embed.weight", "model.transformer_module.queries_embedder.weight") )
rename_keys.append(("sem_seg_head.predictor.input_proj.weight", "model.transformer_module.input_projection.weight") )
rename_keys.append(("sem_seg_head.predictor.input_proj.bias", "model.transformer_module.input_projection.bias") )
rename_keys.append(("sem_seg_head.predictor.class_embed.weight", "class_predictor.weight") )
rename_keys.append(("sem_seg_head.predictor.class_embed.bias", "class_predictor.bias") )
for i in range(3 ):
rename_keys.append((F'sem_seg_head.predictor.mask_embed.layers.{i}.weight', F'mask_embedder.{i}.0.weight') )
rename_keys.append((F'sem_seg_head.predictor.mask_embed.layers.{i}.bias', F'mask_embedder.{i}.0.bias') )
# fmt: on
return rename_keys
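
# Each tuple above maps an original (Detectron2-style) parameter name to its
# Transformers counterpart, e.g. "backbone.patch_embed.proj.weight" becomes
# "model.pixel_level_module.encoder.model.embeddings.patch_embeddings.projection.weight".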
def rename_key(dct, old, new):
    val = dct.pop(old)
    dct[new] = val
def read_in_swin_q_k_v(state_dict, backbone_config):
    num_features = [int(backbone_config.embed_dim * 2**i) for i in range(len(backbone_config.depths))]
    for i in range(len(backbone_config.depths)):
        dim = num_features[i]
        for j in range(backbone_config.depths[i]):
            # fmt: off
            # read in weights + bias of input projection layer (in original implementation, this is a single matrix + bias)
            in_proj_weight = state_dict.pop(F'backbone.layers.{i}.blocks.{j}.attn.qkv.weight')
            in_proj_bias = state_dict.pop(F'backbone.layers.{i}.blocks.{j}.attn.qkv.bias')
            # next, add query, keys and values (in that order) to the state dict
            state_dict[F'model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.attention.self.query.weight'] = in_proj_weight[:dim, :]
            state_dict[F'model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.attention.self.query.bias'] = in_proj_bias[:dim]
            state_dict[F'model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.attention.self.key.weight'] = in_proj_weight[dim : dim * 2, :]
            state_dict[F'model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.attention.self.key.bias'] = in_proj_bias[dim : dim * 2]
            state_dict[F'model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.attention.self.value.weight'] = in_proj_weight[-dim:, :]
            state_dict[F'model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.attention.self.value.bias'] = in_proj_bias[-dim:]
            # fmt: on
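
# The original checkpoint stores query, key and value as one fused `qkv`
# matrix of shape (3 * dim, dim); the slices above split it into the three
# separate (dim, dim) projections that the Transformers implementation expects.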
def read_in_decoder_q_k_v(state_dict, config):
    # fmt: off
    hidden_size = config.decoder_config.hidden_size
    for idx in range(config.decoder_config.decoder_layers):
        # read in weights + bias of self-attention input projection layer (in the original implementation, this is a single matrix + bias)
        in_proj_weight = state_dict.pop(F'sem_seg_head.predictor.transformer.decoder.layers.{idx}.self_attn.in_proj_weight')
        in_proj_bias = state_dict.pop(F'sem_seg_head.predictor.transformer.decoder.layers.{idx}.self_attn.in_proj_bias')
        # next, add query, keys and values (in that order) to the state dict
        state_dict[F'model.transformer_module.decoder.layers.{idx}.self_attn.q_proj.weight'] = in_proj_weight[:hidden_size, :]
        state_dict[F'model.transformer_module.decoder.layers.{idx}.self_attn.q_proj.bias'] = in_proj_bias[:config.hidden_size]
        state_dict[F'model.transformer_module.decoder.layers.{idx}.self_attn.k_proj.weight'] = in_proj_weight[hidden_size : hidden_size * 2, :]
        state_dict[F'model.transformer_module.decoder.layers.{idx}.self_attn.k_proj.bias'] = in_proj_bias[hidden_size : hidden_size * 2]
        state_dict[F'model.transformer_module.decoder.layers.{idx}.self_attn.v_proj.weight'] = in_proj_weight[-hidden_size:, :]
        state_dict[F'model.transformer_module.decoder.layers.{idx}.self_attn.v_proj.bias'] = in_proj_bias[-hidden_size:]
        # read in weights + bias of cross-attention input projection layer (in the original implementation, this is a single matrix + bias)
        in_proj_weight = state_dict.pop(F'sem_seg_head.predictor.transformer.decoder.layers.{idx}.multihead_attn.in_proj_weight')
        in_proj_bias = state_dict.pop(F'sem_seg_head.predictor.transformer.decoder.layers.{idx}.multihead_attn.in_proj_bias')
        # next, add query, keys and values (in that order) to the state dict
        state_dict[F'model.transformer_module.decoder.layers.{idx}.encoder_attn.q_proj.weight'] = in_proj_weight[:hidden_size, :]
        state_dict[F'model.transformer_module.decoder.layers.{idx}.encoder_attn.q_proj.bias'] = in_proj_bias[:config.hidden_size]
        state_dict[F'model.transformer_module.decoder.layers.{idx}.encoder_attn.k_proj.weight'] = in_proj_weight[hidden_size : hidden_size * 2, :]
        state_dict[F'model.transformer_module.decoder.layers.{idx}.encoder_attn.k_proj.bias'] = in_proj_bias[hidden_size : hidden_size * 2]
        state_dict[F'model.transformer_module.decoder.layers.{idx}.encoder_attn.v_proj.weight'] = in_proj_weight[-hidden_size:, :]
        state_dict[F'model.transformer_module.decoder.layers.{idx}.encoder_attn.v_proj.bias'] = in_proj_bias[-hidden_size:]
    # fmt: on
def prepare_img():
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    im = Image.open(requests.get(url, stream=True).raw)
    return im
@torch.no_grad()
def convert_maskformer_checkpoint(model_name, checkpoint_path, pytorch_dump_folder_path, push_to_hub=False):
    config = get_maskformer_config(model_name)

    # load original state_dict
    with open(checkpoint_path, "rb") as f:
        data = pickle.load(f)
    state_dict = data["model"]

    # for name, param in state_dict.items():
    # print(name, param.shape)

    # rename keys
    rename_keys = create_rename_keys(config)
    for src, dest in rename_keys:
        rename_key(state_dict, src, dest)
    read_in_swin_q_k_v(state_dict, config.backbone_config)
    read_in_decoder_q_k_v(state_dict, config)

    # update to torch tensors
    for key, value in state_dict.items():
        state_dict[key] = torch.from_numpy(value)

    # load 🤗 model
    model = MaskFormerForInstanceSegmentation(config)
    model.eval()

    for name, param in model.named_parameters():
        print(name, param.shape)

    missing_keys, unexpected_keys = model.load_state_dict(state_dict, strict=False)
    assert missing_keys == [
        "model.pixel_level_module.encoder.model.layernorm.weight",
        "model.pixel_level_module.encoder.model.layernorm.bias",
    ]
    assert len(unexpected_keys) == 0, F'Unexpected keys: {unexpected_keys}'

    # verify results
    image = prepare_img()
    if "vistas" in model_name:
        ignore_index = 65
    elif "cityscapes" in model_name:
        ignore_index = 65535
    else:
        ignore_index = 255
    reduce_labels = True if "ade" in model_name else False
    image_processor = MaskFormerImageProcessor(ignore_index=ignore_index, reduce_labels=reduce_labels)

    inputs = image_processor(image, return_tensors="pt")

    outputs = model(**inputs)

    print("Logits:", outputs.class_queries_logits[0, :3, :3])

    if model_name == "maskformer-swin-tiny-ade":
        expected_logits = torch.tensor(
            [[3.6353, -4.4770, -2.6065], [0.5081, -4.2394, -3.5343], [2.1909, -5.0353, -1.9323]]
        )
        assert torch.allclose(outputs.class_queries_logits[0, :3, :3], expected_logits, atol=1e-4)
    print("Looks ok!")

    if pytorch_dump_folder_path is not None:
        print(F'Saving model and image processor to {pytorch_dump_folder_path}')
        Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
        model.save_pretrained(pytorch_dump_folder_path)
        image_processor.save_pretrained(pytorch_dump_folder_path)

    if push_to_hub:
        print("Pushing model and image processor to the hub...")
        model.push_to_hub(F'nielsr/{model_name}')
        image_processor.push_to_hub(F'nielsr/{model_name}')
if __name__ == "__main__":
_snake_case : List[str] = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--model_name",
default="maskformer-swin-tiny-ade",
type=str,
help=("Name of the MaskFormer model you'd like to convert",),
)
parser.add_argument(
"--checkpoint_path",
default="/Users/nielsrogge/Documents/MaskFormer_checkpoints/MaskFormer-Swin-tiny-ADE20k/model.pkl",
type=str,
help="Path to the original state dict (.pth file).",
)
parser.add_argument(
"--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model directory."
)
parser.add_argument(
"--push_to_hub", action="store_true", help="Whether or not to push the converted model to the 🤗 hub."
)
_snake_case : List[str] = parser.parse_args()
convert_maskformer_checkpoint(
args.model_name, args.checkpoint_path, args.pytorch_dump_folder_path, args.push_to_hub
)
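
    # Example invocation (illustrative; the script name and paths are assumptions):
    #
    #   python convert_maskformer_checkpoint.py \
    #       --model_name maskformer-swin-tiny-ade \
    #       --checkpoint_path /path/to/MaskFormer-Swin-tiny-ADE20k/model.pkl \
    #       --pytorch_dump_folder_path /path/to/output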
| 134 | 1 |
"""simple docstring"""
INSTALL_CONTENT = '''
# Transformers installation
! pip install transformers datasets
# To install from source instead of the last release, comment the command above and uncomment the following one.
# ! pip install git+https://github.com/huggingface/transformers.git
'''

notebook_first_cells = [{'type': 'code', 'content': INSTALL_CONTENT}]
black_avoid_patterns = {
    '{processor_class}': 'FakeProcessorClass',
    '{model_class}': 'FakeModelClass',
    '{object_class}': 'FakeObjectClass',
}
 | 33 |
'''simple docstring'''
from typing import Dict, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import convert_to_rgb, normalize, rescale, resize, to_channel_dimension_format
from ...image_utils import (
OPENAI_CLIP_MEAN,
OPENAI_CLIP_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_vision_available, logging
if is_vision_available():
import PIL
__a: List[str] = logging.get_logger(__name__)
class BlipImageProcessor(BaseImageProcessor):
    r"""
    Constructs a BLIP image processor.
    """

    model_input_names = ["pixel_values"]

    def __init__(
        self,
        do_resize: bool = True,
        size: Dict[str, int] = None,
        resample: PILImageResampling = PILImageResampling.BICUBIC,
        do_rescale: bool = True,
        rescale_factor: Union[int, float] = 1 / 255,
        do_normalize: bool = True,
        image_mean: Optional[Union[float, List[float]]] = None,
        image_std: Optional[Union[float, List[float]]] = None,
        do_convert_rgb: bool = True,
        **kwargs,
    ) -> None:
        super().__init__(**kwargs)
        size = size if size is not None else {"height": 384, "width": 384}
        size = get_size_dict(size, default_to_square=True)

        self.do_resize = do_resize
        self.size = size
        self.resample = resample
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_normalize = do_normalize
        self.image_mean = image_mean if image_mean is not None else OPENAI_CLIP_MEAN
        self.image_std = image_std if image_std is not None else OPENAI_CLIP_STD
        self.do_convert_rgb = do_convert_rgb

    def resize(
        self,
        image: np.ndarray,
        size: Dict[str, int],
        resample: PILImageResampling = PILImageResampling.BICUBIC,
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        size = get_size_dict(size)
        if "height" not in size or "width" not in size:
            raise ValueError(f"The `size` dictionary must contain the keys `height` and `width`. Got {size.keys()}")
        output_size = (size["height"], size["width"])
        return resize(image, size=output_size, resample=resample, data_format=data_format, **kwargs)

    def rescale(
        self,
        image: np.ndarray,
        scale: Union[int, float],
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        return rescale(image, scale=scale, data_format=data_format, **kwargs)

    def normalize(
        self,
        image: np.ndarray,
        mean: Union[float, List[float]],
        std: Union[float, List[float]],
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        return normalize(image, mean=mean, std=std, data_format=data_format, **kwargs)

    def preprocess(
        self,
        images: ImageInput,
        do_resize: Optional[bool] = None,
        size: Optional[Dict[str, int]] = None,
        resample: PILImageResampling = None,
        do_rescale: Optional[bool] = None,
        rescale_factor: Optional[float] = None,
        do_normalize: Optional[bool] = None,
        image_mean: Optional[Union[float, List[float]]] = None,
        image_std: Optional[Union[float, List[float]]] = None,
        do_convert_rgb: bool = None,
        return_tensors: Optional[Union[str, TensorType]] = None,
        data_format: ChannelDimension = ChannelDimension.FIRST,
        **kwargs,
    ) -> BatchFeature:
        do_resize = do_resize if do_resize is not None else self.do_resize
        resample = resample if resample is not None else self.resample
        do_rescale = do_rescale if do_rescale is not None else self.do_rescale
        rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor
        do_normalize = do_normalize if do_normalize is not None else self.do_normalize
        image_mean = image_mean if image_mean is not None else self.image_mean
        image_std = image_std if image_std is not None else self.image_std
        do_convert_rgb = do_convert_rgb if do_convert_rgb is not None else self.do_convert_rgb

        size = size if size is not None else self.size
        size = get_size_dict(size, default_to_square=False)

        images = make_list_of_images(images)

        if not valid_images(images):
            raise ValueError(
                "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
                "torch.Tensor, tf.Tensor or jax.ndarray."
            )

        if do_resize and (size is None or resample is None):
            raise ValueError("Size and resample must be specified if do_resize is True.")

        if do_rescale and rescale_factor is None:
            raise ValueError("Rescale factor must be specified if do_rescale is True.")

        if do_normalize and (image_mean is None or image_std is None):
            raise ValueError("Image mean and std must be specified if do_normalize is True.")

        # PIL RGBA images are converted to RGB
        if do_convert_rgb:
            images = [convert_to_rgb(image) for image in images]

        # All transformations expect numpy arrays.
        images = [to_numpy_array(image) for image in images]

        if do_resize:
            images = [self.resize(image=image, size=size, resample=resample) for image in images]

        if do_rescale:
            images = [self.rescale(image=image, scale=rescale_factor) for image in images]

        if do_normalize:
            images = [self.normalize(image=image, mean=image_mean, std=image_std) for image in images]

        images = [to_channel_dimension_format(image, data_format) for image in images]

        encoded_outputs = BatchFeature(data={"pixel_values": images}, tensor_type=return_tensors)
        return encoded_outputs
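
# Minimal usage sketch (illustrative; the checkpoint id and image path are
# assumptions):
#
#   from PIL import Image
#   processor = BlipImageProcessor.from_pretrained("Salesforce/blip-image-captioning-base")
#   inputs = processor(Image.open("photo.jpg"), return_tensors="pt")
#   # inputs["pixel_values"].shape -> torch.Size([1, 3, 384, 384])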
| 198 | 0 |
from typing import Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature
from ...image_transforms import get_image_size, pad, rescale, to_channel_dimension_format
from ...image_utils import ChannelDimension, ImageInput, make_list_of_images, to_numpy_array, valid_images
from ...utils import TensorType, logging
SCREAMING_SNAKE_CASE__ : Optional[int] = logging.get_logger(__name__)
class Swin2SRImageProcessor(BaseImageProcessor):
    model_input_names = ["pixel_values"]

    def __init__(
        self,
        do_rescale: bool = True,
        rescale_factor: Union[int, float] = 1 / 255,
        do_pad: bool = True,
        pad_size: int = 8,
        **kwargs,
    ) -> None:
        super().__init__(**kwargs)
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_pad = do_pad
        self.pad_size = pad_size

    def rescale(
        self, image: np.ndarray, scale: float, data_format: Optional[Union[str, ChannelDimension]] = None, **kwargs
    ) -> np.ndarray:
        return rescale(image, scale=scale, data_format=data_format, **kwargs)

    def pad(
        self, image: np.ndarray, size: int, data_format: Optional[Union[str, ChannelDimension]] = None
    ) -> np.ndarray:
        old_height, old_width = get_image_size(image)
        pad_height = (old_height // size + 1) * size - old_height
        pad_width = (old_width // size + 1) * size - old_width
        return pad(image, ((0, pad_height), (0, pad_width)), mode="symmetric", data_format=data_format)

    def preprocess(
        self,
        images: ImageInput,
        do_rescale: Optional[bool] = None,
        rescale_factor: Optional[float] = None,
        do_pad: Optional[bool] = None,
        pad_size: Optional[int] = None,
        return_tensors: Optional[Union[str, TensorType]] = None,
        data_format: Union[str, ChannelDimension] = ChannelDimension.FIRST,
        **kwargs,
    ) -> BatchFeature:
        do_rescale = do_rescale if do_rescale is not None else self.do_rescale
        rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor
        do_pad = do_pad if do_pad is not None else self.do_pad
        pad_size = pad_size if pad_size is not None else self.pad_size

        images = make_list_of_images(images)

        if not valid_images(images):
            raise ValueError(
                "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
                "torch.Tensor, tf.Tensor or jax.ndarray."
            )

        if do_rescale and rescale_factor is None:
            raise ValueError("Rescale factor must be specified if do_rescale is True.")

        # All transformations expect numpy arrays.
        images = [to_numpy_array(image) for image in images]

        if do_rescale:
            images = [self.rescale(image=image, scale=rescale_factor) for image in images]

        if do_pad:
            images = [self.pad(image, size=pad_size) for image in images]

        images = [to_channel_dimension_format(image, data_format) for image in images]

        data = {"pixel_values": images}
        return BatchFeature(data=data, tensor_type=return_tensors)
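
# Example of the padding behaviour above (added for clarity): with the default
# pad_size of 8, a 510 x 517 image is padded on the bottom/right edges to
# 512 x 520, the next multiples of 8. Note that (h // size + 1) * size always
# adds at least one extra block, so a side that is already 512 pixels is
# padded to 520.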
| 360 |
from collections.abc import Iterator, MutableMapping
from dataclasses import dataclass
from typing import Generic, TypeVar
KEY = TypeVar("KEY")
VAL = TypeVar("VAL")


@dataclass(frozen=True, slots=True)
class _Item(Generic[KEY, VAL]):
    key: KEY
    val: VAL


class _DeletedItem(_Item):
    def __init__(self) -> None:
        super().__init__(None, None)

    def __bool__(self) -> bool:
        return False


_deleted = _DeletedItem()


class HashMap(MutableMapping[KEY, VAL]):
    """Hash map with open addressing."""

    def __init__(self, initial_block_size: int = 8, capacity_factor: float = 0.75) -> None:
        self._initial_block_size = initial_block_size
        self._buckets: list[_Item | None] = [None] * initial_block_size
        assert 0.0 < capacity_factor < 1.0
        self._capacity_factor = capacity_factor
        self._len = 0

    def _get_bucket_index(self, key: KEY) -> int:
        return hash(key) % len(self._buckets)

    def _get_next_ind(self, ind: int) -> int:
        return (ind + 1) % len(self._buckets)

    def _try_set(self, ind: int, key: KEY, val: VAL) -> bool:
        stored = self._buckets[ind]
        if not stored:
            self._buckets[ind] = _Item(key, val)
            self._len += 1
            return True
        elif stored.key == key:
            self._buckets[ind] = _Item(key, val)
            return True
        else:
            return False

    def _is_full(self) -> bool:
        limit = len(self._buckets) * self._capacity_factor
        return len(self) >= int(limit)

    def _is_sparse(self) -> bool:
        if len(self._buckets) <= self._initial_block_size:
            return False
        limit = len(self._buckets) * self._capacity_factor / 2
        return len(self) < limit

    def _resize(self, new_size: int) -> None:
        old_buckets = self._buckets
        self._buckets = [None] * new_size
        self._len = 0
        for item in old_buckets:
            if item:
                self._add_item(item.key, item.val)

    def _size_up(self) -> None:
        self._resize(len(self._buckets) * 2)

    def _size_down(self) -> None:
        self._resize(len(self._buckets) // 2)

    def _iterate_buckets(self, key: KEY) -> Iterator[int]:
        ind = self._get_bucket_index(key)
        for _ in range(len(self._buckets)):
            yield ind
            ind = self._get_next_ind(ind)

    def _add_item(self, key: KEY, val: VAL) -> None:
        for ind in self._iterate_buckets(key):
            if self._try_set(ind, key, val):
                break

    def __setitem__(self, key: KEY, val: VAL) -> None:
        if self._is_full():
            self._size_up()
        self._add_item(key, val)

    def __delitem__(self, key: KEY) -> None:
        for ind in self._iterate_buckets(key):
            item = self._buckets[ind]
            if item is None:
                raise KeyError(key)
            if item is _deleted:
                continue
            if item.key == key:
                self._buckets[ind] = _deleted
                self._len -= 1
                break
        if self._is_sparse():
            self._size_down()

    def __getitem__(self, key: KEY) -> VAL:
        for ind in self._iterate_buckets(key):
            item = self._buckets[ind]
            if item is None:
                break
            if item is _deleted:
                continue
            if item.key == key:
                return item.val
        raise KeyError(key)

    def __len__(self) -> int:
        return self._len

    def __iter__(self) -> Iterator[KEY]:
        yield from (item.key for item in self._buckets if item)

    def __repr__(self) -> str:
        val_string = " ,".join(f"{item.key}: {item.val}" for item in self._buckets if item)
        return f"HashMap({val_string})"
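
# Illustrative usage (added example, comments only):
# >>> hm = HashMap()
# >>> hm["key_a"] = 3
# >>> hm["key_b"] = 42
# >>> len(hm)
# 2
# >>> hm["key_a"]
# 3
# >>> del hm["key_b"]
# >>> "key_b" in hm
# False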
| 339 | 0 |
'''simple docstring'''
from math import factorial
def solution(num: int = 100) -> int:
    """Return the sum of the digits of num! (Project Euler 20)."""
    return sum(map(int, str(factorial(num))))
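
# Example (added for clarity): factorial(10) = 3628800 and
# 3 + 6 + 2 + 8 + 8 + 0 + 0 = 27, so:
# >>> solution(10)
# 27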
if __name__ == "__main__":
print(solution(int(input('Enter the Number: ').strip())))
| 200 |
"""simple docstring"""
import heapq as hq
import math
from collections.abc import Iterator
class Vertex:
    """Vertex of a weighted, undirected graph."""

    def __init__(self, id_):
        self.id = str(id_)
        self.key = None
        self.pi = None
        self.neighbors = []
        self.edges = {}  # {vertex:distance}

    def __lt__(self, other):
        return self.key < other.key

    def __repr__(self):
        return self.id

    def add_neighbor(self, vertex):
        self.neighbors.append(vertex)

    def add_edge(self, vertex, weight):
        self.edges[vertex.id] = weight


def connect(graph, a, b, edge):
    """Connect vertices `a` and `b` (1-indexed) with an edge of the given weight."""
    # add the neighbors:
    graph[a - 1].add_neighbor(graph[b - 1])
    graph[b - 1].add_neighbor(graph[a - 1])
    # add the edges:
    graph[a - 1].add_edge(graph[b - 1], edge)
    graph[b - 1].add_edge(graph[a - 1], edge)


def prim(graph: list, root: Vertex) -> list:
    """Prim's algorithm with a linear scan for the minimum-key vertex."""
    a = []
    for u in graph:
        u.key = math.inf
        u.pi = None
    root.key = 0
    q = graph[:]
    while q:
        u = min(q)
        q.remove(u)
        for v in u.neighbors:
            if (v in q) and (u.edges[v.id] < v.key):
                v.pi = u
                v.key = u.edges[v.id]
    for i in range(1, len(graph)):
        a.append((int(graph[i].id) + 1, int(graph[i].pi.id) + 1))
    return a


def prim_heap(graph: list, root: Vertex) -> Iterator[tuple]:
    """Prim's algorithm using a binary heap for the vertex queue."""
    for u in graph:
        u.key = math.inf
        u.pi = None
    root.key = 0

    h = list(graph)
    hq.heapify(h)

    while h:
        u = hq.heappop(h)
        for v in u.neighbors:
            if (v in h) and (u.edges[v.id] < v.key):
                v.pi = u
                v.key = u.edges[v.id]
                hq.heapify(h)

    for i in range(1, len(graph)):
        yield (int(graph[i].id) + 1, int(graph[i].pi.id) + 1)


def test_vector() -> None:
    """simple docstring"""
if __name__ == "__main__":
import doctest
doctest.testmod()
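    # Illustrative usage (added example): build a small 1-indexed graph and
    # run Prim's algorithm from vertex 1.
    #   graph = [Vertex(i) for i in range(4)]
    #   connect(graph, 1, 2, 15)
    #   connect(graph, 1, 3, 12)
    #   connect(graph, 2, 4, 13)
    #   connect(graph, 3, 4, 5)
    #   prim(graph[:], graph[0])  # -> [(2, 4), (3, 1), (4, 3)]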
| 105 | 0 |
'''simple docstring'''
import math
def prime_sieve(n: int) -> list:
    """Odd-only sieve of Eratosthenes returning all primes below `n`."""
    is_prime = [True] * n
    is_prime[0] = False
    is_prime[1] = False
    is_prime[2] = True

    for i in range(3, int(n**0.5 + 1), 2):
        index = i * 2
        while index < n:
            is_prime[index] = False
            index = index + i

    primes = [2]

    for i in range(3, n, 2):
        if is_prime[i]:
            primes.append(i)

    return primes


def solution(limit: int = 999_966_663_333) -> int:
    """Sum of all semidivisible numbers not exceeding `limit` (Project Euler 234)."""
    primes_upper_bound = math.floor(math.sqrt(limit)) + 100
    primes = prime_sieve(primes_upper_bound)

    matches_sum = 0
    prime_index = 0
    last_prime = primes[prime_index]

    while (last_prime**2) <= limit:
        next_prime = primes[prime_index + 1]

        lower_bound = last_prime**2
        upper_bound = next_prime**2

        # Get numbers divisible by lps(current)
        current = lower_bound + last_prime
        while upper_bound > current <= limit:
            matches_sum += current
            current += last_prime

        # Reset the upper_bound
        while (upper_bound - next_prime) > limit:
            upper_bound -= next_prime

        # Add the numbers divisible by ups(current)
        current = upper_bound - next_prime
        while current > lower_bound:
            matches_sum += current
            current -= next_prime

        # Remove the numbers divisible by both ups and lps
        current = 0
        while upper_bound > current <= limit:
            if current <= lower_bound:
                # Increment the current number
                current += last_prime * next_prime
                continue

            if current > limit:
                break

            # Remove twice since it was added by both ups and lps
            matches_sum -= current * 2

            # Increment the current number
            current += last_prime * next_prime

        # Setup for next pair
        last_prime = next_prime
        prime_index += 1

    return matches_sum
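
# For a number n, lps(n) is the largest prime <= sqrt(n) and ups(n) the
# smallest prime >= sqrt(n); n is semidivisible when exactly one of the two
# divides it (Project Euler 234). Example: for n = 10, sqrt(10) ~ 3.16, so
# lps = 3 and ups = 5; only 5 divides 10, hence 10 is semidivisible, while
# 15 (divisible by both 3 and 5) is not.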
if __name__ == "__main__":
print(solution())
| 365 |
'''simple docstring'''
import unittest
from transformers import MraConfig, is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
if is_torch_available():
import torch
from transformers import (
MraForMaskedLM,
MraForMultipleChoice,
MraForQuestionAnswering,
MraForSequenceClassification,
MraForTokenClassification,
MraModel,
)
from transformers.models.mra.modeling_mra import MRA_PRETRAINED_MODEL_ARCHIVE_LIST
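
# These tests exercise MRA (Multi-Resolution Attention) end to end: a shape
# check for the base model and each task head, plus slow integration tests
# against the released uw-madison/mra-base checkpoints.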
class MraModelTester:
    def __init__(
        self,
        parent,
        batch_size=2,
        seq_length=8,
        is_training=True,
        use_input_mask=True,
        use_token_type_ids=True,
        use_labels=True,
        vocab_size=99,
        hidden_size=16,
        num_hidden_layers=5,
        num_attention_heads=2,
        intermediate_size=36,
        hidden_act="gelu",
        hidden_dropout_prob=0.0,
        attention_probs_dropout_prob=0.0,
        max_position_embeddings=512,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        num_labels=3,
        num_choices=4,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope

    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)

        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)

        config = self.get_config()

        return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels

    def get_config(self):
        return MraConfig(
            vocab_size=self.vocab_size,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            type_vocab_size=self.type_vocab_size,
            is_decoder=False,
            initializer_range=self.initializer_range,
        )

    def get_pipeline_config(self):
        config = self.get_config()
        config.vocab_size = 300
        return config

    def prepare_config_and_inputs_for_decoder(self):
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = self.prepare_config_and_inputs()

        config.is_decoder = True
        encoder_hidden_states = floats_tensor([self.batch_size, self.seq_length, self.hidden_size])
        encoder_attention_mask = ids_tensor([self.batch_size, self.seq_length], vocab_size=2)

        return (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
            encoder_hidden_states,
            encoder_attention_mask,
        )

    def create_and_check_model(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = MraModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids)
        result = model(input_ids, token_type_ids=token_type_ids)
        result = model(input_ids)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_model_as_decoder(
        self,
        config,
        input_ids,
        token_type_ids,
        input_mask,
        sequence_labels,
        token_labels,
        choice_labels,
        encoder_hidden_states,
        encoder_attention_mask,
    ):
        config.add_cross_attention = True
        model = MraModel(config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids,
            attention_mask=input_mask,
            token_type_ids=token_type_ids,
            encoder_hidden_states=encoder_hidden_states,
            encoder_attention_mask=encoder_attention_mask,
        )
        result = model(
            input_ids,
            attention_mask=input_mask,
            token_type_ids=token_type_ids,
            encoder_hidden_states=encoder_hidden_states,
        )
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_for_masked_lm(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = MraForMaskedLM(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))

    def create_and_check_for_question_answering(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = MraForQuestionAnswering(config=config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids,
            attention_mask=input_mask,
            token_type_ids=token_type_ids,
            start_positions=sequence_labels,
            end_positions=sequence_labels,
        )
        self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))

    def create_and_check_for_sequence_classification(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_labels = self.num_labels
        model = MraForSequenceClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=sequence_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def create_and_check_for_token_classification(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_labels = self.num_labels
        model = MraForTokenClassification(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))

    def create_and_check_for_multiple_choice(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_choices = self.num_choices
        model = MraForMultipleChoice(config=config)
        model.to(torch_device)
        model.eval()
        multiple_choice_inputs_ids = input_ids.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        multiple_choice_token_type_ids = token_type_ids.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        multiple_choice_input_mask = input_mask.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        result = model(
            multiple_choice_inputs_ids,
            attention_mask=multiple_choice_input_mask,
            token_type_ids=multiple_choice_token_type_ids,
            labels=choice_labels,
        )
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_choices))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": input_mask}
        return config, inputs_dict


@require_torch
class MraModelTest(ModelTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            MraModel,
            MraForMaskedLM,
            MraForMultipleChoice,
            MraForQuestionAnswering,
            MraForSequenceClassification,
            MraForTokenClassification,
        )
        if is_torch_available()
        else ()
    )
    test_pruning = False
    test_headmasking = False
    test_torchscript = False
    has_attentions = False
    all_generative_model_classes = ()

    def setUp(self):
        self.model_tester = MraModelTester(self)
        self.config_tester = ConfigTester(self, config_class=MraConfig, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_model_various_embeddings(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        for type in ["absolute", "relative_key", "relative_key_query"]:
            config_and_inputs[0].position_embedding_type = type
            self.model_tester.create_and_check_model(*config_and_inputs)

    def test_for_masked_lm(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_masked_lm(*config_and_inputs)

    def test_for_multiple_choice(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_multiple_choice(*config_and_inputs)

    def test_for_question_answering(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_question_answering(*config_and_inputs)

    def test_for_sequence_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_sequence_classification(*config_and_inputs)

    def test_for_token_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_token_classification(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in MRA_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = MraModel.from_pretrained(model_name)
            self.assertIsNotNone(model)

    @unittest.skip(reason="MRA does not output attentions")
    def test_attention_outputs(self):
        return


@require_torch
class MraModelIntegrationTest(unittest.TestCase):
    @slow
    def test_inference_no_head(self):
        model = MraModel.from_pretrained("uw-madison/mra-base-512-4")
        input_ids = torch.arange(256).unsqueeze(0)

        with torch.no_grad():
            output = model(input_ids)[0]

        expected_shape = torch.Size((1, 256, 768))
        self.assertEqual(output.shape, expected_shape)

        expected_slice = torch.tensor(
            [[[-0.0140, 0.0830, -0.0381], [0.1546, 0.1402, 0.0220], [0.1162, 0.0851, 0.0165]]]
        )
        self.assertTrue(torch.allclose(output[:, :3, :3], expected_slice, atol=1e-4))

    @slow
    def test_inference_masked_lm(self):
        model = MraForMaskedLM.from_pretrained("uw-madison/mra-base-512-4")
        input_ids = torch.arange(256).unsqueeze(0)

        with torch.no_grad():
            output = model(input_ids)[0]

        vocab_size = 50265

        expected_shape = torch.Size((1, 256, vocab_size))
        self.assertEqual(output.shape, expected_shape)

        expected_slice = torch.tensor(
            [[[9.2595, -3.6038, 11.8819], [9.3869, -3.2693, 11.0956], [11.8524, -3.4938, 13.1210]]]
        )
        self.assertTrue(torch.allclose(output[:, :3, :3], expected_slice, atol=1e-4))

    @slow
    def test_inference_masked_lm_long_input(self):
        model = MraForMaskedLM.from_pretrained("uw-madison/mra-base-4096-8-d3")
        input_ids = torch.arange(4096).unsqueeze(0)

        with torch.no_grad():
            output = model(input_ids)[0]

        vocab_size = 50265

        expected_shape = torch.Size((1, 4096, vocab_size))
        self.assertEqual(output.shape, expected_shape)

        expected_slice = torch.tensor(
            [[[5.4789, -2.3564, 7.5064], [7.9067, -1.3369, 9.9668], [9.0712, -1.8106, 7.0380]]]
        )
        self.assertTrue(torch.allclose(output[:, :3, :3], expected_slice, atol=1e-4))
 | 96 | 0 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_torch_available,
is_vision_available,
)
_import_structure = {"configuration_beit": ["BEIT_PRETRAINED_CONFIG_ARCHIVE_MAP", "BeitConfig", "BeitOnnxConfig"]}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["feature_extraction_beit"] = ["BeitFeatureExtractor"]
    _import_structure["image_processing_beit"] = ["BeitImageProcessor"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_beit"] = [
'''BEIT_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''BeitForImageClassification''',
'''BeitForMaskedImageModeling''',
'''BeitForSemanticSegmentation''',
'''BeitModel''',
'''BeitPreTrainedModel''',
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_flax_beit"] = [
'''FlaxBeitForImageClassification''',
'''FlaxBeitForMaskedImageModeling''',
'''FlaxBeitModel''',
'''FlaxBeitPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_beit import BEIT_PRETRAINED_CONFIG_ARCHIVE_MAP, BeitConfig, BeitOnnxConfig
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_beit import BeitFeatureExtractor
from .image_processing_beit import BeitImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_beit import (
BEIT_PRETRAINED_MODEL_ARCHIVE_LIST,
BeitForImageClassification,
BeitForMaskedImageModeling,
BeitForSemanticSegmentation,
BeitModel,
BeitPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_beit import (
FlaxBeitForImageClassification,
FlaxBeitForMaskedImageModeling,
FlaxBeitModel,
FlaxBeitPreTrainedModel,
)
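
# With this lazy-import pattern, `import transformers.models.beit` stays cheap:
# the submodules listed in `_import_structure` are only imported when one of
# their attributes is first accessed (the TYPE_CHECKING branch above exists so
# static type checkers still see the real imports).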
else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
 | 74 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {"configuration_vit_msn": ["VIT_MSN_PRETRAINED_CONFIG_ARCHIVE_MAP", "ViTMSNConfig"]}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_vit_msn"] = [
'VIT_MSN_PRETRAINED_MODEL_ARCHIVE_LIST',
'ViTMSNModel',
'ViTMSNForImageClassification',
'ViTMSNPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_vit_msn import VIT_MSN_PRETRAINED_CONFIG_ARCHIVE_MAP, ViTMSNConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_vit_msn import (
VIT_MSN_PRETRAINED_MODEL_ARCHIVE_LIST,
ViTMSNForImageClassification,
ViTMSNModel,
ViTMSNPreTrainedModel,
)
else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 29 | 0 |
'''simple docstring'''
from __future__ import annotations
def two_pointer(nums: list[int], target: int) -> list[int]:
    """Return indices of the two entries of sorted `nums` that sum to `target`."""
    i = 0
    j = len(nums) - 1

    while i < j:
        if nums[i] + nums[j] == target:
            return [i, j]
        elif nums[i] + nums[j] < target:
            i = i + 1
        else:
            j = j - 1

    return []
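
# Note (added for clarity): the two-pointer scan assumes `nums` is sorted in
# ascending order; e.g. two_pointer([2, 7, 11, 15], 9) returns [0, 1] because
# 2 + 7 == 9.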
if __name__ == "__main__":
import doctest
doctest.testmod()
print(f'''{two_pointer([2, 7, 11, 15], 9) = }''')
 | 354 |
'''simple docstring'''
from typing import Optional, Union
import torch
from torch import nn
from ...configuration_utils import ConfigMixin, register_to_config
from ...models.modeling_utils import ModelMixin
class StableUnCLIPImageNormalizer(ModelMixin, ConfigMixin):
    """
    Holds the mean and standard deviation of the CLIP embedder used in stable unCLIP.
    """

    @register_to_config
    def __init__(
        self,
        embedding_dim: int = 768,
    ):
        super().__init__()

        self.mean = nn.Parameter(torch.zeros(1, embedding_dim))
        self.std = nn.Parameter(torch.ones(1, embedding_dim))

    def to(
        self,
        torch_device: Optional[Union[str, torch.device]] = None,
        torch_dtype: Optional[torch.dtype] = None,
    ):
        self.mean = nn.Parameter(self.mean.to(torch_device).to(torch_dtype))
        self.std = nn.Parameter(self.std.to(torch_device).to(torch_dtype))
        return self

    def scale(self, embeds):
        embeds = (embeds - self.mean) * 1.0 / self.std
        return embeds

    def unscale(self, embeds):
        embeds = (embeds * self.std) + self.mean
        return embeds
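
# Usage sketch (illustrative; `image_embeds` is an assumed CLIP embedding
# tensor of shape (batch, 768)):
#
#   normalizer = StableUnCLIPImageNormalizer(embedding_dim=768)
#   scaled = normalizer.scale(image_embeds)    # (embeds - mean) / std
#   restored = normalizer.unscale(scaled)      # back to the original scale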
| 274 | 0 |
'''simple docstring'''
from ..utils import DummyObject, requires_backends


# Alias kept because the rest of this module refers to the metaclass as `lowercase_`.
lowercase_ = DummyObject


class lowerCamelCase(metaclass=DummyObject):
    _backends = ['sentencepiece']

    def __init__(self, *args, **kwargs):
        requires_backends(self, ['sentencepiece'])


class lowerCamelCase(metaclass=DummyObject):
    _backends = ['sentencepiece']

    def __init__(self, *args, **kwargs):
        requires_backends(self, ['sentencepiece'])


class lowerCamelCase(metaclass=DummyObject):
    _backends = ['sentencepiece']

    def __init__(self, *args, **kwargs):
        requires_backends(self, ['sentencepiece'])


class lowerCamelCase(metaclass=DummyObject):
    _backends = ['sentencepiece']

    def __init__(self, *args, **kwargs):
        requires_backends(self, ['sentencepiece'])


class lowerCamelCase(metaclass=DummyObject):
    _backends = ['sentencepiece']

    def __init__(self, *args, **kwargs):
        requires_backends(self, ['sentencepiece'])


class lowerCamelCase(metaclass=DummyObject):
    _backends = ['sentencepiece']

    def __init__(self, *args, **kwargs):
        requires_backends(self, ['sentencepiece'])


class lowerCamelCase(metaclass=DummyObject):
    _backends = ['sentencepiece']

    def __init__(self, *args, **kwargs):
        requires_backends(self, ['sentencepiece'])


class lowerCamelCase(metaclass=DummyObject):
    _backends = ['sentencepiece']

    def __init__(self, *args, **kwargs):
        requires_backends(self, ['sentencepiece'])


class lowerCamelCase(metaclass=DummyObject):
    _backends = ['sentencepiece']

    def __init__(self, *args, **kwargs):
        requires_backends(self, ['sentencepiece'])


class lowerCamelCase(metaclass=DummyObject):
    _backends = ['sentencepiece']

    def __init__(self, *args, **kwargs):
        requires_backends(self, ['sentencepiece'])


class lowerCamelCase(metaclass=DummyObject):
    _backends = ['sentencepiece']

    def __init__(self, *args, **kwargs):
        requires_backends(self, ['sentencepiece'])


class lowerCamelCase(metaclass=DummyObject):
    _backends = ['sentencepiece']

    def __init__(self, *args, **kwargs):
        requires_backends(self, ['sentencepiece'])


class lowerCamelCase(metaclass=DummyObject):
    _backends = ['sentencepiece']

    def __init__(self, *args, **kwargs):
        requires_backends(self, ['sentencepiece'])


class lowerCamelCase(metaclass=DummyObject):
    _backends = ['sentencepiece']

    def __init__(self, *args, **kwargs):
        requires_backends(self, ['sentencepiece'])


class lowerCamelCase(metaclass=DummyObject):
    _backends = ['sentencepiece']

    def __init__(self, *args, **kwargs):
        requires_backends(self, ['sentencepiece'])


class lowerCamelCase(metaclass=DummyObject):
    _backends = ['sentencepiece']

    def __init__(self, *args, **kwargs):
        requires_backends(self, ['sentencepiece'])


class lowerCamelCase(metaclass=DummyObject):
    _backends = ['sentencepiece']

    def __init__(self, *args, **kwargs):
        requires_backends(self, ['sentencepiece'])


class lowerCamelCase(metaclass=DummyObject):
    _backends = ['sentencepiece']

    def __init__(self, *args, **kwargs):
        requires_backends(self, ['sentencepiece'])


class lowerCamelCase(metaclass=DummyObject):
    _backends = ['sentencepiece']

    def __init__(self, *args, **kwargs):
        requires_backends(self, ['sentencepiece'])


class lowerCamelCase(metaclass=DummyObject):
    _backends = ['sentencepiece']

    def __init__(self, *args, **kwargs):
        requires_backends(self, ['sentencepiece'])


class lowerCamelCase(metaclass=DummyObject):
    _backends = ['sentencepiece']

    def __init__(self, *args, **kwargs):
        requires_backends(self, ['sentencepiece'])


class lowerCamelCase(metaclass=DummyObject):
    _backends = ['sentencepiece']

    def __init__(self, *args, **kwargs):
        requires_backends(self, ['sentencepiece'])


class lowerCamelCase(metaclass=DummyObject):
    _backends = ['sentencepiece']

    def __init__(self, *args, **kwargs):
        requires_backends(self, ['sentencepiece'])


class lowerCamelCase(metaclass=DummyObject):
    _backends = ['sentencepiece']

    def __init__(self, *args, **kwargs):
        requires_backends(self, ['sentencepiece'])


class lowerCamelCase(metaclass=DummyObject):
    _backends = ['sentencepiece']

    def __init__(self, *args, **kwargs):
        requires_backends(self, ['sentencepiece'])
class lowerCamelCase ( metaclass=lowercase_ ):
'''simple docstring'''
__snake_case = ['sentencepiece']
def __init__( self : Optional[int] , *lowerCAmelCase_ : Union[str, Any] , **lowerCAmelCase_ : Optional[Any] ) -> str:
'''simple docstring'''
requires_backends(self , ["""sentencepiece"""] )
class lowerCamelCase ( metaclass=lowercase_ ):
'''simple docstring'''
__snake_case = ['sentencepiece']
def __init__( self : List[str] , *lowerCAmelCase_ : int , **lowerCAmelCase_ : Optional[int] ) -> List[str]:
'''simple docstring'''
requires_backends(self , ["""sentencepiece"""] )
class lowerCamelCase ( metaclass=lowercase_ ):
'''simple docstring'''
__snake_case = ['sentencepiece']
def __init__( self : Tuple , *lowerCAmelCase_ : Optional[int] , **lowerCAmelCase_ : Optional[int] ) -> Optional[int]:
'''simple docstring'''
requires_backends(self , ["""sentencepiece"""] )
class lowerCamelCase ( metaclass=lowercase_ ):
'''simple docstring'''
__snake_case = ['sentencepiece']
def __init__( self : str , *lowerCAmelCase_ : Dict , **lowerCAmelCase_ : Optional[int] ) -> Dict:
'''simple docstring'''
requires_backends(self , ["""sentencepiece"""] )
class lowerCamelCase ( metaclass=lowercase_ ):
'''simple docstring'''
__snake_case = ['sentencepiece']
def __init__( self : Optional[int] , *lowerCAmelCase_ : Optional[int] , **lowerCAmelCase_ : Tuple ) -> str:
'''simple docstring'''
requires_backends(self , ["""sentencepiece"""] )
class lowerCamelCase ( metaclass=lowercase_ ):
'''simple docstring'''
__snake_case = ['sentencepiece']
def __init__( self : Optional[Any] , *lowerCAmelCase_ : List[Any] , **lowerCAmelCase_ : int ) -> str:
'''simple docstring'''
requires_backends(self , ["""sentencepiece"""] )
| 134 |
import gc
import random
import unittest

import numpy as np
import torch
from PIL import Image
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer

from diffusers import AutoencoderKL, PNDMScheduler, StableDiffusionInpaintPipeline, UNet2DConditionModel
from diffusers.utils import floats_tensor, load_image, load_numpy, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu, slow

from ..pipeline_params import TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS, TEXT_GUIDED_IMAGE_INPAINTING_PARAMS
from ..test_pipelines_common import PipelineKarrasSchedulerTesterMixin, PipelineLatentTesterMixin, PipelineTesterMixin


enable_full_determinism()


class StableDiffusion2InpaintPipelineFastTests(
    PipelineLatentTesterMixin, PipelineKarrasSchedulerTesterMixin, PipelineTesterMixin, unittest.TestCase
):
    pipeline_class = StableDiffusionInpaintPipeline
    params = TEXT_GUIDED_IMAGE_INPAINTING_PARAMS
    batch_params = TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS
    image_params = frozenset(
        []
    )  # TO-DO: update image_params once pipeline is refactored with VaeImageProcessor.preprocess
    image_latents_params = frozenset([])

    def get_dummy_components(self):
        torch.manual_seed(0)
        unet = UNet2DConditionModel(
            block_out_channels=(32, 64),
            layers_per_block=2,
            sample_size=32,
            in_channels=9,
            out_channels=4,
            down_block_types=("DownBlock2D", "CrossAttnDownBlock2D"),
            up_block_types=("CrossAttnUpBlock2D", "UpBlock2D"),
            cross_attention_dim=32,
            attention_head_dim=(2, 4),
            use_linear_projection=True,
        )
        scheduler = PNDMScheduler(skip_prk_steps=True)
        torch.manual_seed(0)
        vae = AutoencoderKL(
            block_out_channels=[32, 64],
            in_channels=3,
            out_channels=3,
            down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"],
            up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"],
            latent_channels=4,
            sample_size=128,
        )
        torch.manual_seed(0)
        text_encoder_config = CLIPTextConfig(
            bos_token_id=0,
            eos_token_id=2,
            hidden_size=32,
            intermediate_size=37,
            layer_norm_eps=1e-05,
            num_attention_heads=4,
            num_hidden_layers=5,
            pad_token_id=1,
            vocab_size=1000,
            hidden_act="gelu",
            projection_dim=512,
        )
        text_encoder = CLIPTextModel(text_encoder_config)
        tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip")

        components = {
            "unet": unet,
            "scheduler": scheduler,
            "vae": vae,
            "text_encoder": text_encoder,
            "tokenizer": tokenizer,
            "safety_checker": None,
            "feature_extractor": None,
        }
        return components

    def get_dummy_inputs(self, device, seed=0):
        # TODO: use tensor inputs instead of PIL, this is here just to leave the old expected_slices untouched
        image = floats_tensor((1, 3, 32, 32), rng=random.Random(seed)).to(device)
        image = image.cpu().permute(0, 2, 3, 1)[0]
        init_image = Image.fromarray(np.uint8(image)).convert("RGB").resize((64, 64))
        mask_image = Image.fromarray(np.uint8(image + 4)).convert("RGB").resize((64, 64))
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            "prompt": "A painting of a squirrel eating a burger",
            "image": init_image,
            "mask_image": mask_image,
            "generator": generator,
            "num_inference_steps": 2,
            "guidance_scale": 6.0,
            "output_type": "numpy",
        }
        return inputs

    def test_stable_diffusion_inpaint(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components()
        sd_pipe = StableDiffusionInpaintPipeline(**components)
        sd_pipe = sd_pipe.to(device)
        sd_pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs(device)
        image = sd_pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 64, 64, 3)
        expected_slice = np.array([0.4727, 0.5735, 0.3941, 0.5446, 0.5926, 0.4394, 0.5062, 0.4654, 0.4476])

        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2

    def test_inference_batch_single_identical(self):
        super().test_inference_batch_single_identical(expected_max_diff=3e-3)


@slow
@require_torch_gpu
class StableDiffusionInpaintPipelineIntegrationTests(unittest.TestCase):
    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def test_stable_diffusion_inpaint_pipeline(self):
        init_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/sd2-inpaint/init_image.png"
        )
        mask_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint/mask.png"
        )
        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint"
            "/yellow_cat_sitting_on_a_park_bench.npy"
        )

        model_id = "stabilityai/stable-diffusion-2-inpainting"
        pipe = StableDiffusionInpaintPipeline.from_pretrained(model_id, safety_checker=None)
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        pipe.enable_attention_slicing()

        prompt = "Face of a yellow cat, high resolution, sitting on a park bench"

        generator = torch.manual_seed(0)
        output = pipe(
            prompt=prompt,
            image=init_image,
            mask_image=mask_image,
            generator=generator,
            output_type="np",
        )
        image = output.images[0]

        assert image.shape == (512, 512, 3)
        assert np.abs(expected_image - image).max() < 9e-3

    def test_stable_diffusion_inpaint_pipeline_fp16(self):
        init_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/sd2-inpaint/init_image.png"
        )
        mask_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint/mask.png"
        )
        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint"
            "/yellow_cat_sitting_on_a_park_bench_fp16.npy"
        )

        model_id = "stabilityai/stable-diffusion-2-inpainting"
        pipe = StableDiffusionInpaintPipeline.from_pretrained(
            model_id,
            torch_dtype=torch.float16,
            safety_checker=None,
        )
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        pipe.enable_attention_slicing()

        prompt = "Face of a yellow cat, high resolution, sitting on a park bench"

        generator = torch.manual_seed(0)
        output = pipe(
            prompt=prompt,
            image=init_image,
            mask_image=mask_image,
            generator=generator,
            output_type="np",
        )
        image = output.images[0]

        assert image.shape == (512, 512, 3)
        assert np.abs(expected_image - image).max() < 5e-1

    def test_stable_diffusion_pipeline_with_sequential_cpu_offloading(self):
        torch.cuda.empty_cache()
        torch.cuda.reset_max_memory_allocated()
        torch.cuda.reset_peak_memory_stats()

        init_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/sd2-inpaint/init_image.png"
        )
        mask_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint/mask.png"
        )

        model_id = "stabilityai/stable-diffusion-2-inpainting"
        pndm = PNDMScheduler.from_pretrained(model_id, subfolder="scheduler")
        pipe = StableDiffusionInpaintPipeline.from_pretrained(
            model_id,
            safety_checker=None,
            scheduler=pndm,
            torch_dtype=torch.float16,
        )
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        pipe.enable_attention_slicing(1)
        pipe.enable_sequential_cpu_offload()

        prompt = "Face of a yellow cat, high resolution, sitting on a park bench"

        generator = torch.manual_seed(0)
        _ = pipe(
            prompt=prompt,
            image=init_image,
            mask_image=mask_image,
            generator=generator,
            num_inference_steps=2,
            output_type="np",
        )

        mem_bytes = torch.cuda.max_memory_allocated()
        # make sure that less than 2.65 GB is allocated
        assert mem_bytes < 2.65 * 10**9
| 134 | 1 |
import os


def largest_product(grid):
    """Return the greatest product of four adjacent numbers in the grid
    (up/down, left/right, or diagonally). Project Euler problem 11."""
    n_columns = len(grid[0])
    n_rows = len(grid)
    largest = 0
    lr_diag_product = 0
    rl_diag_product = 0

    # Check vertically, horizontally, diagonally at the same time (only works
    # for nxn grid)
    for i in range(n_columns):
        for j in range(n_rows - 3):
            vert_product = grid[j][i] * grid[j + 1][i] * grid[j + 2][i] * grid[j + 3][i]
            horz_product = grid[i][j] * grid[i][j + 1] * grid[i][j + 2] * grid[i][j + 3]

            # Left-to-right diagonal (\) product
            if i < n_columns - 3:
                lr_diag_product = (
                    grid[i][j]
                    * grid[i + 1][j + 1]
                    * grid[i + 2][j + 2]
                    * grid[i + 3][j + 3]
                )

            # Right-to-left diagonal(/) product
            if i > 2:
                rl_diag_product = (
                    grid[i][j]
                    * grid[i - 1][j + 1]
                    * grid[i - 2][j + 2]
                    * grid[i - 3][j + 3]
                )

            max_product = max(vert_product, horz_product, lr_diag_product, rl_diag_product)
            if max_product > largest:
                largest = max_product

    return largest


def solution():
    """Read the grid from grid.txt and return the largest product of four
    adjacent numbers in any direction."""
    grid = []
    with open(os.path.dirname(__file__) + "/grid.txt") as file:
        for line in file:
            grid.append(line.strip("\n").split(" "))

    grid = [[int(i) for i in grid[j]] for j in range(len(grid))]
    return largest_product(grid)


if __name__ == "__main__":
    print(solution())
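# A minimal sanity check (hypothetical 4x4 grid, not part of the original
# problem data): the best run of four adjacent values is the bottom row,
# 13 * 14 * 15 * 16 = 43680, so largest_product should return 43680.
#
#   _demo_grid = [
#       [1, 2, 3, 4],
#       [5, 6, 7, 8],
#       [9, 10, 11, 12],
#       [13, 14, 15, 16],
#   ]
#   assert largest_product(_demo_grid) == 43680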
| 301 |
import math
import random
from typing import Any

from .hill_climbing import SearchProblem


def simulated_annealing(
    search_prob,
    find_max: bool = True,
    max_x: float = math.inf,
    min_x: float = -math.inf,
    max_y: float = math.inf,
    min_y: float = -math.inf,
    visualization: bool = False,
    start_temperate: float = 100,
    rate_of_decrease: float = 0.01,
    threshold_temp: float = 1,
) -> Any:
    """Hill-climbing variant that sometimes accepts worse neighbors, with an
    acceptance probability that shrinks as the temperature cools."""
    search_end = False
    current_state = search_prob
    current_temp = start_temperate
    scores = []
    iterations = 0
    best_state = None

    while not search_end:
        current_score = current_state.score()
        if best_state is None or current_score > best_state.score():
            best_state = current_state
        scores.append(current_score)
        iterations += 1
        next_state = None
        neighbors = current_state.get_neighbors()
        while (
            next_state is None and neighbors
        ):  # till we do not find a neighbor that we can move to
            index = random.randint(0, len(neighbors) - 1)  # picking a random neighbor
            picked_neighbor = neighbors.pop(index)
            change = picked_neighbor.score() - current_score
            if (
                picked_neighbor.x > max_x
                or picked_neighbor.x < min_x
                or picked_neighbor.y > max_y
                or picked_neighbor.y < min_y
            ):
                continue  # neighbor outside our bounds
            if not find_max:
                change = change * -1  # in case we are finding minimum
            if change > 0:  # improves the solution
                next_state = picked_neighbor
            else:
                probability = (math.e) ** (
                    change / current_temp
                )  # probability generation function
                if random.random() < probability:  # random number within probability
                    next_state = picked_neighbor
        current_temp = current_temp - (current_temp * rate_of_decrease)

        if current_temp < threshold_temp or next_state is None:
            # temperature below threshold, or could not find a suitable neighbor
            search_end = True
        else:
            current_state = next_state

    if visualization:
        from matplotlib import pyplot as plt

        plt.plot(range(iterations), scores)
        plt.xlabel("Iterations")
        plt.ylabel("Function values")
        plt.show()
    return best_state
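# A self-contained illustration of the Metropolis acceptance rule used above
# (hypothetical numbers, not part of the original module): a move that worsens
# the score by 2 at temperature 10 is accepted with probability
# e^(-2/10) ~= 0.82, but at temperature 0.5 only with e^(-2/0.5) ~= 0.018,
# which is why the search settles down as the temperature decays.
#
#   assert round(math.e ** (-2 / 10), 2) == 0.82
#   assert round(math.e ** (-2 / 0.5), 3) == 0.018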
if __name__ == "__main__":
def lowerCamelCase__ ( a , a ) -> Optional[Any]:
return (x**2) + (y**2)
# starting the problem with initial coordinates (12, 47)
UpperCAmelCase__ : Optional[int] = SearchProblem(x=12, y=47, step_size=1, function_to_optimize=test_fa)
UpperCAmelCase__ : Optional[Any] = simulated_annealing(
prob, find_max=False, max_x=100, min_x=5, max_y=50, min_y=-5, visualization=True
)
print(
'The minimum score for f(x, y) = x^2 + y^2 with the domain 100 > x > 5 '
F"""and 50 > y > - 5 found via hill climbing: {local_min.score()}"""
)
# starting the problem with initial coordinates (12, 47)
UpperCAmelCase__ : Optional[Any] = SearchProblem(x=12, y=47, step_size=1, function_to_optimize=test_fa)
UpperCAmelCase__ : List[str] = simulated_annealing(
prob, find_max=True, max_x=100, min_x=5, max_y=50, min_y=-5, visualization=True
)
print(
'The maximum score for f(x, y) = x^2 + y^2 with the domain 100 > x > 5 '
F"""and 50 > y > - 5 found via hill climbing: {local_min.score()}"""
)
def lowerCamelCase__ ( a , a ) -> Optional[Any]:
return (3 * x**2) - (6 * y)
UpperCAmelCase__ : Union[str, Any] = SearchProblem(x=3, y=4, step_size=1, function_to_optimize=test_fa)
UpperCAmelCase__ : List[str] = simulated_annealing(prob, find_max=False, visualization=True)
print(
'The minimum score for f(x, y) = 3*x^2 - 6*y found via hill climbing: '
F"""{local_min.score()}"""
)
UpperCAmelCase__ : Optional[Any] = SearchProblem(x=3, y=4, step_size=1, function_to_optimize=test_fa)
UpperCAmelCase__ : List[Any] = simulated_annealing(prob, find_max=True, visualization=True)
print(
'The maximum score for f(x, y) = 3*x^2 - 6*y found via hill climbing: '
F"""{local_min.score()}"""
)
| 301 | 1 |
"""simple docstring"""
import copy
from dataclasses import dataclass, field
from typing import ClassVar, Dict
from ..features import Audio, ClassLabel, Features
from .base import TaskTemplate
@dataclass(frozen=_a )
class SCREAMING_SNAKE_CASE__ ( _a ):
_a = field(default='audio-classification' , metadata={'include_in_asdict_even_if_is_default': True} )
_a = Features({'audio': Audio()} )
_a = Features({'labels': ClassLabel} )
_a = 'audio'
_a = 'labels'
def __lowercase ( self : Optional[Any] , lowerCAmelCase : List[Any] ):
if self.label_column not in features:
raise ValueError(f'''Column {self.label_column} is not present in features.''' )
if not isinstance(features[self.label_column] , lowerCAmelCase ):
raise ValueError(f'''Column {self.label_column} is not a ClassLabel.''' )
lowerCAmelCase = copy.deepcopy(self )
lowerCAmelCase = self.label_schema.copy()
lowerCAmelCase = features[self.label_column]
lowerCAmelCase = label_schema
return task_template
@property
def __lowercase ( self : int ):
return {
self.audio_column: "audio",
self.label_column: "labels",
}
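# A short usage sketch (the toy feature set below is illustrative, not taken
# from this module): aligning the template against a concrete dataset's
# features swaps the bare ClassLabel placeholder for the dataset's actual
# label feature.
#
#   features = Features({"audio": Audio(), "labels": ClassLabel(names=["dog", "cat"])})
#   template = AudioClassification(audio_column="audio", label_column="labels")
#   aligned = template.align_with_features(features)
#   assert aligned.label_schema["labels"].names == ["dog", "cat"]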
| 155 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging


logger = logging.get_logger(__name__)

OPEN_LLAMA_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "s-JoL/Open-Llama-V1": "https://huggingface.co/s-JoL/Open-Llama-V1/blob/main/config.json",
}


class OpenLlamaConfig(PretrainedConfig):
    model_type = "open-llama"

    def __init__(
        self,
        vocab_size=100000,
        hidden_size=4096,
        intermediate_size=11008,
        num_hidden_layers=32,
        num_attention_heads=32,
        hidden_act="silu",
        max_position_embeddings=2048,
        initializer_range=0.02,
        rms_norm_eps=1e-6,
        use_cache=True,
        pad_token_id=0,
        bos_token_id=1,
        eos_token_id=2,
        tie_word_embeddings=False,
        use_memory_efficient_attention=True,
        hidden_dropout_prob=0.1,
        attention_dropout_prob=0.1,
        use_stable_embedding=True,
        shared_input_output_embedding=True,
        rope_scaling=None,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.hidden_size = hidden_size
        self.intermediate_size = intermediate_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.initializer_range = initializer_range
        self.rms_norm_eps = rms_norm_eps
        self.use_cache = use_cache
        # the misspelled key mirrors the legacy kwarg name accepted by upstream checkpoints
        self.use_memory_efficient_attention = kwargs.pop(
            "use_memorry_efficient_attention", use_memory_efficient_attention
        )
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_dropout_prob = attention_dropout_prob
        self.use_stable_embedding = use_stable_embedding
        self.shared_input_output_embedding = shared_input_output_embedding
        self.rope_scaling = rope_scaling
        self._rope_scaling_validation()

        super().__init__(
            pad_token_id=pad_token_id,
            bos_token_id=bos_token_id,
            eos_token_id=eos_token_id,
            tie_word_embeddings=tie_word_embeddings,
            **kwargs,
        )

    def _rope_scaling_validation(self):
        """Validate the `rope_scaling` configuration."""
        if self.rope_scaling is None:
            return

        if not isinstance(self.rope_scaling, dict) or len(self.rope_scaling) != 2:
            raise ValueError(
                "`rope_scaling` must be a dictionary with with two fields, `name` and `factor`, "
                f"got {self.rope_scaling}"
            )
        rope_scaling_type = self.rope_scaling.get("type", None)
        rope_scaling_factor = self.rope_scaling.get("factor", None)
        if rope_scaling_type is None or rope_scaling_type not in ["linear", "dynamic"]:
            raise ValueError(
                f"`rope_scaling`'s name field must be one of ['linear', 'dynamic'], got {rope_scaling_type}"
            )
        if rope_scaling_factor is None or not isinstance(rope_scaling_factor, float) or rope_scaling_factor <= 1.0:
            raise ValueError(f"`rope_scaling`'s factor field must be an float > 1, got {rope_scaling_factor}")
| 339 | 0 |
def print_pascal_triangle(num_rows: int) -> None:
    """Print Pascal's triangle for the given number of rows."""
    triangle = generate_pascal_triangle(num_rows)
    for row_idx in range(num_rows):
        # Print left spaces
        for _ in range(num_rows - row_idx - 1):
            print(end=" ")
        # Print row values
        for col_idx in range(row_idx + 1):
            if col_idx != row_idx:
                print(triangle[row_idx][col_idx], end=" ")
            else:
                print(triangle[row_idx][col_idx], end="")
        print()


def generate_pascal_triangle(num_rows: int) -> list[list[int]]:
    """Create Pascal's triangle row by row."""
    if not isinstance(num_rows, int):
        raise TypeError("The input value of 'num_rows' should be 'int'")

    if num_rows == 0:
        return []
    elif num_rows < 0:
        raise ValueError(
            "The input value of 'num_rows' should be greater than or equal to 0"
        )

    triangle: list[list[int]] = []
    for current_row_idx in range(num_rows):
        current_row = populate_current_row(triangle, current_row_idx)
        triangle.append(current_row)
    return triangle


def populate_current_row(triangle: list[list[int]], current_row_idx: int) -> list[int]:
    current_row = [-1] * (current_row_idx + 1)
    # first and last elements of current row are equal to 1
    current_row[0], current_row[-1] = 1, 1
    for current_col_idx in range(1, current_row_idx):
        calculate_current_element(
            triangle, current_row, current_row_idx, current_col_idx
        )
    return current_row


def calculate_current_element(
    triangle: list[list[int]],
    current_row: list[int],
    current_row_idx: int,
    current_col_idx: int,
) -> None:
    above_to_left_elt = triangle[current_row_idx - 1][current_col_idx - 1]
    above_to_right_elt = triangle[current_row_idx - 1][current_col_idx]
    current_row[current_col_idx] = above_to_left_elt + above_to_right_elt


def generate_pascal_triangle_optimized(num_rows: int) -> list[list[int]]:
    """Build each row from the previous one, exploiting the triangle's symmetry."""
    if not isinstance(num_rows, int):
        raise TypeError("The input value of 'num_rows' should be 'int'")

    if num_rows == 0:
        return []
    elif num_rows < 0:
        raise ValueError(
            "The input value of 'num_rows' should be greater than or equal to 0"
        )

    result: list[list[int]] = [[1]]

    for row_index in range(1, num_rows):
        temp_row = [0] + result[-1] + [0]
        row_length = row_index + 1
        # Calculate the number of distinct elements in a row
        distinct_elements = sum(divmod(row_length, 2))
        row_first_half = [
            temp_row[i - 1] + temp_row[i] for i in range(1, distinct_elements + 1)
        ]
        row_second_half = row_first_half[: (row_index + 1) // 2]
        row_second_half.reverse()
        row = row_first_half + row_second_half
        result.append(row)

    return result


def benchmark() -> None:
    """Benchmark both implementations over a range of input sizes."""
    from collections.abc import Callable
    from timeit import timeit

    def benchmark_a_function(func: Callable, value: int) -> None:
        call = f"{func.__name__}({value})"
        timing = timeit(f"__main__.{call}", setup="import __main__")
        # print(f"{call:38} = {func(value)} -- {timing:.4f} seconds")
        print(f"{call:38} -- {timing:.4f} seconds")

    for value in range(15):  # (1, 7, 14):
        for func in (generate_pascal_triangle, generate_pascal_triangle_optimized):
            benchmark_a_function(func, value)
        print()


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    benchmark()
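# Quick demonstration (a sketch; both generators should agree row for row):
#
#   >>> generate_pascal_triangle(4)
#   [[1], [1, 1], [1, 2, 1], [1, 3, 3, 1]]
#   >>> generate_pascal_triangle_optimized(4) == generate_pascal_triangle(4)
#   True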
| 361 |
import inspect
from typing import Callable, List, Optional, Union

import torch
from transformers import (
    CLIPImageProcessor,
    CLIPTextModel,
    CLIPTokenizer,
    WhisperForConditionalGeneration,
    WhisperProcessor,
)

from diffusers import (
    AutoencoderKL,
    DDIMScheduler,
    DiffusionPipeline,
    LMSDiscreteScheduler,
    PNDMScheduler,
    UNet2DConditionModel,
)
from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion import StableDiffusionPipelineOutput
from diffusers.pipelines.stable_diffusion.safety_checker import StableDiffusionSafetyChecker
from diffusers.utils import logging


logger = logging.get_logger(__name__)  # pylint: disable=invalid-name


class SpeechToImagePipeline(DiffusionPipeline):
    def __init__(
        self,
        speech_model: WhisperForConditionalGeneration,
        speech_processor: WhisperProcessor,
        vae: AutoencoderKL,
        text_encoder: CLIPTextModel,
        tokenizer: CLIPTokenizer,
        unet: UNet2DConditionModel,
        scheduler: Union[DDIMScheduler, PNDMScheduler, LMSDiscreteScheduler],
        safety_checker: StableDiffusionSafetyChecker,
        feature_extractor: CLIPImageProcessor,
    ):
        super().__init__()

        if safety_checker is None:
            logger.warning(
                f"You have disabled the safety checker for {self.__class__} by passing `safety_checker=None`. Ensure"
                " that you abide to the conditions of the Stable Diffusion license and do not expose unfiltered"
                " results in services or applications open to the public. Both the diffusers team and Hugging Face"
                " strongly recommend to keep the safety filter enabled in all public facing circumstances, disabling"
                " it only for use-cases that involve analyzing network behavior or auditing its results. For more"
                " information, please have a look at https://github.com/huggingface/diffusers/pull/254 ."
            )

        self.register_modules(
            speech_model=speech_model,
            speech_processor=speech_processor,
            vae=vae,
            text_encoder=text_encoder,
            tokenizer=tokenizer,
            unet=unet,
            scheduler=scheduler,
            feature_extractor=feature_extractor,
        )

    def enable_attention_slicing(self, slice_size: Optional[Union[str, int]] = "auto"):
        if slice_size == "auto":
            slice_size = self.unet.config.attention_head_dim // 2
        self.unet.set_attention_slice(slice_size)

    def disable_attention_slicing(self):
        self.enable_attention_slicing(None)

    @torch.no_grad()
    def __call__(
        self,
        audio,
        sampling_rate: int = 16_000,
        height: int = 512,
        width: int = 512,
        num_inference_steps: int = 50,
        guidance_scale: float = 7.5,
        negative_prompt: Optional[Union[str, List[str]]] = None,
        num_images_per_prompt: Optional[int] = 1,
        eta: float = 0.0,
        generator: Optional[torch.Generator] = None,
        latents: Optional[torch.FloatTensor] = None,
        output_type: Optional[str] = "pil",
        return_dict: bool = True,
        callback: Optional[Callable[[int, int, torch.FloatTensor], None]] = None,
        callback_steps: int = 1,
        **kwargs,
    ):
        # transcribe the audio clip into a text prompt with Whisper
        inputs = self.speech_processor.feature_extractor(
            audio, return_tensors="pt", sampling_rate=sampling_rate
        ).input_features.to(self.device)
        predicted_ids = self.speech_model.generate(inputs, max_length=480_000)

        prompt = self.speech_processor.tokenizer.batch_decode(predicted_ids, skip_special_tokens=True, normalize=True)[
            0
        ]

        if isinstance(prompt, str):
            batch_size = 1
        elif isinstance(prompt, list):
            batch_size = len(prompt)
        else:
            raise ValueError(f"`prompt` has to be of type `str` or `list` but is {type(prompt)}")

        if height % 8 != 0 or width % 8 != 0:
            raise ValueError(f"`height` and `width` have to be divisible by 8 but are {height} and {width}.")

        if (callback_steps is None) or (
            callback_steps is not None and (not isinstance(callback_steps, int) or callback_steps <= 0)
        ):
            raise ValueError(
                f"`callback_steps` has to be a positive integer but is {callback_steps} of type"
                f" {type(callback_steps)}."
            )

        # get prompt text embeddings
        text_inputs = self.tokenizer(
            prompt,
            padding="max_length",
            max_length=self.tokenizer.model_max_length,
            return_tensors="pt",
        )
        text_input_ids = text_inputs.input_ids

        if text_input_ids.shape[-1] > self.tokenizer.model_max_length:
            removed_text = self.tokenizer.batch_decode(text_input_ids[:, self.tokenizer.model_max_length :])
            logger.warning(
                "The following part of your input was truncated because CLIP can only handle sequences up to"
                f" {self.tokenizer.model_max_length} tokens: {removed_text}"
            )
            text_input_ids = text_input_ids[:, : self.tokenizer.model_max_length]
        text_embeddings = self.text_encoder(text_input_ids.to(self.device))[0]

        # duplicate text embeddings for each generation per prompt, using mps friendly method
        bs_embed, seq_len, _ = text_embeddings.shape
        text_embeddings = text_embeddings.repeat(1, num_images_per_prompt, 1)
        text_embeddings = text_embeddings.view(bs_embed * num_images_per_prompt, seq_len, -1)

        # here `guidance_scale` is defined analog to the guidance weight `w` of equation (2)
        # of the Imagen paper: https://arxiv.org/pdf/2205.11487.pdf . `guidance_scale = 1`
        # corresponds to doing no classifier free guidance.
        do_classifier_free_guidance = guidance_scale > 1.0
        # get unconditional embeddings for classifier free guidance
        if do_classifier_free_guidance:
            uncond_tokens: List[str]
            if negative_prompt is None:
                uncond_tokens = [""] * batch_size
            elif type(prompt) is not type(negative_prompt):
                raise TypeError(
                    f"`negative_prompt` should be the same type to `prompt`, but got {type(negative_prompt)} !="
                    f" {type(prompt)}."
                )
            elif isinstance(negative_prompt, str):
                uncond_tokens = [negative_prompt]
            elif batch_size != len(negative_prompt):
                raise ValueError(
                    f"`negative_prompt`: {negative_prompt} has batch size {len(negative_prompt)}, but `prompt`:"
                    f" {prompt} has batch size {batch_size}. Please make sure that passed `negative_prompt` matches"
                    " the batch size of `prompt`."
                )
            else:
                uncond_tokens = negative_prompt

            max_length = text_input_ids.shape[-1]
            uncond_input = self.tokenizer(
                uncond_tokens,
                padding="max_length",
                max_length=max_length,
                truncation=True,
                return_tensors="pt",
            )
            uncond_embeddings = self.text_encoder(uncond_input.input_ids.to(self.device))[0]

            # duplicate unconditional embeddings for each generation per prompt, using mps friendly method
            seq_len = uncond_embeddings.shape[1]
            uncond_embeddings = uncond_embeddings.repeat(1, num_images_per_prompt, 1)
            uncond_embeddings = uncond_embeddings.view(batch_size * num_images_per_prompt, seq_len, -1)

            # For classifier free guidance, we need to do two forward passes.
            # Here we concatenate the unconditional and text embeddings into a single batch
            # to avoid doing two forward passes
            text_embeddings = torch.cat([uncond_embeddings, text_embeddings])

        # get the initial random noise unless the user supplied it

        # Unlike in other pipelines, latents need to be generated in the target device
        # for 1-to-1 results reproducibility with the CompVis implementation.
        # However this currently doesn't work in `mps`.
        latents_shape = (batch_size * num_images_per_prompt, self.unet.config.in_channels, height // 8, width // 8)
        latents_dtype = text_embeddings.dtype
        if latents is None:
            if self.device.type == "mps":
                # randn does not exist on mps
                latents = torch.randn(latents_shape, generator=generator, device="cpu", dtype=latents_dtype).to(
                    self.device
                )
            else:
                latents = torch.randn(latents_shape, generator=generator, device=self.device, dtype=latents_dtype)
        else:
            if latents.shape != latents_shape:
                raise ValueError(f"Unexpected latents shape, got {latents.shape}, expected {latents_shape}")
            latents = latents.to(self.device)

        # set timesteps
        self.scheduler.set_timesteps(num_inference_steps)

        # Some schedulers like PNDM have timesteps as arrays
        # It's more optimized to move all timesteps to correct device beforehand
        timesteps_tensor = self.scheduler.timesteps.to(self.device)

        # scale the initial noise by the standard deviation required by the scheduler
        latents = latents * self.scheduler.init_noise_sigma

        # prepare extra kwargs for the scheduler step, since not all schedulers have the same signature
        # eta (η) is only used with the DDIMScheduler, it will be ignored for other schedulers.
        # eta corresponds to η in DDIM paper: https://arxiv.org/abs/2010.02502
        # and should be between [0, 1]
        accepts_eta = "eta" in set(inspect.signature(self.scheduler.step).parameters.keys())
        extra_step_kwargs = {}
        if accepts_eta:
            extra_step_kwargs["eta"] = eta

        for i, t in enumerate(self.progress_bar(timesteps_tensor)):
            # expand the latents if we are doing classifier free guidance
            latent_model_input = torch.cat([latents] * 2) if do_classifier_free_guidance else latents
            latent_model_input = self.scheduler.scale_model_input(latent_model_input, t)

            # predict the noise residual
            noise_pred = self.unet(latent_model_input, t, encoder_hidden_states=text_embeddings).sample

            # perform guidance
            if do_classifier_free_guidance:
                noise_pred_uncond, noise_pred_text = noise_pred.chunk(2)
                noise_pred = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)

            # compute the previous noisy sample x_t -> x_t-1
            latents = self.scheduler.step(noise_pred, t, latents, **extra_step_kwargs).prev_sample

            # call the callback, if provided
            if callback is not None and i % callback_steps == 0:
                callback(i, t, latents)

        latents = 1 / 0.18215 * latents
        image = self.vae.decode(latents).sample

        image = (image / 2 + 0.5).clamp(0, 1)

        # we always cast to float32 as this does not cause significant overhead and is compatible with bfloat16
        image = image.cpu().permute(0, 2, 3, 1).float().numpy()

        if output_type == "pil":
            image = self.numpy_to_pil(image)

        if not return_dict:
            return image

        return StableDiffusionPipelineOutput(images=image, nsfw_content_detected=None)
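# A hedged usage sketch: community pipelines like this one are normally loaded
# through DiffusionPipeline.from_pretrained with a `custom_pipeline` name. The
# checkpoint names below are illustrative, and the `...` placeholders stand in
# for a WhisperForConditionalGeneration model and its matching WhisperProcessor.
#
#   import torch
#   from diffusers import DiffusionPipeline
#
#   pipe = DiffusionPipeline.from_pretrained(
#       "runwayml/stable-diffusion-v1-5",
#       custom_pipeline="speech_to_image_diffusion",
#       speech_model=...,      # WhisperForConditionalGeneration instance
#       speech_processor=...,  # matching WhisperProcessor
#       torch_dtype=torch.float16,
#   ).to("cuda")
#   image = pipe(audio_array).images[0]  # audio_array: 1-D waveform at 16 kHz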
| 276 | 0 |
"""Constant stretch (histogram equalization) of a grayscale image."""
import copy
import os

import cv2
import numpy as np
from matplotlib import pyplot as plt


class ConstantStretch:
    def __init__(self):
        self.img = ""
        self.original_image = ""
        self.last_list = []
        self.rem = 0
        self.L = 256
        self.sk = 0
        self.k = 0
        self.number_of_rows = 0
        self.number_of_cols = 0

    def stretch(self, input_image):
        self.img = cv2.imread(input_image, 0)
        self.original_image = copy.deepcopy(self.img)
        x, _, _ = plt.hist(self.img.ravel(), 256, [0, 256], label="x")
        self.k = np.sum(x)
        for i in range(len(x)):
            prk = x[i] / self.k
            self.sk += prk
            last = (self.L - 1) * self.sk
            if self.rem != 0:
                self.rem = int(last % last)
            self.rem = int(last + 1 if self.rem >= 0.5 else last)
            self.last_list.append(self.rem)
            self.number_of_rows = int(np.ma.count(self.img) / self.img[1].size)
            self.number_of_cols = self.img[1].size
        for i in range(self.number_of_cols):
            for j in range(self.number_of_rows):
                num = self.img[j][i]
                if num != self.last_list[num]:
                    self.img[j][i] = self.last_list[num]
        cv2.imwrite("output_data/output.jpg", self.img)

    def plot_histogram(self):
        plt.hist(self.img.ravel(), 256, [0, 256])

    def show_image(self):
        cv2.imshow("Output-Image", self.img)
        cv2.imshow("Input-Image", self.original_image)
        cv2.waitKey(5000)
        cv2.destroyAllWindows()


if __name__ == "__main__":
    file_path = os.path.join(os.path.dirname(__file__), "image_data/input.jpg")
    stretcher = ConstantStretch()
    stretcher.stretch(file_path)
    stretcher.plot_histogram()
    stretcher.show_image()
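# The mapping computed in stretch() is the classic histogram-equalization
# transfer function s_k = (L - 1) * sum(p(r_j) for j <= k). A tiny numeric
# sketch with made-up counts (not taken from the module) for a 4-level image:
#
#   counts = [6, 2, 1, 1]                        # histogram, 10 pixels total
#   cdf = [0.6, 0.8, 0.9, 1.0]                   # running sum of counts / 10
#   mapped = [round((4 - 1) * c) for c in cdf]   # -> [2, 2, 3, 3]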
| 56 |
"""simple docstring"""
import unittest
import numpy as np
import torch
from diffusers import DDIMPipeline, DDIMScheduler, UNetaDModel
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu, slow, torch_device
from ..pipeline_params import UNCONDITIONAL_IMAGE_GENERATION_BATCH_PARAMS, UNCONDITIONAL_IMAGE_GENERATION_PARAMS
from ..test_pipelines_common import PipelineTesterMixin
enable_full_determinism()
class lowerCAmelCase__ ( lowercase, unittest.TestCase ):
'''simple docstring'''
lowerCamelCase__ = DDIMPipeline
lowerCamelCase__ = UNCONDITIONAL_IMAGE_GENERATION_PARAMS
lowerCamelCase__ = PipelineTesterMixin.required_optional_params - {
"""num_images_per_prompt""",
"""latents""",
"""callback""",
"""callback_steps""",
}
lowerCamelCase__ = UNCONDITIONAL_IMAGE_GENERATION_BATCH_PARAMS
lowerCamelCase__ = False
def A_ ( self ):
torch.manual_seed(0 )
_lowerCamelCase : List[Any] = UNetaDModel(
block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=3 , out_channels=3 , down_block_types=('DownBlock2D', 'AttnDownBlock2D') , up_block_types=('AttnUpBlock2D', 'UpBlock2D') , )
_lowerCamelCase : List[str] = DDIMScheduler()
_lowerCamelCase : Optional[int] = {'unet': unet, 'scheduler': scheduler}
return components
def A_ ( self , lowercase , lowercase=0 ):
if str(lowercase ).startswith('mps' ):
_lowerCamelCase : Dict = torch.manual_seed(lowercase )
else:
_lowerCamelCase : List[str] = torch.Generator(device=lowercase ).manual_seed(lowercase )
_lowerCamelCase : Tuple = {
'batch_size': 1,
'generator': generator,
'num_inference_steps': 2,
'output_type': 'numpy',
}
return inputs
def A_ ( self ):
_lowerCamelCase : Any = 'cpu'
_lowerCamelCase : Tuple = self.get_dummy_components()
_lowerCamelCase : Optional[Any] = self.pipeline_class(**lowercase )
pipe.to(lowercase )
pipe.set_progress_bar_config(disable=lowercase )
_lowerCamelCase : str = self.get_dummy_inputs(lowercase )
_lowerCamelCase : int = pipe(**lowercase ).images
_lowerCamelCase : Any = image[0, -3:, -3:, -1]
self.assertEqual(image.shape , (1, 32, 32, 3) )
_lowerCamelCase : Tuple = np.array(
[1.000E00, 5.717E-01, 4.717E-01, 1.000E00, 0.000E00, 1.000E00, 3.000E-04, 0.000E00, 9.000E-04] )
_lowerCamelCase : str = np.abs(image_slice.flatten() - expected_slice ).max()
self.assertLessEqual(lowercase , 1E-3 )
def A_ ( self ):
super().test_dict_tuple_outputs_equivalent(expected_max_difference=3E-3 )
def A_ ( self ):
super().test_save_load_local(expected_max_difference=3E-3 )
def A_ ( self ):
super().test_save_load_optional_components(expected_max_difference=3E-3 )
def A_ ( self ):
super().test_inference_batch_single_identical(expected_max_diff=3E-3 )
@slow
@require_torch_gpu
class lowerCAmelCase__ ( unittest.TestCase ):
'''simple docstring'''
def A_ ( self ):
_lowerCamelCase : Optional[Any] = 'google/ddpm-cifar10-32'
_lowerCamelCase : Optional[Any] = UNetaDModel.from_pretrained(lowercase )
_lowerCamelCase : Dict = DDIMScheduler()
_lowerCamelCase : Dict = DDIMPipeline(unet=lowercase , scheduler=lowercase )
ddim.to(lowercase )
ddim.set_progress_bar_config(disable=lowercase )
_lowerCamelCase : List[str] = torch.manual_seed(0 )
_lowerCamelCase : str = ddim(generator=lowercase , eta=0.0 , output_type='numpy' ).images
_lowerCamelCase : Dict = image[0, -3:, -3:, -1]
assert image.shape == (1, 32, 32, 3)
_lowerCamelCase : List[Any] = np.array([0.17_23, 0.16_17, 0.16_00, 0.16_26, 0.14_97, 0.15_13, 0.15_05, 0.14_42, 0.14_53] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
def A_ ( self ):
_lowerCamelCase : Optional[int] = 'google/ddpm-ema-bedroom-256'
_lowerCamelCase : str = UNetaDModel.from_pretrained(lowercase )
_lowerCamelCase : str = DDIMScheduler.from_pretrained(lowercase )
_lowerCamelCase : Optional[int] = DDIMPipeline(unet=lowercase , scheduler=lowercase )
ddpm.to(lowercase )
ddpm.set_progress_bar_config(disable=lowercase )
_lowerCamelCase : Tuple = torch.manual_seed(0 )
_lowerCamelCase : int = ddpm(generator=lowercase , output_type='numpy' ).images
_lowerCamelCase : Dict = image[0, -3:, -3:, -1]
assert image.shape == (1, 256, 256, 3)
_lowerCamelCase : str = np.array([0.00_60, 0.02_01, 0.03_44, 0.00_24, 0.00_18, 0.00_02, 0.00_22, 0.00_00, 0.00_69] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2 | 96 | 0 |
"""simple docstring"""
from string import ascii_uppercase
__magic_name__ = {str(ord(c) - 55): c for c in ascii_uppercase}
def _lowerCAmelCase ( UpperCamelCase_ , UpperCamelCase_ ):
if isinstance(UpperCamelCase_ , UpperCamelCase_ ):
raise TypeError("""int() can't convert non-string with explicit base""" )
if num < 0:
raise ValueError("""parameter must be positive int""" )
if isinstance(UpperCamelCase_ , UpperCamelCase_ ):
raise TypeError("""'str' object cannot be interpreted as an integer""" )
if isinstance(UpperCamelCase_ , UpperCamelCase_ ):
raise TypeError("""'float' object cannot be interpreted as an integer""" )
if base in (0, 1):
raise ValueError("""base must be >= 2""" )
if base > 36:
raise ValueError("""base must be <= 36""" )
__SCREAMING_SNAKE_CASE = """"""
__SCREAMING_SNAKE_CASE = 0
__SCREAMING_SNAKE_CASE = 0
while div != 1:
__SCREAMING_SNAKE_CASE ,__SCREAMING_SNAKE_CASE = divmod(UpperCamelCase_ , UpperCamelCase_ )
if base >= 11 and 9 < mod < 36:
__SCREAMING_SNAKE_CASE = ALPHABET_VALUES[str(UpperCamelCase_ )]
else:
__SCREAMING_SNAKE_CASE = str(UpperCamelCase_ )
new_value += actual_value
__SCREAMING_SNAKE_CASE = num // base
__SCREAMING_SNAKE_CASE = div
if div == 0:
return str(new_value[::-1] )
elif div == 1:
new_value += str(UpperCamelCase_ )
return str(new_value[::-1] )
return new_value[::-1]
if __name__ == "__main__":
import doctest
doctest.testmod()
for base in range(2, 37):
for num in range(1000):
assert int(decimal_to_any(num, base), base) == num, (
num,
base,
decimal_to_any(num, base),
int(decimal_to_any(num, base), base),
)
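# A couple of spot checks (a sketch, mirroring the exhaustive loop above):
#
#   >>> decimal_to_any(255, 16)
#   'FF'
#   >>> decimal_to_any(9, 2)
#   '1001'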
| 368 |
"""simple docstring"""
def _lowerCAmelCase ( UpperCamelCase_ , UpperCamelCase_ = False ):
if n == 2:
return True
if not n % 2 or n < 2:
return False
if n > 5 and n % 10 not in (1, 3, 7, 9): # can quickly check last digit
return False
if n > 3_3170_4406_4679_8873_8596_1981 and not allow_probable:
raise ValueError(
"""Warning: upper bound of deterministic test is exceeded. """
"""Pass allow_probable=True to allow probabilistic test. """
"""A return value of True indicates a probable prime.""" )
# array bounds provided by analysis
__SCREAMING_SNAKE_CASE = [
2047,
137_3653,
2532_6001,
32_1503_1751,
2_1523_0289_8747,
3_4747_4966_0383,
341_5500_7172_8321,
1,
382_5123_0565_4641_3051,
1,
1,
3186_6585_7834_0311_5116_7461,
3_3170_4406_4679_8873_8596_1981,
]
__SCREAMING_SNAKE_CASE = [2, 3, 5, 7, 11, 13, 17, 19, 23, 29, 31, 37, 41]
for idx, _p in enumerate(UpperCamelCase_ , 1 ):
if n < _p:
# then we have our last prime to check
__SCREAMING_SNAKE_CASE = primes[:idx]
break
__SCREAMING_SNAKE_CASE ,__SCREAMING_SNAKE_CASE = n - 1, 0
# break up n -1 into a power of 2 (s) and
# remaining odd component
# essentially, solve for d * 2 ** s == n - 1
while d % 2 == 0:
d //= 2
s += 1
for prime in plist:
__SCREAMING_SNAKE_CASE = False
for r in range(UpperCamelCase_ ):
__SCREAMING_SNAKE_CASE = pow(UpperCamelCase_ , d * 2**r , UpperCamelCase_ )
# see article for analysis explanation for m
if (r == 0 and m == 1) or ((m + 1) % n == 0):
__SCREAMING_SNAKE_CASE = True
# this loop will not determine compositeness
break
if pr:
continue
# if pr is False, then the above loop never evaluated to true,
# and the n MUST be composite
return False
return True
def _lowerCAmelCase ( ):
assert not miller_rabin(561 )
assert miller_rabin(563 )
# 2047
assert not miller_rabin(83_8201 )
assert miller_rabin(83_8207 )
# 1_373_653
assert not miller_rabin(1731_6001 )
assert miller_rabin(1731_6017 )
# 25_326_001
assert not miller_rabin(30_7838_6641 )
assert miller_rabin(30_7838_6653 )
# 3_215_031_751
assert not miller_rabin(1_7130_4557_4801 )
assert miller_rabin(1_7130_4557_4819 )
# 2_152_302_898_747
assert not miller_rabin(2_7797_9972_8307 )
assert miller_rabin(2_7797_9972_8327 )
# 3_474_749_660_383
assert not miller_rabin(113_8500_2390_9441 )
assert miller_rabin(113_8500_2390_9527 )
# 341_550_071_728_321
assert not miller_rabin(127_5041_0188_4880_4351 )
assert miller_rabin(127_5041_0188_4880_4391 )
# 3_825_123_056_546_413_051
assert not miller_rabin(796_6646_4458_5077_8779_1867 )
assert miller_rabin(796_6646_4458_5077_8779_1951 )
# 318_665_857_834_031_151_167_461
assert not miller_rabin(5528_4067_7446_6478_9766_0333 )
assert miller_rabin(5528_4067_7446_6478_9766_0359 )
# 3_317_044_064_679_887_385_961_981
# upper limit for probabilistic test
if __name__ == "__main__":
test_miller_rabin()
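# One more spot check as a sketch: 2^61 - 1 = 2_305_843_009_213_693_951 is a
# known Mersenne prime and sits inside the deterministic range, so the call
# below should return True without needing allow_probable.
#
#   assert miller_rabin(2_305_843_009_213_693_951)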
| 255 | 0 |
import json
import os
import shutil
import tempfile
import unittest
import numpy as np
import pytest
from transformers import MgpstrTokenizer
from transformers.models.mgp_str.tokenization_mgp_str import VOCAB_FILES_NAMES
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import IMAGE_PROCESSOR_NAME, is_torch_available, is_vision_available
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import MgpstrProcessor, ViTImageProcessor
@require_torch
@require_vision
class MgpstrProcessorTest(unittest.TestCase):
    image_processing_class = ViTImageProcessor if is_vision_available() else None

    @property
    def image_processor_dict(self):
        return self.image_processor_tester.prepare_image_processor_dict()

    def setUp(self):
        self.image_size = (3, 32, 128)
        self.tmpdirname = tempfile.mkdtemp()

        # fmt: off
        vocab = ["[GO]", "[s]", "0", "1", "2", "3", "4", "5", "6", "7", "8", "9", "a", "b", "c", "d", "e", "f", "g", "h", "i", "j", "k", "l", "m", "n", "o", "p", "q", "r", "s", "t", "u", "v", "w", "x", "y", "z"]
        # fmt: on
        vocab_tokens = dict(zip(vocab, range(len(vocab))))
        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as fp:
            fp.write(json.dumps(vocab_tokens) + "\n")

        image_processor_map = {
            "do_normalize": False,
            "do_resize": True,
            "image_processor_type": "ViTImageProcessor",
            "resample": 3,
            "size": {"height": 32, "width": 128},
        }
        self.image_processor_file = os.path.join(self.tmpdirname, IMAGE_PROCESSOR_NAME)
        with open(self.image_processor_file, "w", encoding="utf-8") as fp:
            json.dump(image_processor_map, fp)

    def get_tokenizer(self, **kwargs):
        return MgpstrTokenizer.from_pretrained(self.tmpdirname, **kwargs)

    def get_image_processor(self, **kwargs):
        return ViTImageProcessor.from_pretrained(self.tmpdirname, **kwargs)

    def tearDown(self):
        shutil.rmtree(self.tmpdirname)

    def prepare_image_inputs(self):
        """Prepare a single PIL image from random pixel data."""
        image_input = np.random.randint(255, size=(3, 30, 400), dtype=np.uint8)
        image_input = Image.fromarray(np.moveaxis(image_input, 0, -1))
        return image_input

    def test_save_load_pretrained_default(self):
        tokenizer = self.get_tokenizer()
        image_processor = self.get_image_processor()

        processor = MgpstrProcessor(tokenizer=tokenizer, image_processor=image_processor)
        processor.save_pretrained(self.tmpdirname)
        processor = MgpstrProcessor.from_pretrained(self.tmpdirname, use_fast=False)

        self.assertEqual(processor.char_tokenizer.get_vocab(), tokenizer.get_vocab())
        self.assertIsInstance(processor.char_tokenizer, MgpstrTokenizer)

        self.assertEqual(processor.image_processor.to_json_string(), image_processor.to_json_string())
        self.assertIsInstance(processor.image_processor, ViTImageProcessor)

    def test_save_load_pretrained_additional_features(self):
        tokenizer = self.get_tokenizer()
        image_processor = self.get_image_processor()

        processor = MgpstrProcessor(tokenizer=tokenizer, image_processor=image_processor)
        processor.save_pretrained(self.tmpdirname)

        tokenizer_add_kwargs = self.get_tokenizer(bos_token="(BOS)", eos_token="(EOS)")
        image_processor_add_kwargs = self.get_image_processor(do_normalize=False, padding_value=1.0)

        processor = MgpstrProcessor.from_pretrained(
            self.tmpdirname, bos_token="(BOS)", eos_token="(EOS)", do_normalize=False, padding_value=1.0
        )

        self.assertEqual(processor.char_tokenizer.get_vocab(), tokenizer_add_kwargs.get_vocab())
        self.assertIsInstance(processor.char_tokenizer, MgpstrTokenizer)

        self.assertEqual(processor.image_processor.to_json_string(), image_processor_add_kwargs.to_json_string())
        self.assertIsInstance(processor.image_processor, ViTImageProcessor)

    def test_image_processor(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()

        processor = MgpstrProcessor(tokenizer=tokenizer, image_processor=image_processor)

        image_input = self.prepare_image_inputs()

        input_image_proc = image_processor(image_input, return_tensors="np")
        input_processor = processor(images=image_input, return_tensors="np")

        for key in input_image_proc.keys():
            self.assertAlmostEqual(input_image_proc[key].sum(), input_processor[key].sum(), delta=1e-2)

    def test_tokenizer(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()

        processor = MgpstrProcessor(tokenizer=tokenizer, image_processor=image_processor)

        input_str = "test"

        encoded_processor = processor(text=input_str)
        encoded_tok = tokenizer(input_str)

        for key in encoded_tok.keys():
            self.assertListEqual(encoded_tok[key], encoded_processor[key])

    def test_processor(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()

        processor = MgpstrProcessor(tokenizer=tokenizer, image_processor=image_processor)

        input_str = "test"
        image_input = self.prepare_image_inputs()

        inputs = processor(text=input_str, images=image_input)

        self.assertListEqual(list(inputs.keys()), ["pixel_values", "labels"])

        # test if it raises when no input is passed
        with pytest.raises(ValueError):
            processor()

    def test_char_decode(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()

        processor = MgpstrProcessor(tokenizer=tokenizer, image_processor=image_processor)

        predicted_ids = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9], [3, 4, 3, 1, 1, 8, 9]]

        decoded_processor = processor.char_decode(predicted_ids)
        decoded_tok = tokenizer.batch_decode(predicted_ids)
        decode_strs = [seq.replace(" ", "") for seq in decoded_tok]

        self.assertListEqual(decoded_processor, decode_strs)

    def test_model_input_names(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()

        processor = MgpstrProcessor(tokenizer=tokenizer, image_processor=image_processor)

        input_str = None
        image_input = self.prepare_image_inputs()

        inputs = processor(text=input_str, images=image_input)

        self.assertListEqual(list(inputs.keys()), processor.model_input_names)

    def test_processor_batch_decode(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()

        processor = MgpstrProcessor(tokenizer=tokenizer, image_processor=image_processor)

        char_input = torch.randn(1, 27, 38)
        bpe_input = torch.randn(1, 27, 50257)
        wp_input = torch.randn(1, 27, 30522)

        results = processor.batch_decode([char_input, bpe_input, wp_input])

        self.assertListEqual(list(results.keys()), ["generated_text", "scores", "char_preds", "bpe_preds", "wp_preds"])
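# A short end-to-end sketch of the processor being tested above (hedged; the
# checkpoint is the released base model, and `image` is assumed to be a PIL
# image of a cropped text region):
#
#   from transformers import MgpstrForSceneTextRecognition, MgpstrProcessor
#
#   processor = MgpstrProcessor.from_pretrained("alibaba-damo/mgp-str-base")
#   model = MgpstrForSceneTextRecognition.from_pretrained("alibaba-damo/mgp-str-base")
#   pixel_values = processor(images=image, return_tensors="pt").pixel_values
#   outputs = model(pixel_values)
#   generated_text = processor.batch_decode(outputs.logits)["generated_text"]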
| 274 |
import argparse
import csv
import logging
import os
import random
import numpy as np
import torch
from torch.utils.data import DataLoader, RandomSampler, SequentialSampler, TensorDataset
from tqdm import tqdm, trange
from transformers import (
CONFIG_NAME,
WEIGHTS_NAME,
AdamW,
OpenAIGPTDoubleHeadsModel,
OpenAIGPTTokenizer,
get_linear_schedule_with_warmup,
)
logging.basicConfig(
format='''%(asctime)s - %(levelname)s - %(name)s - %(message)s''', datefmt='''%m/%d/%Y %H:%M:%S''', level=logging.INFO
)
A : Tuple = logging.getLogger(__name__)
def accuracy(out, labels):
    outputs = np.argmax(out, axis=1)
    return np.sum(outputs == labels)


def load_rocstories_dataset(dataset_path):
    """Output a list of tuples(story, 1st continuation, 2nd continuation, label)."""
    with open(dataset_path, encoding="utf_8") as f:
        f = csv.reader(f)
        output = []
        next(f)  # skip the first line
        for line in tqdm(f):
            output.append((" ".join(line[1:5]), line[5], line[6], int(line[-1]) - 1))
    return output
def pre_process_datasets(encoded_datasets, input_len, cap_length, start_token, delimiter_token, clf_token):
    """Pre-process datasets containing lists of tuples(story, 1st continuation, 2nd continuation, label)
    into Transformer inputs of shape (n_batch, n_alternative, length):
    input_ids[batch, alternative, :] = [start_token] + story[:cap_length] + [delimiter_token] + cont[:cap_length] + [clf_token]
    """
    tensor_datasets = []
    for dataset in encoded_datasets:
        n_batch = len(dataset)
        input_ids = np.zeros((n_batch, 2, input_len), dtype=np.int64)
        mc_token_ids = np.zeros((n_batch, 2), dtype=np.int64)
        lm_labels = np.full((n_batch, 2, input_len), fill_value=-100, dtype=np.int64)
        mc_labels = np.zeros((n_batch,), dtype=np.int64)
        for (
            i,
            (story, cont1, cont2, mc_label),
        ) in enumerate(dataset):
            with_cont1 = [start_token] + story[:cap_length] + [delimiter_token] + cont1[:cap_length] + [clf_token]
            with_cont2 = [start_token] + story[:cap_length] + [delimiter_token] + cont2[:cap_length] + [clf_token]
            input_ids[i, 0, : len(with_cont1)] = with_cont1
            input_ids[i, 1, : len(with_cont2)] = with_cont2
            mc_token_ids[i, 0] = len(with_cont1) - 1
            mc_token_ids[i, 1] = len(with_cont2) - 1
            lm_labels[i, 0, : len(with_cont1)] = with_cont1
            lm_labels[i, 1, : len(with_cont2)] = with_cont2
            mc_labels[i] = mc_label
        all_inputs = (input_ids, mc_token_ids, lm_labels, mc_labels)
        tensor_datasets.append(tuple(torch.tensor(t) for t in all_inputs))
    return tensor_datasets
def main():
"""simple docstring"""
A__ = argparse.ArgumentParser()
parser.add_argument("""--model_name""" , type=__a , default="""openai-gpt""" , help="""pretrained model name""" )
parser.add_argument("""--do_train""" , action="""store_true""" , help="""Whether to run training.""" )
parser.add_argument("""--do_eval""" , action="""store_true""" , help="""Whether to run eval on the dev set.""" )
parser.add_argument(
"""--output_dir""" , default=__a , type=__a , required=__a , help="""The output directory where the model predictions and checkpoints will be written.""" , )
parser.add_argument("""--train_dataset""" , type=__a , default="""""" )
parser.add_argument("""--eval_dataset""" , type=__a , default="""""" )
parser.add_argument("""--seed""" , type=__a , default=4_2 )
parser.add_argument("""--num_train_epochs""" , type=__a , default=3 )
parser.add_argument("""--train_batch_size""" , type=__a , default=8 )
parser.add_argument("""--eval_batch_size""" , type=__a , default=1_6 )
parser.add_argument("""--adam_epsilon""" , default=1E-8 , type=__a , help="""Epsilon for Adam optimizer.""" )
parser.add_argument("""--max_grad_norm""" , type=__a , default=1 )
parser.add_argument(
"""--max_steps""" , default=-1 , type=__a , help=(
"""If > 0: set total number of training steps to perform. Override num_train_epochs."""
) , )
parser.add_argument(
"""--gradient_accumulation_steps""" , type=__a , default=1 , help="""Number of updates steps to accumulate before performing a backward/update pass.""" , )
parser.add_argument("""--learning_rate""" , type=__a , default=6.25E-5 )
parser.add_argument("""--warmup_steps""" , default=0 , type=__a , help="""Linear warmup over warmup_steps.""" )
parser.add_argument("""--lr_schedule""" , type=__a , default="""warmup_linear""" )
parser.add_argument("""--weight_decay""" , type=__a , default=0.01 )
parser.add_argument("""--lm_coef""" , type=__a , default=0.9 )
parser.add_argument("""--n_valid""" , type=__a , default=3_7_4 )
parser.add_argument("""--server_ip""" , type=__a , default="""""" , help="""Can be used for distant debugging.""" )
parser.add_argument("""--server_port""" , type=__a , default="""""" , help="""Can be used for distant debugging.""" )
A__ = parser.parse_args()
print(__a )
if args.server_ip and args.server_port:
# Distant debugging - see https://code.visualstudio.com/docs/python/debugging#_attach-to-a-local-script
import ptvsd
print("""Waiting for debugger attach""" )
ptvsd.enable_attach(address=(args.server_ip, args.server_port) , redirect_output=__a )
ptvsd.wait_for_attach()
random.seed(args.seed )
np.random.seed(args.seed )
torch.manual_seed(args.seed )
torch.cuda.manual_seed_all(args.seed )
A__ = torch.device("""cuda""" if torch.cuda.is_available() else """cpu""" )
A__ = torch.cuda.device_count()
logger.info("""device: {}, n_gpu {}""".format(__a , __a ) )
if not args.do_train and not args.do_eval:
raise ValueError("""At least one of `do_train` or `do_eval` must be True.""" )
if not os.path.exists(args.output_dir ):
os.makedirs(args.output_dir )
# Load tokenizer and model
# This loading functions also add new tokens and embeddings called `special tokens`
# These new embeddings will be fine-tuned on the RocStories dataset
A__ = ["""_start_""", """_delimiter_""", """_classify_"""]
A__ = OpenAIGPTTokenizer.from_pretrained(args.model_name )
tokenizer.add_tokens(__a )
A__ = tokenizer.convert_tokens_to_ids(__a )
A__ = OpenAIGPTDoubleHeadsModel.from_pretrained(args.model_name )
model.resize_token_embeddings(len(__a ) )
model.to(__a )
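    # The three special tokens were just appended to the vocabulary, so the embedding
    # matrix must be resized before the model can index their new ids.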
    # Load and encode the datasets
    def tokenize_and_encode(obj ):
        if isinstance(obj , str ):
            return tokenizer.convert_tokens_to_ids(tokenizer.tokenize(obj ) )
        elif isinstance(obj , int ):
            return obj
        return [tokenize_and_encode(o ) for o in obj]
    logger.info("""Encoding dataset...""" )
    train_dataset = load_rocstories_dataset(args.train_dataset )
    eval_dataset = load_rocstories_dataset(args.eval_dataset )
    datasets = (train_dataset, eval_dataset)
    encoded_datasets = tokenize_and_encode(datasets )
    # Compute the max input length for the Transformer
    max_length = model.config.n_positions // 2 - 2
    input_length = max(
        len(story[:max_length] ) + max(len(cont1[:max_length] ) , len(cont2[:max_length] ) ) + 3
        for dataset in encoded_datasets
        for story, cont1, cont2, _ in dataset )
    input_length = min(input_length , model.config.n_positions )  # Max size of input for the pre-trained model
    # Prepare inputs tensors and dataloaders
    tensor_datasets = pre_process_datasets(encoded_datasets , input_length , max_length , *special_tokens_ids )
    train_tensor_dataset , eval_tensor_dataset = tensor_datasets[0], tensor_datasets[1]
    train_data = TensorDataset(*train_tensor_dataset )
    train_sampler = RandomSampler(train_data )
    train_dataloader = DataLoader(train_data , sampler=train_sampler , batch_size=args.train_batch_size )
    eval_data = TensorDataset(*eval_tensor_dataset )
    eval_sampler = SequentialSampler(eval_data )
    eval_dataloader = DataLoader(eval_data , sampler=eval_sampler , batch_size=args.eval_batch_size )
    # Prepare optimizer
    if args.do_train:
        if args.max_steps > 0:
            t_total = args.max_steps
            args.num_train_epochs = args.max_steps // (len(train_dataloader ) // args.gradient_accumulation_steps) + 1
        else:
            t_total = len(train_dataloader ) // args.gradient_accumulation_steps * args.num_train_epochs
        param_optimizer = list(model.named_parameters() )
        no_decay = ["""bias""", """LayerNorm.bias""", """LayerNorm.weight"""]
        optimizer_grouped_parameters = [
            {
                """params""": [p for n, p in param_optimizer if not any(nd in n for nd in no_decay )],
                """weight_decay""": args.weight_decay,
            },
            {"""params""": [p for n, p in param_optimizer if any(nd in n for nd in no_decay )], """weight_decay""": 0.0},
        ]
        optimizer = AdamW(optimizer_grouped_parameters , lr=args.learning_rate , eps=args.adam_epsilon )
        scheduler = get_linear_schedule_with_warmup(
            optimizer , num_warmup_steps=args.warmup_steps , num_training_steps=t_total )
    if args.do_train:
        nb_tr_steps , tr_loss , exp_average_loss = 0, 0, None
        model.train()
        for _ in trange(int(args.num_train_epochs ) , desc="""Epoch""" ):
            tr_loss = 0
            nb_tr_steps = 0
            tqdm_bar = tqdm(train_dataloader , desc="""Training""" )
            for step, batch in enumerate(tqdm_bar ):
                batch = tuple(t.to(device ) for t in batch )
                input_ids , mc_token_ids , lm_labels , mc_labels = batch
                losses = model(input_ids , mc_token_ids=mc_token_ids , lm_labels=lm_labels , mc_labels=mc_labels )
                loss = args.lm_coef * losses[0] + losses[1]
                loss.backward()
                optimizer.step()
                scheduler.step()
                optimizer.zero_grad()
                tr_loss += loss.item()
                exp_average_loss = (
                    loss.item() if exp_average_loss is None else 0.7 * exp_average_loss + 0.3 * loss.item()
                )
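                # `exp_average_loss` is an exponential moving average (decay 0.7) of the
                # per-batch loss; it only smooths the progress-bar readout below and has
                # no effect on optimization.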
                nb_tr_steps += 1
                tqdm_bar.desc = """Training loss: {:.2e} lr: {:.2e}""".format(exp_average_loss , scheduler.get_lr()[0] )
    # Save a trained model
    if args.do_train:
        # Save a trained model, configuration and tokenizer
        model_to_save = model.module if hasattr(model , """module""" ) else model  # Only save the model itself
        # If we save using the predefined names, we can load using `from_pretrained`
        output_model_file = os.path.join(args.output_dir , WEIGHTS_NAME )
        output_config_file = os.path.join(args.output_dir , CONFIG_NAME )
        torch.save(model_to_save.state_dict() , output_model_file )
        model_to_save.config.to_json_file(output_config_file )
        tokenizer.save_vocabulary(args.output_dir )
        # Load a trained model and vocabulary that you have fine-tuned
        model = OpenAIGPTDoubleHeadsModel.from_pretrained(args.output_dir )
        tokenizer = OpenAIGPTTokenizer.from_pretrained(args.output_dir )
        model.to(device )
    if args.do_eval:
        model.eval()
        eval_loss , eval_accuracy = 0, 0
        nb_eval_steps , nb_eval_examples = 0, 0
        for batch in tqdm(eval_dataloader , desc="""Evaluating""" ):
            batch = tuple(t.to(device ) for t in batch )
            input_ids , mc_token_ids , lm_labels , mc_labels = batch
            with torch.no_grad():
                _ , mc_loss , _ , mc_logits = model(
                    input_ids , mc_token_ids=mc_token_ids , lm_labels=lm_labels , mc_labels=mc_labels )
            mc_logits = mc_logits.detach().cpu().numpy()
            mc_labels = mc_labels.to("""cpu""" ).numpy()
            tmp_eval_accuracy = accuracy(mc_logits , mc_labels )
            eval_loss += mc_loss.mean().item()
            eval_accuracy += tmp_eval_accuracy
            nb_eval_examples += input_ids.size(0 )
            nb_eval_steps += 1
        eval_loss = eval_loss / nb_eval_steps
        eval_accuracy = eval_accuracy / nb_eval_examples
        train_loss = tr_loss / nb_tr_steps if args.do_train else None
        result = {"""eval_loss""": eval_loss, """eval_accuracy""": eval_accuracy, """train_loss""": train_loss}
        output_eval_file = os.path.join(args.output_dir , """eval_results.txt""" )
        with open(output_eval_file , """w""" ) as writer:
            logger.info("""***** Eval results *****""" )
            for key in sorted(result.keys() ):
                logger.info(""" %s = %s""" , key , str(result[key] ) )
                writer.write("""%s = %s\n""" % (key, str(result[key] )) )
if __name__ == "__main__":
main()
| 274 | 1 |
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices
logger = logging.get_logger(__name__)
DINAT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
"""shi-labs/dinat-mini-in1k-224""": """https://huggingface.co/shi-labs/dinat-mini-in1k-224/resolve/main/config.json""",
# See all Dinat models at https://huggingface.co/models?filter=dinat
}
class __snake_case ( BackboneConfigMixin , PretrainedConfig ):
    """simple docstring"""
    model_type = """dinat"""
    attribute_map = {
        """num_attention_heads""": """num_heads""",
        """num_hidden_layers""": """num_layers""",
    }
    def __init__( self , patch_size=4 , num_channels=3 , embed_dim=64 , depths=[3, 4, 6, 5] , num_heads=[2, 4, 8, 16] , kernel_size=7 , dilations=[[1, 8, 1], [1, 4, 1, 4], [1, 2, 1, 2, 1, 2], [1, 1, 1, 1, 1]] , mlp_ratio=3.0 , qkv_bias=True , hidden_dropout_prob=0.0 , attention_probs_dropout_prob=0.0 , drop_path_rate=0.1 , hidden_act="gelu" , initializer_range=0.02 , layer_norm_eps=1e-5 , layer_scale_init_value=0.0 , out_features=None , out_indices=None , **kwargs , ):
        '''simple docstring'''
        super().__init__(**kwargs )
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.embed_dim = embed_dim
        self.depths = depths
        self.num_layers = len(depths )
        self.num_heads = num_heads
        self.kernel_size = kernel_size
        self.dilations = dilations
        self.mlp_ratio = mlp_ratio
        self.qkv_bias = qkv_bias
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.drop_path_rate = drop_path_rate
        self.hidden_act = hidden_act
        self.layer_norm_eps = layer_norm_eps
        self.initializer_range = initializer_range
        # we set the hidden_size attribute in order to make Dinat work with VisionEncoderDecoderModel
        # this indicates the channel dimension after the last stage of the model
        self.hidden_size = int(embed_dim * 2 ** (len(depths ) - 1) )
        self.layer_scale_init_value = layer_scale_init_value
        self.stage_names = ['''stem'''] + [F"""stage{idx}""" for idx in range(1 , len(depths ) + 1 )]
        self._out_features , self._out_indices = get_aligned_output_features_output_indices(
            out_features=out_features , out_indices=out_indices , stage_names=self.stage_names )
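# Minimal usage sketch for the config class above (hypothetical values; upstream this
# class is published as `DinatConfig`):
#
#   config = __snake_case(embed_dim=64, depths=[3, 4, 6, 5])
#   assert config.hidden_size == 64 * 2 ** 3  # channel width after the last stage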
| 291 |
"""simple docstring"""
UNIT_SYMBOL = {
"""meter""": """m""",
"""kilometer""": """km""",
"""megametre""": """Mm""",
"""gigametre""": """Gm""",
"""terametre""": """Tm""",
"""petametre""": """Pm""",
"""exametre""": """Em""",
"""zettametre""": """Zm""",
"""yottametre""": """Ym""",
}
# Exponent of the factor(meter)
METRIC_CONVERSION = {
"""m""": 0,
"""km""": 3,
"""Mm""": 6,
"""Gm""": 9,
"""Tm""": 12,
"""Pm""": 15,
"""Em""": 18,
"""Zm""": 21,
"""Ym""": 24,
}
def length_conversion(value : float , from_type : str , to_type : str ) -> float:
    '''simple docstring'''
    from_sanitized = from_type.lower().strip('''s''' )
    to_sanitized = to_type.lower().strip('''s''' )
    from_sanitized = UNIT_SYMBOL.get(from_sanitized , from_sanitized )
    to_sanitized = UNIT_SYMBOL.get(to_sanitized , to_sanitized )
    if from_sanitized not in METRIC_CONVERSION:
        msg = (
            F"""Invalid 'from_type' value: {from_type!r}.\n"""
            F"""Conversion abbreviations are: {', '.join(METRIC_CONVERSION )}"""
        )
        raise ValueError(msg )
    if to_sanitized not in METRIC_CONVERSION:
        msg = (
            F"""Invalid 'to_type' value: {to_type!r}.\n"""
            F"""Conversion abbreviations are: {', '.join(METRIC_CONVERSION )}"""
        )
        raise ValueError(msg )
    from_exponent = METRIC_CONVERSION[from_sanitized]
    to_exponent = METRIC_CONVERSION[to_sanitized]
    exponent = 1
    if from_exponent > to_exponent:
        exponent = from_exponent - to_exponent
    else:
        exponent = -(to_exponent - from_exponent)
    return value * pow(10 , exponent )
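# Worked examples for the converter above (illustrative; the exponents come from
# METRIC_CONVERSION):
#
#   >>> length_conversion(4.0, "kilometer", "meter")     # 4.0 * 10 ** (3 - 0)
#   4000.0
#   >>> length_conversion(1.0, "meter", "kilometer")     # 1.0 * 10 ** (0 - 3)
#   0.001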
if __name__ == "__main__":
from doctest import testmod
testmod()
| 291 | 1 |
"""simple docstring"""
import unittest
from transformers import MODEL_FOR_DOCUMENT_QUESTION_ANSWERING_MAPPING, AutoTokenizer, is_vision_available
from transformers.pipelines import pipeline
from transformers.pipelines.document_question_answering import apply_tesseract
from transformers.testing_utils import (
is_pipeline_test,
nested_simplify,
    require_detectron2,
require_pytesseract,
require_tf,
require_torch,
require_vision,
slow,
)
from .test_pipelines_common import ANY
if is_vision_available():
from PIL import Image
from transformers.image_utils import load_image
else:
class lowerCAmelCase_ :
'''simple docstring'''
@staticmethod
        def A__ ( *args , **kwargs ) -> Dict:
pass
def lowercase (_lowerCAmelCase ):
return None
# This is a pinned image from a specific revision of a document question answering space, hosted by HuggingFace,
# so we can expect it to be available.
INVOICE_URL = (
'''https://huggingface.co/spaces/impira/docquery/resolve/2f6c96314dc84dfda62d40de9da55f2f5165d403/invoice.png'''
)
@is_pipeline_test
@require_torch
@require_vision
class lowerCAmelCase_ ( unittest.TestCase ):
'''simple docstring'''
    model_mapping = MODEL_FOR_DOCUMENT_QUESTION_ANSWERING_MAPPING
@require_pytesseract
@require_vision
    def get_test_pipeline( self , model , tokenizer , processor ) -> int:
        dqa_pipeline = pipeline(
            """document-question-answering""" , model=model , tokenizer=tokenizer , image_processor=processor )
        image = INVOICE_URL
        word_boxes = list(zip(*apply_tesseract(load_image(image ) , None , """""" ) ) )
        question = """What is the placebo?"""
        examples = [
            {
                """image""": load_image(image ),
"""question""": question,
},
{
"""image""": image,
"""question""": question,
},
{
"""image""": image,
"""question""": question,
"""word_boxes""": word_boxes,
},
]
return dqa_pipeline, examples
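    # The three example dicts above exercise the pipeline's accepted input shapes: a
    # loaded PIL image, a URL/path string, and a pre-computed `word_boxes` list that
    # lets the pipeline skip the Tesseract OCR pass entirely.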
    def run_pipeline_test( self , dqa_pipeline , examples ) -> Union[str, Any]:
        outputs = dqa_pipeline(examples , top_k=2 )
        self.assertEqual(
            outputs , [
                [
                    {"""score""": ANY(float ), """answer""": ANY(str ), """start""": ANY(int ), """end""": ANY(int )},
                    {"""score""": ANY(float ), """answer""": ANY(str ), """start""": ANY(int ), """end""": ANY(int )},
                ]
            ]
            * 3 , )
@require_torch
    @require_detectron2
@require_pytesseract
def A__ ( self ) -> str:
__lowerCAmelCase = pipeline("""document-question-answering""" , model="""hf-internal-testing/tiny-random-layoutlmv2""" )
__lowerCAmelCase = INVOICE_URL
__lowerCAmelCase = """How many cats are there?"""
__lowerCAmelCase = [
{"""score""": 0.0_001, """answer""": """oy 2312/2019""", """start""": 38, """end""": 39},
{"""score""": 0.0_001, """answer""": """oy 2312/2019 DUE""", """start""": 38, """end""": 40},
]
__lowerCAmelCase = dqa_pipeline(image=snake_case_ , question=snake_case_ , top_k=2 )
self.assertEqual(nested_simplify(snake_case_ , decimals=4 ) , snake_case_ )
__lowerCAmelCase = dqa_pipeline({"""image""": image, """question""": question} , top_k=2 )
self.assertEqual(nested_simplify(snake_case_ , decimals=4 ) , snake_case_ )
# This image does not detect ANY text in it, meaning layoutlmv2 should fail.
# Empty answer probably
__lowerCAmelCase = """./tests/fixtures/tests_samples/COCO/000000039769.png"""
__lowerCAmelCase = dqa_pipeline(image=snake_case_ , question=snake_case_ , top_k=2 )
self.assertEqual(snake_case_ , [] )
# We can optionnally pass directly the words and bounding boxes
__lowerCAmelCase = """./tests/fixtures/tests_samples/COCO/000000039769.png"""
__lowerCAmelCase = []
__lowerCAmelCase = []
__lowerCAmelCase = dqa_pipeline(image=snake_case_ , question=snake_case_ , words=snake_case_ , boxes=snake_case_ , top_k=2 )
self.assertEqual(snake_case_ , [] )
@slow
@require_torch
    @require_detectron2
@require_pytesseract
def A__ ( self ) -> str:
__lowerCAmelCase = pipeline(
"""document-question-answering""" , model="""tiennvcs/layoutlmv2-base-uncased-finetuned-docvqa""" , revision="""9977165""" , )
__lowerCAmelCase = INVOICE_URL
__lowerCAmelCase = """What is the invoice number?"""
__lowerCAmelCase = dqa_pipeline(image=snake_case_ , question=snake_case_ , top_k=2 )
self.assertEqual(
nested_simplify(snake_case_ , decimals=4 ) , [
{"""score""": 0.9_944, """answer""": """us-001""", """start""": 16, """end""": 16},
{"""score""": 0.0_009, """answer""": """us-001""", """start""": 16, """end""": 16},
] , )
__lowerCAmelCase = dqa_pipeline({"""image""": image, """question""": question} , top_k=2 )
self.assertEqual(
nested_simplify(snake_case_ , decimals=4 ) , [
{"""score""": 0.9_944, """answer""": """us-001""", """start""": 16, """end""": 16},
{"""score""": 0.0_009, """answer""": """us-001""", """start""": 16, """end""": 16},
] , )
__lowerCAmelCase = dqa_pipeline(
[{"""image""": image, """question""": question}, {"""image""": image, """question""": question}] , top_k=2 )
self.assertEqual(
nested_simplify(snake_case_ , decimals=4 ) , [
[
{"""score""": 0.9_944, """answer""": """us-001""", """start""": 16, """end""": 16},
{"""score""": 0.0_009, """answer""": """us-001""", """start""": 16, """end""": 16},
],
]
* 2 , )
@slow
@require_torch
    @require_detectron2
@require_pytesseract
def A__ ( self ) -> Union[str, Any]:
__lowerCAmelCase = pipeline(
"""document-question-answering""" , model="""tiennvcs/layoutlmv2-base-uncased-finetuned-docvqa""" , revision="""9977165""" , max_seq_len=50 , )
__lowerCAmelCase = INVOICE_URL
__lowerCAmelCase = """What is the invoice number?"""
__lowerCAmelCase = dqa_pipeline(image=snake_case_ , question=snake_case_ , top_k=2 )
self.assertEqual(
nested_simplify(snake_case_ , decimals=4 ) , [
{"""score""": 0.9_974, """answer""": """1110212019""", """start""": 23, """end""": 23},
{"""score""": 0.9_948, """answer""": """us-001""", """start""": 16, """end""": 16},
] , )
__lowerCAmelCase = dqa_pipeline({"""image""": image, """question""": question} , top_k=2 )
self.assertEqual(
nested_simplify(snake_case_ , decimals=4 ) , [
{"""score""": 0.9_974, """answer""": """1110212019""", """start""": 23, """end""": 23},
{"""score""": 0.9_948, """answer""": """us-001""", """start""": 16, """end""": 16},
] , )
__lowerCAmelCase = dqa_pipeline(
[{"""image""": image, """question""": question}, {"""image""": image, """question""": question}] , top_k=2 )
self.assertEqual(
nested_simplify(snake_case_ , decimals=4 ) , [
[
{"""score""": 0.9_974, """answer""": """1110212019""", """start""": 23, """end""": 23},
{"""score""": 0.9_948, """answer""": """us-001""", """start""": 16, """end""": 16},
]
]
* 2 , )
@slow
@require_torch
@require_pytesseract
@require_vision
def A__ ( self ) -> int:
__lowerCAmelCase = AutoTokenizer.from_pretrained(
"""impira/layoutlm-document-qa""" , revision="""3dc6de3""" , add_prefix_space=snake_case_ )
__lowerCAmelCase = pipeline(
"""document-question-answering""" , model="""impira/layoutlm-document-qa""" , tokenizer=snake_case_ , revision="""3dc6de3""" , )
__lowerCAmelCase = INVOICE_URL
__lowerCAmelCase = """What is the invoice number?"""
__lowerCAmelCase = dqa_pipeline(image=snake_case_ , question=snake_case_ , top_k=2 )
self.assertEqual(
nested_simplify(snake_case_ , decimals=4 ) , [
{"""score""": 0.4_251, """answer""": """us-001""", """start""": 16, """end""": 16},
{"""score""": 0.0_819, """answer""": """1110212019""", """start""": 23, """end""": 23},
] , )
__lowerCAmelCase = dqa_pipeline({"""image""": image, """question""": question} , top_k=2 )
self.assertEqual(
nested_simplify(snake_case_ , decimals=4 ) , [
{"""score""": 0.4_251, """answer""": """us-001""", """start""": 16, """end""": 16},
{"""score""": 0.0_819, """answer""": """1110212019""", """start""": 23, """end""": 23},
] , )
__lowerCAmelCase = dqa_pipeline(
[{"""image""": image, """question""": question}, {"""image""": image, """question""": question}] , top_k=2 )
self.assertEqual(
nested_simplify(snake_case_ , decimals=4 ) , [
[
{"""score""": 0.4_251, """answer""": """us-001""", """start""": 16, """end""": 16},
{"""score""": 0.0_819, """answer""": """1110212019""", """start""": 23, """end""": 23},
]
]
* 2 , )
__lowerCAmelCase = list(zip(*apply_tesseract(load_image(snake_case_ ) , snake_case_ , """""" ) ) )
# This model should also work if `image` is set to None
__lowerCAmelCase = dqa_pipeline({"""image""": None, """word_boxes""": word_boxes, """question""": question} , top_k=2 )
self.assertEqual(
nested_simplify(snake_case_ , decimals=4 ) , [
{"""score""": 0.4_251, """answer""": """us-001""", """start""": 16, """end""": 16},
{"""score""": 0.0_819, """answer""": """1110212019""", """start""": 23, """end""": 23},
] , )
@slow
@require_torch
@require_pytesseract
@require_vision
def A__ ( self ) -> str:
__lowerCAmelCase = AutoTokenizer.from_pretrained(
"""impira/layoutlm-document-qa""" , revision="""3dc6de3""" , add_prefix_space=snake_case_ )
__lowerCAmelCase = pipeline(
"""document-question-answering""" , model="""impira/layoutlm-document-qa""" , tokenizer=snake_case_ , revision="""3dc6de3""" , max_seq_len=50 , )
__lowerCAmelCase = INVOICE_URL
__lowerCAmelCase = """What is the invoice number?"""
__lowerCAmelCase = dqa_pipeline(image=snake_case_ , question=snake_case_ , top_k=2 )
self.assertEqual(
nested_simplify(snake_case_ , decimals=4 ) , [
{"""score""": 0.9_999, """answer""": """us-001""", """start""": 16, """end""": 16},
{"""score""": 0.9_998, """answer""": """us-001""", """start""": 16, """end""": 16},
] , )
__lowerCAmelCase = dqa_pipeline(
[{"""image""": image, """question""": question}, {"""image""": image, """question""": question}] , top_k=2 )
self.assertEqual(
nested_simplify(snake_case_ , decimals=4 ) , [
[
{"""score""": 0.9_999, """answer""": """us-001""", """start""": 16, """end""": 16},
{"""score""": 0.9_998, """answer""": """us-001""", """start""": 16, """end""": 16},
]
]
* 2 , )
__lowerCAmelCase = list(zip(*apply_tesseract(load_image(snake_case_ ) , snake_case_ , """""" ) ) )
# This model should also work if `image` is set to None
__lowerCAmelCase = dqa_pipeline({"""image""": None, """word_boxes""": word_boxes, """question""": question} , top_k=2 )
self.assertEqual(
nested_simplify(snake_case_ , decimals=4 ) , [
{"""score""": 0.9_999, """answer""": """us-001""", """start""": 16, """end""": 16},
{"""score""": 0.9_998, """answer""": """us-001""", """start""": 16, """end""": 16},
] , )
@slow
@require_torch
def A__ ( self ) -> str:
__lowerCAmelCase = pipeline(
"""document-question-answering""" , model="""naver-clova-ix/donut-base-finetuned-docvqa""" , tokenizer=AutoTokenizer.from_pretrained("""naver-clova-ix/donut-base-finetuned-docvqa""" ) , feature_extractor="""naver-clova-ix/donut-base-finetuned-docvqa""" , )
__lowerCAmelCase = INVOICE_URL
__lowerCAmelCase = """What is the invoice number?"""
__lowerCAmelCase = dqa_pipeline(image=snake_case_ , question=snake_case_ , top_k=2 )
self.assertEqual(nested_simplify(snake_case_ , decimals=4 ) , [{"""answer""": """us-001"""}] )
@require_tf
@unittest.skip("""Document question answering not implemented in TF""" )
def A__ ( self ) -> int:
pass
| 301 |
"""simple docstring"""
import unittest
from transformers import DebertaVaTokenizer, DebertaVaTokenizerFast
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin
SAMPLE_VOCAB = get_tests_dir('''fixtures/spiece.model''')
@require_sentencepiece
@require_tokenizers
class lowerCAmelCase_ ( TokenizerTesterMixin , unittest.TestCase ):
'''simple docstring'''
    tokenizer_class = DebertaVaTokenizer
    rust_tokenizer_class = DebertaVaTokenizerFast
    test_rust_tokenizer = True
    test_sentencepiece = True
    def setUp( self ) -> Optional[Any]:
        super().setUp()
        # We have a SentencePiece fixture for testing
        tokenizer = DebertaVaTokenizer(SAMPLE_VOCAB , unk_token="""<unk>""" )
        tokenizer.save_pretrained(self.tmpdirname )
    def get_input_output_texts( self , tokenizer ) -> List[Any]:
        input_text = """this is a test"""
        output_text = """this is a test"""
        return input_text, output_text
    def test_convert_token_and_id( self ) -> Optional[Any]:
        token = """<pad>"""
        token_id = 0
        self.assertEqual(self.get_tokenizer()._convert_token_to_id(token ) , token_id )
        self.assertEqual(self.get_tokenizer()._convert_id_to_token(token_id ) , token )
    def test_get_vocab( self ) -> Any:
        vocab_keys = list(self.get_tokenizer().get_vocab().keys() )
        self.assertEqual(vocab_keys[0] , """<pad>""" )
        self.assertEqual(vocab_keys[1] , """<unk>""" )
        self.assertEqual(vocab_keys[-1] , """[PAD]""" )
        self.assertEqual(len(vocab_keys ) , 30_001 )
    def test_vocab_size( self ) -> Optional[Any]:
        self.assertEqual(self.get_tokenizer().vocab_size , 30_000 )
def A__ ( self ) -> int:
# fmt: off
__lowerCAmelCase = """ \tHeLLo!how \n Are yoU? """
__lowerCAmelCase = ["""▁hello""", """!""", """how""", """▁are""", """▁you""", """?"""]
# fmt: on
__lowerCAmelCase = DebertaVaTokenizer(snake_case_ , do_lower_case=snake_case_ )
__lowerCAmelCase = tokenizer.convert_ids_to_tokens(tokenizer.encode(snake_case_ , add_special_tokens=snake_case_ ) )
self.assertListEqual(snake_case_ , snake_case_ )
__lowerCAmelCase = DebertaVaTokenizerFast(snake_case_ , do_lower_case=snake_case_ )
__lowerCAmelCase = rust_tokenizer.convert_ids_to_tokens(rust_tokenizer.encode(snake_case_ , add_special_tokens=snake_case_ ) )
self.assertListEqual(snake_case_ , snake_case_ )
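    # Both the slow (sentencepiece) and fast (tokenizers) implementations above must
    # agree: with do_lower_case=True the input is case-folded before SentencePiece
    # segmentation, which is why "HeLLo" and "yoU" come back as "▁hello" / "▁you".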
@unittest.skip("""There is an inconsistency between slow and fast tokenizer due to a bug in the fast one.""" )
def A__ ( self ) -> int:
pass
@unittest.skip("""There is an inconsistency between slow and fast tokenizer due to a bug in the fast one.""" )
def A__ ( self ) -> Dict:
pass
def A__ ( self ) -> List[str]:
# fmt: off
__lowerCAmelCase = """I was born in 92000, and this is falsé."""
__lowerCAmelCase = ["""▁""", """<unk>""", """▁was""", """▁born""", """▁in""", """▁9""", """2000""", """▁""", """,""", """▁and""", """▁this""", """▁is""", """▁fal""", """s""", """<unk>""", """▁""", """.""", ]
# fmt: on
__lowerCAmelCase = DebertaVaTokenizer(snake_case_ , split_by_punct=snake_case_ )
__lowerCAmelCase = tokenizer.convert_ids_to_tokens(tokenizer.encode(snake_case_ , add_special_tokens=snake_case_ ) )
self.assertListEqual(snake_case_ , snake_case_ )
__lowerCAmelCase = DebertaVaTokenizerFast(snake_case_ , split_by_punct=snake_case_ )
__lowerCAmelCase = rust_tokenizer.convert_ids_to_tokens(rust_tokenizer.encode(snake_case_ , add_special_tokens=snake_case_ ) )
self.assertListEqual(snake_case_ , snake_case_ )
def A__ ( self ) -> Dict:
# fmt: off
__lowerCAmelCase = """I was born in 92000, and this is falsé."""
__lowerCAmelCase = ["""▁i""", """▁was""", """▁born""", """▁in""", """▁9""", """2000""", """▁""", """,""", """▁and""", """▁this""", """▁is""", """▁fal""", """s""", """<unk>""", """▁""", """.""", ]
# fmt: on
__lowerCAmelCase = DebertaVaTokenizer(snake_case_ , do_lower_case=snake_case_ , split_by_punct=snake_case_ )
__lowerCAmelCase = tokenizer.convert_ids_to_tokens(tokenizer.encode(snake_case_ , add_special_tokens=snake_case_ ) )
self.assertListEqual(snake_case_ , snake_case_ )
__lowerCAmelCase = DebertaVaTokenizerFast(snake_case_ , do_lower_case=snake_case_ , split_by_punct=snake_case_ )
__lowerCAmelCase = rust_tokenizer.convert_ids_to_tokens(rust_tokenizer.encode(snake_case_ , add_special_tokens=snake_case_ ) )
self.assertListEqual(snake_case_ , snake_case_ )
def A__ ( self ) -> Any:
# fmt: off
__lowerCAmelCase = """I was born in 92000, and this is falsé."""
__lowerCAmelCase = ["""▁i""", """▁was""", """▁born""", """▁in""", """▁9""", """2000""", """,""", """▁and""", """▁this""", """▁is""", """▁fal""", """s""", """<unk>""", """.""", ]
# fmt: on
__lowerCAmelCase = DebertaVaTokenizer(snake_case_ , do_lower_case=snake_case_ , split_by_punct=snake_case_ )
__lowerCAmelCase = tokenizer.convert_ids_to_tokens(tokenizer.encode(snake_case_ , add_special_tokens=snake_case_ ) )
self.assertListEqual(snake_case_ , snake_case_ )
__lowerCAmelCase = DebertaVaTokenizerFast(snake_case_ , do_lower_case=snake_case_ , split_by_punct=snake_case_ )
__lowerCAmelCase = rust_tokenizer.convert_ids_to_tokens(rust_tokenizer.encode(snake_case_ , add_special_tokens=snake_case_ ) )
self.assertListEqual(snake_case_ , snake_case_ )
def A__ ( self ) -> Tuple:
# fmt: off
__lowerCAmelCase = """I was born in 92000, and this is falsé."""
__lowerCAmelCase = ["""▁""", """<unk>""", """▁was""", """▁born""", """▁in""", """▁9""", """2000""", """▁""", """,""", """▁and""", """▁this""", """▁is""", """▁fal""", """s""", """<unk>""", """▁""", """.""", ]
# fmt: on
__lowerCAmelCase = DebertaVaTokenizer(snake_case_ , do_lower_case=snake_case_ , split_by_punct=snake_case_ )
__lowerCAmelCase = tokenizer.convert_ids_to_tokens(tokenizer.encode(snake_case_ , add_special_tokens=snake_case_ ) )
self.assertListEqual(snake_case_ , snake_case_ )
__lowerCAmelCase = DebertaVaTokenizerFast(snake_case_ , do_lower_case=snake_case_ , split_by_punct=snake_case_ )
__lowerCAmelCase = rust_tokenizer.convert_ids_to_tokens(rust_tokenizer.encode(snake_case_ , add_special_tokens=snake_case_ ) )
self.assertListEqual(snake_case_ , snake_case_ )
def A__ ( self ) -> Any:
# fmt: off
__lowerCAmelCase = """ \tHeLLo!how \n Are yoU? """
__lowerCAmelCase = ["""▁""", """<unk>""", """e""", """<unk>""", """o""", """!""", """how""", """▁""", """<unk>""", """re""", """▁yo""", """<unk>""", """?"""]
# fmt: on
__lowerCAmelCase = DebertaVaTokenizer(snake_case_ , do_lower_case=snake_case_ , split_by_punct=snake_case_ )
__lowerCAmelCase = tokenizer.convert_ids_to_tokens(tokenizer.encode(snake_case_ , add_special_tokens=snake_case_ ) )
self.assertListEqual(snake_case_ , snake_case_ )
__lowerCAmelCase = DebertaVaTokenizerFast(snake_case_ , do_lower_case=snake_case_ , split_by_punct=snake_case_ )
__lowerCAmelCase = rust_tokenizer.convert_ids_to_tokens(rust_tokenizer.encode(snake_case_ , add_special_tokens=snake_case_ ) )
self.assertListEqual(snake_case_ , snake_case_ )
def A__ ( self ) -> int:
__lowerCAmelCase = self.get_tokenizer()
__lowerCAmelCase = self.get_rust_tokenizer()
__lowerCAmelCase = """I was born in 92000, and this is falsé."""
__lowerCAmelCase = tokenizer.convert_ids_to_tokens(tokenizer.encode(snake_case_ , add_special_tokens=snake_case_ ) )
__lowerCAmelCase = rust_tokenizer.convert_ids_to_tokens(rust_tokenizer.encode(snake_case_ , add_special_tokens=snake_case_ ) )
self.assertListEqual(snake_case_ , snake_case_ )
__lowerCAmelCase = tokenizer.encode(snake_case_ , add_special_tokens=snake_case_ )
__lowerCAmelCase = rust_tokenizer.encode(snake_case_ , add_special_tokens=snake_case_ )
self.assertListEqual(snake_case_ , snake_case_ )
__lowerCAmelCase = self.get_rust_tokenizer()
__lowerCAmelCase = tokenizer.encode(snake_case_ )
__lowerCAmelCase = rust_tokenizer.encode(snake_case_ )
self.assertListEqual(snake_case_ , snake_case_ )
def A__ ( self ) -> str:
__lowerCAmelCase = """This is a test"""
__lowerCAmelCase = [13, 1, 4_398, 25, 21, 1_289]
__lowerCAmelCase = ["""▁""", """T""", """his""", """▁is""", """▁a""", """▁test"""]
__lowerCAmelCase = ["""▁""", """<unk>""", """his""", """▁is""", """▁a""", """▁test"""]
__lowerCAmelCase = DebertaVaTokenizer(snake_case_ , keep_accents=snake_case_ )
__lowerCAmelCase = DebertaVaTokenizerFast(snake_case_ , keep_accents=snake_case_ )
__lowerCAmelCase = tokenizer.encode(snake_case_ , add_special_tokens=snake_case_ )
self.assertListEqual(snake_case_ , snake_case_ )
__lowerCAmelCase = tokenizer.tokenize(snake_case_ )
self.assertListEqual(snake_case_ , snake_case_ )
__lowerCAmelCase = tokenizer.convert_ids_to_tokens(snake_case_ )
self.assertListEqual(snake_case_ , snake_case_ )
__lowerCAmelCase = rust_tokenizer.encode(snake_case_ , add_special_tokens=snake_case_ )
self.assertListEqual(snake_case_ , snake_case_ )
__lowerCAmelCase = rust_tokenizer.tokenize(snake_case_ )
self.assertListEqual(snake_case_ , snake_case_ )
__lowerCAmelCase = rust_tokenizer.convert_ids_to_tokens(snake_case_ )
self.assertListEqual(snake_case_ , snake_case_ )
# fmt: off
__lowerCAmelCase = """I was born in 92000, and this is falsé."""
__lowerCAmelCase = [13, 1, 23, 386, 19, 561, 3_050, 15, 17, 48, 25, 8_256, 18, 1, 9]
__lowerCAmelCase = ["""▁""", """I""", """▁was""", """▁born""", """▁in""", """▁9""", """2000""", """,""", """▁and""", """▁this""", """▁is""", """▁fal""", """s""", """é""", """.""", ]
__lowerCAmelCase = ["""▁""", """<unk>""", """▁was""", """▁born""", """▁in""", """▁9""", """2000""", """,""", """▁and""", """▁this""", """▁is""", """▁fal""", """s""", """<unk>""", """.""", ]
# fmt: on
__lowerCAmelCase = tokenizer.encode(snake_case_ , add_special_tokens=snake_case_ )
self.assertListEqual(snake_case_ , snake_case_ )
__lowerCAmelCase = tokenizer.tokenize(snake_case_ )
self.assertListEqual(snake_case_ , snake_case_ )
__lowerCAmelCase = tokenizer.convert_ids_to_tokens(snake_case_ )
self.assertListEqual(snake_case_ , snake_case_ )
__lowerCAmelCase = rust_tokenizer.encode(snake_case_ , add_special_tokens=snake_case_ )
self.assertListEqual(snake_case_ , snake_case_ )
__lowerCAmelCase = rust_tokenizer.tokenize(snake_case_ )
self.assertListEqual(snake_case_ , snake_case_ )
__lowerCAmelCase = rust_tokenizer.convert_ids_to_tokens(snake_case_ )
self.assertListEqual(snake_case_ , snake_case_ )
def A__ ( self ) -> Optional[int]:
__lowerCAmelCase = DebertaVaTokenizer(snake_case_ )
__lowerCAmelCase = tokenizer.encode("""sequence builders""" )
__lowerCAmelCase = tokenizer.encode("""multi-sequence build""" )
__lowerCAmelCase = tokenizer.build_inputs_with_special_tokens(snake_case_ )
__lowerCAmelCase = tokenizer.build_inputs_with_special_tokens(snake_case_ , snake_case_ )
self.assertEqual([tokenizer.cls_token_id] + text + [tokenizer.sep_token_id] , snake_case_ )
self.assertEqual(
[tokenizer.cls_token_id] + text + [tokenizer.sep_token_id] + text_a + [tokenizer.sep_token_id] , snake_case_ , )
@slow
def A__ ( self ) -> int:
# fmt: off
__lowerCAmelCase = {"""input_ids""": [[1, 39_867, 36, 19_390, 486, 27, 35_052, 81_436, 18, 60_685, 1_225, 7, 35_052, 81_436, 18, 9_367, 16_899, 18, 15_937, 53, 594, 773, 18, 16_287, 30_465, 36, 15_937, 6, 41_139, 38, 36_979, 60_763, 191, 6, 34_132, 99, 6, 50_538, 390, 43_230, 6, 34_132, 2_779, 20_850, 14, 699, 1_072, 1_194, 36, 382, 10_901, 53, 7, 699, 1_072, 2_084, 36, 20_422, 630, 53, 19, 105, 3_049, 1_896, 1_053, 16_899, 1_506, 11, 37_978, 4_243, 7, 1_237, 31_869, 200, 16_566, 654, 6, 35_052, 81_436, 7, 55_630, 13_593, 4, 2], [1, 26, 15_011, 13, 667, 8, 1_053, 18, 23_611, 1_237, 72_356, 12_820, 34, 104_134, 1_209, 35, 13_313, 6_627, 21, 202, 347, 7, 164, 2_399, 11, 46, 4_485, 4, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 5, 1_232, 2_864, 15_785, 14_951, 105, 5, 8_581, 1_250, 4, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], """token_type_ids""": [[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], """attention_mask""": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501
# fmt: on
self.tokenizer_integration_test_util(
expected_encoding=snake_case_ , model_name="""microsoft/deberta-v2-xlarge""" , revision="""ad6e42c1532ddf3a15c39246b63f5559d558b670""" , )
| 301 | 1 |
import json
import os
from pathlib import Path
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple, Union
import sentencepiece
from ...tokenization_utils import BatchEncoding, PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)
SPIECE_UNDERLINE = '''▁'''
VOCAB_FILES_NAMES = {
'''vocab_file''': '''vocab.json''',
'''spm_file''': '''sentencepiece.bpe.model''',
'''tokenizer_config_file''': '''tokenizer_config.json''',
}
PRETRAINED_VOCAB_FILES_MAP = {
'''vocab_file''': {
'''facebook/m2m100_418M''': '''https://huggingface.co/facebook/m2m100_418M/resolve/main/vocab.json''',
'''facebook/m2m100_1.2B''': '''https://huggingface.co/facebook/m2m100_1.2B/resolve/main/vocab.json''',
},
'''spm_file''': {
'''facebook/m2m100_418M''': '''https://huggingface.co/facebook/m2m100_418M/resolve/main/sentencepiece.bpe.model''',
'''facebook/m2m100_1.2B''': '''https://huggingface.co/facebook/m2m100_1.2B/resolve/main/sentencepiece.bpe.model''',
},
'''tokenizer_config_file''': {
'''facebook/m2m100_418M''': '''https://huggingface.co/facebook/m2m100_418M/resolve/main/tokenizer_config.json''',
'''facebook/m2m100_1.2B''': '''https://huggingface.co/facebook/m2m100_1.2B/resolve/main/tokenizer_config.json''',
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
'''facebook/m2m100_418M''': 10_24,
}
# fmt: off
FAIRSEQ_LANGUAGE_CODES = {
'''m2m100''': ['''af''', '''am''', '''ar''', '''ast''', '''az''', '''ba''', '''be''', '''bg''', '''bn''', '''br''', '''bs''', '''ca''', '''ceb''', '''cs''', '''cy''', '''da''', '''de''', '''el''', '''en''', '''es''', '''et''', '''fa''', '''ff''', '''fi''', '''fr''', '''fy''', '''ga''', '''gd''', '''gl''', '''gu''', '''ha''', '''he''', '''hi''', '''hr''', '''ht''', '''hu''', '''hy''', '''id''', '''ig''', '''ilo''', '''is''', '''it''', '''ja''', '''jv''', '''ka''', '''kk''', '''km''', '''kn''', '''ko''', '''lb''', '''lg''', '''ln''', '''lo''', '''lt''', '''lv''', '''mg''', '''mk''', '''ml''', '''mn''', '''mr''', '''ms''', '''my''', '''ne''', '''nl''', '''no''', '''ns''', '''oc''', '''or''', '''pa''', '''pl''', '''ps''', '''pt''', '''ro''', '''ru''', '''sd''', '''si''', '''sk''', '''sl''', '''so''', '''sq''', '''sr''', '''ss''', '''su''', '''sv''', '''sw''', '''ta''', '''th''', '''tl''', '''tn''', '''tr''', '''uk''', '''ur''', '''uz''', '''vi''', '''wo''', '''xh''', '''yi''', '''yo''', '''zh''', '''zu'''],
'''wmt21''': ['''en''', '''ha''', '''is''', '''ja''', '''cs''', '''ru''', '''zh''', '''de''']
}
class lowercase_ ( PreTrainedTokenizer ):
    vocab_files_names = VOCAB_FILES_NAMES
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    model_input_names = ["input_ids", "attention_mask"]
    prefix_tokens : List[int] = []
    suffix_tokens : List[int] = []
    def __init__( self , vocab_file , spm_file , src_lang=None , tgt_lang=None , bos_token="<s>" , eos_token="</s>" , sep_token="</s>" , pad_token="<pad>" , unk_token="<unk>" , language_codes="m2m100" , sp_model_kwargs : Optional[Dict[str, Any]] = None , num_madeup_words=8 , **kwargs , ) -> None:
        self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs
        self.language_codes = language_codes
        fairseq_language_code = FAIRSEQ_LANGUAGE_CODES[language_codes]
        self.lang_code_to_token = {lang_code: f"""__{lang_code}__""" for lang_code in fairseq_language_code}
        kwargs['''additional_special_tokens'''] = kwargs.get('''additional_special_tokens''' , [] ) or []
        kwargs['''additional_special_tokens'''] += [
            self.get_lang_token(lang_code )
            for lang_code in fairseq_language_code
            if self.get_lang_token(lang_code ) not in kwargs['''additional_special_tokens''']
        ]
        super().__init__(
            src_lang=src_lang , tgt_lang=tgt_lang , bos_token=bos_token , eos_token=eos_token , sep_token=sep_token , unk_token=unk_token , pad_token=pad_token , language_codes=language_codes , sp_model_kwargs=self.sp_model_kwargs , num_madeup_words=num_madeup_words , **kwargs , )
        self.vocab_file = vocab_file
        self.encoder = load_json(vocab_file )
        self.decoder = {v: k for k, v in self.encoder.items()}
        self.spm_file = spm_file
        self.sp_model = load_spm(spm_file , self.sp_model_kwargs )
        self.encoder_size = len(self.encoder )
        self.lang_token_to_id = {
            self.get_lang_token(lang_code ): self.encoder_size + i for i, lang_code in enumerate(fairseq_language_code )
        }
        self.lang_code_to_id = {lang_code: self.encoder_size + i for i, lang_code in enumerate(fairseq_language_code )}
        self.id_to_lang_token = {v: k for k, v in self.lang_token_to_id.items()}
        self._src_lang = src_lang if src_lang is not None else '''en'''
        self.tgt_lang = tgt_lang
        self.cur_lang_id = self.get_lang_id(self._src_lang )
        self.set_src_lang_special_tokens(self._src_lang )
        self.num_madeup_words = num_madeup_words
@property
    def vocab_size( self ) -> int:
return len(self.encoder ) + len(self.lang_token_to_id )
@property
    def src_lang( self ) -> str:
        return self._src_lang
    @src_lang.setter
    def src_lang( self , new_src_lang : str ) -> None:
        self._src_lang = new_src_lang
        self.set_src_lang_special_tokens(self._src_lang )
    def _tokenize( self , text : str ) -> List[str]:
        return self.sp_model.encode(text , out_type=str )
    def _convert_token_to_id( self , token ) -> int:
        if token in self.lang_token_to_id:
            return self.lang_token_to_id[token]
        return self.encoder.get(token , self.encoder[self.unk_token] )
    def _convert_id_to_token( self , index : int ) -> str:
        if index in self.id_to_lang_token:
            return self.id_to_lang_token[index]
        return self.decoder.get(index , self.unk_token )
    def convert_tokens_to_string( self , tokens ) -> str:
        current_sub_tokens = []
        out_string = ''''''
        for token in tokens:
            # make sure that special tokens are not decoded using sentencepiece model
            if token in self.all_special_tokens:
                out_string += self.sp_model.decode(current_sub_tokens ) + token
                current_sub_tokens = []
            else:
                current_sub_tokens.append(token )
        out_string += self.sp_model.decode(current_sub_tokens )
        return out_string.strip()
    def get_special_tokens_mask( self , token_ids_0 : List[int] , token_ids_1 : Optional[List[int]] = None , already_has_special_tokens : bool = False ) -> List[int]:
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0 , token_ids_1=token_ids_1 , already_has_special_tokens=already_has_special_tokens )
        prefix_ones = [1] * len(self.prefix_tokens )
        suffix_ones = [1] * len(self.suffix_tokens )
        if token_ids_1 is None:
            return prefix_ones + ([0] * len(token_ids_0 )) + suffix_ones
        return prefix_ones + ([0] * len(token_ids_0 )) + ([0] * len(token_ids_1 )) + suffix_ones
    def build_inputs_with_special_tokens( self , token_ids_0 : List[int] , token_ids_1 : Optional[List[int]] = None ) -> List[int]:
        if token_ids_1 is None:
            return self.prefix_tokens + token_ids_0 + self.suffix_tokens
        # We don't expect to process pairs, but leave the pair logic for API consistency
        return self.prefix_tokens + token_ids_0 + token_ids_1 + self.suffix_tokens
    def get_vocab( self ) -> Dict:
        vocab = {self.convert_ids_to_tokens(i ): i for i in range(self.vocab_size )}
        vocab.update(self.added_tokens_encoder )
        return vocab
    def __getstate__( self ) -> Dict:
        state = self.__dict__.copy()
        state['''sp_model'''] = None
        return state
    def __setstate__( self , d : Dict ) -> None:
        self.__dict__ = d
        # for backward compatibility
        if not hasattr(self , '''sp_model_kwargs''' ):
            self.sp_model_kwargs = {}
        self.sp_model = load_spm(self.spm_file , self.sp_model_kwargs )
    def save_vocabulary( self , save_directory : str , filename_prefix : Optional[str] = None ) -> Tuple[str]:
        save_dir = Path(save_directory )
        if not save_dir.is_dir():
            raise OSError(f"""{save_directory} should be a directory""" )
        vocab_save_path = save_dir / (
            (filename_prefix + '''-''' if filename_prefix else '''''') + self.vocab_files_names['''vocab_file''']
        )
        spm_save_path = save_dir / (
            (filename_prefix + '''-''' if filename_prefix else '''''') + self.vocab_files_names['''spm_file''']
        )
        save_json(self.encoder , vocab_save_path )
        if os.path.abspath(self.spm_file ) != os.path.abspath(spm_save_path ) and os.path.isfile(self.spm_file ):
            copyfile(self.spm_file , spm_save_path )
        elif not os.path.isfile(self.spm_file ):
            with open(spm_save_path , '''wb''' ) as fi:
                content_spiece_model = self.sp_model.serialized_model_proto()
                fi.write(content_spiece_model )
        return (str(vocab_save_path ), str(spm_save_path ))
    def prepare_seq2seq_batch( self , src_texts : List[str] , src_lang : str = "en" , tgt_texts : Optional[List[str]] = None , tgt_lang : str = "ro" , **kwargs , ) -> BatchEncoding:
        self.src_lang = src_lang
        self.tgt_lang = tgt_lang
        self.set_src_lang_special_tokens(self.src_lang )
        return super().prepare_seq2seq_batch(src_texts , tgt_texts , **kwargs )
    def _build_translation_inputs( self , raw_inputs , src_lang : Optional[str] , tgt_lang : Optional[str] , **extra_kwargs ) -> Tuple:
        if src_lang is None or tgt_lang is None:
            raise ValueError('''Translation requires a `src_lang` and a `tgt_lang` for this model''' )
        self.src_lang = src_lang
        inputs = self(raw_inputs , add_special_tokens=True , **extra_kwargs )
        tgt_lang_id = self.get_lang_id(tgt_lang )
        inputs['''forced_bos_token_id'''] = tgt_lang_id
        return inputs
    def _switch_to_input_mode( self ) -> None:
        self.set_src_lang_special_tokens(self.src_lang )
    def _switch_to_target_mode( self ) -> None:
        self.set_tgt_lang_special_tokens(self.tgt_lang )
    def set_src_lang_special_tokens( self , src_lang : str ) -> None:
        lang_token = self.get_lang_token(src_lang )
        self.cur_lang_id = self.lang_token_to_id[lang_token]
        self.prefix_tokens = [self.cur_lang_id]
        self.suffix_tokens = [self.eos_token_id]
    def set_tgt_lang_special_tokens( self , tgt_lang : str ) -> None:
        lang_token = self.get_lang_token(tgt_lang )
        self.cur_lang_id = self.lang_token_to_id[lang_token]
        self.prefix_tokens = [self.cur_lang_id]
        self.suffix_tokens = [self.eos_token_id]
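    # Both setters above realize the M2M100 framing: model inputs are laid out as
    # [lang_code_token] <subword tokens ...> </s>, with the language token looked up
    # in the `lang_token_to_id` map built in `__init__`.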
    def get_lang_token( self , lang : str ) -> str:
        return self.lang_code_to_token[lang]
    def get_lang_id( self , lang : str ) -> int:
        lang_token = self.get_lang_token(lang )
        return self.lang_token_to_id[lang_token]
def load_spm(path : str , sp_model_kwargs : Dict[str, Any] ) -> sentencepiece.SentencePieceProcessor:
    """simple docstring"""
    spm = sentencepiece.SentencePieceProcessor(**sp_model_kwargs )
    spm.Load(str(path ) )
    return spm
def load_json(path : str ) -> Union[Dict, List]:
    """simple docstring"""
    with open(path , '''r''' ) as f:
        return json.load(f )
def save_json(data , path : str ) -> None:
    """simple docstring"""
    with open(path , '''w''' ) as f:
        json.dump(data , f , indent=2 )
| 278 |
from math import factorial
def combinations(n , k ) -> int:
    """simple docstring"""
    if n < k or k < 0:
        raise ValueError('''Please enter positive integers for n and k where n >= k''' )
    return factorial(n ) // (factorial(k ) * factorial(n - k ))
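# Quick sanity check for the binomial coefficient above (math.comb returns the same
# value on Python 3.8+):
#
#   >>> combinations(52, 5)
#   2598960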
if __name__ == "__main__":
print(
'''The number of five-card hands possible from a standard''',
f'''fifty-two card deck is: {combinations(52, 5)}\n''',
)
print(
'''If a class of 40 students must be arranged into groups of''',
f'''4 for group projects, there are {combinations(40, 4)} ways''',
'''to arrange them.\n''',
)
print(
'''If 10 teams are competing in a Formula One race, there''',
f'''are {combinations(10, 3)} ways that first, second and''',
'''third place can be awarded.''',
)
| 278 | 1 |
from queue import PriorityQueue
from typing import Any
import numpy as np
def pass_and_relaxation( graph : dict , v : str , visited_forward : set , visited_backward : set , cst_fwd : dict , cst_bwd : dict , queue : PriorityQueue , parent : dict , shortest_distance : float | int , ) -> float | int:
    for nxt, d in graph[v]:
        if nxt in visited_forward:
            continue
        old_cost_f = cst_fwd.get(nxt , np.inf )
        new_cost_f = cst_fwd[v] + d
        if new_cost_f < old_cost_f:
            queue.put((new_cost_f, nxt) )
            cst_fwd[nxt] = new_cost_f
            parent[nxt] = v
        if nxt in visited_backward:
            if cst_fwd[v] + d + cst_bwd[nxt] < shortest_distance:
                shortest_distance = cst_fwd[v] + d + cst_bwd[nxt]
    return shortest_distance
def bidirectional_dij( source : str , destination : str , graph_forward : dict , graph_backward : dict ) -> int:
    shortest_path_distance = -1
    visited_forward = set()
    visited_backward = set()
    cst_fwd = {source: 0}
    cst_bwd = {destination: 0}
    parent_forward = {source: None}
    parent_backward = {destination: None}
    queue_forward : PriorityQueue[Any] = PriorityQueue()
    queue_backward : PriorityQueue[Any] = PriorityQueue()
    shortest_distance = np.inf
    queue_forward.put((0, source) )
    queue_backward.put((0, destination) )
    if source == destination:
        return 0
    while not queue_forward.empty() and not queue_backward.empty():
        _ , v_fwd = queue_forward.get()
        visited_forward.add(v_fwd )
        _ , v_bwd = queue_backward.get()
        visited_backward.add(v_bwd )
        shortest_distance = pass_and_relaxation(
            graph_forward , v_fwd , visited_forward , visited_backward , cst_fwd , cst_bwd , queue_forward , parent_forward , shortest_distance , )
        shortest_distance = pass_and_relaxation(
            graph_backward , v_bwd , visited_backward , visited_forward , cst_bwd , cst_fwd , queue_backward , parent_backward , shortest_distance , )
        if cst_fwd[v_fwd] + cst_bwd[v_bwd] >= shortest_distance:
            break
    if shortest_distance != np.inf:
        shortest_path_distance = shortest_distance
    return shortest_path_distance
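# Termination argument for the loop above: once the cheapest settled forward node plus
# the cheapest settled backward node already costs at least the best meeting-point
# distance found so far, no undiscovered path can beat it, so the search stops early.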
graph_fwd = {
"""B""": [["""C""", 1]],
"""C""": [["""D""", 1]],
"""D""": [["""F""", 1]],
"""E""": [["""B""", 1], ["""G""", 2]],
"""F""": [],
"""G""": [["""F""", 1]],
}
graph_bwd = {
"""B""": [["""E""", 1]],
"""C""": [["""B""", 1]],
"""D""": [["""C""", 1]],
"""F""": [["""D""", 1], ["""G""", 1]],
"""E""": [[None, np.inf]],
"""G""": [["""E""", 2]],
}
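# Example query on the graphs above (the cheapest route E -> G -> F costs 2 + 1 = 3):
#
#   >>> bidirectional_dij("E", "F", graph_fwd, graph_bwd)
#   3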
if __name__ == "__main__":
import doctest
doctest.testmod()
| 92 |
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)
ROC_BERT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
'''weiweishi/roc-bert-base-zh''': '''https://huggingface.co/weiweishi/roc-bert-base-zh/resolve/main/config.json''',
}
class A__ ( PretrainedConfig ):
    model_type = "roc_bert"
    def __init__( self , vocab_size=30_522 , hidden_size=768 , num_hidden_layers=12 , num_attention_heads=12 , intermediate_size=3_072 , hidden_act="gelu" , hidden_dropout_prob=0.1 , attention_probs_dropout_prob=0.1 , max_position_embeddings=512 , type_vocab_size=2 , initializer_range=0.02 , layer_norm_eps=1e-12 , use_cache=True , pad_token_id=0 , position_embedding_type="absolute" , classifier_dropout=None , enable_pronunciation=True , enable_shape=True , pronunciation_embed_dim=768 , pronunciation_vocab_size=910 , shape_embed_dim=512 , shape_vocab_size=24_858 , concat_input=True , **kwargs , ) -> None:
        '''simple docstring'''
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.type_vocab_size = type_vocab_size
        self.layer_norm_eps = layer_norm_eps
        self.use_cache = use_cache
        self.enable_pronunciation = enable_pronunciation
        self.enable_shape = enable_shape
        self.pronunciation_embed_dim = pronunciation_embed_dim
        self.pronunciation_vocab_size = pronunciation_vocab_size
        self.shape_embed_dim = shape_embed_dim
        self.shape_vocab_size = shape_vocab_size
        self.concat_input = concat_input
        self.position_embedding_type = position_embedding_type
        self.classifier_dropout = classifier_dropout
        super().__init__(pad_token_id=pad_token_id , **kwargs )
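# Background for the extra fields above: RoCBert augments the usual token embeddings
# with shape and pronunciation channels; when `concat_input` is True the three
# embeddings are concatenated before the encoder, which is why separate embed dims
# and vocab sizes for shape and pronunciation live in the config.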
| 276 | 0 |
'''simple docstring'''
from collections import UserDict
from typing import List, Union
from ..utils import (
add_end_docstrings,
is_tf_available,
is_torch_available,
is_vision_available,
logging,
requires_backends,
)
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_vision_available():
from PIL import Image
from ..image_utils import load_image
if is_torch_available():
from ..models.auto.modeling_auto import MODEL_FOR_ZERO_SHOT_IMAGE_CLASSIFICATION_MAPPING
if is_tf_available():
from ..models.auto.modeling_tf_auto import TF_MODEL_FOR_ZERO_SHOT_IMAGE_CLASSIFICATION_MAPPING
from ..tf_utils import stable_softmax
logger = logging.get_logger(__name__)
@add_end_docstrings(PIPELINE_INIT_ARGS )
class lowerCamelCase ( Pipeline ):
def __init__( self, **lowercase_ ) -> List[str]:
super().__init__(**lowercase_ )
requires_backends(self, 'vision' )
self.check_model_type(
TF_MODEL_FOR_ZERO_SHOT_IMAGE_CLASSIFICATION_MAPPING
if self.framework == 'tf'
else MODEL_FOR_ZERO_SHOT_IMAGE_CLASSIFICATION_MAPPING )
    def __call__( self, images, **kwargs ) -> Union[str, Any]:
        return super().__call__(images, **kwargs )
    def _sanitize_parameters( self, **kwargs ) -> Union[str, Any]:
        preprocess_params = {}
        if "candidate_labels" in kwargs:
            preprocess_params['candidate_labels'] = kwargs['candidate_labels']
        if "hypothesis_template" in kwargs:
            preprocess_params['hypothesis_template'] = kwargs['hypothesis_template']
        return preprocess_params, {}, {}
    def preprocess( self, image, candidate_labels=None, hypothesis_template="This is a photo of {}." ) -> Optional[int]:
        image = load_image(image )
        inputs = self.image_processor(images=[image], return_tensors=self.framework )
        inputs['candidate_labels'] = candidate_labels
        sequences = [hypothesis_template.format(x ) for x in candidate_labels]
        text_inputs = self.tokenizer(sequences, return_tensors=self.framework, padding=True )
        inputs['text_inputs'] = [text_inputs]
        return inputs
    def _forward( self, model_inputs ) -> Optional[int]:
        candidate_labels = model_inputs.pop('candidate_labels' )
        text_inputs = model_inputs.pop('text_inputs' )
        if isinstance(text_inputs[0], UserDict ):
            text_inputs = text_inputs[0]
        else:
            # Batching case.
            text_inputs = text_inputs[0][0]
        outputs = self.model(**text_inputs, **model_inputs )
        model_outputs = {
            'candidate_labels': candidate_labels,
            'logits': outputs.logits_per_image,
        }
        return model_outputs
    def postprocess( self, model_outputs ) -> int:
        candidate_labels = model_outputs.pop('candidate_labels' )
        logits = model_outputs['logits'][0]
        if self.framework == "pt":
            probs = logits.softmax(dim=-1 ).squeeze(-1 )
            scores = probs.tolist()
            if not isinstance(scores, list ):
                scores = [scores]
        elif self.framework == "tf":
            probs = stable_softmax(logits, axis=-1 )
            scores = probs.numpy().tolist()
        else:
            raise ValueError(F'''Unsupported framework: {self.framework}''' )
        result = [
            {'score': score, 'label': candidate_label}
            for score, candidate_label in sorted(zip(scores, candidate_labels ), key=lambda x : -x[0] )
        ]
        return result
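# Hedged usage sketch (the checkpoint name is only illustrative of a CLIP-style model
# this pipeline expects):
#
#   classifier = pipeline("zero-shot-image-classification", model="openai/clip-vit-base-patch32")
#   classifier("cat.png", candidate_labels=["cat", "dog"])
#   # -> [{"score": ..., "label": "cat"}, {"score": ..., "label": "dog"}], sorted by score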
| 362 |
'''simple docstring'''
import json
from typing import Dict, List, Optional, Tuple, Union
from tokenizers import pre_tokenizers, processors
from ...tokenization_utils_base import AddedToken, BatchEncoding, EncodedInput
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import PaddingStrategy, logging
from .tokenization_led import LEDTokenizer
logger = logging.get_logger(__name__)
VOCAB_FILES_NAMES = {"vocab_file": "vocab.json", "merges_file": "merges.txt", "tokenizer_file": "tokenizer.json"}
PRETRAINED_VOCAB_FILES_MAP = {
"vocab_file": {
"allenai/led-base-16384": "https://huggingface.co/allenai/led-base-16384/resolve/main/vocab.json",
},
"merges_file": {
"allenai/led-base-16384": "https://huggingface.co/allenai/led-base-16384/resolve/main/merges.txt",
},
"tokenizer_file": {
"allenai/led-base-16384": "https://huggingface.co/allenai/led-base-16384/resolve/main/tokenizer.json",
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
"allenai/led-base-16384": 1_6_3_8_4,
}
class lowerCamelCase ( PreTrainedTokenizerFast ):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    slow_tokenizer_class = LEDTokenizer
    model_input_names = ['input_ids', 'attention_mask']
    def __init__( self, vocab_file=None, merges_file=None, tokenizer_file=None, errors="replace", bos_token="<s>", eos_token="</s>", sep_token="</s>", cls_token="<s>", unk_token="<unk>", pad_token="<pad>", mask_token="<mask>", add_prefix_space=False, trim_offsets=True, **kwargs, ):
        super().__init__(
            vocab_file, merges_file, tokenizer_file=tokenizer_file, errors=errors, bos_token=bos_token, eos_token=eos_token, sep_token=sep_token, cls_token=cls_token, unk_token=unk_token, pad_token=pad_token, mask_token=mask_token, add_prefix_space=add_prefix_space, trim_offsets=trim_offsets, **kwargs, )
        pre_tok_state = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__() )
        if pre_tok_state.get('add_prefix_space', add_prefix_space ) != add_prefix_space:
            pre_tok_class = getattr(pre_tokenizers, pre_tok_state.pop('type' ) )
            pre_tok_state['add_prefix_space'] = add_prefix_space
            self.backend_tokenizer.pre_tokenizer = pre_tok_class(**pre_tok_state )
        self.add_prefix_space = add_prefix_space
        # the pre_tokenizer is already updated in the GPT2TokenizerFast `__init__`
        tokenizer_component = 'post_processor'
        tokenizer_component_instance = getattr(self.backend_tokenizer, tokenizer_component, None )
        if tokenizer_component_instance:
            state = json.loads(tokenizer_component_instance.__getstate__() )
            # The lists 'sep' and 'cls' must be cased in tuples for the object `post_processor_class`
            if "sep" in state:
                state['sep'] = tuple(state['sep'] )
            if "cls" in state:
                state['cls'] = tuple(state['cls'] )
            changes_to_apply = False
            if state.get('add_prefix_space', add_prefix_space ) != add_prefix_space:
                state['add_prefix_space'] = add_prefix_space
                changes_to_apply = True
            if state.get('trim_offsets', trim_offsets ) != trim_offsets:
                state['trim_offsets'] = trim_offsets
                changes_to_apply = True
            if changes_to_apply:
                component_class = getattr(processors, state.pop('type' ) )
                new_value = component_class(**state )
                setattr(self.backend_tokenizer, tokenizer_component, new_value )
@property
# Copied from transformers.models.bart.tokenization_bart_fast.BartTokenizerFast.mask_token with BART->LED
    def mask_token( self ) -> str:
if self._mask_token is None:
if self.verbose:
logger.error('Using mask_token, but it is not set yet.' )
return None
return str(self._mask_token )
@mask_token.setter
    def mask_token( self, value ) -> Any:
        value = AddedToken(value, lstrip=True, rstrip=False ) if isinstance(value, str ) else value
        self._mask_token = value
    def _batch_encode_plus( self, *args, **kwargs ) -> BatchEncoding:
        is_split_into_words = kwargs.get('is_split_into_words', False )
        if is_split_into_words and not self.add_prefix_space:
            raise ValueError(
                F'''You need to instantiate {self.__class__.__name__} with add_prefix_space=True '''
                'to use it with pretokenized inputs.' )
        return super()._batch_encode_plus(*args, **kwargs )
    def _encode_plus( self, *args, **kwargs ) -> BatchEncoding:
        is_split_into_words = kwargs.get('is_split_into_words', False )
        if is_split_into_words and not self.add_prefix_space:
            raise ValueError(
                F'''You need to instantiate {self.__class__.__name__} with add_prefix_space=True '''
                'to use it with pretokenized inputs.' )
        return super()._encode_plus(*args, **kwargs )
    def save_vocabulary( self, save_directory, filename_prefix = None ) -> Tuple[str]:
        files = self._tokenizer.model.save(save_directory, name=filename_prefix )
        return tuple(files )
    def build_inputs_with_special_tokens( self, token_ids_0, token_ids_1=None ) -> Dict:
        output = [self.bos_token_id] + token_ids_0 + [self.eos_token_id]
        if token_ids_1 is None:
            return output
        return output + [self.eos_token_id] + token_ids_1 + [self.eos_token_id]
    def create_token_type_ids_from_sequences( self, token_ids_0, token_ids_1 = None ) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep ) * [0]
        return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep ) * [0]
    def _pad( self, encoded_inputs, max_length = None, padding_strategy = PaddingStrategy.DO_NOT_PAD, pad_to_multiple_of = None, return_attention_mask = None, ) -> dict:
        encoded_inputs = super()._pad(
            encoded_inputs=encoded_inputs, max_length=max_length, padding_strategy=padding_strategy, pad_to_multiple_of=pad_to_multiple_of, return_attention_mask=return_attention_mask, )
        # Load from model defaults
        if return_attention_mask is None:
            return_attention_mask = 'attention_mask' in self.model_input_names
        if return_attention_mask and "global_attention_mask" in encoded_inputs:
            required_input = encoded_inputs[self.model_input_names[0]]
            # `global_attention_mask` needs to have the same length as the other (sequential) inputs.
            needs_to_be_padded = len(encoded_inputs['global_attention_mask'] ) != len(required_input )
            if needs_to_be_padded:
                difference = len(required_input ) - len(encoded_inputs['global_attention_mask'] )
                if self.padding_side == "right":
                    # Use `-1` since `0` in `global_attention_mask` means `local attention` instead of `not to attend`
                    encoded_inputs['global_attention_mask'] = (
                        encoded_inputs['global_attention_mask'] + [-1] * difference
                    )
                elif self.padding_side == "left":
                    encoded_inputs['global_attention_mask'] = [-1] * difference + encoded_inputs[
                        'global_attention_mask'
                    ]
                else:
                    raise ValueError('Invalid padding strategy:' + str(self.padding_side ) )
        return encoded_inputs
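
# Minimal usage sketch (assumption: this class is the one shipped in `transformers`
# as `LEDTokenizerFast`, and "allenai/led-base-16384" is the standard public
# checkpoint -- neither name is defined in this file). It shows why `_pad` above
# fills `global_attention_mask` with -1: 0 already means "local attention", so
# padded positions need a distinct fill value.
if __name__ == "__main__":
    from transformers import LEDTokenizerFast

    led_tokenizer = LEDTokenizerFast.from_pretrained("allenai/led-base-16384" )
    enc = led_tokenizer(["short document", "a noticeably longer document to force padding"] )
    # request global attention on the first token of each sequence
    enc["global_attention_mask"] = [[1] + [0] * (len(ids ) - 1 ) for ids in enc["input_ids"]]
    padded = led_tokenizer.pad(enc, padding="longest" )
    print(padded["global_attention_mask"] )  # the shorter row is right-padded with -1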
| 332 | 0 |
'''simple docstring'''
# Hand-tuned denoising timestep schedules (the fast/smart/super variants used by the DeepFloyd-IF pipelines)
fast27_timesteps = [
999,
800,
799,
600,
599,
500,
400,
399,
377,
355,
333,
311,
288,
266,
244,
222,
200,
199,
177,
155,
133,
111,
88,
66,
44,
22,
0,
]
smart27_timesteps = [
999,
976,
952,
928,
905,
882,
858,
857,
810,
762,
715,
714,
572,
429,
428,
286,
285,
238,
190,
143,
142,
118,
95,
71,
47,
24,
0,
]
smart50_timesteps = [
999,
988,
977,
966,
955,
944,
933,
922,
911,
900,
899,
879,
859,
840,
820,
800,
799,
766,
733,
700,
699,
650,
600,
599,
500,
499,
400,
399,
350,
300,
299,
266,
233,
200,
199,
179,
159,
140,
120,
100,
99,
88,
77,
66,
55,
44,
33,
22,
11,
0,
]
smart100_timesteps = [
999,
995,
992,
989,
985,
981,
978,
975,
971,
967,
964,
961,
957,
956,
951,
947,
942,
937,
933,
928,
923,
919,
914,
913,
908,
903,
897,
892,
887,
881,
876,
871,
870,
864,
858,
852,
846,
840,
834,
828,
827,
820,
813,
806,
799,
792,
785,
784,
777,
770,
763,
756,
749,
742,
741,
733,
724,
716,
707,
699,
698,
688,
677,
666,
656,
655,
645,
634,
623,
613,
612,
598,
584,
570,
569,
555,
541,
527,
526,
505,
484,
483,
462,
440,
439,
396,
395,
352,
351,
308,
307,
264,
263,
220,
219,
176,
132,
88,
44,
0,
]
smart185_timesteps = [
999,
997,
995,
992,
990,
988,
986,
984,
981,
979,
977,
975,
972,
970,
968,
966,
964,
961,
959,
957,
956,
954,
951,
949,
946,
944,
941,
939,
936,
934,
931,
929,
926,
924,
921,
919,
916,
914,
913,
910,
907,
905,
902,
899,
896,
893,
891,
888,
885,
882,
879,
877,
874,
871,
870,
867,
864,
861,
858,
855,
852,
849,
846,
843,
840,
837,
834,
831,
828,
827,
824,
821,
817,
814,
811,
808,
804,
801,
798,
795,
791,
788,
785,
784,
780,
777,
774,
770,
766,
763,
760,
756,
752,
749,
746,
742,
741,
737,
733,
730,
726,
722,
718,
714,
710,
707,
703,
699,
698,
694,
690,
685,
681,
677,
673,
669,
664,
660,
656,
655,
650,
646,
641,
636,
632,
627,
622,
618,
613,
612,
607,
602,
596,
591,
586,
580,
575,
570,
569,
563,
557,
551,
545,
539,
533,
527,
526,
519,
512,
505,
498,
491,
484,
483,
474,
466,
457,
449,
440,
439,
428,
418,
407,
396,
395,
381,
366,
352,
351,
330,
308,
307,
286,
264,
263,
242,
220,
219,
176,
175,
132,
131,
88,
44,
0,
]
super27_timesteps = [
999,
991,
982,
974,
966,
958,
950,
941,
933,
925,
916,
908,
900,
899,
874,
850,
825,
800,
799,
700,
600,
500,
400,
300,
200,
100,
0,
]
super40_timesteps = [
999,
992,
985,
978,
971,
964,
957,
949,
942,
935,
928,
921,
914,
907,
900,
899,
879,
859,
840,
820,
800,
799,
766,
733,
700,
699,
650,
600,
599,
500,
499,
400,
399,
300,
299,
200,
199,
100,
99,
0,
]
super100_timesteps = [
999,
996,
992,
989,
985,
982,
979,
975,
972,
968,
965,
961,
958,
955,
951,
948,
944,
941,
938,
934,
931,
927,
924,
920,
917,
914,
910,
907,
903,
900,
899,
891,
884,
876,
869,
861,
853,
846,
838,
830,
823,
815,
808,
800,
799,
788,
777,
766,
755,
744,
733,
722,
711,
700,
699,
688,
677,
666,
655,
644,
633,
622,
611,
600,
599,
585,
571,
557,
542,
528,
514,
500,
499,
485,
471,
457,
442,
428,
414,
400,
399,
379,
359,
340,
320,
300,
299,
279,
259,
240,
220,
200,
199,
166,
133,
100,
99,
66,
33,
0,
]
| 298 |
"""simple docstring"""
def lowercase__ ( number ) -> int:
    '''Return the 1-based position of the most significant set bit of a non-negative int (0 for 0).'''
    if not isinstance(number , int ):
        raise TypeError('Input value must be an \'int\' type' )
    if number < 0:
        # a negative int never reaches zero under `>>= 1`, so reject it up front
        raise ValueError('Input value must be a non-negative \'int\'' )
    position = 0
    while number:
        position += 1
        number >>= 1
    return position
if __name__ == "__main__":
import doctest
doctest.testmod()
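    # Illustrative sanity checks: the loop counts right-shifts until the number
    # reaches zero, i.e. it computes Python's `int.bit_length`.
    assert lowercase__(1 ) == 1  # 0b1
    assert lowercase__(8 ) == 4  # 0b1000
    assert lowercase__(255 ) == 8  # 0b11111111
    assert lowercase__(256 ) == 9  # 0b100000000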
| 255 | 0 |
import json
from typing import TYPE_CHECKING, List, Optional, Tuple
from tokenizers import pre_tokenizers
from ...tokenization_utils_base import BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_gpta import GPTaTokenizer
if TYPE_CHECKING:
from transformers.pipelines.conversational import Conversation
_lowerCamelCase : Optional[int] = logging.get_logger(__name__)
_lowerCamelCase : Dict = {"vocab_file": "vocab.json", "merges_file": "merges.txt", "tokenizer_file": "tokenizer.json"}
_lowerCamelCase : Tuple = {
"vocab_file": {
"gpt2": "https://huggingface.co/gpt2/resolve/main/vocab.json",
"gpt2-medium": "https://huggingface.co/gpt2-medium/resolve/main/vocab.json",
"gpt2-large": "https://huggingface.co/gpt2-large/resolve/main/vocab.json",
"gpt2-xl": "https://huggingface.co/gpt2-xl/resolve/main/vocab.json",
"distilgpt2": "https://huggingface.co/distilgpt2/resolve/main/vocab.json",
},
"merges_file": {
"gpt2": "https://huggingface.co/gpt2/resolve/main/merges.txt",
"gpt2-medium": "https://huggingface.co/gpt2-medium/resolve/main/merges.txt",
"gpt2-large": "https://huggingface.co/gpt2-large/resolve/main/merges.txt",
"gpt2-xl": "https://huggingface.co/gpt2-xl/resolve/main/merges.txt",
"distilgpt2": "https://huggingface.co/distilgpt2/resolve/main/merges.txt",
},
"tokenizer_file": {
"gpt2": "https://huggingface.co/gpt2/resolve/main/tokenizer.json",
"gpt2-medium": "https://huggingface.co/gpt2-medium/resolve/main/tokenizer.json",
"gpt2-large": "https://huggingface.co/gpt2-large/resolve/main/tokenizer.json",
"gpt2-xl": "https://huggingface.co/gpt2-xl/resolve/main/tokenizer.json",
"distilgpt2": "https://huggingface.co/distilgpt2/resolve/main/tokenizer.json",
},
}
_lowerCamelCase : Any = {
"gpt2": 1_0_2_4,
"gpt2-medium": 1_0_2_4,
"gpt2-large": 1_0_2_4,
"gpt2-xl": 1_0_2_4,
"distilgpt2": 1_0_2_4,
}
class GPTaTokenizerFast(PreTrainedTokenizerFast ):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]
    slow_tokenizer_class = GPTaTokenizer

    def __init__( self , vocab_file=None , merges_file=None , tokenizer_file=None , unk_token="<|endoftext|>" , bos_token="<|endoftext|>" , eos_token="<|endoftext|>" , add_prefix_space=False , **kwargs , ) -> None:
        super().__init__(
            vocab_file , merges_file , tokenizer_file=tokenizer_file , unk_token=unk_token , bos_token=bos_token , eos_token=eos_token , add_prefix_space=add_prefix_space , **kwargs , )
        self.add_bos_token = kwargs.pop("""add_bos_token""" , False )
        pre_tok_state = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__() )
        if pre_tok_state.get("""add_prefix_space""" , add_prefix_space ) != add_prefix_space:
            pre_tok_class = getattr(pre_tokenizers , pre_tok_state.pop("""type""" ) )
            pre_tok_state["""add_prefix_space"""] = add_prefix_space
            self.backend_tokenizer.pre_tokenizer = pre_tok_class(**pre_tok_state )
        self.add_prefix_space = add_prefix_space

    def _batch_encode_plus( self , *args , **kwargs ) -> BatchEncoding:
        is_split_into_words = kwargs.get("""is_split_into_words""" , False )
        assert self.add_prefix_space or not is_split_into_words, (
            f"You need to instantiate {self.__class__.__name__} with add_prefix_space=True "
            "to use it with pretokenized inputs."
        )
        return super()._batch_encode_plus(*args , **kwargs )

    def _encode_plus( self , *args , **kwargs ) -> BatchEncoding:
        is_split_into_words = kwargs.get("""is_split_into_words""" , False )
        assert self.add_prefix_space or not is_split_into_words, (
            f"You need to instantiate {self.__class__.__name__} with add_prefix_space=True "
            "to use it with pretokenized inputs."
        )
        return super()._encode_plus(*args , **kwargs )

    def save_vocabulary( self , save_directory , filename_prefix = None ) -> Tuple[str]:
        files = self._tokenizer.model.save(save_directory , name=filename_prefix )
        return tuple(files )

    def _build_conversation_input_ids( self , conversation : "Conversation" ) -> List[int]:
        input_ids = []
        for is_user, text in conversation.iter_texts():
            input_ids.extend(self.encode(text , add_special_tokens=False ) + [self.eos_token_id] )
        if len(input_ids ) > self.model_max_length:
            input_ids = input_ids[-self.model_max_length :]
        return input_ids
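
# Minimal usage sketch (assumption: this class is the fast GPT-2 tokenizer
# shipped in `transformers`, and "gpt2" is the standard public checkpoint, not a
# name defined in this file). `add_prefix_space=True` is required for
# pre-tokenized input, as enforced by the asserts above.
if __name__ == "__main__":
    from transformers import GPT2TokenizerFast

    tok = GPT2TokenizerFast.from_pretrained("gpt2" , add_prefix_space=True )
    encoded = tok(["Hello" , "world"] , is_split_into_words=True )
    print(encoded["input_ids"] )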
| 159 |
from __future__ import annotations
from typing import Generic, TypeVar
T = TypeVar("T")

class DisjointSetTreeNode(Generic[T] ):
    # Disjoint-set node storing its data, parent pointer and rank
    def __init__( self , data : T ) -> None:
        self.data = data
        self.parent = self
        self.rank = 0

class DisjointSetTree(Generic[T] ):
    # Disjoint-set (union-find) with path compression and union by rank
    def __init__( self ) -> None:
        # map from node name to the node object
        self.map : dict[T, DisjointSetTreeNode[T]] = {}

    def make_set( self , data : T ) -> None:
        # create a new set with data as its only member
        self.map[data] = DisjointSetTreeNode(data )

    def find_set( self , data : T ) -> DisjointSetTreeNode[T]:
        # find the representative of the set data belongs to (with path compression)
        elem_ref = self.map[data]
        if elem_ref != elem_ref.parent:
            elem_ref.parent = self.find_set(elem_ref.parent.data )
        return elem_ref.parent

    def link( self , node1 : DisjointSetTreeNode[T] , node2 : DisjointSetTreeNode[T] ) -> None:
        # helper for union: attach the lower-rank tree under the higher-rank one
        if node1.rank > node2.rank:
            node2.parent = node1
        else:
            node1.parent = node2
            if node1.rank == node2.rank:
                node2.rank += 1

    def union( self , data1 : T , data2 : T ) -> None:
        # merge the two disjoint sets containing data1 and data2
        self.link(self.find_set(data1 ) , self.find_set(data2 ) )

class GraphUndirectedWeighted(Generic[T] ):
    def __init__( self ) -> None:
        # map of a node to its neighbouring nodes (with edge weights)
        self.connections : dict[T, dict[T, int]] = {}

    def add_node( self , node : T ) -> None:
        # add a node only if it is not already present in the graph
        if node not in self.connections:
            self.connections[node] = {}

    def add_edge( self , node1 : T , node2 : T , weight : int ) -> None:
        # add an undirected edge with the given weight
        self.add_node(node1 )
        self.add_node(node2 )
        self.connections[node1][node2] = weight
        self.connections[node2][node1] = weight

    def kruskal( self ) -> GraphUndirectedWeighted[T]:
        # minimum spanning tree via Kruskal's algorithm (method name is descriptive;
        # the original name is not recoverable from this file)
        edges = []
        seen = set()
        for start in self.connections:
            for end in self.connections[start]:
                if (start, end) not in seen:
                    seen.add((end, start) )
                    edges.append((start, end, self.connections[start][end]) )
        edges.sort(key=lambda edge : edge[2] )

        # creating the disjoint set
        disjoint_set = DisjointSetTree[T]()
        for node in self.connections:
            disjoint_set.make_set(node )

        # MST generation
        num_edges = 0
        index = 0
        graph = GraphUndirectedWeighted[T]()
        while num_edges < len(self.connections ) - 1:
            u, v, w = edges[index]
            index += 1
            parent_u = disjoint_set.find_set(u )
            parent_v = disjoint_set.find_set(v )
            if parent_u != parent_v:
                num_edges += 1
                graph.add_edge(u , v , w )
                disjoint_set.union(u , v )
        return graph
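
# Usage sketch: build a small weighted graph and extract its minimum spanning
# tree with the implementation above. Edge weights are illustrative; the MST of
# this graph keeps edges a-b (1), b-c (2) and c-d (3) for a total weight of 6.
if __name__ == "__main__":
    g = GraphUndirectedWeighted[str]()
    g.add_edge("a" , "b" , 1 )
    g.add_edge("b" , "c" , 2 )
    g.add_edge("a" , "c" , 4 )
    g.add_edge("c" , "d" , 3 )
    mst = g.kruskal()
    print(mst.connections )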
| 159 | 1 |
"""simple docstring"""
import argparse
JS_PATH = """docs/source/_static/js/custom.js"""  # (constant name assumed; only the path is given in the source)
def update_custom_js( version ) -> None:
    """Rewrite the stable version and the version mapping in the doc's custom.js."""
    with open(JS_PATH , encoding="""utf-8""" , newline="""\n""" ) as f:
        lines = f.readlines()
    index = 0
    # First let's put the right version
    while not lines[index].startswith("""const stableVersion =""" ):
        index += 1
    lines[index] = F'const stableVersion = "v{version}"\n'
    # Then update the dictionary
    while not lines[index].startswith("""const versionMapping = {""" ):
        index += 1
    # We go until the end
    while not lines[index].startswith("""}""" ):
        index += 1
    # We add the new version at the end
    lines[index - 1] += F'    "v{version}": "v{version}",\n'
    with open(JS_PATH , """w""" , encoding="""utf-8""" , newline="""\n""" ) as f:
        f.writelines(lines )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument("""--version""", help="""Release version.""")
    args = parser.parse_args()
update_custom_js(args.version)
| 291 |
"""simple docstring"""
from __future__ import annotations
def binary_search( a_list , item ) -> bool:
    # note: the input list must be sorted in ascending order
    if len(a_list ) == 0:
        return False
    midpoint = len(a_list ) // 2
    if a_list[midpoint] == item:
        return True
    if item < a_list[midpoint]:
        return binary_search(a_list[:midpoint] , item )
    else:
        return binary_search(a_list[midpoint + 1 :] , item )
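
# Index-based variant (illustrative): the recursive version above slices the
# list on every call, copying O(n) elements; tracking lo/hi indices instead
# keeps the search at O(log n) time and O(1) extra space.
def binary_search_iterative( a_list , item ) -> bool:
    lo , hi = 0 , len(a_list ) - 1
    while lo <= hi:
        midpoint = (lo + hi) // 2
        if a_list[midpoint] == item:
            return True
        if item < a_list[midpoint]:
            hi = midpoint - 1
        else:
            lo = midpoint + 1
    return False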
if __name__ == "__main__":
    user_input = input("""Enter numbers separated by comma:\n""").strip()
    sequence = [int(item.strip()) for item in user_input.split(""",""")]
    target = int(input("""Enter the number to be found in the list:\n""").strip())
    not_str = """""" if binary_search(sequence, target) else """not """
print(F"""{target} was {not_str}found in {sequence}""")
| 291 | 1 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available
_import_structure = {
"""configuration_groupvit""": [
"""GROUPVIT_PRETRAINED_CONFIG_ARCHIVE_MAP""",
"""GroupViTConfig""",
"""GroupViTOnnxConfig""",
"""GroupViTTextConfig""",
"""GroupViTVisionConfig""",
],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["""modeling_groupvit"""] = [
"""GROUPVIT_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""GroupViTModel""",
"""GroupViTPreTrainedModel""",
"""GroupViTTextModel""",
"""GroupViTVisionModel""",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["""modeling_tf_groupvit"""] = [
"""TF_GROUPVIT_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""TFGroupViTModel""",
"""TFGroupViTPreTrainedModel""",
"""TFGroupViTTextModel""",
"""TFGroupViTVisionModel""",
]
if TYPE_CHECKING:
from .configuration_groupvit import (
GROUPVIT_PRETRAINED_CONFIG_ARCHIVE_MAP,
GroupViTConfig,
GroupViTOnnxConfig,
GroupViTTextConfig,
GroupViTVisionConfig,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_groupvit import (
GROUPVIT_PRETRAINED_MODEL_ARCHIVE_LIST,
GroupViTModel,
GroupViTPreTrainedModel,
GroupViTTextModel,
GroupViTVisionModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_groupvit import (
TF_GROUPVIT_PRETRAINED_MODEL_ARCHIVE_LIST,
TFGroupViTModel,
TFGroupViTPreTrainedModel,
TFGroupViTTextModel,
TFGroupViTVisionModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
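
# Illustrative usage (assumption: this file is the `transformers.models.groupvit`
# package module; nothing below is defined here). `_LazyModule` defers the heavy
# submodule imports until first attribute access, so importing the package stays
# cheap. Shown as a comment because executable code at the bottom of a lazy
# module would defeat its purpose:
#
#     import importlib
#     groupvit = importlib.import_module("transformers.models.groupvit")
#     print(groupvit.GroupViTConfig.__name__)  # first access triggers the real import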
| 362 |
"""simple docstring"""
import time
from contextlib import contextmanager
from pathlib import Path
import pytest
import requests
from huggingface_hub.hf_api import HfApi, HfFolder
CI_HUB_USER = """__DUMMY_TRANSFORMERS_USER__"""
CI_HUB_USER_FULL_NAME = """Dummy User"""
CI_HUB_USER_TOKEN = """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"""
CI_HUB_ENDPOINT = """https://hub-ci.huggingface.co"""
CI_HUB_DATASETS_URL = CI_HUB_ENDPOINT + """/datasets/{repo_id}/resolve/{revision}/{path}"""
CI_HFH_HUGGINGFACE_CO_URL_TEMPLATE = CI_HUB_ENDPOINT + """/{repo_id}/resolve/{revision}/{filename}"""
CI_HUB_TOKEN_PATH = Path("""~/.huggingface/hub_ci_token""").expanduser()
@pytest.fixture
def ci_hfh_hf_hub_url( monkeypatch ):
    monkeypatch.setattr(
        """huggingface_hub.file_download.HUGGINGFACE_CO_URL_TEMPLATE""" , CI_HFH_HUGGINGFACE_CO_URL_TEMPLATE )
@pytest.fixture
def ci_hub_config( monkeypatch ):
    monkeypatch.setattr("""datasets.config.HF_ENDPOINT""" , CI_HUB_ENDPOINT )
    monkeypatch.setattr("""datasets.config.HUB_DATASETS_URL""" , CI_HUB_DATASETS_URL )
@pytest.fixture
def ci_hub_token_path( monkeypatch ):
    monkeypatch.setattr("""huggingface_hub.hf_api.HfFolder.path_token""" , CI_HUB_TOKEN_PATH )
@pytest.fixture
def set_ci_hub_access_token( ci_hub_config , ci_hub_token_path ):
    HfFolder.save_token(CI_HUB_USER_TOKEN )
    yield
    HfFolder.delete_token()
@pytest.fixture(scope="""session""" )
def hf_api( ) -> HfApi:
    return HfApi(endpoint=CI_HUB_ENDPOINT )
@pytest.fixture(scope="""session""" )
def hf_token( hf_api ) -> str:
    previous_token = HfFolder.get_token()
    HfFolder.save_token(CI_HUB_USER_TOKEN )
    yield CI_HUB_USER_TOKEN
    if previous_token is not None:
        HfFolder.save_token(previous_token )
@pytest.fixture
def cleanup_repo( hf_api ):
    def _cleanup_repo(repo_id ):
        hf_api.delete_repo(repo_id , token=CI_HUB_USER_TOKEN , repo_type="""dataset""" )
    return _cleanup_repo
@pytest.fixture
def temporary_repo( cleanup_repo ):
    @contextmanager
    def _temporary_repo(repo_id ):
        try:
            yield repo_id
        finally:
            cleanup_repo(repo_id )
    return _temporary_repo
@pytest.fixture(scope="""session""" )
def hf_private_dataset_repo_txt_data_( hf_api , hf_token , text_file ) -> str:
    repo_name = F"""repo_txt_data-{int(time.time() * 10E3 )}"""
    repo_id = F"""{CI_HUB_USER}/{repo_name}"""
    hf_api.create_repo(repo_id , token=hf_token , repo_type="""dataset""" , private=True )
    hf_api.upload_file(
        token=hf_token , path_or_fileobj=str(text_file ) , path_in_repo="""data/text_data.txt""" , repo_id=repo_id , repo_type="""dataset""" , )
    yield repo_id
    try:
        hf_api.delete_repo(repo_id , token=hf_token , repo_type="""dataset""" )
    except (requests.exceptions.HTTPError, ValueError):  # catch http error and token invalid error
        pass
@pytest.fixture()
def hf_private_dataset_repo_txt_data( hf_private_dataset_repo_txt_data_ , ci_hub_config , ci_hfh_hf_hub_url ) -> str:
    return hf_private_dataset_repo_txt_data_
@pytest.fixture(scope="""session""" )
def hf_private_dataset_repo_zipped_txt_data_( hf_api , hf_token , zip_csv_with_dir_path ) -> str:
    repo_name = F"""repo_zipped_txt_data-{int(time.time() * 10E3 )}"""
    repo_id = F"""{CI_HUB_USER}/{repo_name}"""
    hf_api.create_repo(repo_id , token=hf_token , repo_type="""dataset""" , private=True )
    hf_api.upload_file(
        token=hf_token , path_or_fileobj=str(zip_csv_with_dir_path ) , path_in_repo="""data.zip""" , repo_id=repo_id , repo_type="""dataset""" , )
    yield repo_id
    try:
        hf_api.delete_repo(repo_id , token=hf_token , repo_type="""dataset""" )
    except (requests.exceptions.HTTPError, ValueError):  # catch http error and token invalid error
        pass
@pytest.fixture()
def hf_private_dataset_repo_zipped_txt_data( hf_private_dataset_repo_zipped_txt_data_ , ci_hub_config , ci_hfh_hf_hub_url ) -> str:
    return hf_private_dataset_repo_zipped_txt_data_
@pytest.fixture(scope="""session""" )
def hf_private_dataset_repo_zipped_img_data_( hf_api , hf_token , zip_image_path ) -> str:
    repo_name = F"""repo_zipped_img_data-{int(time.time() * 10E3 )}"""
    repo_id = F"""{CI_HUB_USER}/{repo_name}"""
    hf_api.create_repo(repo_id , token=hf_token , repo_type="""dataset""" , private=True )
    hf_api.upload_file(
        token=hf_token , path_or_fileobj=str(zip_image_path ) , path_in_repo="""data.zip""" , repo_id=repo_id , repo_type="""dataset""" , )
    yield repo_id
    try:
        hf_api.delete_repo(repo_id , token=hf_token , repo_type="""dataset""" )
    except (requests.exceptions.HTTPError, ValueError):  # catch http error and token invalid error
        pass
@pytest.fixture()
def hf_private_dataset_repo_zipped_img_data( hf_private_dataset_repo_zipped_img_data_ , ci_hub_config , ci_hfh_hf_hub_url ) -> str:
    return hf_private_dataset_repo_zipped_img_data_
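
# Example consumer (illustrative; assumes pytest collects this file so the
# fixtures above are injected by name). The session fixture handles creation
# and teardown of the private repo, so a test only reads from it.
def test_private_txt_repo_id_format( hf_private_dataset_repo_txt_data ):
    # repo ids produced above look like "<CI_HUB_USER>/repo_txt_data-<timestamp>"
    assert hf_private_dataset_repo_txt_data.startswith(f"{CI_HUB_USER}/repo_txt_data-" )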
| 166 | 0 |
from collections import OrderedDict
from typing import TYPE_CHECKING, Any, List, Mapping, Optional
from packaging import version
if TYPE_CHECKING:
from ... import PreTrainedTokenizer, TensorType
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfigWithPast, PatchingSpec
from ...utils import is_torch_available, logging
_A = logging.get_logger(__name__)
_A = {
'''bigscience/bloom''': '''https://huggingface.co/bigscience/bloom/resolve/main/config.json''',
'''bigscience/bloom-560m''': '''https://huggingface.co/bigscience/bloom-560m/blob/main/config.json''',
'''bigscience/bloom-1b1''': '''https://huggingface.co/bigscience/bloom-1b1/blob/main/config.json''',
'''bigscience/bloom-1b7''': '''https://huggingface.co/bigscience/bloom-1b7/blob/main/config.json''',
'''bigscience/bloom-3b''': '''https://huggingface.co/bigscience/bloom-3b/blob/main/config.json''',
'''bigscience/bloom-7b1''': '''https://huggingface.co/bigscience/bloom-7b1/blob/main/config.json''',
}
class BloomConfig( PretrainedConfig ):
    model_type = '''bloom'''
    keys_to_ignore_at_inference = ['''past_key_values''']
    attribute_map = {
        '''num_hidden_layers''': '''n_layer''',
        '''num_attention_heads''': '''n_head''',
    }
    def __init__( self, vocab_size=25_0880, hidden_size=64, n_layer=2, n_head=8, layer_norm_epsilon=1E-5, initializer_range=0.02, use_cache=True, bos_token_id=1, eos_token_id=2, apply_residual_connection_post_layernorm=False, hidden_dropout=0.0, attention_dropout=0.0, pretraining_tp=1, slow_but_exact=False, **kwargs, ):
        self.vocab_size = vocab_size
        # Backward compatibility with n_embed kwarg
        n_embed = kwargs.pop('''n_embed''', None )
        self.hidden_size = hidden_size if n_embed is None else n_embed
        self.n_layer = n_layer
        self.n_head = n_head
        self.layer_norm_epsilon = layer_norm_epsilon
        self.initializer_range = initializer_range
        self.use_cache = use_cache
        self.pretraining_tp = pretraining_tp
        self.apply_residual_connection_post_layernorm = apply_residual_connection_post_layernorm
        self.hidden_dropout = hidden_dropout
        self.attention_dropout = attention_dropout
        self.bos_token_id = bos_token_id
        self.eos_token_id = eos_token_id
        self.slow_but_exact = slow_but_exact
        super().__init__(bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs )
class BloomOnnxConfig( OnnxConfigWithPast ):
    torch_onnx_minimum_version = version.parse('''1.12''' )

    def __init__( self, config, task = '''default''', patching_specs = None, use_past = False, ):
        super().__init__(config, task=task, patching_specs=patching_specs, use_past=use_past )
        if not getattr(self._config, '''pad_token_id''', None ):
            # TODO: how to do that better?
            self._config.pad_token_id = 0

    @property
    def inputs( self ) -> Mapping[str, Mapping[int, str]]:
        common_inputs = OrderedDict({'''input_ids''': {0: '''batch''', 1: '''sequence'''}} )
        if self.use_past:
            # BLOOM stores values on dynamic axis 2. For more details see: https://github.com/huggingface/transformers/pull/18344
            self.fill_with_past_key_values_(common_inputs, direction='''inputs''', inverted_values_shape=True )
            common_inputs['''attention_mask'''] = {0: '''batch''', 1: '''past_sequence + sequence'''}
        else:
            common_inputs['''attention_mask'''] = {0: '''batch''', 1: '''sequence'''}
        return common_inputs

    @property
    def num_layers( self ) -> int:
        return self._config.n_layer

    @property
    def num_attention_heads( self ) -> int:
        return self._config.n_head

    @property
    def atol_for_validation( self ) -> float:
        return 1E-3

    def generate_dummy_inputs( self, tokenizer, batch_size = -1, seq_length = -1, is_pair = False, framework = None, ) -> Mapping[str, Any]:
        common_inputs = super(OnnxConfigWithPast, self ).generate_dummy_inputs(
            tokenizer, batch_size=batch_size, seq_length=seq_length, is_pair=is_pair, framework=framework )
        # We need to order the inputs in the way they appear in the forward()
        ordered_inputs = OrderedDict({'''input_ids''': common_inputs['''input_ids''']} )
        # Need to add the past_keys
        if self.use_past:
            if not is_torch_available():
                raise ValueError('''Cannot generate dummy past_keys inputs without PyTorch installed.''' )
            else:
                import torch
                batch , seqlen = common_inputs['''input_ids'''].shape
                # Not using the same length for past_key_values
                past_key_values_length = seqlen + 2
                head_dim = self._config.hidden_size // self.num_attention_heads
                past_key_shape = (
                    batch * self.num_attention_heads,
                    head_dim,
                    past_key_values_length,
                )
                past_value_shape = (
                    batch * self.num_attention_heads,
                    past_key_values_length,
                    head_dim,
                )
                ordered_inputs['''past_key_values'''] = [
                    (torch.zeros(past_key_shape ), torch.zeros(past_value_shape )) for _ in range(self.num_layers )
                ]
        ordered_inputs['''attention_mask'''] = common_inputs['''attention_mask''']
        if self.use_past:
            mask_dtype = ordered_inputs['''attention_mask'''].dtype
            ordered_inputs['''attention_mask'''] = torch.cat(
                [ordered_inputs['''attention_mask'''], torch.ones(batch, past_key_values_length, dtype=mask_dtype )], dim=1 )
        return ordered_inputs

    @property
    def default_onnx_opset( self ) -> int:
        return 13
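
# Illustrative sketch (assumption: this config mirrors the public
# `transformers.BloomConfig`; nothing below is defined in this file). It
# exercises `attribute_map`: the stored `n_layer` / `n_head` values surface as
# `num_hidden_layers` / `num_attention_heads`.
if __name__ == "__main__":
    from transformers import BloomConfig as HFBloomConfig

    cfg = HFBloomConfig(n_layer=2, n_head=4, hidden_size=64 )
    print(cfg.num_hidden_layers, cfg.num_attention_heads )  # -> 2 4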
| 278 |
def solution( n = 1000000 ):
    """Project Euler #14: the starting number below `n` with the longest Collatz chain."""
    largest_number = 1
    pre_counter = 1
    counters = {1: 1}  # memoised chain lengths
    for inputa in range(2 , n ):
        counter = 0
        number = inputa
        while True:
            if number in counters:
                counter += counters[number]
                break
            if number % 2 == 0:
                number //= 2
                counter += 1
            else:
                number = (3 * number) + 1
                counter += 1
        if inputa not in counters:
            counters[inputa] = counter
        if counter > pre_counter:
            largest_number = inputa
            pre_counter = counter
    return largest_number
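
# Illustrative check: among starting values below 10, the longest Collatz chain
# (20 terms) begins at 9, so `solution(10)` should return 9. The classic
# Project Euler #14 answer for the default bound of one million is 837799.
assert solution(10 ) == 9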
if __name__ == "__main__":
print(solution(int(input().strip())))
| 278 | 1 |
import inspect
import unittest
from transformers import ViTHybridConfig
from transformers.testing_utils import require_accelerate, require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, _config_zero_init, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import ViTHybridForImageClassification, ViTHybridImageProcessor, ViTHybridModel
from transformers.models.vit_hybrid.modeling_vit_hybrid import VIT_HYBRID_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
class _a :
def __init__( self : List[Any] , _SCREAMING_SNAKE_CASE : List[Any] , _SCREAMING_SNAKE_CASE : Union[str, Any]=13 , _SCREAMING_SNAKE_CASE : List[str]=64 , _SCREAMING_SNAKE_CASE : int=2 , _SCREAMING_SNAKE_CASE : List[str]=3 , _SCREAMING_SNAKE_CASE : Optional[Any]=True , _SCREAMING_SNAKE_CASE : Union[str, Any]=True , _SCREAMING_SNAKE_CASE : List[str]=32 , _SCREAMING_SNAKE_CASE : List[Any]=5 , _SCREAMING_SNAKE_CASE : Optional[Any]=4 , _SCREAMING_SNAKE_CASE : Optional[Any]=37 , _SCREAMING_SNAKE_CASE : Optional[int]="gelu" , _SCREAMING_SNAKE_CASE : List[Any]=0.1 , _SCREAMING_SNAKE_CASE : Optional[int]=0.1 , _SCREAMING_SNAKE_CASE : str=10 , _SCREAMING_SNAKE_CASE : Union[str, Any]=0.02 , _SCREAMING_SNAKE_CASE : Union[str, Any]=[1, 16, 4, 4] , _SCREAMING_SNAKE_CASE : Optional[Any]=None , )-> Any:
lowerCAmelCase__ : int = parent
lowerCAmelCase__ : str = batch_size
lowerCAmelCase__ : List[str] = image_size
lowerCAmelCase__ : List[Any] = patch_size
lowerCAmelCase__ : str = num_channels
lowerCAmelCase__ : Any = is_training
lowerCAmelCase__ : Dict = use_labels
lowerCAmelCase__ : Dict = hidden_size
lowerCAmelCase__ : List[Any] = num_hidden_layers
lowerCAmelCase__ : Tuple = num_attention_heads
lowerCAmelCase__ : Union[str, Any] = intermediate_size
lowerCAmelCase__ : Union[str, Any] = hidden_act
lowerCAmelCase__ : str = hidden_dropout_prob
lowerCAmelCase__ : Optional[int] = attention_probs_dropout_prob
lowerCAmelCase__ : Any = type_sequence_label_size
lowerCAmelCase__ : Union[str, Any] = initializer_range
lowerCAmelCase__ : Dict = scope
lowerCAmelCase__ : int = backbone_featmap_shape
# in ViT hybrid, the seq length equals the number of patches + 1 (we add 1 for the [CLS] token)
# the number of patches is based on the feature map of the backbone, which by default uses an output stride
# of 32, which means that the feature map has a spatial resolution of 1/32 of the input image size
lowerCAmelCase__ : Optional[int] = (self.image_size // 32) ** 2
lowerCAmelCase__ : Any = num_patches + 1
def UpperCAmelCase__( self : Optional[Any] )-> List[Any]:
lowerCAmelCase__ : Tuple = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
lowerCAmelCase__ : List[str] = None
if self.use_labels:
lowerCAmelCase__ : Union[str, Any] = ids_tensor([self.batch_size] , self.type_sequence_label_size )
lowerCAmelCase__ : Tuple = self.get_config()
return config, pixel_values, labels
def UpperCAmelCase__( self : Tuple )-> Union[str, Any]:
lowerCAmelCase__ : Any = {
'''global_padding''': '''same''',
'''layer_type''': '''bottleneck''',
'''depths''': [3, 4, 9],
'''out_features''': ['''stage1''', '''stage2''', '''stage3'''],
'''embedding_dynamic_padding''': True,
'''hidden_sizes''': [4, 8, 16, 32],
'''num_groups''': 2,
}
return ViTHybridConfig(
image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=_SCREAMING_SNAKE_CASE , initializer_range=self.initializer_range , backbone_featmap_shape=self.backbone_featmap_shape , backbone_config=_SCREAMING_SNAKE_CASE , )
def UpperCAmelCase__( self : List[Any] , _SCREAMING_SNAKE_CASE : Union[str, Any] , _SCREAMING_SNAKE_CASE : Tuple , _SCREAMING_SNAKE_CASE : Optional[int] )-> Optional[Any]:
lowerCAmelCase__ : Any = ViTHybridModel(config=_SCREAMING_SNAKE_CASE )
model.to(_SCREAMING_SNAKE_CASE )
model.eval()
lowerCAmelCase__ : List[str] = model(_SCREAMING_SNAKE_CASE )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def UpperCAmelCase__( self : int , _SCREAMING_SNAKE_CASE : List[Any] , _SCREAMING_SNAKE_CASE : Dict , _SCREAMING_SNAKE_CASE : int )-> Union[str, Any]:
lowerCAmelCase__ : Optional[Any] = self.type_sequence_label_size
lowerCAmelCase__ : Optional[Any] = ViTHybridForImageClassification(_SCREAMING_SNAKE_CASE )
model.to(_SCREAMING_SNAKE_CASE )
model.eval()
lowerCAmelCase__ : Optional[Any] = model(_SCREAMING_SNAKE_CASE , labels=_SCREAMING_SNAKE_CASE )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
def UpperCAmelCase__( self : List[Any] )-> Tuple:
        config_and_inputs = self.prepare_config_and_inputs()
        config , pixel_values , labels = config_and_inputs
        inputs_dict = {'''pixel_values''': pixel_values}
return config, inputs_dict
@require_torch
class _a ( _lowercase , _lowercase , unittest.TestCase):
_a : Dict = (ViTHybridModel, ViTHybridForImageClassification) if is_torch_available() else ()
_a : Tuple = (
{'''feature-extraction''': ViTHybridModel, '''image-classification''': ViTHybridForImageClassification}
if is_torch_available()
else {}
)
_a : Dict = False
_a : Optional[int] = False
_a : Optional[int] = False
def UpperCAmelCase__( self : Dict )-> Any:
lowerCAmelCase__ : Optional[Any] = ViTHybridModelTester(self )
lowerCAmelCase__ : List[str] = ConfigTester(self , config_class=_SCREAMING_SNAKE_CASE , has_text_modality=_SCREAMING_SNAKE_CASE , hidden_size=37 )
def UpperCAmelCase__( self : Optional[int] )-> List[Any]:
self.config_tester.run_common_tests()
@unittest.skip(reason='''ViT does not use inputs_embeds''' )
def UpperCAmelCase__( self : int )-> int:
pass
def UpperCAmelCase__( self : Optional[int] )-> int:
lowerCAmelCase__ , lowerCAmelCase__ : Tuple = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
lowerCAmelCase__ : List[str] = model_class(_SCREAMING_SNAKE_CASE )
self.assertIsInstance(model.get_input_embeddings() , (nn.Module) )
lowerCAmelCase__ : Optional[int] = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(_SCREAMING_SNAKE_CASE , nn.Linear ) )
def UpperCAmelCase__( self : int )-> Optional[int]:
lowerCAmelCase__ , lowerCAmelCase__ : Dict = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
lowerCAmelCase__ : Optional[Any] = model_class(_SCREAMING_SNAKE_CASE )
lowerCAmelCase__ : Union[str, Any] = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
lowerCAmelCase__ : List[str] = [*signature.parameters.keys()]
lowerCAmelCase__ : List[Any] = ['''pixel_values''']
self.assertListEqual(arg_names[:1] , _SCREAMING_SNAKE_CASE )
def UpperCAmelCase__( self : Optional[Any] )-> Tuple:
lowerCAmelCase__ : Optional[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*_SCREAMING_SNAKE_CASE )
def UpperCAmelCase__( self : Dict )-> int:
lowerCAmelCase__ : List[str] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*_SCREAMING_SNAKE_CASE )
def UpperCAmelCase__( self : Tuple )-> Optional[int]:
lowerCAmelCase__ , lowerCAmelCase__ : List[str] = self.model_tester.prepare_config_and_inputs_for_common()
lowerCAmelCase__ : Dict = _config_zero_init(_SCREAMING_SNAKE_CASE )
for model_class in self.all_model_classes:
lowerCAmelCase__ : Dict = model_class(config=_SCREAMING_SNAKE_CASE )
# Skip the check for the backbone
for name, module in model.named_modules():
if module.__class__.__name__ == "ViTHybridPatchEmbeddings":
lowerCAmelCase__ : str = [F'{name}.{key}' for key in module.state_dict().keys()]
break
for name, param in model.named_parameters():
if param.requires_grad:
if name in backbone_params:
continue
self.assertIn(
((param.data.mean() * 1E9).round() / 1E9).item() , [0.0, 1.0] , msg=F'Parameter {name} of model {model_class} seems not properly initialized' , )
@slow
def UpperCAmelCase__( self : List[str] )-> str:
for model_name in VIT_HYBRID_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
lowerCAmelCase__ : Tuple = ViTHybridModel.from_pretrained(_SCREAMING_SNAKE_CASE )
self.assertIsNotNone(_SCREAMING_SNAKE_CASE )
def lowerCamelCase_ ( ):
"""simple docstring"""
lowerCAmelCase__ : Any = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' )
return image
@require_torch
@require_vision
class _a ( unittest.TestCase):
@cached_property
def UpperCAmelCase__( self : Union[str, Any] )-> Dict:
return (
ViTHybridImageProcessor.from_pretrained(VIT_HYBRID_PRETRAINED_MODEL_ARCHIVE_LIST[0] )
if is_vision_available()
else None
)
@slow
def UpperCAmelCase__( self : List[str] )-> Any:
lowerCAmelCase__ : List[Any] = ViTHybridForImageClassification.from_pretrained(VIT_HYBRID_PRETRAINED_MODEL_ARCHIVE_LIST[0] ).to(
_SCREAMING_SNAKE_CASE )
lowerCAmelCase__ : Tuple = self.default_image_processor
lowerCAmelCase__ : Optional[int] = prepare_img()
lowerCAmelCase__ : Union[str, Any] = image_processor(images=_SCREAMING_SNAKE_CASE , return_tensors='''pt''' ).to(_SCREAMING_SNAKE_CASE )
# forward pass
with torch.no_grad():
lowerCAmelCase__ : int = model(**_SCREAMING_SNAKE_CASE )
# verify the logits
lowerCAmelCase__ : Dict = torch.Size((1, 1000) )
self.assertEqual(outputs.logits.shape , _SCREAMING_SNAKE_CASE )
lowerCAmelCase__ : Tuple = torch.tensor([-1.9090, -0.4993, -0.2389] ).to(_SCREAMING_SNAKE_CASE )
self.assertTrue(torch.allclose(outputs.logits[0, :3] , _SCREAMING_SNAKE_CASE , atol=1E-4 ) )
@slow
@require_accelerate
def UpperCAmelCase__( self : str )-> Tuple:
        image_processor = ViTHybridImageProcessor.from_pretrained('''google/vit-hybrid-base-bit-384''' )
        model = ViTHybridForImageClassification.from_pretrained('''google/vit-hybrid-base-bit-384''' , device_map='''auto''' )
        image = prepare_img()
        inputs = image_processor(images=image , return_tensors='''pt''' )
        outputs = model(**inputs )
        logits = outputs.logits
        # model predicts one of the 1000 ImageNet classes
        predicted_class_idx = logits.argmax(-1 ).item()
        self.assertEqual(model.config.id2label[predicted_class_idx] , '''tabby, tabby cat''' )
| 211 |
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_funnel import FunnelTokenizer
lowerCamelCase = logging.get_logger(__name__)
lowerCamelCase = {'''vocab_file''': '''vocab.txt''', '''tokenizer_file''': '''tokenizer.json'''}
lowerCamelCase = [
'''small''',
'''small-base''',
'''medium''',
'''medium-base''',
'''intermediate''',
'''intermediate-base''',
'''large''',
'''large-base''',
'''xlarge''',
'''xlarge-base''',
]
lowerCamelCase = {
'''vocab_file''': {
'''funnel-transformer/small''': '''https://huggingface.co/funnel-transformer/small/resolve/main/vocab.txt''',
'''funnel-transformer/small-base''': '''https://huggingface.co/funnel-transformer/small-base/resolve/main/vocab.txt''',
'''funnel-transformer/medium''': '''https://huggingface.co/funnel-transformer/medium/resolve/main/vocab.txt''',
'''funnel-transformer/medium-base''': (
'''https://huggingface.co/funnel-transformer/medium-base/resolve/main/vocab.txt'''
),
'''funnel-transformer/intermediate''': (
'''https://huggingface.co/funnel-transformer/intermediate/resolve/main/vocab.txt'''
),
'''funnel-transformer/intermediate-base''': (
'''https://huggingface.co/funnel-transformer/intermediate-base/resolve/main/vocab.txt'''
),
'''funnel-transformer/large''': '''https://huggingface.co/funnel-transformer/large/resolve/main/vocab.txt''',
'''funnel-transformer/large-base''': '''https://huggingface.co/funnel-transformer/large-base/resolve/main/vocab.txt''',
'''funnel-transformer/xlarge''': '''https://huggingface.co/funnel-transformer/xlarge/resolve/main/vocab.txt''',
'''funnel-transformer/xlarge-base''': (
'''https://huggingface.co/funnel-transformer/xlarge-base/resolve/main/vocab.txt'''
),
},
'''tokenizer_file''': {
'''funnel-transformer/small''': '''https://huggingface.co/funnel-transformer/small/resolve/main/tokenizer.json''',
'''funnel-transformer/small-base''': (
'''https://huggingface.co/funnel-transformer/small-base/resolve/main/tokenizer.json'''
),
'''funnel-transformer/medium''': '''https://huggingface.co/funnel-transformer/medium/resolve/main/tokenizer.json''',
'''funnel-transformer/medium-base''': (
'''https://huggingface.co/funnel-transformer/medium-base/resolve/main/tokenizer.json'''
),
'''funnel-transformer/intermediate''': (
'''https://huggingface.co/funnel-transformer/intermediate/resolve/main/tokenizer.json'''
),
'''funnel-transformer/intermediate-base''': (
'''https://huggingface.co/funnel-transformer/intermediate-base/resolve/main/tokenizer.json'''
),
'''funnel-transformer/large''': '''https://huggingface.co/funnel-transformer/large/resolve/main/tokenizer.json''',
'''funnel-transformer/large-base''': (
'''https://huggingface.co/funnel-transformer/large-base/resolve/main/tokenizer.json'''
),
'''funnel-transformer/xlarge''': '''https://huggingface.co/funnel-transformer/xlarge/resolve/main/tokenizer.json''',
'''funnel-transformer/xlarge-base''': (
'''https://huggingface.co/funnel-transformer/xlarge-base/resolve/main/tokenizer.json'''
),
},
}
lowerCamelCase = {f'''funnel-transformer/{name}''': 512 for name in _model_names}
lowerCamelCase = {f'''funnel-transformer/{name}''': {'''do_lower_case''': True} for name in _model_names}
class FunnelTokenizerFast( PreTrainedTokenizerFast ):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    pretrained_init_configuration = PRETRAINED_INIT_CONFIGURATION
    slow_tokenizer_class = FunnelTokenizer
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    cls_token_type_id : int = 2

    def __init__( self , vocab_file=None , tokenizer_file=None , do_lower_case=True , unk_token="<unk>" , sep_token="<sep>" , pad_token="<pad>" , cls_token="<cls>" , mask_token="<mask>" , bos_token="<s>" , eos_token="</s>" , clean_text=True , tokenize_chinese_chars=True , strip_accents=None , wordpieces_prefix="##" , **kwargs , ) -> None:
        super().__init__(
            vocab_file , tokenizer_file=tokenizer_file , do_lower_case=do_lower_case , unk_token=unk_token , sep_token=sep_token , pad_token=pad_token , cls_token=cls_token , mask_token=mask_token , bos_token=bos_token , eos_token=eos_token , clean_text=clean_text , tokenize_chinese_chars=tokenize_chinese_chars , strip_accents=strip_accents , wordpieces_prefix=wordpieces_prefix , **kwargs , )
        normalizer_state = json.loads(self.backend_tokenizer.normalizer.__getstate__() )
        if (
            normalizer_state.get('''lowercase''' , do_lower_case ) != do_lower_case
            or normalizer_state.get('''strip_accents''' , strip_accents ) != strip_accents
            or normalizer_state.get('''handle_chinese_chars''' , tokenize_chinese_chars ) != tokenize_chinese_chars
        ):
            normalizer_class = getattr(normalizers , normalizer_state.pop('''type''' ) )
            normalizer_state['''lowercase'''] = do_lower_case
            normalizer_state['''strip_accents'''] = strip_accents
            normalizer_state['''handle_chinese_chars'''] = tokenize_chinese_chars
            self.backend_tokenizer.normalizer = normalizer_class(**normalizer_state )
        self.do_lower_case = do_lower_case

    def build_inputs_with_special_tokens( self , token_ids_0 , token_ids_1=None ):
        output = [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
        if token_ids_1:
            output += token_ids_1 + [self.sep_token_id]
        return output

    def create_token_type_ids_from_sequences( self , token_ids_0 : List[int] , token_ids_1 : Optional[List[int]] = None ) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls ) * [self.cls_token_type_id] + len(token_ids_0 + sep ) * [0]
        return len(cls ) * [self.cls_token_type_id] + len(token_ids_0 + sep ) * [0] + len(token_ids_1 + sep ) * [1]

    def save_vocabulary( self , save_directory : str , filename_prefix : Optional[str] = None ) -> Tuple[str]:
        files = self._tokenizer.model.save(save_directory , name=filename_prefix )
        return tuple(files )
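
# Minimal usage sketch (assumption: this class is shipped as
# `transformers.FunnelTokenizerFast`; "funnel-transformer/small" is the standard
# public checkpoint). Note the token type ids: the <cls> token gets type
# `cls_token_type_id` (2 here), unlike BERT-style tokenizers.
if __name__ == "__main__":
    from transformers import FunnelTokenizerFast

    funnel_tok = FunnelTokenizerFast.from_pretrained("funnel-transformer/small" )
    out = funnel_tok("first segment" , "second segment" )
    print(out["token_type_ids"] )  # starts with 2 for <cls>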
| 211 | 1 |
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
UpperCamelCase__ = logging.get_logger(__name__)
UpperCamelCase__ = {
'facebook/data2vec-vision-base-ft': (
'https://huggingface.co/facebook/data2vec-vision-base-ft/resolve/main/config.json'
),
}
class Data2VecVisionConfig( PretrainedConfig ):
    model_type = "data2vec-vision"

    def __init__(self , hidden_size=7_6_8 , num_hidden_layers=1_2 , num_attention_heads=1_2 , intermediate_size=3_0_7_2 , hidden_act="gelu" , hidden_dropout_prob=0.0 , attention_probs_dropout_prob=0.0 , initializer_range=0.02 , layer_norm_eps=1E-12 , image_size=2_2_4 , patch_size=1_6 , num_channels=3 , use_mask_token=False , use_absolute_position_embeddings=False , use_relative_position_bias=False , use_shared_relative_position_bias=False , layer_scale_init_value=0.1 , drop_path_rate=0.1 , use_mean_pooling=True , out_indices=[3, 5, 7, 1_1] , pool_scales=[1, 2, 3, 6] , use_auxiliary_head=True , auxiliary_loss_weight=0.4 , auxiliary_channels=2_5_6 , auxiliary_num_convs=1 , auxiliary_concat_input=False , semantic_loss_ignore_index=2_5_5 , **kwargs , ) -> None:
        super().__init__(**kwargs )
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.use_mask_token = use_mask_token
        self.use_absolute_position_embeddings = use_absolute_position_embeddings
        self.use_relative_position_bias = use_relative_position_bias
        self.use_shared_relative_position_bias = use_shared_relative_position_bias
        self.layer_scale_init_value = layer_scale_init_value
        self.drop_path_rate = drop_path_rate
        self.use_mean_pooling = use_mean_pooling
        # decode head attributes (semantic segmentation)
        self.out_indices = out_indices
        self.pool_scales = pool_scales
        # auxiliary head attributes (semantic segmentation)
        self.use_auxiliary_head = use_auxiliary_head
        self.auxiliary_loss_weight = auxiliary_loss_weight
        self.auxiliary_channels = auxiliary_channels
        self.auxiliary_num_convs = auxiliary_num_convs
        self.auxiliary_concat_input = auxiliary_concat_input
        self.semantic_loss_ignore_index = semantic_loss_ignore_index


class Data2VecVisionOnnxConfig( OnnxConfig ):
    torch_onnx_minimum_version = version.parse('1.11' )

    @property
    def inputs(self ) -> Mapping[str, Mapping[int, str]]:
        return OrderedDict(
            [
                ("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}),
            ] )

    @property
    def atol_for_validation(self ) -> float:
        return 1E-4
| 65 |
"""simple docstring"""
import os
import tempfile
import unittest
from pathlib import Path
from transformers import AutoConfig, is_torch_available
from transformers.testing_utils import require_torch, torch_device
if is_torch_available():
from transformers import PyTorchBenchmark, PyTorchBenchmarkArguments
@require_torch
class _UpperCAmelCase ( unittest.TestCase ):
def a ( self : Dict , _lowercase : Union[str, Any] ):
for model_result in results.values():
for batch_size, sequence_length in zip(model_result['''bs'''] , model_result['''ss'''] ):
            result = model_result['''result'''][batch_size][sequence_length]
            self.assertIsNotNone(result )
def a ( self : str ):
__UpperCAmelCase = '''sshleifer/tiny-gpt2'''
__UpperCAmelCase = PyTorchBenchmarkArguments(
models=[MODEL_ID] , training=_lowercase , inference=_lowercase , sequence_lengths=[8] , batch_sizes=[1] , multi_process=_lowercase , )
__UpperCAmelCase = PyTorchBenchmark(_lowercase )
__UpperCAmelCase = benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result )
self.check_results_dict_not_empty(results.memory_inference_result )
def a ( self : List[str] ):
__UpperCAmelCase = '''sgugger/tiny-distilbert-classification'''
__UpperCAmelCase = PyTorchBenchmarkArguments(
models=[MODEL_ID] , training=_lowercase , inference=_lowercase , sequence_lengths=[8] , batch_sizes=[1] , multi_process=_lowercase , only_pretrain_model=_lowercase , )
__UpperCAmelCase = PyTorchBenchmark(_lowercase )
__UpperCAmelCase = benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result )
self.check_results_dict_not_empty(results.memory_inference_result )
def a ( self : str ):
__UpperCAmelCase = '''sshleifer/tiny-gpt2'''
__UpperCAmelCase = PyTorchBenchmarkArguments(
models=[MODEL_ID] , training=_lowercase , inference=_lowercase , torchscript=_lowercase , sequence_lengths=[8] , batch_sizes=[1] , multi_process=_lowercase , )
__UpperCAmelCase = PyTorchBenchmark(_lowercase )
__UpperCAmelCase = benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result )
self.check_results_dict_not_empty(results.memory_inference_result )
@unittest.skipIf(torch_device == '''cpu''' , '''Cant do half precision''' )
def a ( self : Optional[Any] ):
__UpperCAmelCase = '''sshleifer/tiny-gpt2'''
__UpperCAmelCase = PyTorchBenchmarkArguments(
models=[MODEL_ID] , training=_lowercase , inference=_lowercase , fpaa=_lowercase , sequence_lengths=[8] , batch_sizes=[1] , multi_process=_lowercase , )
__UpperCAmelCase = PyTorchBenchmark(_lowercase )
__UpperCAmelCase = benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result )
self.check_results_dict_not_empty(results.memory_inference_result )
def a ( self : int ):
__UpperCAmelCase = '''sshleifer/tiny-gpt2'''
__UpperCAmelCase = AutoConfig.from_pretrained(_lowercase )
# set architectures equal to `None`
__UpperCAmelCase = None
__UpperCAmelCase = PyTorchBenchmarkArguments(
models=[MODEL_ID] , training=_lowercase , inference=_lowercase , sequence_lengths=[8] , batch_sizes=[1] , multi_process=_lowercase , )
__UpperCAmelCase = PyTorchBenchmark(_lowercase , configs=[config] )
__UpperCAmelCase = benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result )
self.check_results_dict_not_empty(results.memory_inference_result )
def a ( self : Tuple ):
__UpperCAmelCase = '''sshleifer/tiny-gpt2'''
__UpperCAmelCase = PyTorchBenchmarkArguments(
models=[MODEL_ID] , training=_lowercase , inference=_lowercase , sequence_lengths=[8] , batch_sizes=[1] , multi_process=_lowercase , )
__UpperCAmelCase = PyTorchBenchmark(_lowercase )
__UpperCAmelCase = benchmark.run()
self.check_results_dict_not_empty(results.time_train_result )
self.check_results_dict_not_empty(results.memory_train_result )
@unittest.skipIf(torch_device == '''cpu''' , '''Can\'t do half precision''' )
def a ( self : Optional[Any] ):
__UpperCAmelCase = '''sshleifer/tiny-gpt2'''
__UpperCAmelCase = PyTorchBenchmarkArguments(
models=[MODEL_ID] , training=_lowercase , inference=_lowercase , sequence_lengths=[8] , batch_sizes=[1] , fpaa=_lowercase , multi_process=_lowercase , )
__UpperCAmelCase = PyTorchBenchmark(_lowercase )
__UpperCAmelCase = benchmark.run()
self.check_results_dict_not_empty(results.time_train_result )
self.check_results_dict_not_empty(results.memory_train_result )
def a ( self : Any ):
__UpperCAmelCase = '''sshleifer/tiny-gpt2'''
__UpperCAmelCase = AutoConfig.from_pretrained(_lowercase )
__UpperCAmelCase = PyTorchBenchmarkArguments(
models=[MODEL_ID] , training=_lowercase , inference=_lowercase , sequence_lengths=[8] , batch_sizes=[1] , multi_process=_lowercase , )
__UpperCAmelCase = PyTorchBenchmark(_lowercase , configs=[config] )
__UpperCAmelCase = benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result )
self.check_results_dict_not_empty(results.memory_inference_result )
def a ( self : str ):
__UpperCAmelCase = '''sshleifer/tinier_bart'''
__UpperCAmelCase = AutoConfig.from_pretrained(_lowercase )
__UpperCAmelCase = PyTorchBenchmarkArguments(
models=[MODEL_ID] , training=_lowercase , inference=_lowercase , sequence_lengths=[8] , batch_sizes=[1] , multi_process=_lowercase , )
__UpperCAmelCase = PyTorchBenchmark(_lowercase , configs=[config] )
__UpperCAmelCase = benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result )
self.check_results_dict_not_empty(results.memory_inference_result )
def a ( self : Union[str, Any] ):
__UpperCAmelCase = '''sshleifer/tiny-gpt2'''
__UpperCAmelCase = AutoConfig.from_pretrained(_lowercase )
__UpperCAmelCase = PyTorchBenchmarkArguments(
models=[MODEL_ID] , training=_lowercase , inference=_lowercase , sequence_lengths=[8] , batch_sizes=[1] , multi_process=_lowercase , )
__UpperCAmelCase = PyTorchBenchmark(_lowercase , configs=[config] )
__UpperCAmelCase = benchmark.run()
self.check_results_dict_not_empty(results.time_train_result )
self.check_results_dict_not_empty(results.memory_train_result )
def a ( self : int ):
__UpperCAmelCase = '''sshleifer/tinier_bart'''
__UpperCAmelCase = AutoConfig.from_pretrained(_lowercase )
__UpperCAmelCase = PyTorchBenchmarkArguments(
models=[MODEL_ID] , training=_lowercase , inference=_lowercase , sequence_lengths=[8] , batch_sizes=[1] , multi_process=_lowercase , )
__UpperCAmelCase = PyTorchBenchmark(_lowercase , configs=[config] )
__UpperCAmelCase = benchmark.run()
self.check_results_dict_not_empty(results.time_train_result )
self.check_results_dict_not_empty(results.memory_train_result )
def a ( self : Optional[Any] ):
__UpperCAmelCase = '''sshleifer/tiny-gpt2'''
with tempfile.TemporaryDirectory() as tmp_dir:
__UpperCAmelCase = PyTorchBenchmarkArguments(
models=[MODEL_ID] , training=_lowercase , inference=_lowercase , save_to_csv=_lowercase , sequence_lengths=[8] , batch_sizes=[1] , inference_time_csv_file=os.path.join(_lowercase , '''inf_time.csv''' ) , train_memory_csv_file=os.path.join(_lowercase , '''train_mem.csv''' ) , inference_memory_csv_file=os.path.join(_lowercase , '''inf_mem.csv''' ) , train_time_csv_file=os.path.join(_lowercase , '''train_time.csv''' ) , env_info_csv_file=os.path.join(_lowercase , '''env.csv''' ) , multi_process=_lowercase , )
__UpperCAmelCase = PyTorchBenchmark(_lowercase )
benchmark.run()
self.assertTrue(Path(os.path.join(_lowercase , '''inf_time.csv''' ) ).exists() )
self.assertTrue(Path(os.path.join(_lowercase , '''train_time.csv''' ) ).exists() )
self.assertTrue(Path(os.path.join(_lowercase , '''inf_mem.csv''' ) ).exists() )
self.assertTrue(Path(os.path.join(_lowercase , '''train_mem.csv''' ) ).exists() )
self.assertTrue(Path(os.path.join(_lowercase , '''env.csv''' ) ).exists() )
def a ( self : List[Any] ):
__UpperCAmelCase = '''sshleifer/tiny-gpt2'''
def _check_summary_is_not_empty(_lowercase : str ):
self.assertTrue(hasattr(_lowercase , '''sequential''' ) )
self.assertTrue(hasattr(_lowercase , '''cumulative''' ) )
self.assertTrue(hasattr(_lowercase , '''current''' ) )
self.assertTrue(hasattr(_lowercase , '''total''' ) )
with tempfile.TemporaryDirectory() as tmp_dir:
__UpperCAmelCase = PyTorchBenchmarkArguments(
models=[MODEL_ID] , training=_lowercase , inference=_lowercase , sequence_lengths=[8] , batch_sizes=[1] , log_filename=os.path.join(_lowercase , '''log.txt''' ) , log_print=_lowercase , trace_memory_line_by_line=_lowercase , multi_process=_lowercase , )
__UpperCAmelCase = PyTorchBenchmark(_lowercase )
__UpperCAmelCase = benchmark.run()
_check_summary_is_not_empty(result.inference_summary )
_check_summary_is_not_empty(result.train_summary )
self.assertTrue(Path(os.path.join(_lowercase , '''log.txt''' ) ).exists() )
| 332 | 0 |
import math
from typing import List, Optional, Tuple, Union
import numpy as np
import torch
from ..configuration_utils import ConfigMixin, register_to_config
from .scheduling_utils import SchedulerMixin, SchedulerOutput
class IPNDMScheduler(SchedulerMixin, ConfigMixin):
    # Fourth-order Improved Pseudo Linear Multistep (iPNDM) scheduler.
    order = 1

    @register_to_config
    def __init__(self, num_train_timesteps: int = 1000, trained_betas: Optional[Union[np.ndarray, List[float]]] = None) -> None:
        # set `betas`, `alphas`, `timesteps`
        self.set_timesteps(num_train_timesteps)

        # standard deviation of the initial noise distribution
        self.init_noise_sigma = 1.0

        # For now we only support F-PNDM, i.e. the runge-kutta method
        # For more information on the algorithm please take a look at the paper: https://arxiv.org/pdf/2202.09778.pdf
        # mainly at formula (9), (12), (13) and the Algorithm 2.
        self.pndm_order = 4

        # running values
        self.ets = []

    def set_timesteps(self, num_inference_steps: int, device: Union[str, torch.device] = None) -> None:
        self.num_inference_steps = num_inference_steps
        steps = torch.linspace(1, 0, num_inference_steps + 1)[:-1]
        steps = torch.cat([steps, torch.tensor([0.0])])

        if self.config.trained_betas is not None:
            self.betas = torch.tensor(self.config.trained_betas, dtype=torch.float32)
        else:
            self.betas = torch.sin(steps * math.pi / 2) ** 2
        self.alphas = (1.0 - self.betas**2) ** 0.5

        timesteps = (torch.atan2(self.betas, self.alphas) / math.pi * 2)[:-1]
        self.timesteps = timesteps.to(device)

        self.ets = []

    def step(self, model_output: torch.FloatTensor, timestep: int, sample: torch.FloatTensor, return_dict: bool = True) -> Union[SchedulerOutput, Tuple]:
        if self.num_inference_steps is None:
            raise ValueError(
                "Number of inference steps is 'None', you need to run 'set_timesteps' after creating the scheduler"
            )

        timestep_index = (self.timesteps == timestep).nonzero().item()
        prev_timestep_index = timestep_index + 1

        ets = sample * self.betas[timestep_index] + model_output * self.alphas[timestep_index]
        self.ets.append(ets)

        # linear multistep coefficients of increasing order, depending on how much history is available
        if len(self.ets) == 1:
            ets = self.ets[-1]
        elif len(self.ets) == 2:
            ets = (3 * self.ets[-1] - self.ets[-2]) / 2
        elif len(self.ets) == 3:
            ets = (23 * self.ets[-1] - 16 * self.ets[-2] + 5 * self.ets[-3]) / 12
        else:
            ets = (1 / 24) * (55 * self.ets[-1] - 59 * self.ets[-2] + 37 * self.ets[-3] - 9 * self.ets[-4])

        prev_sample = self._get_prev_sample(sample, timestep_index, prev_timestep_index, ets)

        if not return_dict:
            return (prev_sample,)
        return SchedulerOutput(prev_sample=prev_sample)

    def scale_model_input(self, sample: torch.FloatTensor, *args, **kwargs) -> torch.FloatTensor:
        return sample

    def _get_prev_sample(self, sample, timestep_index, prev_timestep_index, ets):
        alpha = self.alphas[timestep_index]
        sigma = self.betas[timestep_index]

        next_alpha = self.alphas[prev_timestep_index]
        next_sigma = self.betas[prev_timestep_index]

        pred = (sample - sigma * ets) / max(alpha, 1e-8)
        prev_sample = next_alpha * pred + ets * next_sigma

        return prev_sample

    def __len__(self) -> int:
        return self.config.num_train_timesteps
| 370 |
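# Illustrative sampling loop for a scheduler with this interface. The `model`
# below is a stand-in for a real denoising network (not defined in the file
# above), and the sample shape is arbitrary.
import torch

model = lambda sample, t: torch.zeros_like(sample)  # hypothetical denoiser

scheduler = IPNDMScheduler(num_train_timesteps=1000)
scheduler.set_timesteps(num_inference_steps=50)

sample = torch.randn(1, 3, 32, 32) * scheduler.init_noise_sigma
for t in scheduler.timesteps:
    model_output = model(sample, t)
    # each step feeds the running `ets` history, so order increases over the
    # first four steps from Euler up to the 4th-order multistep update
    sample = scheduler.step(model_output, t, sample).prev_sample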
from torch import nn
def get_activation(act_fn: str) -> nn.Module:
    # Map an activation-function name to the corresponding ``torch.nn`` module.
    if act_fn in ["swish", "silu"]:
        return nn.SiLU()
    elif act_fn == "mish":
        return nn.Mish()
    elif act_fn == "gelu":
        return nn.GELU()
    else:
        raise ValueError(f"Unsupported activation function: {act_fn}")
| 34 | 0 |
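# Example of wiring the factory above into a model, so the non-linearity can
# be chosen from a config string (illustrative, not part of the original file).
import torch.nn as nn

act = get_activation("gelu")
mlp = nn.Sequential(nn.Linear(64, 128), act, nn.Linear(128, 64))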
from __future__ import annotations
from typing import Generic, TypeVar
T = TypeVar("T")


class DisjointSetTreeNode(Generic[T]):
    # Disjoint Set node storing the data, parent pointer and rank
    def __init__(self, data: T) -> None:
        self.data = data
        self.parent = self
        self.rank = 0


class DisjointSetTree(Generic[T]):
    # Disjoint Set data structure with union by rank and path compression
    def __init__(self) -> None:
        # map from node name to the node object
        self.map: dict[T, DisjointSetTreeNode[T]] = {}

    def make_set(self, data: T) -> None:
        # create a new set with x as its member
        self.map[data] = DisjointSetTreeNode(data)

    def find_set(self, data: T) -> DisjointSetTreeNode[T]:
        # find the set x belongs to (with path-compression)
        elem_ref = self.map[data]
        if elem_ref != elem_ref.parent:
            elem_ref.parent = self.find_set(elem_ref.parent.data)
        return elem_ref.parent

    def link(self, node1: DisjointSetTreeNode[T], node2: DisjointSetTreeNode[T]) -> None:
        # helper function for union operation: attach the lower-rank tree under the higher-rank root
        if node1.rank > node2.rank:
            node2.parent = node1
        else:
            node1.parent = node2
            if node1.rank == node2.rank:
                node2.rank += 1

    def union(self, data1: T, data2: T) -> None:
        # merge 2 disjoint sets
        self.link(self.find_set(data1), self.find_set(data2))


class GraphUndirectedWeighted(Generic[T]):
    def __init__(self) -> None:
        # connections: map from the node to the neighbouring nodes (with weights)
        self.connections: dict[T, dict[T, int]] = {}

    def add_node(self, node: T) -> None:
        # add a node ONLY if its not present in the graph
        if node not in self.connections:
            self.connections[node] = {}

    def add_edge(self, node1: T, node2: T, weight: int) -> None:
        # add an edge with the given weight
        self.add_node(node1)
        self.add_node(node2)
        self.connections[node1][node2] = weight
        self.connections[node2][node1] = weight

    def kruskal(self) -> GraphUndirectedWeighted[T]:
        # collect each undirected edge exactly once
        edges = []
        seen = set()
        for start in self.connections:
            for end in self.connections[start]:
                if (start, end) not in seen:
                    seen.add((end, start))
                    edges.append((start, end, self.connections[start][end]))
        edges.sort(key=lambda x: x[2])

        # creating the disjoint set
        disjoint_set = DisjointSetTree[T]()
        for node in self.connections:
            disjoint_set.make_set(node)

        # MST generation
        num_edges = 0
        index = 0
        graph = GraphUndirectedWeighted[T]()
        while num_edges < len(self.connections) - 1:
            u, v, w = edges[index]
            index += 1
            parent_u = disjoint_set.find_set(u)
            parent_v = disjoint_set.find_set(v)
            if parent_u != parent_v:
                num_edges += 1
                graph.add_edge(u, v, w)
                disjoint_set.union(u, v)
        return graph
| 159 |
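# A small end-to-end example of the MST interface above (illustrative; the
# printed dictionary was checked by hand against Kruskal's algorithm).
g = GraphUndirectedWeighted[int]()
g.add_edge(1, 2, 1)
g.add_edge(2, 3, 2)
g.add_edge(1, 3, 3)  # heaviest edge; Kruskal should drop it
mst = g.kruskal()
print(mst.connections)  # {1: {2: 1}, 2: {1: 1, 3: 2}, 3: {2: 2}}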
import argparse
import math
import os
import torch
from neural_compressor.utils.pytorch import load
from PIL import Image
from transformers import CLIPTextModel, CLIPTokenizer
from diffusers import AutoencoderKL, StableDiffusionPipeline, UNet2DConditionModel


def parse_args():
    parser = argparse.ArgumentParser()
    parser.add_argument(
        "-m", "--pretrained_model_name_or_path", type=str, default=None, required=True, help="Path to pretrained model or model identifier from huggingface.co/models.",
    )
    parser.add_argument(
        "-c", "--caption", type=str, default="robotic cat with wings", help="Text used to generate images.",
    )
    parser.add_argument(
        "-n", "--images_num", type=int, default=4, help="How much images to generate.",
    )
    parser.add_argument(
        "-s", "--seed", type=int, default=42, help="Seed for random process.",
    )
    parser.add_argument(
        "-ci", "--cuda_id", type=int, default=0, help="cuda_id.",
    )
    args = parser.parse_args()
    return args


def image_grid(imgs, rows, cols):
    if not len(imgs) == rows * cols:
        raise ValueError("The specified number of rows and columns are not correct.")

    w, h = imgs[0].size
    grid = Image.new("RGB", size=(cols * w, rows * h))
    for i, img in enumerate(imgs):
        grid.paste(img, box=(i % cols * w, i // cols * h))
    return grid


def generate_images(pipeline, prompt="robotic cat with wings", guidance_scale=7.5, num_inference_steps=50, num_images_per_prompt=1, seed=42):
    generator = torch.Generator(pipeline.device).manual_seed(seed)
    images = pipeline(
        prompt, guidance_scale=guidance_scale, num_inference_steps=num_inference_steps, generator=generator, num_images_per_prompt=num_images_per_prompt,
    ).images
    _rows = int(math.sqrt(num_images_per_prompt))
    grid = image_grid(images, rows=_rows, cols=num_images_per_prompt // _rows)
    return grid, images


args = parse_args()
# Load models and create wrapper for stable diffusion
tokenizer = CLIPTokenizer.from_pretrained(args.pretrained_model_name_or_path, subfolder="tokenizer")
text_encoder = CLIPTextModel.from_pretrained(args.pretrained_model_name_or_path, subfolder="text_encoder")
vae = AutoencoderKL.from_pretrained(args.pretrained_model_name_or_path, subfolder="vae")
unet = UNet2DConditionModel.from_pretrained(args.pretrained_model_name_or_path, subfolder="unet")
pipeline = StableDiffusionPipeline.from_pretrained(
    args.pretrained_model_name_or_path, text_encoder=text_encoder, vae=vae, unet=unet, tokenizer=tokenizer
)
pipeline.safety_checker = lambda images, clip_input: (images, False)
if os.path.exists(os.path.join(args.pretrained_model_name_or_path, "best_model.pt")):
    unet = load(args.pretrained_model_name_or_path, model=unet)
    unet.eval()
    setattr(pipeline, "unet", unet)
else:
    unet = unet.to(torch.device("cuda", args.cuda_id))
pipeline = pipeline.to(unet.device)
grid, images = generate_images(pipeline, prompt=args.caption, num_images_per_prompt=args.images_num, seed=args.seed)
grid.save(os.path.join(args.pretrained_model_name_or_path, "{}.png".format("_".join(args.caption.split()))))
dirname = os.path.join(args.pretrained_model_name_or_path, "_".join(args.caption.split()))
os.makedirs(dirname, exist_ok=True)
for idx, image in enumerate(images):
image.save(os.path.join(dirname, '''{}.png'''.format(idx + 1)))
| 159 | 1 |
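# A quick self-contained check of image_grid's layout logic, using solid-colour
# tiles instead of generated images (illustrative, not part of the script above).
from PIL import Image

tiles = [Image.new("RGB", (64, 64), c) for c in ("red", "green", "blue", "white")]
grid = image_grid(tiles, rows=2, cols=2)
assert grid.size == (128, 128)  # 2x2 grid of 64px tiles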
from ...utils import (
OptionalDependencyNotAvailable,
is_torch_available,
is_transformers_available,
is_transformers_version,
)
try:
if not (is_transformers_available() and is_torch_available() and is_transformers_version(">=", "4.25.0")):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_torch_and_transformers_objects import (
VersatileDiffusionDualGuidedPipeline,
VersatileDiffusionImageVariationPipeline,
VersatileDiffusionPipeline,
VersatileDiffusionTextToImagePipeline,
)
else:
from .modeling_text_unet import UNetFlatConditionModel
from .pipeline_versatile_diffusion import VersatileDiffusionPipeline
from .pipeline_versatile_diffusion_dual_guided import VersatileDiffusionDualGuidedPipeline
from .pipeline_versatile_diffusion_image_variation import VersatileDiffusionImageVariationPipeline
from .pipeline_versatile_diffusion_text_to_image import VersatileDiffusionTextToImagePipeline
| 358 |
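# The optional-dependency guard above, reduced to its essentials: probe the
# import once, then raise a helpful error only when the gated feature is used.
# This miniature version is runnable standalone; the decorator name is ours.
try:
    import torch  # noqa: F401
except ImportError:
    torch = None


def require_torch(fn):
    # Wrap a function so it fails loudly, with install advice, when torch is missing.
    def wrapper(*args, **kwargs):
        if torch is None:
            raise ImportError("This feature requires `torch`; install it with `pip install torch`.")
        return fn(*args, **kwargs)
    return wrapper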
import json
import os
import shutil
import tempfile
import unittest
import numpy as np
import pytest
from transformers import CLIPTokenizer, CLIPTokenizerFast
from transformers.models.clip.tokenization_clip import VOCAB_FILES_NAMES
from transformers.testing_utils import require_vision
from transformers.utils import IMAGE_PROCESSOR_NAME, is_vision_available
if is_vision_available():
from PIL import Image
from transformers import OwlViTImageProcessor, OwlViTProcessor
@require_vision
class snake_case__ ( unittest.TestCase ):
def __magic_name__ ( self ) -> int:
__magic_name__ : List[str] = tempfile.mkdtemp()
# fmt: off
__magic_name__ : Union[str, Any] = ["""""", """l""", """o""", """w""", """e""", """r""", """s""", """t""", """i""", """d""", """n""", """lo""", """l</w>""", """w</w>""", """r</w>""", """t</w>""", """low</w>""", """er</w>""", """lowest</w>""", """newer</w>""", """wider""", """<unk>""", """<|startoftext|>""", """<|endoftext|>"""]
# fmt: on
__magic_name__ : Any = dict(zip(lowerCAmelCase__ , range(len(lowerCAmelCase__ ) ) ) )
__magic_name__ : int = ["""#version: 0.2""", """l o""", """lo w</w>""", """e r</w>""", """"""]
__magic_name__ : Any = {"""unk_token""": """<unk>"""}
__magic_name__ : Optional[Any] = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["""vocab_file"""] )
__magic_name__ : Optional[int] = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["""merges_file"""] )
with open(self.vocab_file , """w""" , encoding="""utf-8""" ) as fp:
fp.write(json.dumps(lowerCAmelCase__ ) + """\n""" )
with open(self.merges_file , """w""" , encoding="""utf-8""" ) as fp:
fp.write("""\n""".join(lowerCAmelCase__ ) )
__magic_name__ : int = {
"""do_resize""": True,
"""size""": 20,
"""do_center_crop""": True,
"""crop_size""": 18,
"""do_normalize""": True,
"""image_mean""": [0.4_8_1_4_5_4_6_6, 0.4_5_7_8_2_7_5, 0.4_0_8_2_1_0_7_3],
"""image_std""": [0.2_6_8_6_2_9_5_4, 0.2_6_1_3_0_2_5_8, 0.2_7_5_7_7_7_1_1],
}
__magic_name__ : List[str] = os.path.join(self.tmpdirname , lowerCAmelCase__ )
with open(self.image_processor_file , """w""" , encoding="""utf-8""" ) as fp:
json.dump(lowerCAmelCase__ , lowerCAmelCase__ )
def __magic_name__ ( self , **lowerCAmelCase__ ) -> List[str]:
return CLIPTokenizer.from_pretrained(self.tmpdirname , pad_token="""!""" , **lowerCAmelCase__ )
def __magic_name__ ( self , **lowerCAmelCase__ ) -> Any:
return CLIPTokenizerFast.from_pretrained(self.tmpdirname , pad_token="""!""" , **lowerCAmelCase__ )
def __magic_name__ ( self , **lowerCAmelCase__ ) -> Optional[Any]:
return OwlViTImageProcessor.from_pretrained(self.tmpdirname , **lowerCAmelCase__ )
def __magic_name__ ( self ) -> int:
shutil.rmtree(self.tmpdirname )
def __magic_name__ ( self ) -> int:
        image_inputs = [np.random.randint(255, size=(3, 30, 400), dtype=np.uint8)]
        image_inputs = [Image.fromarray(np.moveaxis(x, 0, -1)) for x in image_inputs]
        return image_inputs
def __magic_name__ ( self ) -> Union[str, Any]:
__magic_name__ : Any = self.get_tokenizer()
__magic_name__ : str = self.get_rust_tokenizer()
__magic_name__ : Tuple = self.get_image_processor()
__magic_name__ : List[str] = OwlViTProcessor(tokenizer=lowerCAmelCase__ , image_processor=lowerCAmelCase__ )
processor_slow.save_pretrained(self.tmpdirname )
__magic_name__ : Optional[int] = OwlViTProcessor.from_pretrained(self.tmpdirname , use_fast=lowerCAmelCase__ )
__magic_name__ : int = OwlViTProcessor(tokenizer=lowerCAmelCase__ , image_processor=lowerCAmelCase__ )
processor_fast.save_pretrained(self.tmpdirname )
__magic_name__ : List[Any] = OwlViTProcessor.from_pretrained(self.tmpdirname )
self.assertEqual(processor_slow.tokenizer.get_vocab() , tokenizer_slow.get_vocab() )
self.assertEqual(processor_fast.tokenizer.get_vocab() , tokenizer_fast.get_vocab() )
self.assertEqual(tokenizer_slow.get_vocab() , tokenizer_fast.get_vocab() )
self.assertIsInstance(processor_slow.tokenizer , lowerCAmelCase__ )
self.assertIsInstance(processor_fast.tokenizer , lowerCAmelCase__ )
self.assertEqual(processor_slow.image_processor.to_json_string() , image_processor.to_json_string() )
self.assertEqual(processor_fast.image_processor.to_json_string() , image_processor.to_json_string() )
self.assertIsInstance(processor_slow.image_processor , lowerCAmelCase__ )
self.assertIsInstance(processor_fast.image_processor , lowerCAmelCase__ )
def __magic_name__ ( self ) -> Optional[int]:
__magic_name__ : Optional[Any] = OwlViTProcessor(tokenizer=self.get_tokenizer() , image_processor=self.get_image_processor() )
processor.save_pretrained(self.tmpdirname )
__magic_name__ : str = self.get_tokenizer(bos_token="""(BOS)""" , eos_token="""(EOS)""" )
__magic_name__ : Any = self.get_image_processor(do_normalize=lowerCAmelCase__ )
__magic_name__ : Tuple = OwlViTProcessor.from_pretrained(
self.tmpdirname , bos_token="""(BOS)""" , eos_token="""(EOS)""" , do_normalize=lowerCAmelCase__ )
self.assertEqual(processor.tokenizer.get_vocab() , tokenizer_add_kwargs.get_vocab() )
self.assertIsInstance(processor.tokenizer , lowerCAmelCase__ )
self.assertEqual(processor.image_processor.to_json_string() , image_processor_add_kwargs.to_json_string() )
self.assertIsInstance(processor.image_processor , lowerCAmelCase__ )
def __magic_name__ ( self ) -> Dict:
__magic_name__ : int = self.get_image_processor()
__magic_name__ : int = self.get_tokenizer()
__magic_name__ : Union[str, Any] = OwlViTProcessor(tokenizer=lowerCAmelCase__ , image_processor=lowerCAmelCase__ )
__magic_name__ : Dict = self.prepare_image_inputs()
__magic_name__ : Any = image_processor(lowerCAmelCase__ , return_tensors="""np""" )
__magic_name__ : str = processor(images=lowerCAmelCase__ , return_tensors="""np""" )
for key in input_image_proc.keys():
self.assertAlmostEqual(input_image_proc[key].sum() , input_processor[key].sum() , delta=1e-2 )
def __magic_name__ ( self ) -> Tuple:
__magic_name__ : Union[str, Any] = self.get_image_processor()
__magic_name__ : int = self.get_tokenizer()
__magic_name__ : int = OwlViTProcessor(tokenizer=lowerCAmelCase__ , image_processor=lowerCAmelCase__ )
__magic_name__ : Optional[int] = """lower newer"""
__magic_name__ : Tuple = processor(text=lowerCAmelCase__ , return_tensors="""np""" )
__magic_name__ : Optional[int] = tokenizer(lowerCAmelCase__ , return_tensors="""np""" )
for key in encoded_tok.keys():
self.assertListEqual(encoded_tok[key][0].tolist() , encoded_processor[key][0].tolist() )
def __magic_name__ ( self ) -> Tuple:
__magic_name__ : Tuple = self.get_image_processor()
__magic_name__ : Union[str, Any] = self.get_tokenizer()
__magic_name__ : List[str] = OwlViTProcessor(tokenizer=lowerCAmelCase__ , image_processor=lowerCAmelCase__ )
__magic_name__ : Any = """lower newer"""
__magic_name__ : Union[str, Any] = self.prepare_image_inputs()
__magic_name__ : int = processor(text=lowerCAmelCase__ , images=lowerCAmelCase__ )
self.assertListEqual(list(inputs.keys() ) , ["""input_ids""", """attention_mask""", """pixel_values"""] )
# test if it raises when no input is passed
with pytest.raises(lowerCAmelCase__ ):
processor()
def __magic_name__ ( self ) -> Optional[Any]:
__magic_name__ : Dict = """google/owlvit-base-patch32"""
__magic_name__ : int = OwlViTProcessor.from_pretrained(lowerCAmelCase__ )
__magic_name__ : List[Any] = ["""cat""", """nasa badge"""]
__magic_name__ : Any = processor(text=lowerCAmelCase__ )
__magic_name__ : Dict = 16
self.assertListEqual(list(inputs.keys() ) , ["""input_ids""", """attention_mask"""] )
self.assertEqual(inputs["""input_ids"""].shape , (2, seq_length) )
# test if it raises when no input is passed
with pytest.raises(lowerCAmelCase__ ):
processor()
def __magic_name__ ( self ) -> List[Any]:
__magic_name__ : List[str] = """google/owlvit-base-patch32"""
__magic_name__ : Optional[Any] = OwlViTProcessor.from_pretrained(lowerCAmelCase__ )
__magic_name__ : Tuple = [["""cat""", """nasa badge"""], ["""person"""]]
__magic_name__ : Tuple = processor(text=lowerCAmelCase__ )
__magic_name__ : str = 16
__magic_name__ : str = len(lowerCAmelCase__ )
__magic_name__ : int = max([len(lowerCAmelCase__ ) for texts in input_texts] )
self.assertListEqual(list(inputs.keys() ) , ["""input_ids""", """attention_mask"""] )
self.assertEqual(inputs["""input_ids"""].shape , (batch_size * num_max_text_queries, seq_length) )
# test if it raises when no input is passed
with pytest.raises(lowerCAmelCase__ ):
processor()
def __magic_name__ ( self ) -> Any:
__magic_name__ : Optional[int] = """google/owlvit-base-patch32"""
__magic_name__ : Any = OwlViTProcessor.from_pretrained(lowerCAmelCase__ )
__magic_name__ : str = ["""cat""", """nasa badge"""]
__magic_name__ : List[str] = processor(text=lowerCAmelCase__ )
__magic_name__ : List[Any] = 16
__magic_name__ : Any = inputs["""input_ids"""]
__magic_name__ : Optional[Any] = [
[4_94_06, 23_68, 4_94_07, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[4_94_06, 68_41, 1_13_01, 4_94_07, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
]
self.assertListEqual(list(inputs.keys() ) , ["""input_ids""", """attention_mask"""] )
self.assertEqual(inputs["""input_ids"""].shape , (2, seq_length) )
self.assertListEqual(list(input_ids[0] ) , predicted_ids[0] )
self.assertListEqual(list(input_ids[1] ) , predicted_ids[1] )
def __magic_name__ ( self ) -> Tuple:
__magic_name__ : List[str] = self.get_image_processor()
__magic_name__ : Dict = self.get_tokenizer()
__magic_name__ : Tuple = OwlViTProcessor(tokenizer=lowerCAmelCase__ , image_processor=lowerCAmelCase__ )
__magic_name__ : Tuple = self.prepare_image_inputs()
__magic_name__ : List[Any] = self.prepare_image_inputs()
__magic_name__ : List[str] = processor(images=lowerCAmelCase__ , query_images=lowerCAmelCase__ )
self.assertListEqual(list(inputs.keys() ) , ["""query_pixel_values""", """pixel_values"""] )
# test if it raises when no input is passed
with pytest.raises(lowerCAmelCase__ ):
processor()
def __magic_name__ ( self ) -> Any:
__magic_name__ : Optional[Any] = self.get_image_processor()
__magic_name__ : List[Any] = self.get_tokenizer()
__magic_name__ : Tuple = OwlViTProcessor(tokenizer=lowerCAmelCase__ , image_processor=lowerCAmelCase__ )
__magic_name__ : Union[str, Any] = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]
__magic_name__ : Optional[Any] = processor.batch_decode(lowerCAmelCase__ )
__magic_name__ : Optional[int] = tokenizer.batch_decode(lowerCAmelCase__ )
self.assertListEqual(lowerCAmelCase__ , lowerCAmelCase__ )
| 138 | 0 |
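# A minimal inference sketch for the processor exercised by the tests above.
# Assumes network access to download the google/owlvit-base-patch32 checkpoint;
# the random image is only a placeholder input.
import numpy as np
import torch
from PIL import Image
from transformers import OwlViTForObjectDetection, OwlViTProcessor

processor = OwlViTProcessor.from_pretrained("google/owlvit-base-patch32")
model = OwlViTForObjectDetection.from_pretrained("google/owlvit-base-patch32")

image = Image.fromarray(np.random.randint(255, size=(480, 640, 3), dtype=np.uint8))
inputs = processor(text=[["cat", "nasa badge"]], images=image, return_tensors="pt")
with torch.no_grad():
    outputs = model(**inputs)
print(outputs.logits.shape, outputs.pred_boxes.shape)  # one row per text query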
from dataclasses import dataclass, field
from typing import Tuple
from ..utils import cached_property, is_torch_available, is_torch_tpu_available, logging, requires_backends
from .benchmark_args_utils import BenchmarkArguments
if is_torch_available():
import torch
if is_torch_tpu_available(check_device=False):
import torch_xla.core.xla_model as xm
__a = logging.get_logger(__name__)
@dataclass
class PyTorchBenchmarkArguments(BenchmarkArguments):
    deprecated_args = [
        "no_inference",
        "no_cuda",
        "no_tpu",
        "no_speed",
        "no_memory",
        "no_env_print",
        "no_multi_process",
    ]

    def __init__(self, **kwargs):
        # legacy shim: translate deprecated --no_* flags into their positive equivalents
        for deprecated_arg in self.deprecated_args:
            if deprecated_arg in kwargs:
                positive_arg = deprecated_arg[3:]
                setattr(self, positive_arg, not kwargs.pop(deprecated_arg))
                logger.warning(
                    f"{deprecated_arg} is depreciated. Please use --no_{positive_arg} or"
                    f" {positive_arg}={kwargs[positive_arg]}"
                )

        self.torchscript = kwargs.pop("torchscript", self.torchscript)
        self.torch_xla_tpu_print_metrics = kwargs.pop("torch_xla_tpu_print_metrics", self.torch_xla_tpu_print_metrics)
        self.fp16_opt_level = kwargs.pop("fp16_opt_level", self.fp16_opt_level)
        super().__init__(**kwargs)

    torchscript: bool = field(default=False, metadata={"help": "Trace the models using torchscript"})
    torch_xla_tpu_print_metrics: bool = field(default=False, metadata={"help": "Print Xla/PyTorch tpu metrics"})
    fp16_opt_level: str = field(
        default="O1",
        metadata={
            "help": (
                "For fp16: Apex AMP optimization level selected in ['O0', 'O1', 'O2', and 'O3']. "
                "See details at https://nvidia.github.io/apex/amp.html"
            )
        },
    )

    @cached_property
    def _setup_devices(self) -> Tuple["torch.device", int]:
        requires_backends(self, ["torch"])
        logger.info("PyTorch: setting up devices")
        if not self.cuda:
            device = torch.device("cpu")
            n_gpu = 0
        elif is_torch_tpu_available():
            device = xm.xla_device()
            n_gpu = 0
        else:
            device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
            n_gpu = torch.cuda.device_count()
        return device, n_gpu

    @property
    def is_tpu(self) -> bool:
        return is_torch_tpu_available() and self.tpu

    @property
    def device_idx(self) -> int:
        requires_backends(self, ["torch"])
        # TODO(PVP): currently only single GPU is supported
        return torch.cuda.current_device()

    @property
    def device(self) -> "torch.device":
        requires_backends(self, ["torch"])
        return self._setup_devices[0]

    @property
    def n_gpu(self) -> int:
        requires_backends(self, ["torch"])
        return self._setup_devices[1]

    @property
    def is_gpu(self) -> bool:
        return self.n_gpu > 0
| 30 |
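# A quick probe of the device-selection logic above, run without benchmarking
# anything. Assumes `transformers` is importable; field defaults come from the
# BenchmarkArguments base class.
from transformers import PyTorchBenchmarkArguments

args = PyTorchBenchmarkArguments(models=["sshleifer/tiny-gpt2"], sequence_lengths=[8], batch_sizes=[1])
print(args.device, args.n_gpu, args.is_tpu)  # cached_property: computed once, then reused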
'''simple docstring'''
import unittest
from transformers import SPIECE_UNDERLINE
from transformers.models.speecht5 import SpeechT5Tokenizer
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow
from transformers.tokenization_utils import AddedToken
from ...test_tokenization_common import TokenizerTesterMixin
SAMPLE_VOCAB = get_tests_dir("""fixtures/test_sentencepiece_bpe_char.model""")
@require_sentencepiece
@require_tokenizers
class SpeechT5TokenizerTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = SpeechT5Tokenizer
    test_rust_tokenizer = False
    test_sentencepiece = True
    def setUp(self):
        super().setUp()

        # We have a SentencePiece fixture for testing
        tokenizer = SpeechT5Tokenizer(SAMPLE_VOCAB)
        mask_token = AddedToken("<mask>", lstrip=True, rstrip=False)
        tokenizer.mask_token = mask_token
        tokenizer.add_special_tokens({"mask_token": mask_token})
        tokenizer.add_tokens(["<ctc_blank>"])
        tokenizer.save_pretrained(self.tmpdirname)
def __lowerCamelCase ( self : List[Any] , _lowerCAmelCase : Optional[int]):
'''simple docstring'''
__lowercase ='this is a test'
__lowercase ='this is a test'
return input_text, output_text
def __lowerCamelCase ( self : Optional[int] , _lowerCAmelCase : int , _lowerCAmelCase : List[Any]=False , _lowerCAmelCase : Dict=2_0 , _lowerCAmelCase : Tuple=5):
'''simple docstring'''
__lowercase , __lowercase =self.get_input_output_texts(_lowerCAmelCase)
__lowercase =tokenizer.encode(_lowerCAmelCase , add_special_tokens=_lowerCAmelCase)
__lowercase =tokenizer.decode(_lowerCAmelCase , clean_up_tokenization_spaces=_lowerCAmelCase)
return text, ids
def __lowerCamelCase ( self : int):
'''simple docstring'''
__lowercase ='<pad>'
__lowercase =1
self.assertEqual(self.get_tokenizer()._convert_token_to_id(_lowerCAmelCase) , _lowerCAmelCase)
self.assertEqual(self.get_tokenizer()._convert_id_to_token(_lowerCAmelCase) , _lowerCAmelCase)
def __lowerCamelCase ( self : Dict):
'''simple docstring'''
__lowercase =list(self.get_tokenizer().get_vocab().keys())
self.assertEqual(vocab_keys[0] , '<s>')
self.assertEqual(vocab_keys[1] , '<pad>')
self.assertEqual(vocab_keys[-4] , 'œ')
self.assertEqual(vocab_keys[-2] , '<mask>')
self.assertEqual(vocab_keys[-1] , '<ctc_blank>')
self.assertEqual(len(_lowerCAmelCase) , 8_1)
def __lowerCamelCase ( self : Optional[Any]):
'''simple docstring'''
self.assertEqual(self.get_tokenizer().vocab_size , 7_9)
def __lowerCamelCase ( self : Union[str, Any]):
'''simple docstring'''
__lowercase =self.get_tokenizers(do_lower_case=_lowerCAmelCase)
for tokenizer in tokenizers:
with self.subTest(f"""{tokenizer.__class__.__name__}"""):
__lowercase =tokenizer.vocab_size
__lowercase =len(_lowerCAmelCase)
self.assertNotEqual(_lowerCAmelCase , 0)
# We usually have added tokens from the start in tests because our vocab fixtures are
# smaller than the original vocabs - let's not assert this
# self.assertEqual(vocab_size, all_size)
__lowercase =['aaaaa bbbbbb', 'cccccccccdddddddd']
__lowercase =tokenizer.add_tokens(_lowerCAmelCase)
__lowercase =tokenizer.vocab_size
__lowercase =len(_lowerCAmelCase)
self.assertNotEqual(_lowerCAmelCase , 0)
self.assertEqual(_lowerCAmelCase , _lowerCAmelCase)
self.assertEqual(_lowerCAmelCase , len(_lowerCAmelCase))
self.assertEqual(_lowerCAmelCase , all_size + len(_lowerCAmelCase))
__lowercase =tokenizer.encode('aaaaa bbbbbb low cccccccccdddddddd l' , add_special_tokens=_lowerCAmelCase)
self.assertGreaterEqual(len(_lowerCAmelCase) , 4)
self.assertGreater(tokens[0] , tokenizer.vocab_size - 1)
self.assertGreater(tokens[-3] , tokenizer.vocab_size - 1)
__lowercase ={'eos_token': '>>>>|||<||<<|<<', 'pad_token': '<<<<<|||>|>>>>|>'}
__lowercase =tokenizer.add_special_tokens(_lowerCAmelCase)
__lowercase =tokenizer.vocab_size
__lowercase =len(_lowerCAmelCase)
self.assertNotEqual(_lowerCAmelCase , 0)
self.assertEqual(_lowerCAmelCase , _lowerCAmelCase)
self.assertEqual(_lowerCAmelCase , len(_lowerCAmelCase))
self.assertEqual(_lowerCAmelCase , all_size_a + len(_lowerCAmelCase))
__lowercase =tokenizer.encode(
'>>>>|||<||<<|<< aaaaabbbbbb low cccccccccdddddddd <<<<<|||>|>>>>|> l' , add_special_tokens=_lowerCAmelCase)
self.assertGreaterEqual(len(_lowerCAmelCase) , 6)
self.assertGreater(tokens[0] , tokenizer.vocab_size - 1)
self.assertGreater(tokens[0] , tokens[1])
self.assertGreater(tokens[-3] , tokenizer.vocab_size - 1)
self.assertGreater(tokens[-3] , tokens[-4])
self.assertEqual(tokens[0] , tokenizer.eos_token_id)
self.assertEqual(tokens[-3] , tokenizer.pad_token_id)
def __lowerCamelCase ( self : List[str]):
'''simple docstring'''
pass
def __lowerCamelCase ( self : Union[str, Any]):
'''simple docstring'''
pass
def __lowerCamelCase ( self : Optional[Any]):
'''simple docstring'''
__lowercase =self.get_tokenizer()
__lowercase =tokenizer.tokenize('This is a test')
# fmt: off
self.assertListEqual(_lowerCAmelCase , [SPIECE_UNDERLINE, 'T', 'h', 'i', 's', SPIECE_UNDERLINE, 'i', 's', SPIECE_UNDERLINE, 'a', SPIECE_UNDERLINE, 't', 'e', 's', 't'])
# fmt: on
self.assertListEqual(
tokenizer.convert_tokens_to_ids(_lowerCAmelCase) , [4, 3_2, 1_1, 1_0, 1_2, 4, 1_0, 1_2, 4, 7, 4, 6, 5, 1_2, 6] , )
__lowercase =tokenizer.tokenize('I was born in 92000, and this is falsé.')
self.assertListEqual(
_lowerCAmelCase , [SPIECE_UNDERLINE, 'I', SPIECE_UNDERLINE, 'w', 'a', 's', SPIECE_UNDERLINE, 'b', 'o', 'r', 'n', SPIECE_UNDERLINE, 'i', 'n', SPIECE_UNDERLINE, '92000', ',', SPIECE_UNDERLINE, 'a', 'n', 'd', SPIECE_UNDERLINE, 't', 'h', 'i', 's', SPIECE_UNDERLINE, 'i', 's', SPIECE_UNDERLINE, 'f', 'a', 'l', 's', 'é', '.'])
__lowercase =tokenizer.convert_tokens_to_ids(_lowerCAmelCase)
# fmt: off
self.assertListEqual(_lowerCAmelCase , [4, 3_0, 4, 2_0, 7, 1_2, 4, 2_5, 8, 1_3, 9, 4, 1_0, 9, 4, 3, 2_3, 4, 7, 9, 1_4, 4, 6, 1_1, 1_0, 1_2, 4, 1_0, 1_2, 4, 1_9, 7, 1_5, 1_2, 7_3, 2_6])
# fmt: on
__lowercase =tokenizer.convert_ids_to_tokens(_lowerCAmelCase)
self.assertListEqual(
_lowerCAmelCase , [SPIECE_UNDERLINE, 'I', SPIECE_UNDERLINE, 'w', 'a', 's', SPIECE_UNDERLINE, 'b', 'o', 'r', 'n', SPIECE_UNDERLINE, 'i', 'n', SPIECE_UNDERLINE, '<unk>', ',', SPIECE_UNDERLINE, 'a', 'n', 'd', SPIECE_UNDERLINE, 't', 'h', 'i', 's', SPIECE_UNDERLINE, 'i', 's', SPIECE_UNDERLINE, 'f', 'a', 'l', 's', 'é', '.'])
@slow
def __lowerCamelCase ( self : List[str]):
'''simple docstring'''
__lowercase =[
'Transformers (formerly known as pytorch-transformers and pytorch-pretrained-bert) provides '
'general-purpose architectures (BERT, GPT, RoBERTa, XLM, DistilBert, XLNet...) for Natural '
'Language Understanding (NLU) and Natural Language Generation (NLG) with over thirty-two pretrained '
'models in one hundred plus languages and deep interoperability between Jax, PyTorch and TensorFlow.',
'BERT is designed to pre-train deep bidirectional representations from unlabeled text by jointly '
'conditioning on both left and right context in all layers.',
'The quick brown fox jumps over the lazy dog.',
]
# fmt: off
__lowercase ={
'input_ids': [
[4, 3_2, 1_3, 7, 9, 1_2, 1_9, 8, 1_3, 1_8, 5, 1_3, 1_2, 4, 6_4, 1_9, 8, 1_3, 1_8, 5, 1_3, 1_5, 2_2, 4, 2_8, 9, 8, 2_0, 9, 4, 7, 1_2, 4, 2_4, 2_2, 6, 8, 1_3, 1_7, 1_1, 3_9, 6, 1_3, 7, 9, 1_2, 1_9, 8, 1_3, 1_8, 5, 1_3, 1_2, 4, 7, 9, 1_4, 4, 2_4, 2_2, 6, 8, 1_3, 1_7, 1_1, 3_9, 2_4, 1_3, 5, 6, 1_3, 7, 1_0, 9, 5, 1_4, 3_9, 2_5, 5, 1_3, 6, 6_3, 4, 2_4, 1_3, 8, 2_7, 1_0, 1_4, 5, 1_2, 4, 2_1, 5, 9, 5, 1_3, 7, 1_5, 3_9, 2_4, 1_6, 1_3, 2_4, 8, 1_2, 5, 4, 7, 1_3, 1_7, 1_1, 1_0, 6, 5, 1_7, 6, 1_6, 1_3, 5, 1_2, 4, 6_4, 4_0, 4_7, 5_4, 3_2, 2_3, 4, 5_3, 4_9, 3_2, 2_3, 4, 5_4, 8, 4_0, 4_7, 5_4, 3_2, 7, 2_3, 4, 6_9, 5_2, 4_3, 2_3, 4, 5_1, 1_0, 1_2, 6, 1_0, 1_5, 4_0, 5, 1_3, 6, 2_3, 4, 6_9, 5_2, 4_8, 5, 6, 2_6, 2_6, 2_6, 6_3, 4, 1_9, 8, 1_3, 4, 4_8, 7, 6, 1_6, 1_3, 7, 1_5, 4, 5_2, 7, 9, 2_1, 1_6, 7, 2_1, 5, 4, 6_1, 9, 1_4, 5, 1_3, 1_2, 6, 7, 9, 1_4, 1_0, 9, 2_1, 4, 6_4, 4_8, 5_2, 6_1, 6_3, 4, 7, 9, 1_4, 4, 4_8, 7, 6, 1_6, 1_3, 7, 1_5, 4, 5_2, 7, 9, 2_1, 1_6, 7, 2_1, 5, 4, 5_3, 5, 9, 5, 1_3, 7, 6, 1_0, 8, 9, 4, 6_4, 4_8, 5_2, 5_3, 6_3, 4, 2_0, 1_0, 6, 1_1, 4, 8, 2_7, 5, 1_3, 4, 6, 1_1, 1_0, 1_3, 6, 2_2, 3_9, 6, 2_0, 8, 4, 2_4, 1_3, 5, 6, 1_3, 7, 1_0, 9, 5, 1_4, 4, 1_8, 8, 1_4, 5, 1_5, 1_2, 4, 1_0, 9, 4, 8, 9, 5, 4, 1_1, 1_6, 9, 1_4, 1_3, 5, 1_4, 4, 2_4, 1_5, 1_6, 1_2, 4, 1_5, 7, 9, 2_1, 1_6, 7, 2_1, 5, 1_2, 4, 7, 9, 1_4, 4, 1_4, 5, 5, 2_4, 4, 1_0, 9, 6, 5, 1_3, 8, 2_4, 5, 1_3, 7, 2_5, 1_0, 1_5, 1_0, 6, 2_2, 4, 2_5, 5, 6, 2_0, 5, 5, 9, 4, 5_8, 7, 3_7, 2_3, 4, 4_9, 2_2, 3_2, 8, 1_3, 1_7, 1_1, 4, 7, 9, 1_4, 4, 3_2, 5, 9, 1_2, 8, 1_3, 5_5, 1_5, 8, 2_0, 2_6, 2],
[4, 4_0, 4_7, 5_4, 3_2, 4, 1_0, 1_2, 4, 1_4, 5, 1_2, 1_0, 2_1, 9, 5, 1_4, 4, 6, 8, 4, 2_4, 1_3, 5, 3_9, 6, 1_3, 7, 1_0, 9, 4, 1_4, 5, 5, 2_4, 4, 2_5, 1_0, 1_4, 1_0, 1_3, 5, 1_7, 6, 1_0, 8, 9, 7, 1_5, 4, 1_3, 5, 2_4, 1_3, 5, 1_2, 5, 9, 6, 7, 6, 1_0, 8, 9, 1_2, 4, 1_9, 1_3, 8, 1_8, 4, 1_6, 9, 1_5, 7, 2_5, 5, 1_5, 5, 1_4, 4, 6, 5, 3_7, 6, 4, 2_5, 2_2, 4, 4_6, 8, 1_0, 9, 6, 1_5, 2_2, 4, 1_7, 8, 9, 1_4, 1_0, 6, 1_0, 8, 9, 1_0, 9, 2_1, 4, 8, 9, 4, 2_5, 8, 6, 1_1, 4, 1_5, 5, 1_9, 6, 4, 7, 9, 1_4, 4, 1_3, 1_0, 2_1, 1_1, 6, 4, 1_7, 8, 9, 6, 5, 3_7, 6, 4, 1_0, 9, 4, 7, 1_5, 1_5, 4, 1_5, 7, 2_2, 5, 1_3, 1_2, 2_6, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1],
[4, 3_2, 1_1, 5, 4, 4_5, 1_6, 1_0, 1_7, 2_8, 4, 2_5, 1_3, 8, 2_0, 9, 4, 1_9, 8, 3_7, 4, 4_6, 1_6, 1_8, 2_4, 1_2, 4, 8, 2_7, 5, 1_3, 4, 6, 1_1, 5, 4, 1_5, 7, 5_7, 2_2, 4, 1_4, 8, 2_1, 2_6, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1],
],
'attention_mask': [
[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1],
[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
]
}
# fmt: on
self.tokenizer_integration_test_util(
expected_encoding=_lowerCAmelCase , model_name='microsoft/speecht5_asr' , revision='c5ef64c71905caeccde0e4462ef3f9077224c524' , sequences=_lowerCAmelCase , )
| 166 | 0 |
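# A round-trip sketch for the character-level SentencePiece tokenizer tested
# above. Assumes network access to fetch the microsoft/speecht5_asr checkpoint
# referenced in the integration test.
from transformers import SpeechT5Tokenizer

tok = SpeechT5Tokenizer.from_pretrained("microsoft/speecht5_asr")
ids = tok("this is a test").input_ids
print(tok.convert_ids_to_tokens(ids))  # per-character pieces with '▁' word markers
print(tok.decode(ids, skip_special_tokens=True))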
def logical_left_shift(number: int, shift_amount: int) -> str:
    # Shift left by padding zeros on the right; returns the "0b"-prefixed result.
    if number < 0 or shift_amount < 0:
        raise ValueError("both inputs must be positive integers")

    binary_number = str(bin(number))
    binary_number += "0" * shift_amount
    return binary_number


def logical_right_shift(number: int, shift_amount: int) -> str:
    # Shift right by dropping the lowest bits; returns the "0b"-prefixed result.
    if number < 0 or shift_amount < 0:
        raise ValueError("both inputs must be positive integers")

    binary_number = str(bin(number))[2:]
    if shift_amount >= len(binary_number):
        return "0b0"
    shifted_binary_number = binary_number[: len(binary_number) - shift_amount]
    return "0b" + shifted_binary_number


def arithmetic_right_shift(number: int, shift_amount: int) -> str:
    # Sign-preserving right shift on the two's-complement representation.
    if number >= 0:  # Get binary representation of positive number
        binary_number = "0" + str(bin(number)).strip("-")[2:]
    else:  # Get binary (2's complement) representation of negative number
        binary_number_length = len(bin(number)[3:])  # Find 2's complement of number
        binary_number = bin(abs(number) - (1 << binary_number_length))[3:]
        binary_number = (
            "1" + "0" * (binary_number_length - len(binary_number)) + binary_number
        )

    if shift_amount >= len(binary_number):
        return "0b" + binary_number[0] * len(binary_number)
    return (
        "0b"
        + binary_number[0] * shift_amount
        + binary_number[: len(binary_number) - shift_amount]
    )
if __name__ == "__main__":
import doctest
doctest.testmod()
| 370 |
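# Worked examples for the three shift helpers above (results checked by hand
# against Python's native << and >> operators).
print(logical_left_shift(17, 2))       # 0b1000100   (17 << 2 == 68)
print(logical_right_shift(1983, 4))    # 0b1111011   (1983 >> 4 == 123)
print(arithmetic_right_shift(-83, 1))  # 0b11010110  (8-bit two's complement of -42 == -83 >> 1)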
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import DetrConfig, DetrForObjectDetection, DetrForSegmentation, DetrImageProcessor, ResNetConfig
from transformers.utils import logging
logging.set_verbosity_info()
_lowerCamelCase = logging.get_logger(__name__)
def get_detr_config(model_name):
    # initialize config
    if "resnet-50" in model_name:
        backbone_config = ResNetConfig.from_pretrained("microsoft/resnet-50")
    elif "resnet-101" in model_name:
        backbone_config = ResNetConfig.from_pretrained("microsoft/resnet-101")
    else:
        raise ValueError("Model name should include either resnet50 or resnet101")

    config = DetrConfig(use_timm_backbone=False, backbone_config=backbone_config)

    # set label attributes
    is_panoptic = "panoptic" in model_name
    if is_panoptic:
        config.num_labels = 250
    else:
        config.num_labels = 91
    repo_id = "huggingface/label-files"
    filename = "coco-detection-id2label.json"
    id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
    id2label = {int(k): v for k, v in id2label.items()}
    config.id2label = id2label
    config.label2id = {v: k for k, v in id2label.items()}

    return config, is_panoptic
def create_rename_keys(config):
    # here we list all keys to be renamed (original name on the left, our name on the right)
    rename_keys = []
# stem
# fmt: off
rename_keys.append(('''backbone.0.body.conv1.weight''', '''backbone.conv_encoder.model.embedder.embedder.convolution.weight''') )
rename_keys.append(('''backbone.0.body.bn1.weight''', '''backbone.conv_encoder.model.embedder.embedder.normalization.weight''') )
rename_keys.append(('''backbone.0.body.bn1.bias''', '''backbone.conv_encoder.model.embedder.embedder.normalization.bias''') )
rename_keys.append(('''backbone.0.body.bn1.running_mean''', '''backbone.conv_encoder.model.embedder.embedder.normalization.running_mean''') )
rename_keys.append(('''backbone.0.body.bn1.running_var''', '''backbone.conv_encoder.model.embedder.embedder.normalization.running_var''') )
# stages
for stage_idx in range(len(config.backbone_config.depths ) ):
for layer_idx in range(config.backbone_config.depths[stage_idx] ):
# shortcut
if layer_idx == 0:
rename_keys.append(
(
f'backbone.0.body.layer{stage_idx + 1}.{layer_idx}.downsample.0.weight',
f'backbone.conv_encoder.model.encoder.stages.{stage_idx}.layers.{layer_idx}.shortcut.convolution.weight',
) )
rename_keys.append(
(
f'backbone.0.body.layer{stage_idx + 1}.{layer_idx}.downsample.1.weight',
f'backbone.conv_encoder.model.encoder.stages.{stage_idx}.layers.{layer_idx}.shortcut.normalization.weight',
) )
rename_keys.append(
(
f'backbone.0.body.layer{stage_idx + 1}.{layer_idx}.downsample.1.bias',
f'backbone.conv_encoder.model.encoder.stages.{stage_idx}.layers.{layer_idx}.shortcut.normalization.bias',
) )
rename_keys.append(
(
f'backbone.0.body.layer{stage_idx + 1}.{layer_idx}.downsample.1.running_mean',
f'backbone.conv_encoder.model.encoder.stages.{stage_idx}.layers.{layer_idx}.shortcut.normalization.running_mean',
) )
rename_keys.append(
(
f'backbone.0.body.layer{stage_idx + 1}.{layer_idx}.downsample.1.running_var',
f'backbone.conv_encoder.model.encoder.stages.{stage_idx}.layers.{layer_idx}.shortcut.normalization.running_var',
) )
# 3 convs
for i in range(3 ):
rename_keys.append(
(
f'backbone.0.body.layer{stage_idx + 1}.{layer_idx}.conv{i+1}.weight',
f'backbone.conv_encoder.model.encoder.stages.{stage_idx}.layers.{layer_idx}.layer.{i}.convolution.weight',
) )
rename_keys.append(
(
f'backbone.0.body.layer{stage_idx + 1}.{layer_idx}.bn{i+1}.weight',
f'backbone.conv_encoder.model.encoder.stages.{stage_idx}.layers.{layer_idx}.layer.{i}.normalization.weight',
) )
rename_keys.append(
(
f'backbone.0.body.layer{stage_idx + 1}.{layer_idx}.bn{i+1}.bias',
f'backbone.conv_encoder.model.encoder.stages.{stage_idx}.layers.{layer_idx}.layer.{i}.normalization.bias',
) )
rename_keys.append(
(
f'backbone.0.body.layer{stage_idx + 1}.{layer_idx}.bn{i+1}.running_mean',
f'backbone.conv_encoder.model.encoder.stages.{stage_idx}.layers.{layer_idx}.layer.{i}.normalization.running_mean',
) )
rename_keys.append(
(
f'backbone.0.body.layer{stage_idx + 1}.{layer_idx}.bn{i+1}.running_var',
f'backbone.conv_encoder.model.encoder.stages.{stage_idx}.layers.{layer_idx}.layer.{i}.normalization.running_var',
) )
# fmt: on
for i in range(config.encoder_layers ):
# encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
rename_keys.append(
(
f'transformer.encoder.layers.{i}.self_attn.out_proj.weight',
f'encoder.layers.{i}.self_attn.out_proj.weight',
) )
rename_keys.append(
(f'transformer.encoder.layers.{i}.self_attn.out_proj.bias', f'encoder.layers.{i}.self_attn.out_proj.bias') )
rename_keys.append((f'transformer.encoder.layers.{i}.linear1.weight', f'encoder.layers.{i}.fc1.weight') )
rename_keys.append((f'transformer.encoder.layers.{i}.linear1.bias', f'encoder.layers.{i}.fc1.bias') )
rename_keys.append((f'transformer.encoder.layers.{i}.linear2.weight', f'encoder.layers.{i}.fc2.weight') )
rename_keys.append((f'transformer.encoder.layers.{i}.linear2.bias', f'encoder.layers.{i}.fc2.bias') )
rename_keys.append(
(f'transformer.encoder.layers.{i}.norm1.weight', f'encoder.layers.{i}.self_attn_layer_norm.weight') )
rename_keys.append(
(f'transformer.encoder.layers.{i}.norm1.bias', f'encoder.layers.{i}.self_attn_layer_norm.bias') )
rename_keys.append(
(f'transformer.encoder.layers.{i}.norm2.weight', f'encoder.layers.{i}.final_layer_norm.weight') )
rename_keys.append((f'transformer.encoder.layers.{i}.norm2.bias', f'encoder.layers.{i}.final_layer_norm.bias') )
# decoder layers: 2 times output projection, 2 feedforward neural networks and 3 layernorms
rename_keys.append(
(
f'transformer.decoder.layers.{i}.self_attn.out_proj.weight',
f'decoder.layers.{i}.self_attn.out_proj.weight',
) )
rename_keys.append(
(f'transformer.decoder.layers.{i}.self_attn.out_proj.bias', f'decoder.layers.{i}.self_attn.out_proj.bias') )
rename_keys.append(
(
f'transformer.decoder.layers.{i}.multihead_attn.out_proj.weight',
f'decoder.layers.{i}.encoder_attn.out_proj.weight',
) )
rename_keys.append(
(
f'transformer.decoder.layers.{i}.multihead_attn.out_proj.bias',
f'decoder.layers.{i}.encoder_attn.out_proj.bias',
) )
rename_keys.append((f'transformer.decoder.layers.{i}.linear1.weight', f'decoder.layers.{i}.fc1.weight') )
rename_keys.append((f'transformer.decoder.layers.{i}.linear1.bias', f'decoder.layers.{i}.fc1.bias') )
rename_keys.append((f'transformer.decoder.layers.{i}.linear2.weight', f'decoder.layers.{i}.fc2.weight') )
rename_keys.append((f'transformer.decoder.layers.{i}.linear2.bias', f'decoder.layers.{i}.fc2.bias') )
rename_keys.append(
(f'transformer.decoder.layers.{i}.norm1.weight', f'decoder.layers.{i}.self_attn_layer_norm.weight') )
rename_keys.append(
(f'transformer.decoder.layers.{i}.norm1.bias', f'decoder.layers.{i}.self_attn_layer_norm.bias') )
rename_keys.append(
(f'transformer.decoder.layers.{i}.norm2.weight', f'decoder.layers.{i}.encoder_attn_layer_norm.weight') )
rename_keys.append(
(f'transformer.decoder.layers.{i}.norm2.bias', f'decoder.layers.{i}.encoder_attn_layer_norm.bias') )
rename_keys.append(
(f'transformer.decoder.layers.{i}.norm3.weight', f'decoder.layers.{i}.final_layer_norm.weight') )
rename_keys.append((f'transformer.decoder.layers.{i}.norm3.bias', f'decoder.layers.{i}.final_layer_norm.bias') )
# convolutional projection + query embeddings + layernorm of decoder + class and bounding box heads
rename_keys.extend(
[
('''input_proj.weight''', '''input_projection.weight'''),
('''input_proj.bias''', '''input_projection.bias'''),
('''query_embed.weight''', '''query_position_embeddings.weight'''),
('''transformer.decoder.norm.weight''', '''decoder.layernorm.weight'''),
('''transformer.decoder.norm.bias''', '''decoder.layernorm.bias'''),
('''class_embed.weight''', '''class_labels_classifier.weight'''),
('''class_embed.bias''', '''class_labels_classifier.bias'''),
('''bbox_embed.layers.0.weight''', '''bbox_predictor.layers.0.weight'''),
('''bbox_embed.layers.0.bias''', '''bbox_predictor.layers.0.bias'''),
('''bbox_embed.layers.1.weight''', '''bbox_predictor.layers.1.weight'''),
('''bbox_embed.layers.1.bias''', '''bbox_predictor.layers.1.bias'''),
('''bbox_embed.layers.2.weight''', '''bbox_predictor.layers.2.weight'''),
('''bbox_embed.layers.2.bias''', '''bbox_predictor.layers.2.bias'''),
] )
return rename_keys
def rename_key(state_dict, old, new):
    val = state_dict.pop(old)
    state_dict[new] = val
def read_in_q_k_v(state_dict, is_panoptic=False):
    prefix = ""
    if is_panoptic:
        prefix = "detr."

    # first: transformer encoder
    for i in range(6):
        # read in weights + bias of input projection layer (in PyTorch's MultiHeadAttention, this is a single matrix + bias)
        in_proj_weight = state_dict.pop(f"{prefix}transformer.encoder.layers.{i}.self_attn.in_proj_weight")
        in_proj_bias = state_dict.pop(f"{prefix}transformer.encoder.layers.{i}.self_attn.in_proj_bias")
        # next, add query, keys and values (in that order) to the state dict
        state_dict[f"encoder.layers.{i}.self_attn.q_proj.weight"] = in_proj_weight[:256, :]
        state_dict[f"encoder.layers.{i}.self_attn.q_proj.bias"] = in_proj_bias[:256]
        state_dict[f"encoder.layers.{i}.self_attn.k_proj.weight"] = in_proj_weight[256:512, :]
        state_dict[f"encoder.layers.{i}.self_attn.k_proj.bias"] = in_proj_bias[256:512]
        state_dict[f"encoder.layers.{i}.self_attn.v_proj.weight"] = in_proj_weight[-256:, :]
        state_dict[f"encoder.layers.{i}.self_attn.v_proj.bias"] = in_proj_bias[-256:]
    # next: transformer decoder (which is a bit more complex because it also includes cross-attention)
    for i in range(6):
        # read in weights + bias of input projection layer of self-attention
        in_proj_weight = state_dict.pop(f"{prefix}transformer.decoder.layers.{i}.self_attn.in_proj_weight")
        in_proj_bias = state_dict.pop(f"{prefix}transformer.decoder.layers.{i}.self_attn.in_proj_bias")
        # next, add query, keys and values (in that order) to the state dict
        state_dict[f"decoder.layers.{i}.self_attn.q_proj.weight"] = in_proj_weight[:256, :]
        state_dict[f"decoder.layers.{i}.self_attn.q_proj.bias"] = in_proj_bias[:256]
        state_dict[f"decoder.layers.{i}.self_attn.k_proj.weight"] = in_proj_weight[256:512, :]
        state_dict[f"decoder.layers.{i}.self_attn.k_proj.bias"] = in_proj_bias[256:512]
        state_dict[f"decoder.layers.{i}.self_attn.v_proj.weight"] = in_proj_weight[-256:, :]
        state_dict[f"decoder.layers.{i}.self_attn.v_proj.bias"] = in_proj_bias[-256:]
        # read in weights + bias of input projection layer of cross-attention
        in_proj_weight_cross_attn = state_dict.pop(f"{prefix}transformer.decoder.layers.{i}.multihead_attn.in_proj_weight")
        in_proj_bias_cross_attn = state_dict.pop(f"{prefix}transformer.decoder.layers.{i}.multihead_attn.in_proj_bias")
        # next, add query, keys and values (in that order) of cross-attention to the state dict
        state_dict[f"decoder.layers.{i}.encoder_attn.q_proj.weight"] = in_proj_weight_cross_attn[:256, :]
        state_dict[f"decoder.layers.{i}.encoder_attn.q_proj.bias"] = in_proj_bias_cross_attn[:256]
        state_dict[f"decoder.layers.{i}.encoder_attn.k_proj.weight"] = in_proj_weight_cross_attn[256:512, :]
        state_dict[f"decoder.layers.{i}.encoder_attn.k_proj.bias"] = in_proj_bias_cross_attn[256:512]
        state_dict[f"decoder.layers.{i}.encoder_attn.v_proj.weight"] = in_proj_weight_cross_attn[-256:, :]
        state_dict[f"decoder.layers.{i}.encoder_attn.v_proj.bias"] = in_proj_bias_cross_attn[-256:]
def prepare_img():
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    im = Image.open(requests.get(url, stream=True).raw)
    return im
@torch.no_grad()
def convert_detr_checkpoint(model_name, pytorch_dump_folder_path=None, push_to_hub=False):
    config, is_panoptic = get_detr_config(model_name)

    # load original model from torch hub
    model_name_to_original_name = {
        "detr-resnet-50": "detr_resnet50",
        "detr-resnet-101": "detr_resnet101",
    }
    logger.info(f"Converting model {model_name}...")
    detr = torch.hub.load("facebookresearch/detr", model_name_to_original_name[model_name], pretrained=True).eval()
    state_dict = detr.state_dict()
    # rename keys
    for src, dest in create_rename_keys(config):
        if is_panoptic:
            src = "detr." + src
        rename_key(state_dict, src, dest)
    # query, key and value matrices need special treatment
    read_in_q_k_v(state_dict, is_panoptic=is_panoptic)
    # important: we need to prepend a prefix to each of the base model keys as the head models use different attributes for them
    prefix = "detr.model." if is_panoptic else "model."
    for key in state_dict.copy().keys():
        if is_panoptic:
            if (
                key.startswith("detr")
                and not key.startswith("class_labels_classifier")
                and not key.startswith("bbox_predictor")
            ):
                val = state_dict.pop(key)
                state_dict["detr.model" + key[4:]] = val
            elif "class_labels_classifier" in key or "bbox_predictor" in key:
                val = state_dict.pop(key)
                state_dict["detr." + key] = val
            elif key.startswith("bbox_attention") or key.startswith("mask_head"):
                continue
            else:
                val = state_dict.pop(key)
                state_dict[prefix + key] = val
        else:
            if not key.startswith("class_labels_classifier") and not key.startswith("bbox_predictor"):
                val = state_dict.pop(key)
                state_dict[prefix + key] = val
    # finally, create HuggingFace model and load state dict
    model = DetrForSegmentation(config) if is_panoptic else DetrForObjectDetection(config)
    model.load_state_dict(state_dict)
    model.eval()
    # verify our conversion on an image
    format = "coco_panoptic" if is_panoptic else "coco_detection"
    processor = DetrImageProcessor(format=format)
    encoding = processor(images=prepare_img(), return_tensors="pt")
    pixel_values = encoding["pixel_values"]
    original_outputs = detr(pixel_values)
    outputs = model(pixel_values)
    assert torch.allclose(outputs.logits, original_outputs["pred_logits"], atol=1e-3)
    assert torch.allclose(outputs.pred_boxes, original_outputs["pred_boxes"], atol=1e-3)
    if is_panoptic:
        assert torch.allclose(outputs.pred_masks, original_outputs["pred_masks"], atol=1e-4)
    print("Looks ok!")

    if pytorch_dump_folder_path is not None:
        # Save model and image processor
        logger.info(f"Saving PyTorch model and image processor to {pytorch_dump_folder_path}...")
        Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
        model.save_pretrained(pytorch_dump_folder_path)
        processor.save_pretrained(pytorch_dump_folder_path)

    if push_to_hub:
        # Upload model and image processor to the hub
        logger.info("Uploading PyTorch model and image processor to the hub...")
        model.push_to_hub(f"nielsr/{model_name}")
        processor.push_to_hub(f"nielsr/{model_name}")
if __name__ == "__main__":
_lowerCamelCase = argparse.ArgumentParser()
parser.add_argument(
'--model_name',
default='detr-resnet-50',
type=str,
choices=['detr-resnet-50', 'detr-resnet-101'],
help='Name of the DETR model you\'d like to convert.',
)
parser.add_argument(
'--pytorch_dump_folder_path', default=None, type=str, help='Path to the folder to output PyTorch model.'
)
parser.add_argument('--push_to_hub', action='store_true', help='Whether to push the model to the hub or not.')
_lowerCamelCase = parser.parse_args()
convert_detr_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
| 177 | 0 |
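# To make the fused-projection slicing in read_in_q_k_v concrete, a tiny
# self-contained demo: PyTorch's MultiHeadAttention stacks Q, K and V along
# dim 0, so thirds of the rows (3 x 256 for DETR's hidden size) recover the
# separate projection matrices.
import torch

hidden = 256
in_proj_weight = torch.randn(3 * hidden, hidden)
q_w = in_proj_weight[:hidden]
k_w = in_proj_weight[hidden : 2 * hidden]
v_w = in_proj_weight[-hidden:]
assert q_w.shape == k_w.shape == v_w.shape == (hidden, hidden)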
'''simple docstring'''
import enum
import warnings
from .. import MODEL_FOR_CAUSAL_LM_MAPPING, TF_MODEL_FOR_CAUSAL_LM_MAPPING
from ..utils import add_end_docstrings, is_tf_available
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_tf_available():
import tensorflow as tf
class ReturnType(enum.Enum):
    TENSORS = 0
    NEW_TEXT = 1
    FULL_TEXT = 2


@add_end_docstrings(PIPELINE_INIT_ARGS)
class TextGenerationPipeline(Pipeline):
    XL_PREFIX = """
    In 1991, the remains of Russian Tsar Nicholas II and his family (except for Alexei and Maria) are discovered. The
    voice of Nicholas's young son, Tsarevich Alexei Nikolaevich, narrates the remainder of the story. 1883 Western
    Siberia, a young Grigori Rasputin is asked by his father and a group of men to perform magic. Rasputin has a vision
    and denounces one of the men as a horse thief. Although his father initially slaps him for making such an
    accusation, Rasputin watches as the man is chased outside and beaten. Twenty years later, Rasputin sees a vision of
    the Virgin Mary, prompting him to become a priest. Rasputin quickly becomes famous, with people, even a bishop,
    begging for his blessing. <eod> </s> <eos>
    """
    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        self.check_model_type(
            TF_MODEL_FOR_CAUSAL_LM_MAPPING if self.framework == "tf" else MODEL_FOR_CAUSAL_LM_MAPPING
        )
        if "prefix" not in self._preprocess_params:
            # This is very specific. The logic is quite complex and needs to be done
            # as a "default".
            # It also defines both some preprocess_kwargs and generate_kwargs
            # which is why we cannot put them in their respective methods.
            prefix = None
            if self.model.config.prefix is not None:
                prefix = self.model.config.prefix
            if prefix is None and self.model.__class__.__name__ in [
                "XLNetLMHeadModel",
                "TransfoXLLMHeadModel",
                "TFXLNetLMHeadModel",
                "TFTransfoXLLMHeadModel",
            ]:
                # For XLNet and TransformerXL we add an article to the prompt to give more state to the model.
                prefix = self.XL_PREFIX
            if prefix is not None:
                # Recalculate some generate_kwargs linked to prefix.
                preprocess_params, forward_params, _ = self._sanitize_parameters(prefix=prefix, **self._forward_params)
                self._preprocess_params = {**self._preprocess_params, **preprocess_params}
                self._forward_params = {**self._forward_params, **forward_params}
    def a__ (self , return_full_text=None , return_tensors=None , return_text=None , return_type=None , clean_up_tokenization_spaces=None , prefix=None , handle_long_generation=None , stop_sequence=None , **generate_kwargs , ):
        """simple docstring"""
        preprocess_params = {}
        if prefix is not None:
            preprocess_params['''prefix'''] = prefix
        if prefix:
            prefix_inputs = self.tokenizer(
                prefix , padding=False , add_special_tokens=False , return_tensors=self.framework )
            generate_kwargs['''prefix_length'''] = prefix_inputs['''input_ids'''].shape[-1]
        if handle_long_generation is not None:
            if handle_long_generation not in {"hole"}:
                raise ValueError(
                    f'''{handle_long_generation} is not a valid value for `handle_long_generation` parameter expected'''
                    ''' [None, \'hole\']''' )
            preprocess_params['''handle_long_generation'''] = handle_long_generation
        preprocess_params.update(generate_kwargs )
        forward_params = generate_kwargs
        postprocess_params = {}
        if return_full_text is not None and return_type is None:
            if return_text is not None:
                raise ValueError('''`return_text` is mutually exclusive with `return_full_text`''' )
            if return_tensors is not None:
                raise ValueError('''`return_full_text` is mutually exclusive with `return_tensors`''' )
            return_type = ReturnType.FULL_TEXT if return_full_text else ReturnType.NEW_TEXT
        if return_tensors is not None and return_type is None:
            if return_text is not None:
                raise ValueError('''`return_text` is mutually exclusive with `return_tensors`''' )
            return_type = ReturnType.TENSORS
        if return_type is not None:
            postprocess_params['''return_type'''] = return_type
        if clean_up_tokenization_spaces is not None:
            postprocess_params['''clean_up_tokenization_spaces'''] = clean_up_tokenization_spaces
        if stop_sequence is not None:
            stop_sequence_ids = self.tokenizer.encode(stop_sequence , add_special_tokens=False )
            if len(stop_sequence_ids ) > 1:
                warnings.warn(
                    '''Stopping on a multiple token sequence is not yet supported on transformers. The first token of'''
                    ''' the stop sequence will be used as the stop sequence string in the interim.''' )
            generate_kwargs['''eos_token_id'''] = stop_sequence_ids[0]
        return preprocess_params, forward_params, postprocess_params
    def a__ (self , *args , **kwargs ):
        """simple docstring"""
        if self.model.__class__.__name__ in ["TransfoXLLMHeadModel"]:
            kwargs.update({'''add_space_before_punct_symbol''': True} )
        return super()._parse_and_tokenize(*args , **kwargs )
    def __call__(self , text_inputs , **kwargs ):
        """simple docstring"""
        return super().__call__(text_inputs , **kwargs )
    def a__ (self , prompt_text , prefix="" , handle_long_generation=None , **generate_kwargs ):
        """simple docstring"""
        inputs = self.tokenizer(
            prefix + prompt_text , padding=False , add_special_tokens=False , return_tensors=self.framework )
        inputs['''prompt_text'''] = prompt_text
        if handle_long_generation == "hole":
            cur_len = inputs['''input_ids'''].shape[-1]
            if "max_new_tokens" in generate_kwargs:
                new_tokens = generate_kwargs['''max_new_tokens''']
            else:
                new_tokens = generate_kwargs.get('''max_length''' , self.model.config.max_length ) - cur_len
                if new_tokens < 0:
                    raise ValueError('''We cannot infer how many new tokens are expected''' )
            if cur_len + new_tokens > self.tokenizer.model_max_length:
                keep_length = self.tokenizer.model_max_length - new_tokens
                if keep_length <= 0:
                    raise ValueError(
                        '''We cannot use `hole` to handle this generation the number of desired tokens exceeds the'''
                        ''' models max length''' )
                inputs['''input_ids'''] = inputs['''input_ids'''][:, -keep_length:]
                if "attention_mask" in inputs:
                    inputs['''attention_mask'''] = inputs['''attention_mask'''][:, -keep_length:]
        return inputs
    def a__ (self , model_inputs , **generate_kwargs ):
        """simple docstring"""
        input_ids = model_inputs['''input_ids''']
        attention_mask = model_inputs.get('''attention_mask''' , None )
        # Allow empty prompts
        if input_ids.shape[1] == 0:
            input_ids = None
            attention_mask = None
            in_b = 1
        else:
            in_b = input_ids.shape[0]
        prompt_text = model_inputs.pop('''prompt_text''' )
        # If there is a prefix, we may need to adjust the generation length. Do so without permanently modifying
        # generate_kwargs, as some of the parameterization may come from the initialization of the pipeline.
        prefix_length = generate_kwargs.pop('''prefix_length''' , 0 )
        if prefix_length > 0:
            has_max_new_tokens = '''max_new_tokens''' in generate_kwargs or (
                '''generation_config''' in generate_kwargs
                and generate_kwargs['''generation_config'''].max_new_tokens is not None
            )
            if not has_max_new_tokens:
                generate_kwargs["max_length"] = generate_kwargs.get('''max_length''' ) or self.model.config.max_length
                generate_kwargs["max_length"] += prefix_length
            has_min_new_tokens = '''min_new_tokens''' in generate_kwargs or (
                '''generation_config''' in generate_kwargs
                and generate_kwargs['''generation_config'''].min_new_tokens is not None
            )
            if not has_min_new_tokens and "min_length" in generate_kwargs:
                generate_kwargs["min_length"] += prefix_length
        # BS x SL
        generated_sequence = self.model.generate(input_ids=input_ids , attention_mask=attention_mask , **generate_kwargs )
        out_b = generated_sequence.shape[0]
        if self.framework == "pt":
            generated_sequence = generated_sequence.reshape(in_b , out_b // in_b , *generated_sequence.shape[1:] )
        elif self.framework == "tf":
            generated_sequence = tf.reshape(generated_sequence , (in_b, out_b // in_b, *generated_sequence.shape[1:]) )
        return {"generated_sequence": generated_sequence, "input_ids": input_ids, "prompt_text": prompt_text}
    def a__ (self , model_outputs , return_type=ReturnType.FULL_TEXT , clean_up_tokenization_spaces=True ):
        """simple docstring"""
        generated_sequence = model_outputs['''generated_sequence'''][0]
        input_ids = model_outputs['''input_ids''']
        prompt_text = model_outputs['''prompt_text''']
        generated_sequence = generated_sequence.numpy().tolist()
        records = []
        for sequence in generated_sequence:
            if return_type == ReturnType.TENSORS:
                record = {'''generated_token_ids''': sequence}
            elif return_type in {ReturnType.NEW_TEXT, ReturnType.FULL_TEXT}:
                # Decode text
                text = self.tokenizer.decode(
                    sequence , skip_special_tokens=True , clean_up_tokenization_spaces=clean_up_tokenization_spaces , )
                # Remove PADDING prompt of the sequence if XLNet or Transfo-XL model is used
                if input_ids is None:
                    prompt_length = 0
                else:
                    prompt_length = len(
                        self.tokenizer.decode(
                            input_ids[0] , skip_special_tokens=True , clean_up_tokenization_spaces=clean_up_tokenization_spaces , ) )
                if return_type == ReturnType.FULL_TEXT:
                    all_text = prompt_text + text[prompt_length:]
                else:
                    all_text = text[prompt_length:]
                record = {'''generated_text''': all_text}
            records.append(record )
        return records
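# Minimal usage sketch for the pipeline above (illustrative; assumes the standard
# `pipeline` factory and a hub-hosted causal LM such as gpt2):
#   from transformers import pipeline
#   generator = pipeline("text-generation", model="gpt2")
#   print(generator("Hello, I'm a language model,", max_new_tokens=20)[0]["generated_text"])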
| 211 |
'''simple docstring'''
import argparse
import json
import os
import fairseq
import torch
from fairseq.data import Dictionary
from transformers import (
UniSpeechConfig,
UniSpeechForCTC,
UniSpeechForPreTraining,
    Wav2Vec2FeatureExtractor,
    Wav2Vec2PhonemeCTCTokenizer,
    Wav2Vec2Processor,
logging,
)
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
MAPPING = {
"post_extract_proj": "feature_projection.projection",
"encoder.pos_conv.0": "encoder.pos_conv_embed.conv",
"self_attn.k_proj": "encoder.layers.*.attention.k_proj",
"self_attn.v_proj": "encoder.layers.*.attention.v_proj",
"self_attn.q_proj": "encoder.layers.*.attention.q_proj",
"self_attn.out_proj": "encoder.layers.*.attention.out_proj",
"self_attn_layer_norm": "encoder.layers.*.layer_norm",
"fc1": "encoder.layers.*.feed_forward.intermediate_dense",
"fc2": "encoder.layers.*.feed_forward.output_dense",
"final_layer_norm": "encoder.layers.*.final_layer_norm",
"encoder.layer_norm": "encoder.layer_norm",
"w2v_model.layer_norm": "feature_projection.layer_norm",
"quantizer.weight_proj": "quantizer.weight_proj",
"quantizer.vars": "quantizer.codevectors",
"project_q": "project_q",
"final_proj": "project_hid",
"w2v_encoder.proj": "ctc_proj",
"mask_emb": "masked_spec_embed",
}
TOP_LEVEL_KEYS = [
"ctc_proj",
"quantizer.weight_proj",
"quantizer.codevectors",
"project_q",
"project_hid",
]
def set_recursively (hf_pointer , key , value , full_name , weight_type , is_finetuned):
    """simple docstring"""
    for attribute in key.split('''.'''):
        if is_finetuned:
            if attribute in ["quantizer", "project_q", "project_hid"]:
                # those layers are only relevant for pretraining and should be dropped
                return
            if attribute == "ctc_proj":
                # we should rename `ctc_proj` to `lm_head` for fine-tuned phoneme models
                attribute = '''lm_head'''
        hf_pointer = getattr(hf_pointer , attribute)
    if weight_type is not None:
        hf_shape = getattr(hf_pointer , weight_type).shape
    else:
        hf_shape = hf_pointer.shape
    assert hf_shape == value.shape, (
        F'''Shape of hf {key + '.' + weight_type if weight_type is not None else ''} is {hf_shape}, but should be'''
        F''' {value.shape} for {full_name}'''
    )
    if weight_type == "weight":
        hf_pointer.weight.data = value
    elif weight_type == "weight_g":
        hf_pointer.weight_g.data = value
    elif weight_type == "weight_v":
        hf_pointer.weight_v.data = value
    elif weight_type == "bias":
        hf_pointer.bias.data = value
    else:
        hf_pointer.data = value
    logger.info(F'''{key + '.' + weight_type if weight_type is not None else ''} was initialized from {full_name}.''')
def recursively_load_weights (fairseq_model , hf_model , is_finetuned):
    """simple docstring"""
    unused_weights = []
    fairseq_dict = fairseq_model.state_dict()
    feature_extractor = hf_model.unispeech.feature_extractor
    for name, value in fairseq_dict.items():
        is_used = False
        if "conv_layers" in name:
            load_conv_layer(
                name , value , feature_extractor , unused_weights , hf_model.config.feat_extract_norm == '''group''' , )
            is_used = True
        else:
            for key, mapped_key in MAPPING.items():
                mapped_key = '''unispeech.''' + mapped_key if mapped_key not in TOP_LEVEL_KEYS else mapped_key
                if key in name or key.split('''w2v_model.''')[-1] == name.split('''.''')[0]:
                    is_used = True
                    if "*" in mapped_key:
                        layer_index = name.split(key)[0].split('''.''')[-2]
                        mapped_key = mapped_key.replace('''*''' , layer_index)
                    if "weight_g" in name:
                        weight_type = '''weight_g'''
                    elif "weight_v" in name:
                        weight_type = '''weight_v'''
                    elif "bias" in name:
                        weight_type = '''bias'''
                    elif "weight" in name:
                        # TODO: don't match quantizer.weight_proj
                        weight_type = '''weight'''
                    else:
                        weight_type = None
                    set_recursively(hf_model , mapped_key , value , name , weight_type , is_finetuned)
                    continue
        if not is_used:
            unused_weights.append(name)
    logger.warning(F'''Unused weights: {unused_weights}''')
def load_conv_layer (full_name , value , feature_extractor , unused_weights , use_group_norm):
    """simple docstring"""
    name = full_name.split('''conv_layers.''')[-1]
    items = name.split('''.''')
    layer_id = int(items[0])
    type_id = int(items[1])
    if type_id == 0:
        if "bias" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].conv.bias.data.shape, (
                F'''{full_name} has size {value.shape}, but'''
                F''' {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found.'''
            )
            feature_extractor.conv_layers[layer_id].conv.bias.data = value
            logger.info(F'''Feat extract conv layer {layer_id} was initialized from {full_name}.''')
        elif "weight" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].conv.weight.data.shape, (
                F'''{full_name} has size {value.shape}, but'''
                F''' {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found.'''
            )
            feature_extractor.conv_layers[layer_id].conv.weight.data = value
            logger.info(F'''Feat extract conv layer {layer_id} was initialized from {full_name}.''')
    elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
        if "bias" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape, (
                F'''{full_name} has size {value.shape}, but {feature_extractor[layer_id].layer_norm.bias.data.shape} was'''
                " found."
            )
            feature_extractor.conv_layers[layer_id].layer_norm.bias.data = value
            logger.info(F'''Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.''')
        elif "weight" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape, (
                F'''{full_name} has size {value.shape}, but'''
                F''' {feature_extractor[layer_id].layer_norm.weight.data.shape} was found.'''
            )
            feature_extractor.conv_layers[layer_id].layer_norm.weight.data = value
            logger.info(F'''Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.''')
    else:
        unused_weights.append(full_name)
@torch.no_grad()
def convert_unispeech_checkpoint (checkpoint_path , pytorch_dump_folder_path , config_path=None , dict_path=None , is_finetuned=True):
    """simple docstring"""
    if config_path is not None:
        config = UniSpeechConfig.from_pretrained(config_path)
    else:
        config = UniSpeechConfig()
    if is_finetuned:
        if dict_path:
            target_dict = Dictionary.load_from_json(dict_path)
            # important change bos & pad token id since CTC symbol is <pad> and
            # not <s> as in fairseq
            config.bos_token_id = target_dict.pad_index
            config.pad_token_id = target_dict.bos_index
            config.eos_token_id = target_dict.eos_index
            config.vocab_size = len(target_dict.symbols)
            vocab_path = os.path.join(pytorch_dump_folder_path , '''vocab.json''')
            if not os.path.isdir(pytorch_dump_folder_path):
                logger.error('''--pytorch_dump_folder_path ({}) should be a directory'''.format(pytorch_dump_folder_path))
                return
            os.makedirs(pytorch_dump_folder_path , exist_ok=True)
            vocab_dict = target_dict.indices
            # fairseq has the <pad> and <s> switched
            vocab_dict['''<pad>'''] = 42
            vocab_dict['''<s>'''] = 43
            with open(vocab_path , '''w''' , encoding='''utf-8''') as vocab_handle:
                json.dump(vocab_dict , vocab_handle)
            tokenizer = Wav2Vec2PhonemeCTCTokenizer(
                vocab_path , unk_token=target_dict.unk_word , pad_token=target_dict.pad_word , bos_token=target_dict.bos_word , eos_token=target_dict.eos_word , word_delimiter_token='''|''' , do_lower_case=False , )
            return_attention_mask = True if config.feat_extract_norm == '''layer''' else False
            feature_extractor = Wav2Vec2FeatureExtractor(
                feature_size=1 , sampling_rate=16_000 , padding_value=0 , do_normalize=True , return_attention_mask=return_attention_mask , )
            processor = Wav2Vec2Processor(feature_extractor=feature_extractor , tokenizer=tokenizer)
            processor.save_pretrained(pytorch_dump_folder_path)
        hf_unispeech = UniSpeechForCTC(config)
    else:
        hf_unispeech = UniSpeechForPreTraining(config)
    if is_finetuned:
        model , _ , _ = fairseq.checkpoint_utils.load_model_ensemble_and_task(
            [checkpoint_path] , arg_overrides={'''data''': '''/'''.join(dict_path.split('''/''')[:-1]), '''w2v_path''': checkpoint_path})
    else:
        model , _ , _ = fairseq.checkpoint_utils.load_model_ensemble_and_task([checkpoint_path])
    model = model[0].eval()
    recursively_load_weights(model , hf_unispeech , is_finetuned)
    hf_unispeech.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument("--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.")
parser.add_argument("--checkpoint_path", default=None, type=str, help="Path to fairseq checkpoint")
parser.add_argument("--dict_path", default=None, type=str, help="Path to dict of fine-tuned model")
parser.add_argument("--config_path", default=None, type=str, help="Path to hf config.json of model to convert")
parser.add_argument(
"--not_finetuned", action="store_true", help="Whether the model to convert is a fine-tuned model or not"
)
    args = parser.parse_args()
convert_unispeech_checkpoint(
args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path, args.dict_path, not args.not_finetuned
)
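    # Example invocation (illustrative; the script filename and paths are assumptions):
    #   python convert_unispeech_original_pytorch_checkpoint_to_pytorch.py \
    #       --checkpoint_path ./unispeech.pt --pytorch_dump_folder_path ./unispeech-hf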
| 211 | 1 |
"""simple docstring"""
ENERGY_CONVERSION = {
'joule': 1.0,
'kilojoule': 1000,
'megajoule': 100_0000,
'gigajoule': 10_0000_0000,
'wattsecond': 1.0,
'watthour': 3600,
'kilowatthour': 360_0000,
'newtonmeter': 1.0,
'calorie_nutr': 4_1_8_6.8,
'kilocalorie_nutr': 418_6800.00,
'electronvolt': 1.602176634e-19,
'britishthermalunit_it': 1_0_5_5.0_5_5_8_5,
'footpound': 1.3_5_5_8_1_8,
}
def SCREAMING_SNAKE_CASE_ ( from_type : str , to_type : str , value : float ) -> float:
    """simple docstring"""
    if to_type not in ENERGY_CONVERSION or from_type not in ENERGY_CONVERSION:
        a_ : str = (
            F"""Incorrect 'from_type' or 'to_type' value: {from_type!r}, {to_type!r}\n"""
            F"""Valid values are: {", ".join(ENERGY_CONVERSION )}"""
        )
        raise ValueError(a_ )
return value * ENERGY_CONVERSION[from_type] / ENERGY_CONVERSION[to_type]
if __name__ == "__main__":
import doctest
doctest.testmod()
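    # Hand-checkable examples (values follow the table above; the function keeps
    # this file's anonymized name):
    #   SCREAMING_SNAKE_CASE_("joule", "kilojoule", 1000)   # -> 1.0
    #   SCREAMING_SNAKE_CASE_("kilowatthour", "joule", 1)   # -> 3600000.0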
| 350 |
def is_arithmetic_series ( series : list ) -> bool:
    """simple docstring"""
    if not isinstance(series , list ):
        raise ValueError('Input series is not valid, valid series - [2, 4, 6]' )
    if len(series ) == 0:
        raise ValueError('Input list must be a non empty list' )
    if len(series ) == 1:
        return True
    common_diff = series[1] - series[0]
    for index in range(len(series ) - 1 ):
        if series[index + 1] - series[index] != common_diff:
            return False
    return True
def arithmetic_mean ( series : list ) -> float:
    """simple docstring"""
    if not isinstance(series , list ):
        raise ValueError('Input series is not valid, valid series - [2, 4, 6]' )
    if len(series ) == 0:
        raise ValueError('Input list must be a non empty list' )
    answer = 0
    for val in series:
        answer += val
    return answer / len(series )
if __name__ == "__main__":
import doctest
doctest.testmod()
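    # Hand-checkable examples:
    #   is_arithmetic_series([2, 4, 6])  # -> True  (common difference 2)
    #   arithmetic_mean([2, 4, 6])       # -> 4.0   ((2 + 4 + 6) / 3)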
| 120 | 0 |
"""simple docstring"""
import random
import unittest
import numpy as np
import torch
from diffusers import (
DPMSolverMultistepScheduler,
EulerAncestralDiscreteScheduler,
EulerDiscreteScheduler,
LMSDiscreteScheduler,
OnnxStableDiffusionUpscalePipeline,
PNDMScheduler,
)
from diffusers.utils import floats_tensor
from diffusers.utils.testing_utils import (
is_onnx_available,
load_image,
nightly,
require_onnxruntime,
require_torch_gpu,
)
from ..test_pipelines_onnx_common import OnnxPipelineTesterMixin
if is_onnx_available():
import onnxruntime as ort
class OnnxStableDiffusionUpscalePipelineFastTests ( OnnxPipelineTesterMixin , unittest.TestCase ):
    """simple docstring"""
    hub_checkpoint = "ssube/stable-diffusion-x4-upscaler-onnx"
    def get_dummy_inputs ( self , snake_case__=0 ):
        """simple docstring"""
        image = floats_tensor((1, 3, 128, 128) , rng=random.Random(snake_case__ ) )
        generator = torch.manual_seed(snake_case__ )
        inputs = {
            "prompt": "A painting of a squirrel eating a burger",
            "image": image,
            "generator": generator,
            "num_inference_steps": 3,
            "guidance_scale": 7.5,
            "output_type": "numpy",
        }
        return inputs
    def test_pipeline_default_ddpm ( self ):
        """simple docstring"""
        pipe = OnnxStableDiffusionUpscalePipeline.from_pretrained(self.hub_checkpoint , provider="CPUExecutionProvider" )
        pipe.set_progress_bar_config(disable=None )
        inputs = self.get_dummy_inputs()
        image = pipe(**inputs ).images
        image_slice = image[0, -3:, -3:, -1].flatten()
        # started as 128, should now be 512
        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array(
            [0.6974782, 0.68902093, 0.70135885, 0.7583618, 0.7804545, 0.7854912, 0.78667426, 0.78743863, 0.78070223] )
        assert np.abs(image_slice - expected_slice ).max() < 1e-1
    def test_pipeline_pndm ( self ):
        """simple docstring"""
        pipe = OnnxStableDiffusionUpscalePipeline.from_pretrained(self.hub_checkpoint , provider="CPUExecutionProvider" )
        pipe.scheduler = PNDMScheduler.from_config(pipe.scheduler.config , skip_prk_steps=True )
        pipe.set_progress_bar_config(disable=None )
        inputs = self.get_dummy_inputs()
        image = pipe(**inputs ).images
        image_slice = image[0, -3:, -3:, -1]
        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array(
            [0.6898892, 0.59240556, 0.52499527, 0.58866215, 0.52258235, 0.52572715, 0.62414473, 0.6174387, 0.6214964] )
        assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-1
    def test_pipeline_dpm_multistep ( self ):
        """simple docstring"""
        pipe = OnnxStableDiffusionUpscalePipeline.from_pretrained(self.hub_checkpoint , provider="CPUExecutionProvider" )
        pipe.scheduler = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config )
        pipe.set_progress_bar_config(disable=None )
        inputs = self.get_dummy_inputs()
        image = pipe(**inputs ).images
        image_slice = image[0, -3:, -3:, -1]
        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array(
            [0.7659278, 0.76437664, 0.75579107, 0.7691116, 0.77666986, 0.7727672, 0.7758664, 0.7812226, 0.76942515] )
        assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-1
    def test_pipeline_euler ( self ):
        """simple docstring"""
        pipe = OnnxStableDiffusionUpscalePipeline.from_pretrained(self.hub_checkpoint , provider="CPUExecutionProvider" )
        pipe.scheduler = EulerDiscreteScheduler.from_config(pipe.scheduler.config )
        pipe.set_progress_bar_config(disable=None )
        inputs = self.get_dummy_inputs()
        image = pipe(**inputs ).images
        image_slice = image[0, -3:, -3:, -1]
        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array(
            [0.6974782, 0.68902093, 0.70135885, 0.7583618, 0.7804545, 0.7854912, 0.78667426, 0.78743863, 0.78070223] )
        assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-1
    def test_pipeline_euler_ancestral ( self ):
        """simple docstring"""
        pipe = OnnxStableDiffusionUpscalePipeline.from_pretrained(self.hub_checkpoint , provider="CPUExecutionProvider" )
        pipe.scheduler = EulerAncestralDiscreteScheduler.from_config(pipe.scheduler.config )
        pipe.set_progress_bar_config(disable=None )
        inputs = self.get_dummy_inputs()
        image = pipe(**inputs ).images
        image_slice = image[0, -3:, -3:, -1]
        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array(
            [0.77424496, 0.773601, 0.7645288, 0.7769598, 0.7772739, 0.7738688, 0.78187233, 0.77879584, 0.767043] )
        assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-1
@nightly
@require_onnxruntime
@require_torch_gpu
class OnnxStableDiffusionUpscalePipelineIntegrationTests ( unittest.TestCase ):
"""simple docstring"""
    @property
    def gpu_provider ( self ):
        """simple docstring"""
        return (
            "CUDAExecutionProvider",
            {
                "gpu_mem_limit": "15000000000", # 15GB
                "arena_extend_strategy": "kSameAsRequested",
            },
        )
    @property
    def gpu_options ( self ):
        """simple docstring"""
        options = ort.SessionOptions()
        options.enable_mem_pattern = False
        return options
    def test_inference_default_pndm ( self ):
        """simple docstring"""
        init_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/img2img/sketch-mountains-input.jpg" )
        init_image = init_image.resize((128, 128) )
        # using the PNDM scheduler by default
        pipe = OnnxStableDiffusionUpscalePipeline.from_pretrained(
            "ssube/stable-diffusion-x4-upscaler-onnx" , provider=self.gpu_provider , sess_options=self.gpu_options , )
        pipe.set_progress_bar_config(disable=None )
        prompt = "A fantasy landscape, trending on artstation"
        generator = torch.manual_seed(0 )
        output = pipe(
            prompt=prompt , image=init_image , guidance_scale=7.5 , num_inference_steps=10 , generator=generator , output_type="np" , )
        images = output.images
        image_slice = images[0, 255:258, 383:386, -1]
        assert images.shape == (1, 512, 512, 3)
        expected_slice = np.array([0.4883, 0.4947, 0.4980, 0.4975, 0.4982, 0.4980, 0.5000, 0.5006, 0.4972] )
        # TODO: lower the tolerance after finding the cause of onnxruntime reproducibility issues
        assert np.abs(image_slice.flatten() - expected_slice ).max() < 2e-2
    def test_inference_k_lms ( self ):
        """simple docstring"""
        init_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/img2img/sketch-mountains-input.jpg" )
        init_image = init_image.resize((128, 128) )
        lms_scheduler = LMSDiscreteScheduler.from_pretrained(
            "ssube/stable-diffusion-x4-upscaler-onnx" , subfolder="scheduler" )
        pipe = OnnxStableDiffusionUpscalePipeline.from_pretrained(
            "ssube/stable-diffusion-x4-upscaler-onnx" , scheduler=lms_scheduler , provider=self.gpu_provider , sess_options=self.gpu_options , )
        pipe.set_progress_bar_config(disable=None )
        prompt = "A fantasy landscape, trending on artstation"
        generator = torch.manual_seed(0 )
        output = pipe(
            prompt=prompt , image=init_image , guidance_scale=7.5 , num_inference_steps=20 , generator=generator , output_type="np" , )
        images = output.images
        image_slice = images[0, 255:258, 383:386, -1]
        assert images.shape == (1, 512, 512, 3)
        expected_slice = np.array(
            [0.50173753, 0.50223356, 0.502039, 0.50233036, 0.5023725, 0.5022601, 0.5018758, 0.50234085, 0.50241566] )
        # TODO: lower the tolerance after finding the cause of onnxruntime reproducibility issues
        assert np.abs(image_slice.flatten() - expected_slice ).max() < 2e-2
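# These tests are normally collected by pytest; an illustrative invocation (the
# path is an assumption) would be:
#   python -m pytest tests/pipelines/stable_diffusion/test_onnx_stable_diffusion_upscale.py -s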
| 108 |
'''simple docstring'''
import argparse
import json
from pathlib import Path
import torch
import torchaudio
from datasets import load_dataset
from huggingface_hub import hf_hub_download
from transformers import ASTConfig, ASTFeatureExtractor, ASTForAudioClassification
from transformers.utils import logging
logging.set_verbosity_info()
A =logging.get_logger(__name__)
def get_audio_spectrogram_transformer_config (model_name ):
    config = ASTConfig()
    if "10-10" in model_name:
        pass
    elif "speech-commands" in model_name:
        config.max_length = 1_2_8
    elif "12-12" in model_name:
        config.time_stride = 1_2
        config.frequency_stride = 1_2
    elif "14-14" in model_name:
        config.time_stride = 1_4
        config.frequency_stride = 1_4
    elif "16-16" in model_name:
        config.time_stride = 1_6
        config.frequency_stride = 1_6
    else:
        raise ValueError('''Model not supported''' )
    repo_id = '''huggingface/label-files'''
    if "speech-commands" in model_name:
        config.num_labels = 3_5
        filename = '''speech-commands-v2-id2label.json'''
    else:
        config.num_labels = 5_2_7
        filename = '''audioset-id2label.json'''
    idalabel = json.load(open(hf_hub_download(repo_id , filename , repo_type='''dataset''' ) , '''r''' ) )
    idalabel = {int(k ): v for k, v in idalabel.items()}
    config.id2label = idalabel
    config.label2id = {v: k for k, v in idalabel.items()}
    return config
def rename_key (name ):
    if "module.v" in name:
        name = name.replace('''module.v''' , '''audio_spectrogram_transformer''' )
    if "cls_token" in name:
        name = name.replace('''cls_token''' , '''embeddings.cls_token''' )
    if "dist_token" in name:
        name = name.replace('''dist_token''' , '''embeddings.distillation_token''' )
    if "pos_embed" in name:
        name = name.replace('''pos_embed''' , '''embeddings.position_embeddings''' )
    if "patch_embed.proj" in name:
        name = name.replace('''patch_embed.proj''' , '''embeddings.patch_embeddings.projection''' )
    # transformer blocks
    if "blocks" in name:
        name = name.replace('''blocks''' , '''encoder.layer''' )
    if "attn.proj" in name:
        name = name.replace('''attn.proj''' , '''attention.output.dense''' )
    if "attn" in name:
        name = name.replace('''attn''' , '''attention.self''' )
    if "norm1" in name:
        name = name.replace('''norm1''' , '''layernorm_before''' )
    if "norm2" in name:
        name = name.replace('''norm2''' , '''layernorm_after''' )
    if "mlp.fc1" in name:
        name = name.replace('''mlp.fc1''' , '''intermediate.dense''' )
    if "mlp.fc2" in name:
        name = name.replace('''mlp.fc2''' , '''output.dense''' )
    # final layernorm
    if "audio_spectrogram_transformer.norm" in name:
        name = name.replace('''audio_spectrogram_transformer.norm''' , '''audio_spectrogram_transformer.layernorm''' )
    # classifier head
    if "module.mlp_head.0" in name:
        name = name.replace('''module.mlp_head.0''' , '''classifier.layernorm''' )
    if "module.mlp_head.1" in name:
        name = name.replace('''module.mlp_head.1''' , '''classifier.dense''' )
    return name
def convert_state_dict (orig_state_dict , config ):
    for key in orig_state_dict.copy().keys():
        val = orig_state_dict.pop(key )
        if "qkv" in key:
            key_split = key.split('''.''' )
            layer_num = int(key_split[3] )
            dim = config.hidden_size
            if "weight" in key:
                orig_state_dict[
                    f"audio_spectrogram_transformer.encoder.layer.{layer_num}.attention.attention.query.weight"
                ] = val[:dim, :]
                orig_state_dict[
                    f"audio_spectrogram_transformer.encoder.layer.{layer_num}.attention.attention.key.weight"
                ] = val[dim : dim * 2, :]
                orig_state_dict[
                    f"audio_spectrogram_transformer.encoder.layer.{layer_num}.attention.attention.value.weight"
                ] = val[-dim:, :]
            else:
                orig_state_dict[
                    f"audio_spectrogram_transformer.encoder.layer.{layer_num}.attention.attention.query.bias"
                ] = val[:dim]
                orig_state_dict[
                    f"audio_spectrogram_transformer.encoder.layer.{layer_num}.attention.attention.key.bias"
                ] = val[dim : dim * 2]
                orig_state_dict[
                    f"audio_spectrogram_transformer.encoder.layer.{layer_num}.attention.attention.value.bias"
                ] = val[-dim:]
        else:
            orig_state_dict[rename_key(key )] = val
    return orig_state_dict
def remove_keys (state_dict ):
    ignore_keys = [
        '''module.v.head.weight''',
        '''module.v.head.bias''',
        '''module.v.head_dist.weight''',
        '''module.v.head_dist.bias''',
    ]
    for k in ignore_keys:
        state_dict.pop(k , None )
@torch.no_grad()
def convert_audio_spectrogram_transformer_checkpoint (model_name , pytorch_dump_folder_path , push_to_hub=False ):
    config = get_audio_spectrogram_transformer_config(model_name )
    model_name_to_url = {
'''ast-finetuned-audioset-10-10-0.4593''': (
'''https://www.dropbox.com/s/ca0b1v2nlxzyeb4/audioset_10_10_0.4593.pth?dl=1'''
),
'''ast-finetuned-audioset-10-10-0.450''': (
'''https://www.dropbox.com/s/1tv0hovue1bxupk/audioset_10_10_0.4495.pth?dl=1'''
),
'''ast-finetuned-audioset-10-10-0.448''': (
'''https://www.dropbox.com/s/6u5sikl4b9wo4u5/audioset_10_10_0.4483.pth?dl=1'''
),
'''ast-finetuned-audioset-10-10-0.448-v2''': (
'''https://www.dropbox.com/s/kt6i0v9fvfm1mbq/audioset_10_10_0.4475.pth?dl=1'''
),
'''ast-finetuned-audioset-12-12-0.447''': (
'''https://www.dropbox.com/s/snfhx3tizr4nuc8/audioset_12_12_0.4467.pth?dl=1'''
),
'''ast-finetuned-audioset-14-14-0.443''': (
'''https://www.dropbox.com/s/z18s6pemtnxm4k7/audioset_14_14_0.4431.pth?dl=1'''
),
'''ast-finetuned-audioset-16-16-0.442''': (
'''https://www.dropbox.com/s/mdsa4t1xmcimia6/audioset_16_16_0.4422.pth?dl=1'''
),
'''ast-finetuned-speech-commands-v2''': (
'''https://www.dropbox.com/s/q0tbqpwv44pquwy/speechcommands_10_10_0.9812.pth?dl=1'''
),
}
# load original state_dict
    checkpoint_url = model_name_to_url[model_name]
    state_dict = torch.hub.load_state_dict_from_url(checkpoint_url , map_location='''cpu''' )
    # remove some keys
    remove_keys(state_dict )
    # rename some keys
    new_state_dict = convert_state_dict(state_dict , config )
    # load 🤗 model
    model = ASTForAudioClassification(config )
    model.eval()
    model.load_state_dict(new_state_dict )
    # verify outputs on dummy input
    # source: https://github.com/YuanGongND/ast/blob/79e873b8a54d0a3b330dd522584ff2b9926cd581/src/run.py#L62
    mean = -4.267_7393 if '''speech-commands''' not in model_name else -6.84_5978
    std = 4.568_9974 if '''speech-commands''' not in model_name else 5.565_4526
    max_length = 1_0_2_4 if '''speech-commands''' not in model_name else 1_2_8
    feature_extractor = ASTFeatureExtractor(mean=mean , std=std , max_length=max_length )
    if "speech-commands" in model_name:
        dataset = load_dataset('''speech_commands''' , '''v0.02''' , split='''validation''' )
        waveform = dataset[0]['''audio''']['''array''']
    else:
        filepath = hf_hub_download(
            repo_id='''nielsr/audio-spectogram-transformer-checkpoint''' , filename='''sample_audio.flac''' , repo_type='''dataset''' , )
        waveform , _ = torchaudio.load(filepath )
        waveform = waveform.squeeze().numpy()
    inputs = feature_extractor(waveform , sampling_rate=1_6_0_0_0 , return_tensors='''pt''' )
    # forward pass
    outputs = model(**inputs )
    logits = outputs.logits
    if model_name == "ast-finetuned-audioset-10-10-0.4593":
        expected_slice = torch.tensor([-0.8760, -7.0042, -8.6602] )
    elif model_name == "ast-finetuned-audioset-10-10-0.450":
        expected_slice = torch.tensor([-1.1986, -7.0903, -8.2718] )
    elif model_name == "ast-finetuned-audioset-10-10-0.448":
        expected_slice = torch.tensor([-2.6128, -8.0080, -9.4344] )
    elif model_name == "ast-finetuned-audioset-10-10-0.448-v2":
        expected_slice = torch.tensor([-1.5080, -7.4534, -8.8917] )
    elif model_name == "ast-finetuned-audioset-12-12-0.447":
        expected_slice = torch.tensor([-0.5050, -6.5833, -8.0843] )
    elif model_name == "ast-finetuned-audioset-14-14-0.443":
        expected_slice = torch.tensor([-0.3826, -7.0336, -8.2413] )
    elif model_name == "ast-finetuned-audioset-16-16-0.442":
        expected_slice = torch.tensor([-1.2113, -6.9101, -8.3470] )
    elif model_name == "ast-finetuned-speech-commands-v2":
        expected_slice = torch.tensor([6.1589, -8.0566, -8.7984] )
    else:
        raise ValueError('''Unknown model name''' )
    if not torch.allclose(logits[0, :3] , expected_slice , atol=1E-4 ):
        raise ValueError('''Logits don\'t match''' )
print('''Looks ok!''' )
    if pytorch_dump_folder_path is not None:
        Path(pytorch_dump_folder_path ).mkdir(exist_ok=True )
        print(F"Saving model {model_name} to {pytorch_dump_folder_path}" )
        model.save_pretrained(pytorch_dump_folder_path )
        print(F"Saving feature extractor to {pytorch_dump_folder_path}" )
        feature_extractor.save_pretrained(pytorch_dump_folder_path )
if push_to_hub:
print('''Pushing model and feature extractor to the hub...''' )
model.push_to_hub(F"MIT/{model_name}" )
feature_extractor.push_to_hub(F"MIT/{model_name}" )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--model_name',
default='ast-finetuned-audioset-10-10-0.4593',
type=str,
help='Name of the Audio Spectrogram Transformer model you\'d like to convert.',
)
parser.add_argument(
'--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model directory.'
)
parser.add_argument(
'--push_to_hub', action='store_true', help='Whether or not to push the converted model to the 🤗 hub.'
)
    args = parser.parse_args()
convert_audio_spectrogram_transformer_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
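    # Example invocation (illustrative; the script filename and output path are assumptions):
    #   python convert_audio_spectrogram_transformer_original_to_pytorch.py \
    #       --model_name ast-finetuned-audioset-10-10-0.4593 --pytorch_dump_folder_path ./ast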
| 34 | 0 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {"""configuration_ibert""": ["""IBERT_PRETRAINED_CONFIG_ARCHIVE_MAP""", """IBertConfig""", """IBertOnnxConfig"""]}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["""modeling_ibert"""] = [
"""IBERT_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""IBertForMaskedLM""",
"""IBertForMultipleChoice""",
"""IBertForQuestionAnswering""",
"""IBertForSequenceClassification""",
"""IBertForTokenClassification""",
"""IBertModel""",
"""IBertPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_ibert import IBERT_PRETRAINED_CONFIG_ARCHIVE_MAP, IBertConfig, IBertOnnxConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_ibert import (
IBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
IBertForMaskedLM,
IBertForMultipleChoice,
IBertForQuestionAnswering,
IBertForSequenceClassification,
IBertForTokenClassification,
IBertModel,
IBertPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 192 |
import sys
from .dependency_versions_table import deps
from .utils.versions import require_version, require_version_core
# define which module versions we always want to check at run time
# (usually the ones defined in `install_requires` in setup.py)
#
# order specific notes:
# - tqdm must be checked before tokenizers
pkgs_to_check_at_runtime = """python tqdm regex requests packaging filelock numpy tokenizers""".split()
if sys.version_info < (3, 7):
pkgs_to_check_at_runtime.append("""dataclasses""")
if sys.version_info < (3, 8):
pkgs_to_check_at_runtime.append("""importlib_metadata""")
for pkg in pkgs_to_check_at_runtime:
if pkg in deps:
if pkg == "tokenizers":
# must be loaded here, or else tqdm check may fail
from .utils import is_tokenizers_available
if not is_tokenizers_available():
continue # not required, check version only if installed
require_version_core(deps[pkg])
else:
raise ValueError(F"""can't find {pkg} in {deps.keys()}, check dependency_versions_table.py""")
def UpperCamelCase ( pkg ,hint=None ):
    '''simple docstring'''
    require_version(deps[pkg] ,hint )
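# Example (the function keeps this file's anonymized name):
#   UpperCamelCase("tqdm")  # raises if the installed tqdm violates the pinned range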
| 192 | 1 |
from ..utils import DummyObject, requires_backends
class lowerCamelCase__ ( metaclass=DummyObject):
    _backends = ['''flax''', '''transformers''']
    def __init__(self , *args , **kwargs ):
        requires_backends(self , ['''flax''', '''transformers'''] )
    @classmethod
    def from_config (cls , *args , **kwargs ):
        requires_backends(cls , ['''flax''', '''transformers'''] )
    @classmethod
    def from_pretrained (cls , *args , **kwargs ):
        requires_backends(cls , ['''flax''', '''transformers'''] )
class lowerCamelCase__ ( metaclass=DummyObject):
    _backends = ['''flax''', '''transformers''']
    def __init__(self , *args , **kwargs ):
        requires_backends(self , ['''flax''', '''transformers'''] )
    @classmethod
    def from_config (cls , *args , **kwargs ):
        requires_backends(cls , ['''flax''', '''transformers'''] )
    @classmethod
    def from_pretrained (cls , *args , **kwargs ):
        requires_backends(cls , ['''flax''', '''transformers'''] )
class lowerCamelCase__ ( metaclass=DummyObject):
    _backends = ['''flax''', '''transformers''']
    def __init__(self , *args , **kwargs ):
        requires_backends(self , ['''flax''', '''transformers'''] )
    @classmethod
    def from_config (cls , *args , **kwargs ):
        requires_backends(cls , ['''flax''', '''transformers'''] )
    @classmethod
    def from_pretrained (cls , *args , **kwargs ):
        requires_backends(cls , ['''flax''', '''transformers'''] )
class lowerCamelCase__ ( metaclass=DummyObject):
    _backends = ['''flax''', '''transformers''']
    def __init__(self , *args , **kwargs ):
        requires_backends(self , ['''flax''', '''transformers'''] )
    @classmethod
    def from_config (cls , *args , **kwargs ):
        requires_backends(cls , ['''flax''', '''transformers'''] )
    @classmethod
    def from_pretrained (cls , *args , **kwargs ):
        requires_backends(cls , ['''flax''', '''transformers'''] )
| 5 |
from pathlib import Path
import cv2 as cva
import numpy as np
from matplotlib import pyplot as plt
def get_rotation(img: np.ndarray, pta: np.ndarray, ptb: np.ndarray, rows: int, cols: int) -> np.ndarray:
    '''simple docstring'''
    matrix = cva.getAffineTransform(pta, ptb )
    return cva.warpAffine(img, matrix, (rows, cols) )
if __name__ == "__main__":
# read original image
    image = cva.imread(
        str(Path(__file__).resolve().parent.parent / '''image_data''' / '''lena.jpg''')
    )
    # turn image in gray scale value
    gray_img = cva.cvtColor(image, cva.COLOR_BGR2GRAY)
    # get image shape
    img_rows , img_cols = gray_img.shape
    # set different points to rotate image
    ptsa = np.array([[50, 50], [200, 50], [50, 200]], np.float32)
    ptsb = np.array([[10, 100], [200, 50], [100, 250]], np.float32)
    ptsc = np.array([[50, 50], [150, 50], [120, 200]], np.float32)
    ptsd = np.array([[10, 100], [80, 50], [180, 250]], np.float32)
    # add all rotated images in a list
    images = [
        gray_img,
        get_rotation(gray_img, ptsa, ptsb, img_rows, img_cols),
        get_rotation(gray_img, ptsb, ptsc, img_rows, img_cols),
        get_rotation(gray_img, ptsc, ptsd, img_rows, img_cols),
    ]
    # plot different image rotations
    fig = plt.figure(1)
    titles = ['''Original''', '''Rotation 1''', '''Rotation 2''', '''Rotation 3''']
for i, image in enumerate(images):
plt.subplot(2, 2, i + 1), plt.imshow(image, '''gray''')
plt.title(titles[i])
plt.axis('''off''')
plt.subplots_adjust(left=0.0, bottom=0.05, right=1.0, top=0.95)
plt.show()
| 138 | 0 |
'''simple docstring'''
import inspect
import warnings
from typing import Any, Dict, Optional, Union
from packaging import version
def __snake_case( *args , take_from = None , standard_warn=True , stacklevel=2 ) -> Any:
    from .. import __version__
    deprecated_kwargs = take_from
    values = ()
    if not isinstance(args[0] , tuple ):
        args = (args,)
    for attribute, version_name, message in args:
        if version.parse(version.parse(__version__ ).base_version ) >= version.parse(version_name ):
            raise ValueError(
                f"The deprecation tuple {(attribute, version_name, message)} should be removed since diffusers'"
                f" version {__version__} is >= {version_name}" )
        warning = None
        if isinstance(deprecated_kwargs , dict ) and attribute in deprecated_kwargs:
            values += (deprecated_kwargs.pop(attribute ),)
            warning = f"The `{attribute}` argument is deprecated and will be removed in version {version_name}."
        elif hasattr(deprecated_kwargs , attribute ):
            values += (getattr(deprecated_kwargs , attribute ),)
            warning = f"The `{attribute}` attribute is deprecated and will be removed in version {version_name}."
        elif deprecated_kwargs is None:
            warning = f"`{attribute}` is deprecated and will be removed in version {version_name}."
        if warning is not None:
            warning = warning + """ """ if standard_warn else """"""
            warnings.warn(warning + message , FutureWarning , stacklevel=stacklevel )
    if isinstance(deprecated_kwargs , dict ) and len(deprecated_kwargs ) > 0:
        call_frame = inspect.getouterframes(inspect.currentframe() )[1]
        filename = call_frame.filename
        line_number = call_frame.lineno
        function = call_frame.function
        key , value = next(iter(deprecated_kwargs.items() ) )
        raise TypeError(f"{function} in {filename} line {line_number-1} got an unexpected keyword argument `{key}`" )
    if len(values ) == 0:
        return
    elif len(values ) == 1:
        return values[0]
    return values
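# Illustrative call (argument values are assumptions), matching the
# (attribute, version_name, message) tuple shape consumed above:
#   __snake_case("old_arg", "99.0.0", "`old_arg` is deprecated, use `new_arg` instead.")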
| 356 |
'''simple docstring'''
from collections import deque
from math import floor
from random import random
from time import time
class DirectedGraph :
"""simple docstring"""
    def __init__( self ):
        self.graph = {}
    def add_pair ( self , u , v , w=1 ):
        if self.graph.get(u ):
            if self.graph[u].count([w, v] ) == 0:
                self.graph[u].append([w, v] )
        else:
            self.graph[u] = [[w, v]]
        if not self.graph.get(v ):
            self.graph[v] = []
    def all_nodes ( self ):
return list(self.graph )
    def remove_pair ( self , u , v ):
        if self.graph.get(u ):
            for _ in self.graph[u]:
                if _[1] == v:
                    self.graph[u].remove(_ )
    def dfs ( self , s=-2 , d=-1 ):
        if s == d:
            return []
        stack = []
        visited = []
        if s == -2:
            s = list(self.graph )[0]
        stack.append(s )
        visited.append(s )
        ss = s
        while True:
            # check if there is any non isolated nodes
            if len(self.graph[s] ) != 0:
                ss = s
                for node in self.graph[s]:
                    if visited.count(node[1] ) < 1:
                        if node[1] == d:
                            visited.append(d )
                            return visited
                        else:
                            stack.append(node[1] )
                            visited.append(node[1] )
                            ss = node[1]
                            break
            # check if all the children are visited
            if s == ss:
                stack.pop()
                if len(stack ) != 0:
                    s = stack[len(stack ) - 1]
            else:
                s = ss
            # check if se have reached the starting point
            if len(stack ) == 0:
                return visited
    def fill_graph_randomly ( self , c=-1 ):
        if c == -1:
            c = floor(random() * 10_000 ) + 10
        for i in range(c ):
            # every vertex has max 100 edges
            for _ in range(floor(random() * 102 ) + 1 ):
                n = floor(random() * c ) + 1
                if n != i:
                    self.add_pair(i , n , 1 )
    def bfs ( self , s=-2 ):
        d = deque()
        visited = []
        if s == -2:
            s = list(self.graph )[0]
        d.append(s )
        visited.append(s )
        while d:
            s = d.popleft()
            if len(self.graph[s] ) != 0:
                for node in self.graph[s]:
                    if visited.count(node[1] ) < 1:
                        d.append(node[1] )
                        visited.append(node[1] )
        return visited
    def in_degree ( self , u ):
        count = 0
for x in self.graph:
for y in self.graph[x]:
if y[1] == u:
count += 1
return count
    def out_degree ( self , u ):
return len(self.graph[u] )
    def topological_sort ( self , s=-2 ):
        stack = []
        visited = []
        if s == -2:
            s = list(self.graph )[0]
        stack.append(s )
        visited.append(s )
        ss = s
        sorted_nodes = []
        while True:
            # check if there is any non isolated nodes
            if len(self.graph[s] ) != 0:
                ss = s
                for node in self.graph[s]:
                    if visited.count(node[1] ) < 1:
                        stack.append(node[1] )
                        visited.append(node[1] )
                        ss = node[1]
                        break
            # check if all the children are visited
            if s == ss:
                sorted_nodes.append(stack.pop() )
                if len(stack ) != 0:
                    s = stack[len(stack ) - 1]
            else:
                s = ss
            # check if se have reached the starting point
            if len(stack ) == 0:
                return sorted_nodes
    def cycle_nodes ( self ):
        stack = []
        visited = []
        s = list(self.graph )[0]
        stack.append(s )
        visited.append(s )
        parent = -2
        indirect_parents = []
        ss = s
        on_the_way_back = False
        anticipating_nodes = set()
        while True:
            # check if there is any non isolated nodes
            if len(self.graph[s] ) != 0:
                ss = s
                for node in self.graph[s]:
                    if (
                        visited.count(node[1] ) > 0
                        and node[1] != parent
                        and indirect_parents.count(node[1] ) > 0
                        and not on_the_way_back
                    ):
                        len_stack = len(stack ) - 1
                        while len_stack >= 0:
                            if stack[len_stack] == node[1]:
                                anticipating_nodes.add(node[1] )
                                break
                            else:
                                anticipating_nodes.add(stack[len_stack] )
                                len_stack -= 1
                    if visited.count(node[1] ) < 1:
                        stack.append(node[1] )
                        visited.append(node[1] )
                        ss = node[1]
                        break
            # check if all the children are visited
            if s == ss:
                stack.pop()
                on_the_way_back = True
                if len(stack ) != 0:
                    s = stack[len(stack ) - 1]
                else:
                    on_the_way_back = False
                indirect_parents.append(parent )
                parent = s
                s = ss
            # check if se have reached the starting point
            if len(stack ) == 0:
                return list(anticipating_nodes )
    def has_cycle ( self ):
        stack = []
        visited = []
        s = list(self.graph )[0]
        stack.append(s )
        visited.append(s )
        parent = -2
        indirect_parents = []
        ss = s
        on_the_way_back = False
        anticipating_nodes = set()
        while True:
            # check if there is any non isolated nodes
            if len(self.graph[s] ) != 0:
                ss = s
                for node in self.graph[s]:
                    if (
                        visited.count(node[1] ) > 0
                        and node[1] != parent
                        and indirect_parents.count(node[1] ) > 0
                        and not on_the_way_back
                    ):
                        len_stack_minus_one = len(stack ) - 1
                        while len_stack_minus_one >= 0:
                            if stack[len_stack_minus_one] == node[1]:
                                anticipating_nodes.add(node[1] )
                                break
                            else:
                                return True
                    if visited.count(node[1] ) < 1:
                        stack.append(node[1] )
                        visited.append(node[1] )
                        ss = node[1]
                        break
            # check if all the children are visited
            if s == ss:
                stack.pop()
                on_the_way_back = True
                if len(stack ) != 0:
                    s = stack[len(stack ) - 1]
                else:
                    on_the_way_back = False
                indirect_parents.append(parent )
                parent = s
                s = ss
            # check if se have reached the starting point
            if len(stack ) == 0:
                return False
    def dfs_time ( self , s=-2 , e=-1 ):
        begin = time()
        self.dfs(s , e )
        end = time()
return end - begin
    def bfs_time ( self , s=-2 ):
        begin = time()
        self.bfs(s )
        end = time()
return end - begin
class Graph :
"""simple docstring"""
    def __init__( self ):
        self.graph = {}
    def add_pair ( self , u , v , w=1 ):
        # check if the u exists
        if self.graph.get(u ):
            # if there already is a edge
            if self.graph[u].count([w, v] ) == 0:
                self.graph[u].append([w, v] )
        else:
            # if u does not exist
            self.graph[u] = [[w, v]]
        # add the other way
        if self.graph.get(v ):
            # if there already is a edge
            if self.graph[v].count([w, u] ) == 0:
                self.graph[v].append([w, u] )
        else:
            # if u does not exist
            self.graph[v] = [[w, u]]
    def remove_pair ( self , u , v ):
        if self.graph.get(u ):
            for _ in self.graph[u]:
                if _[1] == v:
                    self.graph[u].remove(_ )
        # the other way round
        if self.graph.get(v ):
            for _ in self.graph[v]:
                if _[1] == u:
                    self.graph[v].remove(_ )
    def dfs ( self , s=-2 , d=-1 ):
        if s == d:
            return []
        stack = []
        visited = []
        if s == -2:
            s = list(self.graph )[0]
        stack.append(s )
        visited.append(s )
        ss = s
        while True:
            # check if there is any non isolated nodes
            if len(self.graph[s] ) != 0:
                ss = s
                for node in self.graph[s]:
                    if visited.count(node[1] ) < 1:
                        if node[1] == d:
                            visited.append(d )
                            return visited
                        else:
                            stack.append(node[1] )
                            visited.append(node[1] )
                            ss = node[1]
                            break
            # check if all the children are visited
            if s == ss:
                stack.pop()
                if len(stack ) != 0:
                    s = stack[len(stack ) - 1]
            else:
                s = ss
            # check if se have reached the starting point
            if len(stack ) == 0:
                return visited
    def fill_graph_randomly ( self , c=-1 ):
        if c == -1:
            c = floor(random() * 10_000 ) + 10
        for i in range(c ):
            # every vertex has max 100 edges
            for _ in range(floor(random() * 102 ) + 1 ):
                n = floor(random() * c ) + 1
                if n != i:
                    self.add_pair(i , n , 1 )
    def bfs ( self , s=-2 ):
        d = deque()
        visited = []
        if s == -2:
            s = list(self.graph )[0]
        d.append(s )
        visited.append(s )
        while d:
            s = d.popleft()
            if len(self.graph[s] ) != 0:
                for node in self.graph[s]:
                    if visited.count(node[1] ) < 1:
                        d.append(node[1] )
                        visited.append(node[1] )
        return visited
    def degree ( self , u ):
return len(self.graph[u] )
    def cycle_nodes ( self ):
        stack = []
        visited = []
        s = list(self.graph )[0]
        stack.append(s )
        visited.append(s )
        parent = -2
        indirect_parents = []
        ss = s
        on_the_way_back = False
        anticipating_nodes = set()
        while True:
            # check if there is any non isolated nodes
            if len(self.graph[s] ) != 0:
                ss = s
                for node in self.graph[s]:
                    if (
                        visited.count(node[1] ) > 0
                        and node[1] != parent
                        and indirect_parents.count(node[1] ) > 0
                        and not on_the_way_back
                    ):
                        len_stack = len(stack ) - 1
                        while len_stack >= 0:
                            if stack[len_stack] == node[1]:
                                anticipating_nodes.add(node[1] )
                                break
                            else:
                                anticipating_nodes.add(stack[len_stack] )
                                len_stack -= 1
                    if visited.count(node[1] ) < 1:
                        stack.append(node[1] )
                        visited.append(node[1] )
                        ss = node[1]
                        break
            # check if all the children are visited
            if s == ss:
                stack.pop()
                on_the_way_back = True
                if len(stack ) != 0:
                    s = stack[len(stack ) - 1]
                else:
                    on_the_way_back = False
                indirect_parents.append(parent )
                parent = s
                s = ss
            # check if se have reached the starting point
            if len(stack ) == 0:
                return list(anticipating_nodes )
    def has_cycle ( self ):
        stack = []
        visited = []
        s = list(self.graph )[0]
        stack.append(s )
        visited.append(s )
        parent = -2
        indirect_parents = []
        ss = s
        on_the_way_back = False
        anticipating_nodes = set()
        while True:
            # check if there is any non isolated nodes
            if len(self.graph[s] ) != 0:
                ss = s
                for node in self.graph[s]:
                    if (
                        visited.count(node[1] ) > 0
                        and node[1] != parent
                        and indirect_parents.count(node[1] ) > 0
                        and not on_the_way_back
                    ):
                        len_stack_minus_one = len(stack ) - 1
                        while len_stack_minus_one >= 0:
                            if stack[len_stack_minus_one] == node[1]:
                                anticipating_nodes.add(node[1] )
                                break
                            else:
                                return True
                    if visited.count(node[1] ) < 1:
                        stack.append(node[1] )
                        visited.append(node[1] )
                        ss = node[1]
                        break
            # check if all the children are visited
            if s == ss:
                stack.pop()
                on_the_way_back = True
                if len(stack ) != 0:
                    s = stack[len(stack ) - 1]
                else:
                    on_the_way_back = False
                indirect_parents.append(parent )
                parent = s
                s = ss
            # check if se have reached the starting point
            if len(stack ) == 0:
                return False
    def all_nodes ( self ):
return list(self.graph )
    def dfs_time ( self , s=-2 , e=-1 ):
        begin = time()
        self.dfs(s , e )
        end = time()
return end - begin
    def bfs_time ( self , s=-2 ):
        begin = time()
        self.bfs(s )
        end = time()
return end - begin
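# Quick usage sketch for the directed variant (node labels and weights are arbitrary):
#   g = DirectedGraph()
#   g.add_pair(1, 2)
#   g.add_pair(2, 3)
#   g.add_pair(3, 1)
#   print(g.dfs(1, 3))    # e.g. [1, 2, 3]
#   print(g.has_cycle())  # True, because of the 1 -> 2 -> 3 -> 1 loop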
| 43 | 0 |
from math import cos, sin, sqrt, tau
from audio_filters.iir_filter import IIRFilter
def make_lowpass ( frequency : int , samplerate : int , q_factor : float = 1 / sqrt(2 ) ):
    """simple docstring"""
    w0 = tau * frequency / samplerate
    _sin = sin(w0 )
    _cos = cos(w0 )
    alpha = _sin / (2 * q_factor)
    b0 = (1 - _cos) / 2
    b1 = 1 - _cos
    a0 = 1 + alpha
    a1 = -2 * _cos
    a2 = 1 - alpha
    filt = IIRFilter(2 )
    filt.set_coefficients([a0, a1, a2] , [b0, b1, b0] )
    return filt
def make_highpass ( frequency : int , samplerate : int , q_factor : float = 1 / sqrt(2 ) ):
    """simple docstring"""
    w0 = tau * frequency / samplerate
    _sin = sin(w0 )
    _cos = cos(w0 )
    alpha = _sin / (2 * q_factor)
    b0 = (1 + _cos) / 2
    b1 = -1 - _cos
    a0 = 1 + alpha
    a1 = -2 * _cos
    a2 = 1 - alpha
    filt = IIRFilter(2 )
    filt.set_coefficients([a0, a1, a2] , [b0, b1, b0] )
    return filt
def make_bandpass ( frequency : int , samplerate : int , q_factor : float = 1 / sqrt(2 ) ):
    """simple docstring"""
    w0 = tau * frequency / samplerate
    _sin = sin(w0 )
    _cos = cos(w0 )
    alpha = _sin / (2 * q_factor)
    b0 = _sin / 2
    b1 = 0
    b2 = -b0
    a0 = 1 + alpha
    a1 = -2 * _cos
    a2 = 1 - alpha
    filt = IIRFilter(2 )
    filt.set_coefficients([a0, a1, a2] , [b0, b1, b2] )
    return filt
def make_allpass ( frequency : int , samplerate : int , q_factor : float = 1 / sqrt(2 ) ):
    """simple docstring"""
    w0 = tau * frequency / samplerate
    _sin = sin(w0 )
    _cos = cos(w0 )
    alpha = _sin / (2 * q_factor)
    b0 = 1 - alpha
    b1 = -2 * _cos
    b2 = 1 + alpha
    filt = IIRFilter(2 )
    filt.set_coefficients([b2, b1, b0] , [b0, b1, b2] )
    return filt
def make_peak ( frequency : int , samplerate : int , gain_db : float , q_factor : float = 1 / sqrt(2 ) , ):
    """simple docstring"""
    w0 = tau * frequency / samplerate
    _sin = sin(w0 )
    _cos = cos(w0 )
    alpha = _sin / (2 * q_factor)
    big_a = 10 ** (gain_db / 40)
    b0 = 1 + alpha * big_a
    b1 = -2 * _cos
    b2 = 1 - alpha * big_a
    a0 = 1 + alpha / big_a
    a1 = -2 * _cos
    a2 = 1 - alpha / big_a
    filt = IIRFilter(2 )
    filt.set_coefficients([a0, a1, a2] , [b0, b1, b2] )
    return filt
def make_lowshelf ( frequency : int , samplerate : int , gain_db : float , q_factor : float = 1 / sqrt(2 ) , ):
    """simple docstring"""
    w0 = tau * frequency / samplerate
    _sin = sin(w0 )
    _cos = cos(w0 )
    alpha = _sin / (2 * q_factor)
    big_a = 10 ** (gain_db / 40)
    pmc = (big_a + 1) - (big_a - 1) * _cos
    ppmc = (big_a + 1) + (big_a - 1) * _cos
    mpc = (big_a - 1) - (big_a + 1) * _cos
    pmpc = (big_a - 1) + (big_a + 1) * _cos
    aaa = 2 * sqrt(big_a ) * alpha
    b0 = big_a * (pmc + aaa)
    b1 = 2 * big_a * mpc
    b2 = big_a * (pmc - aaa)
    a0 = ppmc + aaa
    a1 = -2 * pmpc
    a2 = ppmc - aaa
    filt = IIRFilter(2 )
    filt.set_coefficients([a0, a1, a2] , [b0, b1, b2] )
    return filt
def make_highshelf(frequency: int, samplerate: int, gain_db: float, q_factor: float = 1 / sqrt(2)) -> IIRFilter:
    """Creates a high-shelf filter with the given gain in dB (names reconstructed)."""
    w0 = tau * frequency / samplerate
    _sin = sin(w0)
    _cos = cos(w0)
    alpha = _sin / (2 * q_factor)
    big_a = 10 ** (gain_db / 40)
    pmc = (big_a + 1) - (big_a - 1) * _cos
    ppmc = (big_a + 1) + (big_a - 1) * _cos
    mpc = (big_a - 1) - (big_a + 1) * _cos
    pmpc = (big_a - 1) + (big_a + 1) * _cos
    aa2 = 2 * sqrt(big_a) * alpha

    b0 = big_a * (ppmc + aa2)
    b1 = -2 * big_a * pmpc
    b2 = big_a * (ppmc - aa2)
    a0 = pmc + aa2
    a1 = 2 * mpc
    a2 = pmc - aa2

    filt = IIRFilter(2)
    filt.set_coefficients([a0, a1, a2], [b0, b1, b2])
return filt
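

# Hedged usage sketch (not part of the original module): this assumes the
# IIRFilter class imported above exposes a per-sample `process()` method, as in
# TheAlgorithms' audio_filters package; values below are illustrative only.
if __name__ == "__main__":
    filt = make_peak(frequency=1_000, samplerate=48_000, gain_db=6.0)
    impulse_response = [filt.process(sample) for sample in [1.0, 0.0, 0.0, 0.0]]
    print([round(y, 4) for y in impulse_response])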
| 95 | """simple docstring"""
def min_path_sum(grid: list) -> int:
    if not grid or not grid[0]:
        raise TypeError("The grid does not contain the appropriate information")

    for cell_n in range(1, len(grid[0])):
        grid[0][cell_n] += grid[0][cell_n - 1]

    row_above = grid[0]
    for row_n in range(1, len(grid)):
        current_row = grid[row_n]
        grid[row_n] = fill_row(current_row, row_above)
        row_above = grid[row_n]

    return grid[-1][-1]
def fill_row(current_row: list, row_above: list) -> list:
current_row[0] += row_above[0]
for cell_n in range(1 , len(__UpperCAmelCase ) ):
current_row[cell_n] += min(current_row[cell_n - 1] , row_above[cell_n] )
return current_row
if __name__ == "__main__":
import doctest
doctest.testmod()
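
    # Illustrative check (grid chosen for this example): the cheapest path
    # through [[1, 3, 1], [1, 5, 1], [4, 2, 1]] costs 1 + 3 + 1 + 1 + 1 = 7.
    print(min_path_sum([[1, 3, 1], [1, 5, 1], [4, 2, 1]]))  # 7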
| 177 | 0 |
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)

SPIECE_UNDERLINE = "▁"

VOCAB_FILES_NAMES = {"vocab_file": "sentencepiece.bpe.model", "monolingual_vocab_file": "dict.txt"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "vinai/bartpho-syllable": "https://huggingface.co/vinai/bartpho-syllable/resolve/main/sentencepiece.bpe.model",
    },
    "monolingual_vocab_file": {
        "vinai/bartpho-syllable": "https://huggingface.co/vinai/bartpho-syllable/resolve/main/dict.txt",
    },
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {"vinai/bartpho-syllable": 1024}
class BartphoTokenizer(PreTrainedTokenizer):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]
    def __init__(
        self,
        vocab_file,
        monolingual_vocab_file,
        bos_token="<s>",
        eos_token="</s>",
        sep_token="</s>",
        cls_token="<s>",
        unk_token="<unk>",
        pad_token="<pad>",
        mask_token="<mask>",
        sp_model_kwargs=None,
        **kwargs,
    ):
        # Mask token behaves like a normal word, i.e. includes the space before it
        mask_token = AddedToken(mask_token, lstrip=True, rstrip=False) if isinstance(mask_token, str) else mask_token

        self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs

        super().__init__(
            bos_token=bos_token,
            eos_token=eos_token,
            unk_token=unk_token,
            sep_token=sep_token,
            cls_token=cls_token,
            pad_token=pad_token,
            mask_token=mask_token,
            sp_model_kwargs=self.sp_model_kwargs,
            **kwargs,
        )

        self.vocab_file = vocab_file
        self.monolingual_vocab_file = monolingual_vocab_file
        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(str(vocab_file))

        # Load the reduced vocab
        # Keep order of special tokens for backward compatibility
        self.fairseq_tokens_to_ids = {}
        cnt = 0
        for token in [bos_token, pad_token, eos_token, unk_token, sep_token, cls_token]:
            if str(token) not in self.fairseq_tokens_to_ids:
                self.fairseq_tokens_to_ids[str(token)] = cnt
                cnt += 1
        with open(monolingual_vocab_file, "r", encoding="utf-8") as f:
            for line in f.readlines():
                token = line.strip().split()[0]
                self.fairseq_tokens_to_ids[token] = len(self.fairseq_tokens_to_ids)
        if str(mask_token) not in self.fairseq_tokens_to_ids:
            self.fairseq_tokens_to_ids[str(mask_token)] = len(self.fairseq_tokens_to_ids)

        self.fairseq_ids_to_tokens = {v: k for k, v in self.fairseq_tokens_to_ids.items()}
    def __getstate__(self):
        state = self.__dict__.copy()
        state["sp_model"] = None
        state["sp_model_proto"] = self.sp_model.serialized_model_proto()
        return state

    def __setstate__(self, d):
        self.__dict__ = d

        # for backward compatibility
        if not hasattr(self, "sp_model_kwargs"):
            self.sp_model_kwargs = {}

        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.LoadFromSerializedProto(self.sp_model_proto)
    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        if token_ids_1 is None:
            return [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
        cls = [self.cls_token_id]
        sep = [self.sep_token_id]
        return cls + token_ids_0 + sep + sep + token_ids_1 + sep

    def get_special_tokens_mask(self, token_ids_0, token_ids_1=None, already_has_special_tokens=False):
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True
            )

        if token_ids_1 is None:
            return [1] + ([0] * len(token_ids_0)) + [1]
        return [1] + ([0] * len(token_ids_0)) + [1, 1] + ([0] * len(token_ids_1)) + [1]

    def create_token_type_ids_from_sequences(self, token_ids_0, token_ids_1=None):
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]

        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep) * [0]

    @property
    def vocab_size(self):
        return len(self.fairseq_ids_to_tokens)

    def get_vocab(self):
        vocab = {self.convert_ids_to_tokens(i): i for i in range(self.vocab_size)}
        vocab.update(self.added_tokens_encoder)
        return vocab

    def _tokenize(self, text):
        return self.sp_model.encode(text, out_type=str)

    def _convert_token_to_id(self, token):
        if token in self.fairseq_tokens_to_ids:
            return self.fairseq_tokens_to_ids[token]
        else:
            return self.unk_token_id

    def _convert_id_to_token(self, index):
        return self.fairseq_ids_to_tokens[index]

    def convert_tokens_to_string(self, tokens):
        out_string = "".join(tokens).replace(SPIECE_UNDERLINE, " ").strip()
        return out_string

    def save_vocabulary(self, save_directory, filename_prefix=None):
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )
        out_monolingual_vocab_file = os.path.join(
            save_directory,
            (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["monolingual_vocab_file"],
        )

        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file) and os.path.isfile(self.vocab_file):
            copyfile(self.vocab_file, out_vocab_file)
        elif not os.path.isfile(self.vocab_file):
            with open(out_vocab_file, "wb") as fi:
                content_spiece_model = self.sp_model.serialized_model_proto()
                fi.write(content_spiece_model)

        if os.path.abspath(self.monolingual_vocab_file) != os.path.abspath(
            out_monolingual_vocab_file
        ) and os.path.isfile(self.monolingual_vocab_file):
            copyfile(self.monolingual_vocab_file, out_monolingual_vocab_file)
        elif not os.path.isfile(self.monolingual_vocab_file):
            with open(out_monolingual_vocab_file, "w", encoding="utf-8") as fp:
                for token in self.fairseq_tokens_to_ids:
                    if token not in self.all_special_tokens:
                        fp.write(f"{str(token)} \n")

        return out_vocab_file, out_monolingual_vocab_file
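

# Hedged usage sketch (not part of the original module): loading the published
# checkpoint pulls both SentencePiece files named in VOCAB_FILES_NAMES.
#   tokenizer = BartphoTokenizer.from_pretrained("vinai/bartpho-syllable")
#   tokenizer("Chúng tôi là những nghiên cứu viên.")["input_ids"]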
| 44 |
import argparse
from collections import defaultdict
def overwrite_file(file, class_name, test_name, correct_line, done_test):
    """Rewrites the expected-output line of `test_name` inside `class_name` in `file`."""
    _id = f"{file}_{class_name}_{test_name}"
    done_test[_id] += 1
    with open(file, "r") as f:
        lines = f.readlines()

    class_regex = f"class {class_name}("
    test_regex = f"{4 * ' '}def {test_name}("
    line_begin_regex = f"{8 * ' '}{correct_line.split()[0]}"
    another_line_begin_regex = f"{16 * ' '}{correct_line.split()[0]}"

    in_class = False
    in_func = False
    in_line = False
    insert_line = False
    count = 0
    spaces = 0
    new_lines = []
    for line in lines:
        if line.startswith(class_regex):
            in_class = True
        elif in_class and line.startswith(test_regex):
            in_func = True
        elif in_class and in_func and (line.startswith(line_begin_regex) or line.startswith(another_line_begin_regex)):
            spaces = len(line.split(correct_line.split()[0])[0])
            count += 1

            if count == done_test[_id]:
                in_line = True

        if in_class and in_func and in_line:
            if ")" not in line:
                continue
            else:
                insert_line = True

        if in_class and in_func and in_line and insert_line:
            new_lines.append(f"{spaces * ' '}{correct_line}")
            in_class = in_func = in_line = insert_line = False
        else:
            new_lines.append(line)

    with open(file, "w") as f:
        for line in new_lines:
            f.write(line)
def main(correct_filename, fail_filename=None):
    if fail_filename is not None:
        with open(fail_filename, "r") as f:
            test_failures = {l.strip() for l in f.readlines()}
    else:
        test_failures = None
    with open(correct_filename, "r") as f:
        correct_lines = f.readlines()

    done_tests = defaultdict(int)
    for line in correct_lines:
        file, class_name, test_name, correct_line = line.split(";")
        if test_failures is None or "::".join([file, class_name, test_name]) in test_failures:
            overwrite_file(file, class_name, test_name, correct_line, done_tests)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument('''--correct_filename''', help='''filename of tests with expected result''')
parser.add_argument('''--fail_filename''', help='''filename of test failures''', type=str, default=None)
    args = parser.parse_args()
main(args.correct_filename, args.fail_filename)
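
# Expected record format of --correct_filename (one semicolon-separated line
# per test); the example below is illustrative only:
#   tests/models/foo/test_modeling_foo.py;FooModelTest;test_inference;expected_slice = torch.tensor([...])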
| 44 | 1 |
'''simple docstring'''
import unittest
from transformers import load_tool
from .test_tools_common import ToolTesterMixin
class TextClassificationToolTester(unittest.TestCase, ToolTesterMixin):
    def setUp(self):
        self.tool = load_tool("text-classification")
        self.tool.setup()
        self.remote_tool = load_tool("text-classification", remote=True)

    def test_exact_match_arg(self):
        result = self.tool("That's quite cool", ["positive", "negative"])
        self.assertEqual(result, "positive")

    def test_exact_match_arg_remote(self):
        result = self.remote_tool("That's quite cool", ["positive", "negative"])
        self.assertEqual(result, "positive")

    def test_exact_match_kwarg(self):
        result = self.tool(text="That's quite cool", labels=["positive", "negative"])
        self.assertEqual(result, "positive")

    def test_exact_match_kwarg_remote(self):
        result = self.remote_tool(text="That's quite cool", labels=["positive", "negative"])
        self.assertEqual(result, "positive")
| 41 |
'''simple docstring'''
def solution(n: int = 1_000) -> int:
    """Returns the sum of all the multiples of 3 or 5 below n."""
    a = 3
    result = 0
    while a < n:
        if a % 3 == 0 or a % 5 == 0:
            result += a
        # NOTE: the original `elif a % 15 == 0: result -= a` branch was
        # unreachable (any multiple of 15 is already a multiple of 3).
        a += 1
    return result
if __name__ == "__main__":
print(F'''{solution() = }''')
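    # Worked check: below 10 the qualifying multiples are 3, 5, 6 and 9,
    # so solution(10) == 23.
    assert solution(10) == 23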
| 120 | 0 |
Vector3d = tuple[float, float, float]
Point3d = tuple[float, float, float]


def create_vector(end_point1: Point3d, end_point2: Point3d) -> Vector3d:
    x = end_point2[0] - end_point1[0]
    y = end_point2[1] - end_point1[1]
    z = end_point2[2] - end_point1[2]
    return (x, y, z)


def get_3d_vectors_cross(ab: Vector3d, ac: Vector3d) -> Vector3d:
    x = ab[1] * ac[2] - ab[2] * ac[1]  # *i
    y = (ab[0] * ac[2] - ab[2] * ac[0]) * -1  # *j
    z = ab[0] * ac[1] - ab[1] * ac[0]  # *k
    return (x, y, z)


def is_zero_vector(vector: Vector3d, accuracy: int) -> bool:
    return tuple(round(x, accuracy) for x in vector) == (0, 0, 0)


def are_collinear(a: Point3d, b: Point3d, c: Point3d, accuracy: int = 10) -> bool:
    ab = create_vector(a, b)
    ac = create_vector(a, c)
    return is_zero_vector(get_3d_vectors_cross(ab, ac), accuracy)
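

if __name__ == "__main__":
    # Illustrative checks (points chosen for this example, not from the original):
    print(are_collinear((0, 0, 0), (1, 1, 1), (2, 2, 2)))  # True: cross product is (0, 0, 0)
    print(are_collinear((0, 0, 0), (1, 0, 0), (0, 1, 0)))  # False: cross product is (0, 0, 1)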
| 354 |
from __future__ import annotations
import unittest
from transformers import is_tf_available
from transformers.testing_utils import require_sentencepiece, require_tf, require_tokenizers, slow
if is_tf_available():
import tensorflow as tf
    from transformers import AutoTokenizer, TFAutoModelForSeq2SeqLM
@require_tf
@require_sentencepiece
@require_tokenizers
class TFMT5ModelIntegrationTest(unittest.TestCase):
    @slow
    def test_small_integration_test(self):
        model = TFAutoModelForSeq2SeqLM.from_pretrained("google/mt5-small")
        tokenizer = AutoTokenizer.from_pretrained("google/mt5-small")

        input_ids = tokenizer("Hello there", return_tensors="tf").input_ids
        labels = tokenizer("Hi I am", return_tensors="tf").input_ids

        loss = model(input_ids, labels=labels).loss
        mtf_score = -tf.math.reduce_mean(loss).numpy()

        EXPECTED_SCORE = -21.22_81_68
        self.assertTrue(abs(mtf_score - EXPECTED_SCORE) < 2e-4)
| 140 | 0 |
import copy
import inspect
import unittest
from transformers import PretrainedConfig, SwiftFormerConfig
from transformers.testing_utils import (
require_torch,
require_vision,
slow,
torch_device,
)
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import SwiftFormerForImageClassification, SwiftFormerModel
from transformers.models.swiftformer.modeling_swiftformer import SWIFTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import ViTImageProcessor
class SwiftFormerModelTester:
def __init__( self , A__ , A__=13 , A__=3 , A__=True , A__=True , A__=0.1 , A__=0.1 , A__=224 , A__=1000 , A__=[3, 3, 6, 4] , A__=[48, 56, 112, 220] , ):
A__ : Any = parent
A__ : Dict = batch_size
A__ : Optional[Any] = num_channels
A__ : Tuple = is_training
A__ : Union[str, Any] = use_labels
A__ : Dict = hidden_dropout_prob
A__ : List[str] = attention_probs_dropout_prob
A__ : Any = num_labels
A__ : Union[str, Any] = image_size
A__ : List[Any] = layer_depths
A__ : Optional[int] = embed_dims
def __A ( self ):
A__ : str = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
A__ : Union[str, Any] = None
if self.use_labels:
A__ : Optional[int] = ids_tensor([self.batch_size] , self.num_labels )
A__ : Optional[Any] = self.get_config()
return config, pixel_values, labels
def __A ( self ):
return SwiftFormerConfig(
depths=self.layer_depths , embed_dims=self.embed_dims , mlp_ratio=4 , downsamples=[True, True, True, True] , hidden_act="""gelu""" , num_labels=self.num_labels , down_patch_size=3 , down_stride=2 , down_pad=1 , drop_rate=0.0 , drop_path_rate=0.0 , use_layer_scale=A__ , layer_scale_init_value=1e-5 , )
def __A ( self , A__ , A__ , A__ ):
A__ : List[Any] = SwiftFormerModel(config=A__ )
model.to(A__ )
model.eval()
A__ : str = model(A__ )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.embed_dims[-1], 7, 7) )
def __A ( self , A__ , A__ , A__ ):
A__ : Optional[Any] = self.num_labels
A__ : List[str] = SwiftFormerForImageClassification(A__ )
model.to(A__ )
model.eval()
A__ : Tuple = model(A__ , labels=A__ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
A__ : int = SwiftFormerForImageClassification(A__ )
model.to(A__ )
model.eval()
A__ : Any = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
A__ : Dict = model(A__ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def __A ( self ):
((A__) , (A__) , (A__)) : Tuple = self.prepare_config_and_inputs()
A__ : Any = {"""pixel_values""": pixel_values}
return config, inputs_dict
@require_torch
class SwiftFormerModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
UpperCAmelCase__: Optional[int] = (SwiftFormerModel, SwiftFormerForImageClassification) if is_torch_available() else ()
UpperCAmelCase__: int = (
{'''feature-extraction''': SwiftFormerModel, '''image-classification''': SwiftFormerForImageClassification}
if is_torch_available()
else {}
)
UpperCAmelCase__: Optional[Any] = False
UpperCAmelCase__: int = False
UpperCAmelCase__: Optional[Any] = False
UpperCAmelCase__: List[Any] = False
UpperCAmelCase__: Union[str, Any] = False
def __A ( self ):
A__ : Any = SwiftFormerModelTester(self )
A__ : str = ConfigTester(
self , config_class=A__ , has_text_modality=A__ , hidden_size=37 , num_attention_heads=12 , num_hidden_layers=12 , )
def __A ( self ):
self.config_tester.run_common_tests()
@unittest.skip(reason="""SwiftFormer does not use inputs_embeds""" )
def __A ( self ):
pass
def __A ( self ):
A__ , A__ : Any = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
A__ : Dict = model_class(A__ )
A__ : str = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(A__ , nn.Linear ) )
def __A ( self ):
A__ , A__ : List[str] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
A__ : Optional[int] = model_class(A__ )
A__ : Optional[int] = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
A__ : Dict = [*signature.parameters.keys()]
A__ : Any = ["""pixel_values"""]
self.assertListEqual(arg_names[:1] , A__ )
def __A ( self ):
A__ : Any = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*A__ )
def __A ( self ):
A__ : int = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*A__ )
@slow
def __A ( self ):
for model_name in SWIFTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
A__ : Tuple = SwiftFormerModel.from_pretrained(A__ )
self.assertIsNotNone(A__ )
@unittest.skip(reason="""SwiftFormer does not output attentions""" )
def __A ( self ):
pass
def __A ( self ):
def check_hidden_states_output(A__ , A__ , A__ ):
A__ : Dict = model_class(A__ )
model.to(A__ )
model.eval()
with torch.no_grad():
A__ : List[Any] = model(**self._prepare_for_class(A__ , A__ ) )
A__ : Union[str, Any] = outputs.hidden_states
A__ : Tuple = 8
self.assertEqual(len(A__ ) , A__ ) # TODO
# SwiftFormer's feature maps are of shape (batch_size, embed_dims, height, width)
# with the width and height being successively divided by 2, after every 2 blocks
for i in range(len(A__ ) ):
self.assertEqual(
hidden_states[i].shape , torch.Size(
[
self.model_tester.batch_size,
self.model_tester.embed_dims[i // 2],
(self.model_tester.image_size // 4) // 2 ** (i // 2),
(self.model_tester.image_size // 4) // 2 ** (i // 2),
] ) , )
A__ , A__ : Optional[int] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
A__ : int = True
check_hidden_states_output(A__ , A__ , A__ )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
A__ : Union[str, Any] = True
check_hidden_states_output(A__ , A__ , A__ )
def __A ( self ):
def _config_zero_init(A__ ):
A__ : Optional[int] = copy.deepcopy(A__ )
for key in configs_no_init.__dict__.keys():
if "_range" in key or "_std" in key or "initializer_factor" in key or "layer_scale" in key:
setattr(A__ , A__ , 1e-10 )
if isinstance(getattr(A__ , A__ , A__ ) , A__ ):
A__ : str = _config_zero_init(getattr(A__ , A__ ) )
setattr(A__ , A__ , A__ )
return configs_no_init
A__ , A__ : List[str] = self.model_tester.prepare_config_and_inputs_for_common()
A__ : Tuple = _config_zero_init(A__ )
for model_class in self.all_model_classes:
A__ : List[str] = model_class(config=A__ )
for name, param in model.named_parameters():
if param.requires_grad:
self.assertIn(
((param.data.mean() * 1e9) / 1e9).round().item() , [0.0, 1.0] , msg=F"""Parameter {name} of model {model_class} seems not properly initialized""" , )
@unittest.skip("""Will be fixed soon by reducing the size of the model used for common tests.""" )
def __A ( self ):
pass
def prepare_img():
A__ : Union[str, Any] = Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" )
return image
@require_torch
@require_vision
class SwiftFormerModelIntegrationTest(unittest.TestCase):
@cached_property
def __A ( self ):
return ViTImageProcessor.from_pretrained("""MBZUAI/swiftformer-xs""" ) if is_vision_available() else None
@slow
def __A ( self ):
A__ : Optional[int] = SwiftFormerForImageClassification.from_pretrained("""MBZUAI/swiftformer-xs""" ).to(A__ )
A__ : Tuple = self.default_image_processor
A__ : Optional[Any] = prepare_img()
A__ : Tuple = image_processor(images=A__ , return_tensors="""pt""" ).to(A__ )
# forward pass
with torch.no_grad():
A__ : Tuple = model(**A__ )
# verify the logits
A__ : int = torch.Size((1, 1000) )
self.assertEqual(outputs.logits.shape , A__ )
A__ : Tuple = torch.tensor([[-2.1_703e00, 2.1_107e00, -2.0_811e00]] ).to(A__ )
self.assertTrue(torch.allclose(outputs.logits[0, :3] , A__ , atol=1e-4 ) )
| 192 |
import argparse
import os
import re
PATH_TO_DIFFUSERS = "src/diffusers"

# Pattern that looks at the indentation in a line.
_re_indent = re.compile(r"^(\s*)\S")
# Pattern that matches `"key":" and puts `key` in group 0.
_re_direct_key = re.compile(r'^\s*"([^"]+)":')
# Pattern that matches `_import_structure["key"]` and puts `key` in group 0.
_re_indirect_key = re.compile(r'^\s*_import_structure\["([^"]+)"\]')
# Pattern that matches `"key",` and puts `key` in group 0.
_re_strip_line = re.compile(r'^\s*"([^"]+)",\s*$')
# Pattern that matches any `[stuff]` and puts `stuff` in group 0.
_re_bracket_content = re.compile(r"\[([^\]]+)\]")
def get_indent(line: str) -> str:
    search = _re_indent.search(line)
    return "" if search is None else search.groups()[0]
def split_code_in_indented_blocks(code, indent_level="", start_prompt=None, end_prompt=None):
    index = 0
    lines = code.split("\n")
    if start_prompt is not None:
        while not lines[index].startswith(start_prompt):
            index += 1
        blocks = ["\n".join(lines[:index])]
    else:
        blocks = []

    # We split into blocks until we get to the `end_prompt` (or the end of the block).
    current_block = [lines[index]]
    index += 1
    while index < len(lines) and (end_prompt is None or not lines[index].startswith(end_prompt)):
        if len(lines[index]) > 0 and get_indent(lines[index]) == indent_level:
            if len(current_block) > 0 and get_indent(current_block[-1]).startswith(indent_level + " "):
                current_block.append(lines[index])
                blocks.append("\n".join(current_block))
                if index < len(lines) - 1:
                    current_block = [lines[index + 1]]
                    index += 1
                else:
                    current_block = []
            else:
                blocks.append("\n".join(current_block))
                current_block = [lines[index]]
        else:
            current_block.append(lines[index])
        index += 1

    # Adds current block if it's nonempty.
    if len(current_block) > 0:
        blocks.append("\n".join(current_block))

    # Add final block after end_prompt if provided.
    if end_prompt is not None and index < len(lines):
        blocks.append("\n".join(lines[index:]))

    return blocks
def ignore_underscore(key):
    def _inner(x):
        return key(x).lower().replace("_", "")

    return _inner
def sort_objects(objects, key=None):
    # If no key is provided, we use a noop.
    def noop(x):
        return x

    if key is None:
        key = noop
    # Constants are all uppercase, they go first.
    constants = [obj for obj in objects if key(obj).isupper()]
    # Classes are not all uppercase but start with a capital, they go second.
    classes = [obj for obj in objects if key(obj)[0].isupper() and not key(obj).isupper()]
    # Functions begin with a lowercase, they go last.
    functions = [obj for obj in objects if not key(obj)[0].isupper()]

    key1 = ignore_underscore(key)
    return sorted(constants, key=key1) + sorted(classes, key=key1) + sorted(functions, key=key1)
def sort_objects_in_import(import_statement):
    # This inner function sorts imports between [ ].
    def _replace(match):
        imports = match.groups()[0]
        if "," not in imports:
            return f"[{imports}]"
        keys = [part.strip().replace('"', "") for part in imports.split(",")]
        # We will have a final empty element if the line finished with a comma.
        if len(keys[-1]) == 0:
            keys = keys[:-1]
        return "[" + ", ".join([f'"{k}"' for k in sort_objects(keys)]) + "]"

    lines = import_statement.split("\n")
    if len(lines) > 3:
        # Here we have to sort internal imports that are on several lines (one per name):
        # key: [
        #     "object1",
        #     "object2",
        #     ...
        # ]
        # We may have to ignore one or two lines on each side.
        idx = 2 if lines[1].strip() == "[" else 1
        keys_to_sort = [(i, _re_strip_line.search(line).groups()[0]) for i, line in enumerate(lines[idx:-idx])]
        sorted_indices = sort_objects(keys_to_sort, key=lambda x: x[1])
        sorted_lines = [lines[x[0] + idx] for x in sorted_indices]
        return "\n".join(lines[:idx] + sorted_lines + lines[-idx:])
    elif len(lines) == 3:
        # Here we have to sort internal imports that are on one separate line:
        # key: [
        #     "object1", "object2", ...
        # ]
        if _re_bracket_content.search(lines[1]) is not None:
            lines[1] = _re_bracket_content.sub(_replace, lines[1])
        else:
            keys = [part.strip().replace('"', "") for part in lines[1].split(",")]
            # We will have a final empty element if the line finished with a comma.
            if len(keys[-1]) == 0:
                keys = keys[:-1]
            lines[1] = get_indent(lines[1]) + ", ".join([f'"{k}"' for k in sort_objects(keys)])
        return "\n".join(lines)
    else:
        # Finally we have to deal with imports fitting on one line
        import_statement = _re_bracket_content.sub(_replace, import_statement)
        return import_statement
def sort_imports(file, check_only=True):
    with open(file, "r") as f:
        code = f.read()

    if "_import_structure" not in code:
        return

    # Blocks of indent level 0
    main_blocks = split_code_in_indented_blocks(
        code, start_prompt="_import_structure = {", end_prompt="if TYPE_CHECKING:"
    )

    # We ignore block 0 (everything until start_prompt) and the last block (everything after end_prompt).
    for block_idx in range(1, len(main_blocks) - 1):
        # Check if the block contains some `_import_structure`s thingy to sort.
        block = main_blocks[block_idx]
        block_lines = block.split("\n")

        # Get to the start of the imports.
        line_idx = 0
        while line_idx < len(block_lines) and "_import_structure" not in block_lines[line_idx]:
            # Skip dummy import blocks
            if "import dummy" in block_lines[line_idx]:
                line_idx = len(block_lines)
            else:
                line_idx += 1
        if line_idx >= len(block_lines):
            continue

        # Ignore beginning and last line: they don't contain anything.
        internal_block_code = "\n".join(block_lines[line_idx:-1])
        indent = get_indent(block_lines[1])
        # Split the internal block into blocks of indent level 1.
        internal_blocks = split_code_in_indented_blocks(internal_block_code, indent_level=indent)
        # We have two categories of import key: list or _import_structure[key].append/extend
        pattern = _re_direct_key if "_import_structure" in block_lines[0] else _re_indirect_key
        # Grab the keys, but there is a trap: some lines are empty or just comments.
        keys = [(pattern.search(b).groups()[0] if pattern.search(b) is not None else None) for b in internal_blocks]
        # We only sort the lines with a key.
        keys_to_sort = [(i, key) for i, key in enumerate(keys) if key is not None]
        sorted_indices = [x[0] for x in sorted(keys_to_sort, key=lambda x: x[1])]

        # We reorder the blocks by leaving empty lines/comments as they were and reorder the rest.
        count = 0
        reordered_blocks = []
        for i in range(len(internal_blocks)):
            if keys[i] is None:
                reordered_blocks.append(internal_blocks[i])
            else:
                block = sort_objects_in_import(internal_blocks[sorted_indices[count]])
                reordered_blocks.append(block)
                count += 1

        # And we put our main block back together with its first and last line.
        main_blocks[block_idx] = "\n".join(block_lines[:line_idx] + reordered_blocks + [block_lines[-1]])

    if code != "\n".join(main_blocks):
        if check_only:
            return True
        else:
            print(f"Overwriting {file}.")
            with open(file, "w") as f:
                f.write("\n".join(main_blocks))
def sort_imports_in_all_inits(check_only=True):
    failures = []
    for root, _, files in os.walk(PATH_TO_DIFFUSERS):
        if "__init__.py" in files:
            result = sort_imports(os.path.join(root, "__init__.py"), check_only=check_only)
            if result:
                failures = [os.path.join(root, "__init__.py")]
    if len(failures) > 0:
        raise ValueError(f"Would overwrite {len(failures)} files, run `make style`.")
if __name__ == "__main__":
A_ : Union[str, Any] = argparse.ArgumentParser()
parser.add_argument('--check_only', action='store_true', help='Whether to only check or fix style.')
A_ : List[str] = parser.parse_args()
sort_imports_in_all_inits(check_only=args.check_only)
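
# Hedged usage note: assuming this file lives at utils/custom_init_isort.py in
# the repository, a typical invocation is
#   python utils/custom_init_isort.py --check_only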
| 192 | 1 |
from typing import TYPE_CHECKING
from ....utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
    "configuration_trajectory_transformer": [
        "TRAJECTORY_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP",
        "TrajectoryTransformerConfig",
    ],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_trajectory_transformer"] = [
        "TRAJECTORY_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST",
        "TrajectoryTransformerModel",
        "TrajectoryTransformerPreTrainedModel",
        "load_tf_weights_in_trajectory_transformer",
    ]
if TYPE_CHECKING:
from .configuration_trajectory_transformer import (
TRAJECTORY_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP,
TrajectoryTransformerConfig,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_trajectory_transformer import (
TRAJECTORY_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
TrajectoryTransformerModel,
TrajectoryTransformerPreTrainedModel,
load_tf_weights_in_trajectory_transformer,
)
else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 364 | import argparse
import re
import torch
from CLAP import create_model
from transformers import AutoFeatureExtractor, ClapConfig, ClapModel
KEYS_TO_MODIFY_MAPPING = {
"text_branch": "text_model",
"audio_branch": "audio_model.audio_encoder",
"attn": "attention.self",
"self.proj": "output.dense",
"attention.self_mask": "attn_mask",
"mlp.fc1": "intermediate.dense",
"mlp.fc2": "output.dense",
"norm1": "layernorm_before",
"norm2": "layernorm_after",
"bn0": "batch_norm",
}
processor = AutoFeatureExtractor.from_pretrained("laion/clap-htsat-unfused", truncation="rand_trunc")
def init_clap(checkpoint_path, enable_fusion=False):
    model, model_cfg = create_model(
        "HTSAT-tiny",
        "roberta",
        checkpoint_path,
        precision="fp32",
        device="cuda:0" if torch.cuda.is_available() else "cpu",
        enable_fusion=enable_fusion,
        fusion_type="aff_2d" if enable_fusion else None,
    )
    return model, model_cfg
def rename_state_dict(state_dict):
    model_state_dict = {}
    sequential_layers_pattern = r".*sequential.(\d+).*"
    text_projection_pattern = r".*_projection.(\d+).*"

    for key, value in state_dict.items():
        # check if any key needs to be modified
        for key_to_modify, new_key in KEYS_TO_MODIFY_MAPPING.items():
            if key_to_modify in key:
                key = key.replace(key_to_modify, new_key)

        if re.match(sequential_layers_pattern, key):
            # replace sequential layers with list
            sequential_layer = re.match(sequential_layers_pattern, key).group(1)
            key = key.replace(f"sequential.{sequential_layer}.", f"layers.{int(sequential_layer)//3}.linear.")
        elif re.match(text_projection_pattern, key):
            projecton_layer = int(re.match(text_projection_pattern, key).group(1))
            # Because in CLAP they use `nn.Sequential`...
            transformers_projection_layer = 1 if projecton_layer == 0 else 2
            key = key.replace(f"_projection.{projecton_layer}.", f"_projection.linear{transformers_projection_layer}.")

        if "audio" and "qkv" in key:  # NOTE: effectively just checks `"qkv" in key`
            # split qkv into query key and value
            mixed_qkv = value
            qkv_dim = mixed_qkv.size(0) // 3

            query_layer = mixed_qkv[:qkv_dim]
            key_layer = mixed_qkv[qkv_dim : qkv_dim * 2]
            value_layer = mixed_qkv[qkv_dim * 2 :]

            model_state_dict[key.replace("qkv", "query")] = query_layer
            model_state_dict[key.replace("qkv", "key")] = key_layer
            model_state_dict[key.replace("qkv", "value")] = value_layer
        else:
            model_state_dict[key] = value

    return model_state_dict
def convert_clap_checkpoint(checkpoint_path, pytorch_dump_folder_path, config_path, enable_fusion=False):
    clap_model, model_cfg = init_clap(checkpoint_path, enable_fusion=enable_fusion)

    clap_model.eval()
    state_dict = clap_model.state_dict()
    state_dict = rename_state_dict(state_dict)

    transformers_config = ClapConfig()
    transformers_config.audio_config.enable_fusion = enable_fusion
    model = ClapModel(transformers_config)

    # ignore the spectrogram embedding layer
    model.load_state_dict(state_dict, strict=False)

    model.save_pretrained(pytorch_dump_folder_path)
    transformers_config.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument("--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.")
parser.add_argument("--checkpoint_path", default=None, type=str, help="Path to fairseq checkpoint")
parser.add_argument("--config_path", default=None, type=str, help="Path to hf config.json of model to convert")
parser.add_argument("--enable_fusion", action="store_true", help="Whether to enable fusion or not")
    args = parser.parse_args()
convert_clap_checkpoint(args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path, args.enable_fusion)
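
# Hedged usage sketch (paths are placeholders, not from the original file):
#   python convert_clap_original_pytorch_to_hf.py \
#       --checkpoint_path /path/to/CLAP_checkpoint.pt \
#       --pytorch_dump_folder_path ./clap-converted --enable_fusion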
| 20 | 0 |
"""simple docstring"""
from typing import Optional
from urllib.parse import quote
import huggingface_hub as hfh
from packaging import version
def hf_hub_url(repo_id: str, path: str, revision: Optional[str] = None) -> str:
    if version.parse(hfh.__version__).release < version.parse("0.11.0").release:
        # old versions of hfh don't url-encode the file path
        path = quote(path)
    return hfh.hf_hub_url(repo_id, path, repo_type="dataset", revision=revision)
| 72 | import functools
import operator
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)
WAV_2_VEC_2_PRETRAINED_CONFIG_ARCHIVE_MAP = {
'''facebook/wav2vec2-base-960h''': '''https://huggingface.co/facebook/wav2vec2-base-960h/resolve/main/config.json''',
# See all Wav2Vec2 models at https://huggingface.co/models?filter=wav2vec2
}
class Wav2Vec2Config(PretrainedConfig):
    model_type = "wav2vec2"
def __init__( self , __lowercase=32 , __lowercase=768 , __lowercase=12 , __lowercase=12 , __lowercase=3_072 , __lowercase="gelu" , __lowercase=0.1 , __lowercase=0.1 , __lowercase=0.1 , __lowercase=0.0 , __lowercase=0.0 , __lowercase=0.1 , __lowercase=0.1 , __lowercase=0.02 , __lowercase=1E-5 , __lowercase="group" , __lowercase="gelu" , __lowercase=(512, 512, 512, 512, 512, 512, 512) , __lowercase=(5, 2, 2, 2, 2, 2, 2) , __lowercase=(10, 3, 3, 3, 3, 2, 2) , __lowercase=False , __lowercase=128 , __lowercase=16 , __lowercase=False , __lowercase=True , __lowercase=0.05 , __lowercase=10 , __lowercase=2 , __lowercase=0.0 , __lowercase=10 , __lowercase=0 , __lowercase=320 , __lowercase=2 , __lowercase=0.1 , __lowercase=100 , __lowercase=256 , __lowercase=256 , __lowercase=0.1 , __lowercase="sum" , __lowercase=False , __lowercase=False , __lowercase=256 , __lowercase=(512, 512, 512, 512, 1_500) , __lowercase=(5, 3, 3, 1, 1) , __lowercase=(1, 2, 3, 1, 1) , __lowercase=512 , __lowercase=0 , __lowercase=1 , __lowercase=2 , __lowercase=False , __lowercase=3 , __lowercase=2 , __lowercase=3 , __lowercase=None , __lowercase=None , **__lowercase , ) -> int:
super().__init__(**__lowercase , pad_token_id=__lowercase , bos_token_id=__lowercase , eos_token_id=__lowercase)
__UpperCamelCase :Any = hidden_size
__UpperCamelCase :int = feat_extract_norm
__UpperCamelCase :Tuple = feat_extract_activation
__UpperCamelCase :Union[str, Any] = list(__lowercase)
__UpperCamelCase :List[Any] = list(__lowercase)
__UpperCamelCase :int = list(__lowercase)
__UpperCamelCase :List[Any] = conv_bias
__UpperCamelCase :Optional[int] = num_conv_pos_embeddings
__UpperCamelCase :Dict = num_conv_pos_embedding_groups
__UpperCamelCase :Any = len(self.conv_dim)
__UpperCamelCase :List[str] = num_hidden_layers
__UpperCamelCase :int = intermediate_size
__UpperCamelCase :str = hidden_act
__UpperCamelCase :Any = num_attention_heads
__UpperCamelCase :int = hidden_dropout
__UpperCamelCase :Tuple = attention_dropout
__UpperCamelCase :List[str] = activation_dropout
__UpperCamelCase :Optional[Any] = feat_proj_dropout
__UpperCamelCase :Any = final_dropout
__UpperCamelCase :Any = layerdrop
__UpperCamelCase :str = layer_norm_eps
__UpperCamelCase :Optional[Any] = initializer_range
__UpperCamelCase :List[str] = vocab_size
__UpperCamelCase :str = do_stable_layer_norm
__UpperCamelCase :Union[str, Any] = use_weighted_layer_sum
if (
(len(self.conv_stride) != self.num_feat_extract_layers)
or (len(self.conv_kernel) != self.num_feat_extract_layers)
or (len(self.conv_dim) != self.num_feat_extract_layers)
):
raise ValueError(
'''Configuration for convolutional layers is incorrect. It is required that `len(config.conv_dim)` =='''
''' `len(config.conv_stride)` == `len(config.conv_kernel)`, but is `len(config.conv_dim) ='''
f""" {len(self.conv_dim)}`, `len(config.conv_stride) = {len(self.conv_stride)}`,"""
f""" `len(config.conv_kernel) = {len(self.conv_kernel)}`.""")
# fine-tuning config parameters for SpecAugment: https://arxiv.org/abs/1904.08779
__UpperCamelCase :List[Any] = apply_spec_augment
__UpperCamelCase :Tuple = mask_time_prob
__UpperCamelCase :int = mask_time_length
__UpperCamelCase :Dict = mask_time_min_masks
__UpperCamelCase :str = mask_feature_prob
__UpperCamelCase :List[str] = mask_feature_length
__UpperCamelCase :Union[str, Any] = mask_feature_min_masks
# parameters for pretraining with codevector quantized representations
__UpperCamelCase :Optional[Any] = num_codevectors_per_group
__UpperCamelCase :List[Any] = num_codevector_groups
__UpperCamelCase :Tuple = contrastive_logits_temperature
__UpperCamelCase :Optional[int] = feat_quantizer_dropout
__UpperCamelCase :Optional[int] = num_negatives
__UpperCamelCase :List[Any] = codevector_dim
__UpperCamelCase :str = proj_codevector_dim
__UpperCamelCase :List[str] = diversity_loss_weight
# ctc loss
__UpperCamelCase :Tuple = ctc_loss_reduction
__UpperCamelCase :Tuple = ctc_zero_infinity
# adapter
__UpperCamelCase :List[str] = add_adapter
__UpperCamelCase :Tuple = adapter_kernel_size
__UpperCamelCase :str = adapter_stride
__UpperCamelCase :Tuple = num_adapter_layers
__UpperCamelCase :Tuple = output_hidden_size or hidden_size
__UpperCamelCase :Optional[Any] = adapter_attn_dim
# SequenceClassification-specific parameter. Feel free to ignore for other classes.
__UpperCamelCase :Optional[Any] = classifier_proj_size
# XVector-specific parameters. Feel free to ignore for other classes.
__UpperCamelCase :Optional[int] = list(__lowercase)
__UpperCamelCase :List[Any] = list(__lowercase)
__UpperCamelCase :List[Any] = list(__lowercase)
__UpperCamelCase :str = xvector_output_dim
@property
    def inputs_to_logits_ratio(self):
        return functools.reduce(operator.mul, self.conv_stride, 1)
| 43 | 0 |
from math import factorial
def solution(num: int = 100) -> int:
    """Returns the sum of the digits in the number num!."""
    return sum(map(int, str(factorial(num))))
if __name__ == "__main__":
print(solution(int(input('''Enter the Number: ''').strip())))
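    # Worked check: 10! = 3628800 and 3 + 6 + 2 + 8 + 8 + 0 + 0 == 27,
    # so solution(10) == 27.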
| 358 | """simple docstring"""
def solution(numerator: int = 3, denominator: int = 7, limit: int = 1_000_000) -> int:
    """Returns the numerator of the largest fraction strictly below
    numerator/denominator among fractions with denominators up to `limit`."""
    max_numerator = 0
    max_denominator = 1

    for current_denominator in range(1, limit + 1):
        current_numerator = current_denominator * numerator // denominator
        if current_denominator % denominator == 0:
            current_numerator -= 1
        if current_numerator * max_denominator > current_denominator * max_numerator:
            max_numerator = current_numerator
            max_denominator = current_denominator
    return max_numerator
if __name__ == "__main__":
print(solution(numerator=3, denominator=7, limit=1_000_000))
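    # Worked check on a tiny limit: among denominators up to 8 the closest
    # fraction below 3/7 is 2/5, so solution(3, 7, 8) == 2.
    assert solution(3, 7, 8) == 2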
| 85 | 0 |
"""simple docstring"""
# NOTE: This file is deprecated and will be removed in a future version.
# It only exists so that temporarily `from diffusers.pipelines import DiffusionPipeline` works
from ...utils import deprecate
from ..controlnet.multicontrolnet import MultiControlNetModel # noqa: F401
from ..controlnet.pipeline_controlnet import StableDiffusionControlNetPipeline # noqa: F401
deprecate(
'stable diffusion controlnet',
'0.22.0',
'Importing `StableDiffusionControlNetPipeline` or `MultiControlNetModel` from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion_controlnet is deprecated. Please import `from diffusers import StableDiffusionControlNetPipeline` instead.',
standard_warn=False,
stacklevel=3,
)
| 44 | """simple docstring"""
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from .tokenization_electra import ElectraTokenizer
VOCAB_FILES_NAMES = {'vocab_file': 'vocab.txt', 'tokenizer_file': 'tokenizer.json'}
PRETRAINED_VOCAB_FILES_MAP = {
'vocab_file': {
'google/electra-small-generator': (
'https://huggingface.co/google/electra-small-generator/resolve/main/vocab.txt'
),
'google/electra-base-generator': 'https://huggingface.co/google/electra-base-generator/resolve/main/vocab.txt',
'google/electra-large-generator': (
'https://huggingface.co/google/electra-large-generator/resolve/main/vocab.txt'
),
'google/electra-small-discriminator': (
'https://huggingface.co/google/electra-small-discriminator/resolve/main/vocab.txt'
),
'google/electra-base-discriminator': (
'https://huggingface.co/google/electra-base-discriminator/resolve/main/vocab.txt'
),
'google/electra-large-discriminator': (
'https://huggingface.co/google/electra-large-discriminator/resolve/main/vocab.txt'
),
},
'tokenizer_file': {
'google/electra-small-generator': (
'https://huggingface.co/google/electra-small-generator/resolve/main/tokenizer.json'
),
'google/electra-base-generator': (
'https://huggingface.co/google/electra-base-generator/resolve/main/tokenizer.json'
),
'google/electra-large-generator': (
'https://huggingface.co/google/electra-large-generator/resolve/main/tokenizer.json'
),
'google/electra-small-discriminator': (
'https://huggingface.co/google/electra-small-discriminator/resolve/main/tokenizer.json'
),
'google/electra-base-discriminator': (
'https://huggingface.co/google/electra-base-discriminator/resolve/main/tokenizer.json'
),
'google/electra-large-discriminator': (
'https://huggingface.co/google/electra-large-discriminator/resolve/main/tokenizer.json'
),
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
'google/electra-small-generator': 512,
'google/electra-base-generator': 512,
'google/electra-large-generator': 512,
'google/electra-small-discriminator': 512,
'google/electra-base-discriminator': 512,
'google/electra-large-discriminator': 512,
}
PRETRAINED_INIT_CONFIGURATION = {
'google/electra-small-generator': {'do_lower_case': True},
'google/electra-base-generator': {'do_lower_case': True},
'google/electra-large-generator': {'do_lower_case': True},
'google/electra-small-discriminator': {'do_lower_case': True},
'google/electra-base-discriminator': {'do_lower_case': True},
'google/electra-large-discriminator': {'do_lower_case': True},
}
class ElectraTokenizerFast(PreTrainedTokenizerFast):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    pretrained_init_configuration = PRETRAINED_INIT_CONFIGURATION
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    slow_tokenizer_class = ElectraTokenizer
    def __init__(self, vocab_file=None, tokenizer_file=None, do_lower_case=True, unk_token="[UNK]",
                 sep_token="[SEP]", pad_token="[PAD]", cls_token="[CLS]", mask_token="[MASK]",
                 tokenize_chinese_chars=True, strip_accents=None, **kwargs):
        super().__init__(vocab_file, tokenizer_file=tokenizer_file, do_lower_case=do_lower_case,
                         unk_token=unk_token, sep_token=sep_token, pad_token=pad_token, cls_token=cls_token,
                         mask_token=mask_token, tokenize_chinese_chars=tokenize_chinese_chars,
                         strip_accents=strip_accents, **kwargs)

        normalizer_state = json.loads(self.backend_tokenizer.normalizer.__getstate__())
        if (
            normalizer_state.get("lowercase", do_lower_case) != do_lower_case
            or normalizer_state.get("strip_accents", strip_accents) != strip_accents
            or normalizer_state.get("handle_chinese_chars", tokenize_chinese_chars) != tokenize_chinese_chars
        ):
            # Rebuild the backend normalizer so it matches the requested options.
            normalizer_class = getattr(normalizers, normalizer_state.pop("type"))
            normalizer_state["lowercase"] = do_lower_case
            normalizer_state["strip_accents"] = strip_accents
            normalizer_state["handle_chinese_chars"] = tokenize_chinese_chars
            self.backend_tokenizer.normalizer = normalizer_class(**normalizer_state)

        self.do_lower_case = do_lower_case
    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        output = [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
        if token_ids_1:
            output += token_ids_1 + [self.sep_token_id]
        return output

    def create_token_type_ids_from_sequences(self, token_ids_0, token_ids_1=None):
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1]

    def save_vocabulary(self, save_directory, filename_prefix=None):
        files = self._tokenizer.model.save(save_directory, name=filename_prefix)
        return tuple(files)
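

# Hedged usage sketch (not part of the original module):
#   tokenizer = ElectraTokenizerFast.from_pretrained("google/electra-small-discriminator")
#   tokenizer("ELECTRA discriminates replaced tokens.")["input_ids"]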
| 44 | 1 |
'''simple docstring'''
import unittest
from transformers import AutoTokenizer, is_flax_available
from transformers.testing_utils import require_flax, require_sentencepiece, require_tokenizers, slow
if is_flax_available():
import jax.numpy as jnp
from transformers import FlaxXLMRobertaModel
@require_sentencepiece
@require_tokenizers
@require_flax
class FlaxXLMRobertaModelIntegrationTest(unittest.TestCase):
    @slow
    def test_flax_xlm_roberta_base(self):
        model = FlaxXLMRobertaModel.from_pretrained("xlm-roberta-base")
        tokenizer = AutoTokenizer.from_pretrained("xlm-roberta-base")
        text = "The dog is cute and lives in the garden house"
        input_ids = jnp.array([tokenizer.encode(text)])
        expected_output_shape = (1, 12, 768)  # batch_size, sequence_length, embedding_vector_dim
        expected_output_values_last_dim = jnp.array(
            [[-0.0101, 0.1218, -0.0803, 0.0801, 0.1327, 0.0776, -0.1215, 0.2383, 0.3338, 0.3106, 0.0300, 0.0252]]
        )

        output = model(input_ids)["last_hidden_state"]
        self.assertEqual(output.shape, expected_output_shape)
        # compare the actual values for a slice of last dim
        self.assertTrue(jnp.allclose(output[:, :, -1], expected_output_values_last_dim, atol=1e-3))
| 219 |
'''simple docstring'''
from argparse import ArgumentParser
from .env import EnvironmentCommand
def main():
    parser = ArgumentParser("Diffusers CLI tool", usage="diffusers-cli <command> [<args>]")
    commands_parser = parser.add_subparsers(help="diffusers-cli command helpers")

    # Register commands
    EnvironmentCommand.register_subcommand(commands_parser)

    # Let's go
    args = parser.parse_args()

    if not hasattr(args, "func"):
        parser.print_help()
        exit(1)

    # Run
    service = args.func(args)
    service.run()
if __name__ == "__main__":
main() | 219 | 1 |
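
# Hedged usage note: when the package is installed, this entry point typically
# backs the `diffusers-cli` console script, so `diffusers-cli env` runs the
# EnvironmentCommand registered above.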
from __future__ import annotations
def allocation_num(number_of_bytes: int, partitions: int) -> list[str]:
    """Divides number_of_bytes into `partitions` contiguous 1-indexed byte ranges.
    (Function name reconstructed; the original was garbled.)"""
    if partitions <= 0:
        raise ValueError('partitions must be a positive number!')
    if partitions > number_of_bytes:
        raise ValueError('partitions can not > number_of_bytes!')
    bytes_per_partition = number_of_bytes // partitions
    allocation_list = []
    for i in range(partitions):
        start_bytes = i * bytes_per_partition + 1
        end_bytes = (
            number_of_bytes if i == partitions - 1 else (i + 1) * bytes_per_partition
        )
        allocation_list.append(f"""{start_bytes}-{end_bytes}""")
    return allocation_list
if __name__ == "__main__":
import doctest
doctest.testmod()
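
    # Illustrative run (values chosen for this example): split 100 bytes
    # across 3 partitions.
    print(allocation_num(100, 3))  # ['1-33', '34-66', '67-100']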
| 90 | import logging
from transformers import PretrainedConfig
logger = logging.getLogger(__name__)
BERTABS_FINETUNED_CONFIG_ARCHIVE_MAP = {
"""bertabs-finetuned-cnndm""": """https://huggingface.co/remi/bertabs-finetuned-cnndm-extractive-abstractive-summarization/resolve/main/config.json""",
}
class BertAbsConfig(PretrainedConfig):
    model_type = "bertabs"
    def __init__(self, vocab_size=30522, max_pos=512, enc_layers=6, enc_hidden_size=512, enc_heads=8,
                 enc_ff_size=512, enc_dropout=0.2, dec_layers=6, dec_hidden_size=768, dec_heads=8,
                 dec_ff_size=2048, dec_dropout=0.2, **kwargs):
        super().__init__(**kwargs)

        self.vocab_size = vocab_size
        self.max_pos = max_pos
        self.enc_layers = enc_layers
        self.enc_hidden_size = enc_hidden_size
        self.enc_heads = enc_heads
        self.enc_ff_size = enc_ff_size
        self.enc_dropout = enc_dropout
        self.dec_layers = dec_layers
        self.dec_hidden_size = dec_hidden_size
        self.dec_heads = dec_heads
        self.dec_ff_size = dec_ff_size
        self.dec_dropout = dec_dropout
| 140 | 0 |
'''simple docstring'''
def actual_power(a: int, b: int):
    """Exponentiation by squaring. With b < 0, int(b / 2) truncates toward
    zero, so the recursion still terminates at b == 0."""
    if b == 0:
        return 1
    if (b % 2) == 0:
        return actual_power(a, int(b / 2)) * actual_power(a, int(b / 2))
    else:
        return a * actual_power(a, int(b / 2)) * actual_power(a, int(b / 2))


def power(a: int, b: int) -> float:
    if b < 0:
        return 1 / actual_power(a, b)
    return actual_power(a, b)
if __name__ == "__main__":
print(power(-2, -3))
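    # Quick sanity checks: 2**10 == 1024 and 2**-3 == 0.125.
    assert power(2, 10) == 1024
    assert power(2, -3) == 0.125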
| 67 |
'''simple docstring'''
import os
from pathlib import Path
def load_cuda_kernels():
    from torch.utils.cpp_extension import load

    root = Path(__file__).resolve().parent.parent.parent / "kernels" / "deformable_detr"
    src_files = [
        root / filename
        for filename in [
            "vision.cpp",
            os.path.join("cpu", "ms_deform_attn_cpu.cpp"),
            os.path.join("cuda", "ms_deform_attn_cuda.cu"),
        ]
    ]

    load(
        "MultiScaleDeformableAttention",
        src_files,
        with_cuda=True,
        extra_include_paths=[str(root)],
        extra_cflags=["-DWITH_CUDA=1"],
        extra_cuda_cflags=[
            "-DCUDA_HAS_FP16=1",
            "-D__CUDA_NO_HALF_OPERATORS__",
            "-D__CUDA_NO_HALF_CONVERSIONS__",
            "-D__CUDA_NO_HALF2_OPERATORS__",
        ],
    )

    import MultiScaleDeformableAttention as MSDA

    return MSDA
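

# Hedged usage sketch (not part of the original file): callers usually guard
# the JIT build, since it needs a CUDA toolchain available at runtime.
#   try:
#       MultiScaleDeformableAttention = load_cuda_kernels()
#   except Exception as exc:
#       print(f"Could not load custom CUDA kernels: {exc}")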
| 67 | 1 |
'''simple docstring'''
from __future__ import annotations
def min_path_cost(matrix: list[list[int]]) -> int:
    """In-place DP: returns the minimal top-left-to-bottom-right path cost.
    (Function and parameter names reconstructed; originals were garbled.)"""
    # preprocessing the first row
    for i in range(1, len(matrix[0])):
        matrix[0][i] += matrix[0][i - 1]

    # preprocessing the first column
    for i in range(1, len(matrix)):
        matrix[i][0] += matrix[i - 1][0]

    # updating the path cost for current position
    for i in range(1, len(matrix)):
        for j in range(1, len(matrix[0])):
            matrix[i][j] += min(matrix[i - 1][j], matrix[i][j - 1])

    return matrix[-1][-1]
if __name__ == "__main__":
import doctest
doctest.testmod()
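
    # Illustrative check: the cheapest monotone path through
    # [[1, 3, 1], [1, 5, 1], [4, 2, 1]] costs 1 + 3 + 1 + 1 + 1 = 7.
    print(min_path_cost([[1, 3, 1], [1, 5, 1], [4, 2, 1]]))  # 7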
| 85 |
from __future__ import annotations
import unittest
from transformers import is_tf_available
from transformers.testing_utils import require_sentencepiece, require_tf, require_tokenizers, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import numpy as np
import tensorflow as tf
from transformers import (
TF_FLAUBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
FlaubertConfig,
TFFlaubertForMultipleChoice,
TFFlaubertForQuestionAnsweringSimple,
TFFlaubertForSequenceClassification,
TFFlaubertForTokenClassification,
TFFlaubertModel,
TFFlaubertWithLMHeadModel,
)
class __snake_case :
def __init__( self ,snake_case ,):
'''simple docstring'''
lowercase : Any = parent
lowercase : Tuple = 13
lowercase : str = 7
lowercase : Dict = True
lowercase : Dict = True
lowercase : str = True
lowercase : List[str] = True
lowercase : int = True
lowercase : Union[str, Any] = False
lowercase : Dict = False
lowercase : List[Any] = False
lowercase : List[Any] = 2
lowercase : Optional[Any] = 99
lowercase : int = 0
lowercase : Tuple = 32
lowercase : int = 2
lowercase : Tuple = 4
lowercase : List[Any] = 0.1
lowercase : Tuple = 0.1
lowercase : List[Any] = 512
lowercase : int = 16
lowercase : Dict = 2
lowercase : int = 0.02
lowercase : Union[str, Any] = 3
lowercase : Any = 4
lowercase : List[Any] = """last"""
lowercase : Tuple = True
lowercase : List[Any] = None
lowercase : Any = 0
    def prepare_config_and_inputs( self ):
        '''simple docstring'''
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size )
        input_mask = random_attention_mask([self.batch_size, self.seq_length], dtype=tf.float32 )
        input_lengths = None
        if self.use_input_lengths:
            input_lengths = (
                ids_tensor([self.batch_size], vocab_size=2 ) + self.seq_length - 2
            )  # small variation of seq_length
        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.n_langs )
        sequence_labels = None
        token_labels = None
        is_impossible_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size )
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels )
            is_impossible_labels = ids_tensor([self.batch_size], 2, dtype=tf.float32 )
            choice_labels = ids_tensor([self.batch_size], self.num_choices )
        config = FlaubertConfig(
            vocab_size=self.vocab_size, n_special=self.n_special, emb_dim=self.hidden_size, n_layers=self.num_hidden_layers, n_heads=self.num_attention_heads, dropout=self.hidden_dropout_prob, attention_dropout=self.attention_probs_dropout_prob, gelu_activation=self.gelu_activation, sinusoidal_embeddings=self.sinusoidal_embeddings, asm=self.asm, causal=self.causal, n_langs=self.n_langs, max_position_embeddings=self.max_position_embeddings, initializer_range=self.initializer_range, summary_type=self.summary_type, use_proj=self.use_proj, bos_token_id=self.bos_token_id, )
        return (
            config,
            input_ids,
            token_type_ids,
            input_lengths,
            sequence_labels,
            token_labels,
            is_impossible_labels,
            choice_labels,
            input_mask,
        )
    def create_and_check_flaubert_model( self, config, input_ids, token_type_ids, input_lengths, sequence_labels, token_labels, is_impossible_labels, choice_labels, input_mask, ):
        '''simple docstring'''
        model = TFFlaubertModel(config=config )
        inputs = {"input_ids": input_ids, "lengths": input_lengths, "langs": token_type_ids}
        result = model(inputs )
        inputs = [input_ids, input_mask]
        result = model(inputs )
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size) )

    def create_and_check_flaubert_lm_head( self, config, input_ids, token_type_ids, input_lengths, sequence_labels, token_labels, is_impossible_labels, choice_labels, input_mask, ):
        '''simple docstring'''
        model = TFFlaubertWithLMHeadModel(config )
        inputs = {"input_ids": input_ids, "lengths": input_lengths, "langs": token_type_ids}
        result = model(inputs )
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size) )

    def create_and_check_flaubert_qa( self, config, input_ids, token_type_ids, input_lengths, sequence_labels, token_labels, is_impossible_labels, choice_labels, input_mask, ):
        '''simple docstring'''
        model = TFFlaubertForQuestionAnsweringSimple(config )
        inputs = {"input_ids": input_ids, "lengths": input_lengths}
        result = model(inputs )
        self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length) )
        self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length) )

    def create_and_check_flaubert_sequence_classif( self, config, input_ids, token_type_ids, input_lengths, sequence_labels, token_labels, is_impossible_labels, choice_labels, input_mask, ):
        '''simple docstring'''
        model = TFFlaubertForSequenceClassification(config )
        inputs = {"input_ids": input_ids, "lengths": input_lengths}
        result = model(inputs )
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.type_sequence_label_size) )

    def create_and_check_flaubert_for_token_classification( self, config, input_ids, token_type_ids, input_lengths, sequence_labels, token_labels, is_impossible_labels, choice_labels, input_mask, ):
        '''simple docstring'''
        config.num_labels = self.num_labels
        model = TFFlaubertForTokenClassification(config=config )
        inputs = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids}
        result = model(inputs )
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels) )

    def create_and_check_flaubert_for_multiple_choice( self, config, input_ids, token_type_ids, input_lengths, sequence_labels, token_labels, is_impossible_labels, choice_labels, input_mask, ):
        '''simple docstring'''
        config.num_choices = self.num_choices
        model = TFFlaubertForMultipleChoice(config=config )
        multiple_choice_inputs_ids = tf.tile(tf.expand_dims(input_ids, 1 ), (1, self.num_choices, 1) )
        multiple_choice_input_mask = tf.tile(tf.expand_dims(input_mask, 1 ), (1, self.num_choices, 1) )
        multiple_choice_token_type_ids = tf.tile(tf.expand_dims(token_type_ids, 1 ), (1, self.num_choices, 1) )
        inputs = {
            "input_ids": multiple_choice_inputs_ids,
            "attention_mask": multiple_choice_input_mask,
            "token_type_ids": multiple_choice_token_type_ids,
        }
        result = model(inputs )
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_choices) )
    def prepare_config_and_inputs_for_common( self ):
        '''simple docstring'''
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_lengths,
            sequence_labels,
            token_labels,
            is_impossible_labels,
            choice_labels,
            input_mask,
        ) = config_and_inputs
        inputs_dict = {
            "input_ids": input_ids,
            "token_type_ids": token_type_ids,
            "langs": token_type_ids,
            "lengths": input_lengths,
        }
        return config, inputs_dict
@require_tf
class TFFlaubertModelTest( TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase ):
    all_model_classes = (
(
TFFlaubertModel,
TFFlaubertWithLMHeadModel,
TFFlaubertForSequenceClassification,
TFFlaubertForQuestionAnsweringSimple,
TFFlaubertForTokenClassification,
TFFlaubertForMultipleChoice,
)
if is_tf_available()
else ()
)
    all_generative_model_classes = (
        (TFFlaubertWithLMHeadModel,) if is_tf_available() else ()
    )  # TODO (PVP): Check other models whether language generation is also applicable
    pipeline_model_mapping = (
{
"feature-extraction": TFFlaubertModel,
"fill-mask": TFFlaubertWithLMHeadModel,
"question-answering": TFFlaubertForQuestionAnsweringSimple,
"text-classification": TFFlaubertForSequenceClassification,
"token-classification": TFFlaubertForTokenClassification,
"zero-shot": TFFlaubertForSequenceClassification,
}
if is_tf_available()
else {}
)
    test_head_masking = False
    test_onnx = False
    def is_pipeline_test_to_skip( self, pipeline_test_casse_name, config_class, model_architecture, tokenizer_name, processor_name ):
'''simple docstring'''
if (
pipeline_test_casse_name == "QAPipelineTests"
and tokenizer_name is not None
and not tokenizer_name.endswith("""Fast""" )
):
# `QAPipelineTests` fails for a few models when the slower tokenizer are used.
# (The slower tokenizers were never used for pipeline tests before the pipeline testing rework)
# TODO: check (and possibly fix) the `QAPipelineTests` with slower tokenizer
return True
return False
    def setUp( self ):
        '''simple docstring'''
        self.model_tester = TFFlaubertModelTester(self )
        self.config_tester = ConfigTester(self, config_class=FlaubertConfig, emb_dim=37 )

    def test_config( self ):
        '''simple docstring'''
        self.config_tester.run_common_tests()
    def test_flaubert_model( self ):
        '''simple docstring'''
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_flaubert_model(*config_and_inputs )

    def test_flaubert_lm_head( self ):
        '''simple docstring'''
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_flaubert_lm_head(*config_and_inputs )

    def test_flaubert_qa( self ):
        '''simple docstring'''
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_flaubert_qa(*config_and_inputs )

    def test_flaubert_sequence_classif( self ):
        '''simple docstring'''
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_flaubert_sequence_classif(*config_and_inputs )

    def test_flaubert_token_classification( self ):
        '''simple docstring'''
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_flaubert_for_token_classification(*config_and_inputs )

    def test_flaubert_multiple_choice( self ):
        '''simple docstring'''
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_flaubert_for_multiple_choice(*config_and_inputs )
@slow
    def test_model_from_pretrained( self ):
        '''simple docstring'''
        for model_name in TF_FLAUBERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = TFFlaubertModel.from_pretrained(model_name )
            self.assertIsNotNone(model )
@require_tf
@require_sentencepiece
@require_tokenizers
class TFFlaubertModelIntegrationTest( unittest.TestCase ):
@slow
    def test_output_embeds_base_model( self ):
        '''simple docstring'''
        model = TFFlaubertModel.from_pretrained("jplu/tf-flaubert-small-cased" )
        input_ids = tf.convert_to_tensor(
            [[0, 158, 735, 2592, 1424, 6727, 82, 1]], dtype=tf.int32, )  # "J'aime flaubert !"
        output = model(input_ids )[0]
        expected_shape = tf.TensorShape((1, 8, 512) )
        self.assertEqual(output.shape, expected_shape )
        # compare the actual values for a slice.
        expected_slice = tf.convert_to_tensor(
            [
                [
                    [-1.8768773, -1.566555, 0.27072418],
                    [-1.6920038, -0.5873505, 1.9329599],
                    [-2.9563985, -1.6993835, 1.7972052],
                ]
            ], dtype=tf.float32, )
        self.assertTrue(np.allclose(output[:, :3, :3].numpy(), expected_slice.numpy(), atol=1e-4 ) )
| 20 | 0 |
__SCREAMING_SNAKE_CASE = [0, 2, 4, 6, 8]
__SCREAMING_SNAKE_CASE = [1, 3, 5, 7, 9]
def reversible_numbers(remaining_length: int, remainder: int, digits: list[int], length: int) -> int:
    """Count the reversible numbers of the given length that can still be
    completed from the partially filled ``digits`` buffer."""
    if remaining_length == 0:
        if digits[0] == 0 or digits[-1] == 0:
            return 0
        for i in range(length // 2 - 1, -1, -1):
            remainder += digits[i] + digits[length - i - 1]
            if remainder % 2 == 0:
                return 0
            remainder //= 10
        return 1
    if remaining_length == 1:
        if remainder % 2 == 0:
            return 0
        result = 0
        for digit in range(10):
            digits[length // 2] = digit
            result += reversible_numbers(
                0, (remainder + 2 * digit) // 10, digits, length )
        return result
    result = 0
    for digit1 in range(10):
        digits[(length + remaining_length) // 2 - 1] = digit1
        if (remainder + digit1) % 2 == 0:
            other_parity_digits = ODD_DIGITS
        else:
            other_parity_digits = EVEN_DIGITS
        for digit2 in other_parity_digits:
            digits[(length - remaining_length) // 2] = digit2
            result += reversible_numbers(
                remaining_length - 2, (remainder + digit1 + digit2) // 10, digits, length, )
    return result


def solution(max_power: int = 9) -> int:
    """Count all reversible numbers below 10**max_power (Project Euler 145)."""
    result = 0
    for length in range(1, max_power + 1):
        result += reversible_numbers(length, 0, [0] * length, length )
    return result
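# Quick sanity check (an illustration of the definition, not part of the
# original solution): 36 is reversible because 36 + 63 = 99 has only odd
# digits, while 28 is not because 28 + 82 = 110 contains even digits. The
# problem statement gives 120 reversible numbers below one thousand.
def _is_reversible(n: int) -> bool:
    # Brute-force definition used only to cross-check small cases.
    if n % 10 == 0:
        return False
    return all(int(d) % 2 == 1 for d in str(n + int(str(n)[::-1])))

assert _is_reversible(36) and not _is_reversible(28)
assert sum(_is_reversible(n) for n in range(1, 1000)) == 120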
if __name__ == "__main__":
print(F"""{solution() = }""") | 371 |
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
__SCREAMING_SNAKE_CASE = logging.get_logger(__name__)
__SCREAMING_SNAKE_CASE = {"""vocab_file""": """spiece.model"""}
__SCREAMING_SNAKE_CASE = {
"""vocab_file""": {
"""bert_for_seq_generation""": (
"""https://huggingface.co/google/bert_for_seq_generation_L-24_bbc_encoder/resolve/main/spiece.model"""
),
}
}
__SCREAMING_SNAKE_CASE = {"""bert_for_seq_generation""": 512}
class BertGenerationTokenizer( PreTrainedTokenizer ):
    '''simple docstring'''
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    prefix_tokens: List[int] = []
    model_input_names = ["input_ids", "attention_mask"]
    def __init__( self, vocab_file, bos_token: str = "<s>", eos_token: str = "</s>", unk_token: str = "<unk>", pad_token: str = "<pad>", sep_token: str = "<::::>", sp_model_kwargs: Optional[Dict[str, Any]] = None, **kwargs, ) -> None:
        self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs
        super().__init__(
            bos_token=bos_token, eos_token=eos_token, unk_token=unk_token, pad_token=pad_token, sep_token=sep_token, sp_model_kwargs=self.sp_model_kwargs, **kwargs, )
        self.vocab_file = vocab_file
        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs )
        self.sp_model.Load(vocab_file )
    @property
    def vocab_size( self ) -> int:
        return self.sp_model.get_piece_size()
    def get_vocab( self ) -> Dict:
        vocab = {self.convert_ids_to_tokens(i ): i for i in range(self.vocab_size )}
vocab.update(self.added_tokens_encoder )
return vocab
    def __getstate__( self ) -> Dict:
        state = self.__dict__.copy()
        state["sp_model"] = None
        return state
    def __setstate__( self, d ) -> None:
        self.__dict__ = d
        # for backward compatibility
        if not hasattr(self, "sp_model_kwargs" ):
            self.sp_model_kwargs = {}
        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs )
        self.sp_model.Load(self.vocab_file )
    def _tokenize( self, text: str ) -> List[str]:
        return self.sp_model.encode(text, out_type=str )
    def _convert_token_to_id( self, token: str ) -> int:
        return self.sp_model.piece_to_id(token )
    def _convert_id_to_token( self, index: int ) -> str:
        return self.sp_model.IdToPiece(index )
    def convert_tokens_to_string( self, tokens: List[str] ) -> str:
        current_sub_tokens = []
        out_string = ""
        for token in tokens:
            # make sure that special tokens are not decoded using sentencepiece model
            if token in self.all_special_tokens:
                out_string += self.sp_model.decode(current_sub_tokens ) + token
                current_sub_tokens = []
            else:
                current_sub_tokens.append(token )
        out_string += self.sp_model.decode(current_sub_tokens )
        return out_string.strip()
    def save_vocabulary( self, save_directory: str, filename_prefix: Optional[str] = None ) -> Tuple[str]:
        if not os.path.isdir(save_directory ):
            logger.error(F"""Vocabulary path ({save_directory}) should be a directory""" )
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"] )
        if os.path.abspath(self.vocab_file ) != os.path.abspath(out_vocab_file ) and os.path.isfile(self.vocab_file ):
            copyfile(self.vocab_file, out_vocab_file )
        elif not os.path.isfile(self.vocab_file ):
            with open(out_vocab_file, "wb" ) as fi:
                content_spiece_model = self.sp_model.serialized_model_proto()
                fi.write(content_spiece_model )
        return (out_vocab_file,) | 256 | 0 |
"""simple docstring"""
import warnings
from typing import Dict
import numpy as np
from ..utils import ExplicitEnum, add_end_docstrings, is_tf_available, is_torch_available
from .base import PIPELINE_INIT_ARGS, GenericTensor, Pipeline
if is_tf_available():
from ..models.auto.modeling_tf_auto import TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING
if is_torch_available():
from ..models.auto.modeling_auto import MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING
def sigmoid(_outputs):
    '''simple docstring'''
    return 1.0 / (1.0 + np.exp(-_outputs ))
def softmax(_outputs):
    '''simple docstring'''
    maxes = np.max(_outputs, axis=-1, keepdims=True )
    shifted_exp = np.exp(_outputs - maxes )
    return shifted_exp / shifted_exp.sum(axis=-1, keepdims=True )
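# A quick illustration (mine, not from the original module): softmax turns each
# row of logits into a probability distribution, while sigmoid squashes every
# logit independently into (0, 1).
#
#     >>> softmax(np.array([[2.0, 0.5, -1.0]])).sum(axis=-1)
#     array([1.])
#     >>> float(sigmoid(np.array(0.0)))
#     0.5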
class ClassificationFunction( ExplicitEnum ):
    SIGMOID = "sigmoid"
    SOFTMAX = "softmax"
    NONE = "none"
@add_end_docstrings(
    PIPELINE_INIT_ARGS , R"\n        return_all_scores (`bool`, *optional*, defaults to `False`):\n            Whether to return all prediction scores or just the one of the predicted class.\n        function_to_apply (`str`, *optional*, defaults to `\"default\"`):\n            The function to apply to the model outputs in order to retrieve the scores. Accepts four different values:\n\n            - `\"default\"`: if the model has a single label, will apply the sigmoid function on the output. If the model\n            has several labels, will apply the softmax function on the output.\n            - `\"sigmoid\"`: Applies the sigmoid function on the output.\n            - `\"softmax\"`: Applies the softmax function on the output.\n            - `\"none\"`: Does not apply any function on the output.\n        " , )
class TextClassificationPipeline( Pipeline ):
    return_all_scores = False
    function_to_apply = ClassificationFunction.NONE
    def __init__( self, **kwargs ):
        """simple docstring"""
        super().__init__(**kwargs )
        self.check_model_type(
TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING
if self.framework == '''tf'''
else MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING )
    def _sanitize_parameters( self, return_all_scores=None, function_to_apply=None, top_k="", **tokenizer_kwargs ):
        """simple docstring"""
        preprocess_params = tokenizer_kwargs
        postprocess_params = {}
        if hasattr(self.model.config, '''return_all_scores''' ) and return_all_scores is None:
            return_all_scores = self.model.config.return_all_scores
        if isinstance(top_k, int ) or top_k is None:
            postprocess_params["top_k"] = top_k
            postprocess_params["_legacy"] = False
        elif return_all_scores is not None:
            warnings.warn(
                '''`return_all_scores` is now deprecated, if want a similar functionality use `top_k=None` instead of'''
                ''' `return_all_scores=True` or `top_k=1` instead of `return_all_scores=False`.''', UserWarning, )
            if return_all_scores:
                postprocess_params["top_k"] = None
            else:
                postprocess_params["top_k"] = 1
        if isinstance(function_to_apply, str ):
            function_to_apply = ClassificationFunction[function_to_apply.upper()]
        if function_to_apply is not None:
            postprocess_params["function_to_apply"] = function_to_apply
        return preprocess_params, {}, postprocess_params
    def __call__( self, *args, **kwargs ):
        """simple docstring"""
        result = super().__call__(*args, **kwargs )
        # TODO try and retrieve it in a nicer way from _sanitize_parameters.
        _legacy = '''top_k''' not in kwargs
        if isinstance(args[0], str ) and _legacy:
            # This pipeline is odd, and return a list when single item is run
            return [result]
        else:
            return result
    def preprocess( self, inputs, **tokenizer_kwargs ) -> Dict[str, GenericTensor]:
        """simple docstring"""
        return_tensors = self.framework
        if isinstance(inputs, dict ):
            return self.tokenizer(**inputs, return_tensors=return_tensors, **tokenizer_kwargs )
        elif isinstance(inputs, list ) and len(inputs ) == 1 and isinstance(inputs[0], list ) and len(inputs[0] ) == 2:
            # It used to be valid to use a list of list of list for text pairs, keeping this path for BC
            return self.tokenizer(
                text=inputs[0][0], text_pair=inputs[0][1], return_tensors=return_tensors, **tokenizer_kwargs )
        elif isinstance(inputs, list ):
            # This is likely an invalid usage of the pipeline attempting to pass text pairs.
            raise ValueError(
                '''The pipeline received invalid inputs, if you are trying to send text pairs, you can try to send a'''
                ''' dictionary `{"text": "My text", "text_pair": "My pair"}` in order to send a text pair.''' )
        return self.tokenizer(inputs, return_tensors=return_tensors, **tokenizer_kwargs )
    def _forward( self, model_inputs ):
        """simple docstring"""
        return self.model(**model_inputs )
    def postprocess( self, model_outputs, function_to_apply=None, top_k=1, _legacy=True ):
        """simple docstring"""
        if function_to_apply is None:
            if self.model.config.problem_type == "multi_label_classification" or self.model.config.num_labels == 1:
                function_to_apply = ClassificationFunction.SIGMOID
            elif self.model.config.problem_type == "single_label_classification" or self.model.config.num_labels > 1:
                function_to_apply = ClassificationFunction.SOFTMAX
            elif hasattr(self.model.config, '''function_to_apply''' ) and function_to_apply is None:
                function_to_apply = self.model.config.function_to_apply
            else:
                function_to_apply = ClassificationFunction.NONE
        outputs = model_outputs['''logits'''][0]
        outputs = outputs.numpy()
        if function_to_apply == ClassificationFunction.SIGMOID:
            scores = sigmoid(outputs )
        elif function_to_apply == ClassificationFunction.SOFTMAX:
            scores = softmax(outputs )
        elif function_to_apply == ClassificationFunction.NONE:
            scores = outputs
        else:
            raise ValueError(f'''Unrecognized `function_to_apply` argument: {function_to_apply}''' )
        if top_k == 1 and _legacy:
            return {"label": self.model.config.id2label[scores.argmax().item()], "score": scores.max().item()}
        dict_scores = [
            {'''label''': self.model.config.id2label[i], '''score''': score.item()} for i, score in enumerate(scores )
        ]
        if not _legacy:
            dict_scores.sort(key=lambda x: x["score"], reverse=True )
            if top_k is not None:
                dict_scores = dict_scores[:top_k]
        return dict_scores
| 72 |
'''simple docstring'''
from statistics import mean, stdev
def normalization( data: list, ndigits: int = 3 ):
    '''simple docstring'''
    x_min = min(data )
    x_max = max(data )
    # normalize data
    return [round((x - x_min) / (x_max - x_min), ndigits ) for x in data]
def standardization( data: list, ndigits: int = 3 ):
    '''simple docstring'''
    mu = mean(data )
    sigma = stdev(data )
    # standardize data
    return [round((x - mu) / (sigma), ndigits ) for x in data]
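# A brief usage sketch (sample values chosen for illustration): min-max
# normalization maps the data into [0, 1]; standardization gives mean ~0 and
# sample standard deviation ~1.
if __name__ == "__main__":
    sample = [2.0, 4.0, 6.0, 8.0]
    print(normalization(sample))     # [0.0, 0.333, 0.667, 1.0]
    print(standardization(sample))   # roughly [-1.162, -0.387, 0.387, 1.162]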
| 85 | 0 |
import unittest
from transformers import MobileBertConfig, is_torch_available
from transformers.models.auto import get_values
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
MODEL_FOR_PRETRAINING_MAPPING,
MobileBertForMaskedLM,
MobileBertForMultipleChoice,
MobileBertForNextSentencePrediction,
MobileBertForPreTraining,
MobileBertForQuestionAnswering,
MobileBertForSequenceClassification,
MobileBertForTokenClassification,
MobileBertModel,
)
class MobileBertModelTester:
"""simple docstring"""
    def __init__( self, parent, batch_size=13, seq_length=7, is_training=True, use_input_mask=True, use_token_type_ids=True, use_labels=True, vocab_size=99, hidden_size=64, embedding_size=32, num_hidden_layers=5, num_attention_heads=4, intermediate_size=37, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=5_12, type_vocab_size=16, type_sequence_label_size=2, initializer_range=0.02, num_labels=3, num_choices=4, scope=None, ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.embedding_size = embedding_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope
    def prepare_config_and_inputs( self ):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size )
        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length] )
        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size )
        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size )
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels )
            choice_labels = ids_tensor([self.batch_size], self.num_choices )
        config = self.get_config()
        return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    def get_config( self ):
        return MobileBertConfig(
            vocab_size=self.vocab_size, hidden_size=self.hidden_size, num_hidden_layers=self.num_hidden_layers, num_attention_heads=self.num_attention_heads, intermediate_size=self.intermediate_size, embedding_size=self.embedding_size, hidden_act=self.hidden_act, hidden_dropout_prob=self.hidden_dropout_prob, attention_probs_dropout_prob=self.attention_probs_dropout_prob, max_position_embeddings=self.max_position_embeddings, type_vocab_size=self.type_vocab_size, is_decoder=False, initializer_range=self.initializer_range, )
    def create_and_check_mobilebert_model( self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels ):
        model = MobileBertModel(config=config )
        model.to(torch_device )
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids )
        result = model(input_ids, token_type_ids=token_type_ids )
        result = model(input_ids )
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size) )
        self.parent.assertEqual(result.pooler_output.shape, (self.batch_size, self.hidden_size) )

    def create_and_check_mobilebert_for_masked_lm( self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels ):
        model = MobileBertForMaskedLM(config=config )
        model.to(torch_device )
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels )
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size) )

    def create_and_check_mobilebert_for_next_sequence_prediction( self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels ):
        model = MobileBertForNextSentencePrediction(config=config )
        model.to(torch_device )
        model.eval()
        result = model(
            input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=sequence_labels, )
        self.parent.assertEqual(result.logits.shape, (self.batch_size, 2) )

    def create_and_check_mobilebert_for_pretraining( self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels ):
        model = MobileBertForPreTraining(config=config )
        model.to(torch_device )
        model.eval()
        result = model(
            input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels, next_sentence_label=sequence_labels, )
        self.parent.assertEqual(result.prediction_logits.shape, (self.batch_size, self.seq_length, self.vocab_size) )
        self.parent.assertEqual(result.seq_relationship_logits.shape, (self.batch_size, 2) )

    def create_and_check_mobilebert_for_question_answering( self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels ):
        model = MobileBertForQuestionAnswering(config=config )
        model.to(torch_device )
        model.eval()
        result = model(
            input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, start_positions=sequence_labels, end_positions=sequence_labels, )
        self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length) )
        self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length) )

    def create_and_check_mobilebert_for_sequence_classification( self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels ):
        config.num_labels = self.num_labels
        model = MobileBertForSequenceClassification(config )
        model.to(torch_device )
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=sequence_labels )
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels) )

    def create_and_check_mobilebert_for_token_classification( self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels ):
        config.num_labels = self.num_labels
        model = MobileBertForTokenClassification(config=config )
        model.to(torch_device )
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels )
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels) )

    def create_and_check_mobilebert_for_multiple_choice( self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels ):
        config.num_choices = self.num_choices
        model = MobileBertForMultipleChoice(config=config )
        model.to(torch_device )
        model.eval()
        multiple_choice_inputs_ids = input_ids.unsqueeze(1 ).expand(-1, self.num_choices, -1 ).contiguous()
        multiple_choice_token_type_ids = token_type_ids.unsqueeze(1 ).expand(-1, self.num_choices, -1 ).contiguous()
        multiple_choice_input_mask = input_mask.unsqueeze(1 ).expand(-1, self.num_choices, -1 ).contiguous()
        result = model(
            multiple_choice_inputs_ids, attention_mask=multiple_choice_input_mask, token_type_ids=multiple_choice_token_type_ids, labels=choice_labels, )
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_choices) )
    def prepare_config_and_inputs_for_common( self ):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {'input_ids': input_ids, 'token_type_ids': token_type_ids, 'attention_mask': input_mask}
@require_torch
class MobileBertModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase ):
"""simple docstring"""
    all_model_classes = (
(
MobileBertModel,
MobileBertForMaskedLM,
MobileBertForMultipleChoice,
MobileBertForNextSentencePrediction,
MobileBertForPreTraining,
MobileBertForQuestionAnswering,
MobileBertForSequenceClassification,
MobileBertForTokenClassification,
)
if is_torch_available()
else ()
)
    pipeline_model_mapping = (
{
'feature-extraction': MobileBertModel,
'fill-mask': MobileBertForMaskedLM,
'question-answering': MobileBertForQuestionAnswering,
'text-classification': MobileBertForSequenceClassification,
'token-classification': MobileBertForTokenClassification,
'zero-shot': MobileBertForSequenceClassification,
}
if is_torch_available()
else {}
)
    fx_compatible = True
    def _prepare_for_class( self, inputs_dict, model_class, return_labels=False ):
        inputs_dict = super()._prepare_for_class(inputs_dict, model_class, return_labels=return_labels )
        if return_labels:
            if model_class in get_values(MODEL_FOR_PRETRAINING_MAPPING ):
                inputs_dict["labels"] = torch.zeros(
                    (self.model_tester.batch_size, self.model_tester.seq_length), dtype=torch.long, device=torch_device )
                inputs_dict["next_sentence_label"] = torch.zeros(
                    self.model_tester.batch_size, dtype=torch.long, device=torch_device )
        return inputs_dict
    def setUp( self ):
        self.model_tester = MobileBertModelTester(self )
        self.config_tester = ConfigTester(self, config_class=MobileBertConfig, hidden_size=37 )
    def test_config( self ):
        self.config_tester.run_common_tests()
    def test_mobilebert_model( self ):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mobilebert_model(*config_and_inputs )
    def test_for_masked_lm( self ):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mobilebert_for_masked_lm(*config_and_inputs )
    def test_for_multiple_choice( self ):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mobilebert_for_multiple_choice(*config_and_inputs )
    def test_for_next_sequence_prediction( self ):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mobilebert_for_next_sequence_prediction(*config_and_inputs )
    def test_for_pretraining( self ):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mobilebert_for_pretraining(*config_and_inputs )
    def test_for_question_answering( self ):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mobilebert_for_question_answering(*config_and_inputs )
    def test_for_sequence_classification( self ):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mobilebert_for_sequence_classification(*config_and_inputs )
    def test_for_token_classification( self ):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mobilebert_for_token_classification(*config_and_inputs )
def _long_tensor(tok_lst):
    return torch.tensor(
        tok_lst, dtype=torch.long, device=torch_device, )
TOLERANCE = 1e-3
@require_torch
@require_sentencepiece
@require_tokenizers
class MobileBertModelIntegrationTests(unittest.TestCase ):
"""simple docstring"""
@slow
    def test_inference_no_head( self ):
        model = MobileBertModel.from_pretrained("google/mobilebert-uncased" ).to(torch_device )
        input_ids = _long_tensor([[101, 7110, 1005, 1056, 2023, 11333, 17413, 1029, 102]] )
        with torch.no_grad():
            output = model(input_ids )[0]
        expected_shape = torch.Size((1, 9, 512) )
        self.assertEqual(output.shape, expected_shape )
        expected_slice = torch.tensor(
            [
                [
                    [-2.4736526e07, 8.2691656e04, 1.6521838e05],
                    [-5.7541704e-01, 3.9056022e00, 4.4011507e00],
                    [2.6047359e00, 1.5677652e00, -1.7324188e-01],
                ]
            ], device=torch_device, )
# MobileBERT results range from 10e0 to 10e8. Even a 0.0000001% difference with a value of 10e8 results in a
# ~1 difference, it's therefore not a good idea to measure using addition.
# Here, we instead divide the expected result with the result in order to obtain ~1. We then check that the
# result is held between bounds: 1 - TOLERANCE < expected_result / result < 1 + TOLERANCE
        lower_bound = torch.all((expected_slice / output[..., :3, :3]) >= 1 - TOLERANCE )
        upper_bound = torch.all((expected_slice / output[..., :3, :3]) <= 1 + TOLERANCE )
self.assertTrue(lower_bound and upper_bound )
| 358 |
'''simple docstring'''
from __future__ import annotations
from collections.abc import Callable
from typing import Generic, TypeVar
lowerCAmelCase_ : List[str] = TypeVar('T')
lowerCAmelCase_ : Dict = TypeVar('U')
class DoubleLinkedListNode(Generic[T, U] ):
    """simple docstring"""
    def __init__( self, key: T | None, val: U | None ):
        self.key = key
        self.val = val
        self.next: DoubleLinkedListNode[T, U] | None = None
        self.prev: DoubleLinkedListNode[T, U] | None = None
def __repr__( self : Any ):
return (
f'Node: key: {self.key}, val: {self.val}, '
f'has next: {bool(self.next )}, has prev: {bool(self.prev )}'
)
class DoubleLinkedList(Generic[T, U] ):
    """simple docstring"""
    def __init__( self ):
        self.head: DoubleLinkedListNode[T, U] = DoubleLinkedListNode(None, None )
        self.rear: DoubleLinkedListNode[T, U] = DoubleLinkedListNode(None, None )
        self.head.next, self.rear.prev = self.rear, self.head
    def __repr__( self ):
        rep = ["DoubleLinkedList"]
        node = self.head
        while node.next is not None:
            rep.append(str(node ) )
            node = node.next
        rep.append(str(self.rear ) )
        return ",\n    ".join(rep )
    def add( self, node: DoubleLinkedListNode[T, U] ) -> None:
        previous = self.rear.prev
        # All nodes other than self.head are guaranteed to have non-None previous
        assert previous is not None
        previous.next = node
        node.prev = previous
        self.rear.prev = node
        node.next = self.rear
    def remove( self, node: DoubleLinkedListNode[T, U] ) -> DoubleLinkedListNode[T, U] | None:
        if node.prev is None or node.next is None:
            return None
        node.prev.next = node.next
        node.next.prev = node.prev
        node.prev = None
        node.next = None
        return node
class LRUCache(Generic[T, U] ):
    """simple docstring"""
    # class variable mapping decorated functions to their cache instances
    decorator_function_to_instance_map: dict[Callable[[T], U], LRUCache[T, U]] = {}
    def __init__( self, capacity: int ):
        self.list: DoubleLinkedList[T, U] = DoubleLinkedList()
        self.capacity = capacity
        self.num_keys = 0
        self.hits = 0
        self.miss = 0
        self.cache: dict[T, DoubleLinkedListNode[T, U]] = {}
def __repr__( self : Optional[int] ):
return (
f'CacheInfo(hits={self.hits}, misses={self.miss}, '
f'capacity={self.capacity}, current size={self.num_keys})'
)
    def __contains__( self, key: T ) -> bool:
        return key in self.cache
    def get( self, key: T ) -> U | None:
        # Note: pythonic interface would throw KeyError rather than return None
        if key in self.cache:
            self.hits += 1
            value_node: DoubleLinkedListNode[T, U] = self.cache[key]
            node = self.list.remove(self.cache[key] )
            assert node == value_node
            # node is guaranteed not None because it is in self.cache
            assert node is not None
            self.list.add(node )
            return node.val
        self.miss += 1
        return None
    def put( self, key: T, value: U ) -> None:
        if key not in self.cache:
            if self.num_keys >= self.capacity:
                # delete first node (oldest) when over capacity
                first_node = self.list.head.next
                # guaranteed to have a non-None first node when num_keys > 0
                # explain to type checker via assertions
                assert first_node is not None
                assert first_node.key is not None
                assert (
                    self.list.remove(first_node ) is not None
                )  # node guaranteed to be in list
                del self.cache[first_node.key]
                self.num_keys -= 1
            self.cache[key] = DoubleLinkedListNode(key, value )
            self.list.add(self.cache[key] )
            self.num_keys += 1
        else:
            # bump node to the end of the list, update value
            node = self.list.remove(self.cache[key] )
            assert node is not None  # node guaranteed to be in list
            node.val = value
            self.list.add(node )
    @classmethod
    def decorator( cls, size: int = 1_28 ):
        def cache_decorator_inner(func: Callable[[T], U] ) -> Callable[..., U]:
            def cache_decorator_wrapper(*args: T ) -> U:
                if func not in cls.decorator_function_to_instance_map:
                    cls.decorator_function_to_instance_map[func] = LRUCache(size )
                result = cls.decorator_function_to_instance_map[func].get(args[0] )
                if result is None:
                    result = func(*args )
                    cls.decorator_function_to_instance_map[func].put(args[0], result )
                return result
            def cache_info() -> LRUCache[T, U]:
                return cls.decorator_function_to_instance_map[func]
            setattr(cache_decorator_wrapper, "cache_info", cache_info )  # noqa: B010
            return cache_decorator_wrapper
        return cache_decorator_inner
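# A small usage sketch (illustrative, names are mine): memoizing a recursive
# function with the decorator defined above; each fib value is computed once.
@LRUCache.decorator(100)
def fib(num: int) -> int:
    if num in (1, 2):
        return 1
    return fib(num - 1) + fib(num - 2)

# print(fib(34))            # fast, thanks to the cache
# print(fib.cache_info())   # CacheInfo(hits=..., misses=..., capacity=100, ...)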
if __name__ == "__main__":
import doctest
doctest.testmod()
| 346 | 0 |
import numpy as np
import skfuzzy as fuzz
if __name__ == "__main__":
# Create universe of discourse in Python using linspace ()
X = np.linspace(start=0, stop=75, num=75, endpoint=True, retstep=False)
# Create two fuzzy sets by defining any membership function
# (trapmf(), gbellmf(), gaussmf(), etc).
abc1 = [0, 25, 50]
abc2 = [25, 50, 75]
young = fuzz.membership.trimf(X, abc1)
middle_aged = fuzz.membership.trimf(X, abc2)
# Compute the different operations using inbuilt functions.
one = np.ones(75)
zero = np.zeros((75,))
# 1. Union = max(µA(x), µB(x))
union = fuzz.fuzzy_or(X, young, X, middle_aged)[1]
# 2. Intersection = min(µA(x), µB(x))
intersection = fuzz.fuzzy_and(X, young, X, middle_aged)[1]
# 3. Complement (A) = (1 - µA(x))
complement_a = fuzz.fuzzy_not(young)
# 4. Difference (A/B) = min(µA(x),(1- µB(x)))
difference = fuzz.fuzzy_and(X, young, X, fuzz.fuzzy_not(middle_aged)[1])[1]
# 5. Algebraic Sum = [µA(x) + µB(x) – (µA(x) * µB(x))]
alg_sum = young + middle_aged - (young * middle_aged)
# 6. Algebraic Product = (µA(x) * µB(x))
alg_product = young * middle_aged
# 7. Bounded Sum = min[1,(µA(x), µB(x))]
bdd_sum = fuzz.fuzzy_and(X, one, X, young + middle_aged)[1]
# 8. Bounded difference = min[0,(µA(x), µB(x))]
bdd_difference = fuzz.fuzzy_or(X, zero, X, young - middle_aged)[1]
# max-min composition
# max-product composition
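# The two compositions above are named but not implemented in the original
# script; a minimal numpy sketch (my own illustration, toy relations assumed):
R1 = np.array([[0.2, 0.8], [0.6, 0.4]])  # fuzzy relation X -> Y
R2 = np.array([[0.5, 0.9], [0.3, 0.7]])  # fuzzy relation Y -> Z
# (R1 o R2)[i, k] = max_j min(R1[i, j], R2[j, k])  -- max-min composition
max_min = np.max(np.minimum(R1[:, :, None], R2[None, :, :]), axis=1)
# max-product replaces min with the product
max_product = np.max(R1[:, :, None] * R2[None, :, :], axis=1)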
# Plot each set A, set B and each operation result using plot() and subplot().
from matplotlib import pyplot as plt
plt.figure()
plt.subplot(4, 3, 1)
plt.plot(X, young)
plt.title('''Young''')
plt.grid(True)
plt.subplot(4, 3, 2)
plt.plot(X, middle_aged)
plt.title('''Middle aged''')
plt.grid(True)
plt.subplot(4, 3, 3)
plt.plot(X, union)
plt.title('''union''')
plt.grid(True)
plt.subplot(4, 3, 4)
plt.plot(X, intersection)
plt.title('''intersection''')
plt.grid(True)
plt.subplot(4, 3, 5)
plt.plot(X, complement_a)
plt.title('''complement_a''')
plt.grid(True)
plt.subplot(4, 3, 6)
plt.plot(X, difference)
plt.title('''difference a/b''')
plt.grid(True)
plt.subplot(4, 3, 7)
plt.plot(X, alg_sum)
plt.title('''alg_sum''')
plt.grid(True)
plt.subplot(4, 3, 8)
plt.plot(X, alg_product)
plt.title('''alg_product''')
plt.grid(True)
plt.subplot(4, 3, 9)
plt.plot(X, bdd_sum)
plt.title('''bdd_sum''')
plt.grid(True)
plt.subplot(4, 3, 10)
plt.plot(X, bdd_difference)
plt.title('''bdd_difference''')
plt.grid(True)
plt.subplots_adjust(hspace=0.5)
plt.show()
| 219 | import re
from flax.core.frozen_dict import freeze
from flax.traverse_util import flatten_dict, unflatten_dict
from jax.experimental import PartitionSpec as P
# Sentinels
__lowerCamelCase : List[str] = object()
# For specifying empty leaf dict `{}`
__lowerCamelCase : Optional[int] = object()
def _match(qs, ks):
    """Return True if the regexes in `qs` match a window of strings in tuple `ks`."""
    qts = tuple((re.compile(x + """$""" ) for x in qs) )
    for i in range(len(ks ) - len(qs ) + 1 ):
        matches = [x.match(y ) for x, y in zip(qts, ks[i:] )]
        if matches and all(matches ):
            return True
    return False
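# Quick illustrative check (not in the original module): a rule matches any
# suffix window of a flattened parameter path.
assert _match(("attention", "out_proj", "kernel"),
              ("transformer", "h", "0", "attention", "out_proj", "kernel"))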
def _replacement_rules(rules):
    """Build a replace(key, val) function from (rule, replacement) pairs."""
    def replace(key, val):
        for rule, replacement in rules:
            if _match(rule, key ):
                return replacement
        return val
    return replace
def _get_partition_rules():
    return [
        # embeddings
        (("transformer", "wpe", "embedding"), P("""mp""", None )),
        (("transformer", "wte", "embedding"), P("""mp""", None )),
        # attention
        (("attention", "(q_proj|k_proj|v_proj)", "kernel"), P(None, """mp""" )),
        (("attention", "out_proj", "kernel"), P("""mp""", None )),
        (("attention", "out_proj", "bias"), None),
        # mlp
        (("mlp", "c_fc", "kernel"), P(None, """mp""" )),
        (("mlp", "c_fc", "bias"), P("""mp""" )),
        (("mlp", "c_proj", "kernel"), P("""mp""", None )),
        (("mlp", "c_proj", "bias"), None),
        # layer norms
        ((r"ln_\d+", "bias"), None),
        ((r"\d+", r"ln_\d+", "scale"), None),
        (("ln_f", "bias"), None),
        (("ln_f", "scale"), None),
    ]
def set_partitions(in_dict):
    """Assign a PartitionSpec (or None) to every leaf of a nested params dict."""
    rules = _get_partition_rules()
    replace = _replacement_rules(rules )
    initd = {k: _unmatched for k in flatten_dict(in_dict )}
    result = {k: replace(k, v ) for k, v in initd.items()}
    assert _unmatched not in result.values(), "Incomplete partition spec."
    return freeze(unflatten_dict(result ) )
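# Minimal usage sketch (toy parameter tree, leaf values are placeholders):
# every leaf path is matched against the rules above; layer-norm parameters
# map to None (replicated) and embeddings would map to P("mp", None).
_toy_params = {"transformer": {"ln_f": {"bias": 0, "scale": 0}}}
_toy_specs = set_partitions(_toy_params)  # frozen dict with None on both leaves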
| 219 | 1 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {
    '''configuration_rembert''': ['''REMBERT_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''RemBertConfig''', '''RemBertOnnxConfig''']
}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_rembert"] = ['''RemBertTokenizer''']
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_rembert_fast"] = ['''RemBertTokenizerFast''']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_rembert"] = [
'''REMBERT_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''RemBertForCausalLM''',
'''RemBertForMaskedLM''',
'''RemBertForMultipleChoice''',
'''RemBertForQuestionAnswering''',
'''RemBertForSequenceClassification''',
'''RemBertForTokenClassification''',
'''RemBertLayer''',
'''RemBertModel''',
'''RemBertPreTrainedModel''',
'''load_tf_weights_in_rembert''',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_rembert"] = [
'''TF_REMBERT_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''TFRemBertForCausalLM''',
'''TFRemBertForMaskedLM''',
'''TFRemBertForMultipleChoice''',
'''TFRemBertForQuestionAnswering''',
'''TFRemBertForSequenceClassification''',
'''TFRemBertForTokenClassification''',
'''TFRemBertLayer''',
'''TFRemBertModel''',
'''TFRemBertPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_rembert import REMBERT_PRETRAINED_CONFIG_ARCHIVE_MAP, RemBertConfig, RemBertOnnxConfig
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_rembert import RemBertTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_rembert_fast import RemBertTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_rembert import (
REMBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
RemBertForCausalLM,
RemBertForMaskedLM,
RemBertForMultipleChoice,
RemBertForQuestionAnswering,
RemBertForSequenceClassification,
RemBertForTokenClassification,
RemBertLayer,
RemBertModel,
RemBertPreTrainedModel,
load_tf_weights_in_rembert,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_rembert import (
TF_REMBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
TFRemBertForCausalLM,
TFRemBertForMaskedLM,
TFRemBertForMultipleChoice,
TFRemBertForQuestionAnswering,
TFRemBertForSequenceClassification,
TFRemBertForTokenClassification,
TFRemBertLayer,
TFRemBertModel,
TFRemBertPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__) | 356 |
"""simple docstring"""
import os
import string
import sys
ARROW_KEY_FLAG = 1 << 8
KEYMAP = {
'''tab''': ord('''\t'''),
'''newline''': ord('''\r'''),
'''esc''': 27,
'''up''': 65 + ARROW_KEY_FLAG,
'''down''': 66 + ARROW_KEY_FLAG,
'''right''': 67 + ARROW_KEY_FLAG,
'''left''': 68 + ARROW_KEY_FLAG,
'''mod_int''': 91,
'''undefined''': sys.maxsize,
'''interrupt''': 3,
'''insert''': 50,
'''delete''': 51,
'''pg_up''': 53,
'''pg_down''': 54,
}
KEYMAP["arrow_begin"] = KEYMAP['''up''']
KEYMAP["arrow_end"] = KEYMAP['''left''']
if sys.platform == "win32":
    WIN_CH_BUFFER = []
    WIN_KEYMAP = {
B'''\xe0H''': KEYMAP['''up'''] - ARROW_KEY_FLAG,
B'''\x00H''': KEYMAP['''up'''] - ARROW_KEY_FLAG,
B'''\xe0P''': KEYMAP['''down'''] - ARROW_KEY_FLAG,
B'''\x00P''': KEYMAP['''down'''] - ARROW_KEY_FLAG,
B'''\xe0M''': KEYMAP['''right'''] - ARROW_KEY_FLAG,
B'''\x00M''': KEYMAP['''right'''] - ARROW_KEY_FLAG,
B'''\xe0K''': KEYMAP['''left'''] - ARROW_KEY_FLAG,
B'''\x00K''': KEYMAP['''left'''] - ARROW_KEY_FLAG,
}
for i in range(10):
    KEYMAP[str(i)] = ord(str(i))
def get_raw_chars() -> str:
    "Gets raw characters from inputs"
    if os.name == "nt":
        import msvcrt
        encoding = '''mbcs'''
        # Flush the keyboard buffer
        while msvcrt.kbhit():
            msvcrt.getch()
        if len(WIN_CH_BUFFER) == 0:
            # Read the keystroke
            ch = msvcrt.getch()
            # If it is a prefix char, get second part
            if ch in (b"\x00", b"\xe0"):
                ch2 = ch + msvcrt.getch()
                # Translate actual Win chars to bullet char types
                try:
                    chx = chr(WIN_KEYMAP[ch2])
                    WIN_CH_BUFFER.append(chr(KEYMAP['''mod_int''']))
                    WIN_CH_BUFFER.append(chx)
                    if ord(chx) in (
                        KEYMAP["insert"] - 1 << 9,
                        KEYMAP["delete"] - 1 << 9,
                        KEYMAP["pg_up"] - 1 << 9,
                        KEYMAP["pg_down"] - 1 << 9,
                    ):
                        WIN_CH_BUFFER.append(chr(126))
                    ch = chr(KEYMAP['''esc'''])
                except KeyError:
                    ch = ch2[1]
            else:
                ch = ch.decode(encoding)
        else:
            ch = WIN_CH_BUFFER.pop(0)
    elif os.name == "posix":
        import termios
        import tty
        fd = sys.stdin.fileno()
        old_settings = termios.tcgetattr(fd)
        try:
            tty.setraw(fd)
            ch = sys.stdin.read(1)
        finally:
            termios.tcsetattr(fd, termios.TCSADRAIN, old_settings)
    return ch
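# A minimal interactive usage sketch (illustrative only; requires a real TTY):
#
#     key = get_character()
#     if key == chr(KEYMAP["up"]):
#         ...  # move a menu selection up
#
# get_character(), defined below, folds escape sequences into single key codes.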
def get_character() -> str:
    "Gets a character from the keyboard and returns the key code"
    char = get_raw_chars()
    if ord(char) in [KEYMAP["interrupt"], KEYMAP["newline"]]:
        return char
    elif ord(char) == KEYMAP["esc"]:
        combo = get_raw_chars()
        if ord(combo) == KEYMAP["mod_int"]:
            key = get_raw_chars()
            if ord(key) >= KEYMAP["arrow_begin"] - ARROW_KEY_FLAG and ord(key) <= KEYMAP["arrow_end"] - ARROW_KEY_FLAG:
                return chr(ord(key) + ARROW_KEY_FLAG)
            else:
                return KEYMAP["undefined"]
        else:
            return get_raw_chars()
    else:
        if char in string.printable:
            return char
        else:
            return KEYMAP["undefined"] | 188 | 0 |
'''simple docstring'''
from __future__ import annotations
import pandas as pd
def calculate_waitingtime( arrival_time: list[int], burst_time: list[int], no_of_processes: int ) -> list[int]:
    """Compute per-process waiting times under shortest remaining time first."""
    remaining_time = [0] * no_of_processes
    waiting_time = [0] * no_of_processes
    # Copy the burst time into remaining_time[]
    for i in range(no_of_processes ):
        remaining_time[i] = burst_time[i]
    complete = 0
    increment_time = 0
    minm = 9_99_99_99_99
    short = 0
    check = False
    # Process until all processes are completed
    while complete != no_of_processes:
        for j in range(no_of_processes ):
            if arrival_time[j] <= increment_time and remaining_time[j] > 0:
                if remaining_time[j] < minm:
                    minm = remaining_time[j]
                    short = j
                    check = True
        if not check:
            increment_time += 1
            continue
        remaining_time[short] -= 1
        minm = remaining_time[short]
        if minm == 0:
            minm = 9_99_99_99_99
        if remaining_time[short] == 0:
            complete += 1
            check = False
            # Find finish time of current process
            finish_time = increment_time + 1
            # Calculate waiting time
            finar = finish_time - arrival_time[short]
            waiting_time[short] = finar - burst_time[short]
            if waiting_time[short] < 0:
                waiting_time[short] = 0
        # Increment time
        increment_time += 1
    return waiting_time
def calculate_turnaroundtime( burst_time: list[int], no_of_processes: int, waiting_time: list[int] ) -> list[int]:
    turn_around_time = [0] * no_of_processes
    for i in range(no_of_processes ):
        turn_around_time[i] = burst_time[i] + waiting_time[i]
    return turn_around_time
def calculate_average_times( waiting_time: list[int], turn_around_time: list[int], no_of_processes: int ) -> None:
    total_waiting_time = 0
    total_turn_around_time = 0
    for i in range(no_of_processes ):
        total_waiting_time = total_waiting_time + waiting_time[i]
        total_turn_around_time = total_turn_around_time + turn_around_time[i]
    print(f"""Average waiting time = {total_waiting_time / no_of_processes:.5f}""" )
    print('''Average turn around time =''', total_turn_around_time / no_of_processes )
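# A short non-interactive example (numbers chosen by me for illustration):
# three processes arriving at t=0,1,2 with bursts 8,4,1 under SRTF -- the
# shortest job preempts, so process 3 finishes first and waits 0.
assert calculate_waitingtime([0, 1, 2], [8, 4, 1], 3) == [5, 1, 0]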
if __name__ == "__main__":
print("Enter how many process you want to analyze")
no_of_processes = int(input())
burst_time = [0] * no_of_processes
arrival_time = [0] * no_of_processes
processes = list(range(1, no_of_processes + 1))
for i in range(no_of_processes):
    print("Enter the arrival time and burst time for process:--" + str(i + 1))
    arrival_time[i], burst_time[i] = map(int, input().split())
waiting_time = calculate_waitingtime(arrival_time, burst_time, no_of_processes)
bt = burst_time
n = no_of_processes
wt = waiting_time
turn_around_time = calculate_turnaroundtime(bt, n, wt)
calculate_average_times(waiting_time, turn_around_time, no_of_processes)
fcfs = pd.DataFrame(
list(zip(processes, burst_time, arrival_time, waiting_time, turn_around_time)),
columns=[
"Process",
"BurstTime",
"ArrivalTime",
"WaitingTime",
"TurnAroundTime",
],
)
# Printing the dataFrame
pd.set_option("display.max_rows", fcfs.shape[0] + 1)
print(fcfs)
| 67 | '''simple docstring'''
import unittest
from transformers import is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
OPENAI_GPT_PRETRAINED_MODEL_ARCHIVE_LIST,
OpenAIGPTConfig,
OpenAIGPTDoubleHeadsModel,
OpenAIGPTForSequenceClassification,
OpenAIGPTLMHeadModel,
OpenAIGPTModel,
)
class OpenAIGPTModelTester:
    def __init__( self, parent, batch_size=13, seq_length=7, is_training=True, use_token_type_ids=True, use_labels=True, vocab_size=99, hidden_size=32, num_hidden_layers=5, num_attention_heads=4, intermediate_size=37, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=5_12, type_vocab_size=16, type_sequence_label_size=2, initializer_range=0.02, num_labels=3, num_choices=4, scope=None, ):
        """simple docstring"""
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope
        self.pad_token_id = self.vocab_size - 1
    def prepare_config_and_inputs( self ):
        """simple docstring"""
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size )
        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size )
        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size )
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels )
            choice_labels = ids_tensor([self.batch_size], self.num_choices )
        config = OpenAIGPTConfig(
            vocab_size=self.vocab_size, n_embd=self.hidden_size, n_layer=self.num_hidden_layers, n_head=self.num_attention_heads, n_positions=self.max_position_embeddings, pad_token_id=self.pad_token_id, )
        head_mask = ids_tensor([self.num_hidden_layers, self.num_attention_heads], 2 )
        return (
            config,
            input_ids,
            head_mask,
            token_type_ids,
            sequence_labels,
            token_labels,
            choice_labels,
        )
    def create_and_check_openai_gpt_model( self, config, input_ids, head_mask, token_type_ids, *args ):
        """simple docstring"""
        model = OpenAIGPTModel(config=config )
        model.to(torch_device )
        model.eval()
        result = model(input_ids, token_type_ids=token_type_ids, head_mask=head_mask )
        result = model(input_ids, token_type_ids=token_type_ids )
        result = model(input_ids )
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size) )

    def create_and_check_lm_head_model( self, config, input_ids, head_mask, token_type_ids, *args ):
        """simple docstring"""
        model = OpenAIGPTLMHeadModel(config )
        model.to(torch_device )
        model.eval()
        result = model(input_ids, token_type_ids=token_type_ids, labels=input_ids )
        self.parent.assertEqual(result.loss.shape, () )
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size) )

    def create_and_check_double_lm_head_model( self, config, input_ids, head_mask, token_type_ids, *args ):
        """simple docstring"""
        model = OpenAIGPTDoubleHeadsModel(config )
        model.to(torch_device )
        model.eval()
        result = model(input_ids, token_type_ids=token_type_ids, labels=input_ids )
        self.parent.assertEqual(result.loss.shape, () )
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size) )

    def create_and_check_openai_gpt_for_sequence_classification( self, config, input_ids, head_mask, token_type_ids, *args ):
        """simple docstring"""
        config.num_labels = self.num_labels
        model = OpenAIGPTForSequenceClassification(config )
        model.to(torch_device )
        model.eval()
        sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size )
        result = model(input_ids, token_type_ids=token_type_ids, labels=sequence_labels )
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels) )
    def prepare_config_and_inputs_for_common( self ):
        """simple docstring"""
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            head_mask,
            token_type_ids,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {
            '''input_ids''': input_ids,
            '''token_type_ids''': token_type_ids,
            '''head_mask''': head_mask,
        }
        return config, inputs_dict
@require_torch
class OpenAIGPTModelTest( ModelTesterMixin, GenerationTesterMixin, PipelineTesterMixin, unittest.TestCase ):
    all_model_classes = (
(OpenAIGPTModel, OpenAIGPTLMHeadModel, OpenAIGPTDoubleHeadsModel, OpenAIGPTForSequenceClassification)
if is_torch_available()
else ()
)
    all_generative_model_classes = (
        (OpenAIGPTLMHeadModel,) if is_torch_available() else ()
    )  # TODO (PVP): Add Double HeadsModel when generate() function is changed accordingly
    pipeline_model_mapping = (
{
"feature-extraction": OpenAIGPTModel,
"text-classification": OpenAIGPTForSequenceClassification,
"text-generation": OpenAIGPTLMHeadModel,
"zero-shot": OpenAIGPTForSequenceClassification,
}
if is_torch_available()
else {}
)
    def is_pipeline_test_to_skip( self, pipeline_test_casse_name, config_class, model_architecture, tokenizer_name, processor_name ):
"""simple docstring"""
if pipeline_test_casse_name == "ZeroShotClassificationPipelineTests":
# Get `tokenizer does not have a padding token` error for both fast/slow tokenizers.
# `OpenAIGPTConfig` was never used in pipeline tests, either because of a missing checkpoint or because a
# tiny config could not be created.
return True
return False
    def _prepare_for_class( self, inputs_dict, model_class, return_labels=False ):
        """simple docstring"""
        inputs_dict = super()._prepare_for_class(inputs_dict, model_class, return_labels=return_labels )
        if return_labels:
            if model_class.__name__ == "OpenAIGPTDoubleHeadsModel":
                inputs_dict["labels"] = torch.zeros(
                    (self.model_tester.batch_size, self.model_tester.num_choices, self.model_tester.seq_length), dtype=torch.long, device=torch_device, )
                inputs_dict["input_ids"] = inputs_dict['''labels''']
                inputs_dict["token_type_ids"] = inputs_dict['''labels''']
                inputs_dict["mc_token_ids"] = torch.zeros(
                    (self.model_tester.batch_size, self.model_tester.num_choices), dtype=torch.long, device=torch_device, )
                inputs_dict["mc_labels"] = torch.zeros(
                    self.model_tester.batch_size, dtype=torch.long, device=torch_device )
        return inputs_dict
def SCREAMING_SNAKE_CASE__ ( self : Optional[Any] ):
"""simple docstring"""
__lowerCamelCase = OpenAIGPTModelTester(self )
__lowerCamelCase = ConfigTester(self , config_class=a , n_embd=37 )
def SCREAMING_SNAKE_CASE__ ( self : List[str] ):
"""simple docstring"""
self.config_tester.run_common_tests()
def SCREAMING_SNAKE_CASE__ ( self : Union[str, Any] ):
"""simple docstring"""
__lowerCamelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_openai_gpt_model(*a )
def SCREAMING_SNAKE_CASE__ ( self : Optional[int] ):
"""simple docstring"""
__lowerCamelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_lm_head_model(*a )
def SCREAMING_SNAKE_CASE__ ( self : List[str] ):
"""simple docstring"""
__lowerCamelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_double_lm_head_model(*a )
def SCREAMING_SNAKE_CASE__ ( self : Any ):
"""simple docstring"""
__lowerCamelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_openai_gpt_for_sequence_classification(*a )
@slow
def SCREAMING_SNAKE_CASE__ ( self : Any ):
"""simple docstring"""
for model_name in OPENAI_GPT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
__lowerCamelCase = OpenAIGPTModel.from_pretrained(a )
self.assertIsNotNone(a )
@require_torch
class a__ ( unittest.TestCase ):
@slow
def SCREAMING_SNAKE_CASE__ ( self : str ):
"""simple docstring"""
__lowerCamelCase = OpenAIGPTLMHeadModel.from_pretrained('''openai-gpt''' )
model.to(a )
__lowerCamelCase = torch.tensor([[4_81, 47_35, 5_44]] , dtype=torch.long , device=a ) # the president is
__lowerCamelCase = [
4_81,
47_35,
5_44,
2_46,
9_63,
8_70,
7_62,
2_39,
2_44,
4_04_77,
2_44,
2_49,
7_19,
8_81,
4_87,
5_44,
2_40,
2_44,
6_03,
4_81,
] # the president is a very good man. " \n " i\'m sure he is, " said the
__lowerCamelCase = model.generate(a , do_sample=a )
self.assertListEqual(output_ids[0].tolist() , a )
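# Hedged standalone sketch (not the HF generate() implementation): the integration
# test above relies on greedy decoding when do_sample=False, i.e. repeatedly
# appending the argmax next token. `step_logits_fn` is an assumed callable that
# returns (batch, seq, vocab) logits.
import torch as _torch
def _greedy_decode(step_logits_fn, ids, steps):
    for _ in range(steps):
        next_ids = step_logits_fn(ids)[:, -1, :].argmax(dim=-1, keepdim=True)
        ids = _torch.cat([ids, next_ids], dim=-1)
    return ids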
| 67 | 1 |
import argparse
import glob
import importlib.util
import os
import re
import black
from doc_builder.style_doc import style_docstrings_in_code
# All paths are set with the intent you should run this script from the root of the repo with the command
# python utils/check_copies.py
__A ='''src/diffusers'''
__A ='''.'''
# This is to make sure the diffusers module imported is the one in the repo.
__A =importlib.util.spec_from_file_location(
'''diffusers''',
os.path.join(DIFFUSERS_PATH, '''__init__.py'''),
submodule_search_locations=[DIFFUSERS_PATH],
)
__A =spec.loader.load_module()
def lowerCamelCase_ ( lowerCamelCase__ , lowerCamelCase__ ):
return line.startswith(lowerCamelCase__ ) or len(lowerCamelCase__ ) <= 1 or re.search(r"^\s*\)(\s*->.*:|:)\s*$" , lowerCamelCase__ ) is not None
def lowerCamelCase_ ( lowerCamelCase__ ):
lowerCamelCase_ = object_name.split("." )
lowerCamelCase_ = 0
# First let's find the module where our object lives.
lowerCamelCase_ = parts[i]
while i < len(lowerCamelCase__ ) and not os.path.isfile(os.path.join(lowerCamelCase__ , F'{module}.py' ) ):
i += 1
if i < len(lowerCamelCase__ ):
lowerCamelCase_ = os.path.join(lowerCamelCase__ , parts[i] )
if i >= len(lowerCamelCase__ ):
raise ValueError(F'`object_name` should begin with the name of a module of diffusers but got {object_name}.' )
with open(os.path.join(lowerCamelCase__ , F'{module}.py' ) , "r" , encoding="utf-8" , newline="\n" ) as f:
lowerCamelCase_ = f.readlines()
# Now let's find the class / func in the code!
lowerCamelCase_ = ""
lowerCamelCase_ = 0
for name in parts[i + 1 :]:
while (
line_index < len(lowerCamelCase__ ) and re.search(rF'^{indent}(class|def)\s+{name}(\(|\:)' , lines[line_index] ) is None
):
line_index += 1
indent += " "
line_index += 1
if line_index >= len(lowerCamelCase__ ):
raise ValueError(F' {object_name} does not match any function or class in {module}.' )
# We found the beginning of the class / func, now let's find the end (when the indent diminishes).
lowerCamelCase_ = line_index
while line_index < len(lowerCamelCase__ ) and _should_continue(lines[line_index] , lowerCamelCase__ ):
line_index += 1
# Clean up empty lines at the end (if any).
while len(lines[line_index - 1] ) <= 1:
line_index -= 1
lowerCamelCase_ = lines[start_index:line_index]
return "".join(lowerCamelCase__ )
__A =re.compile(R'''^(\s*)#\s*Copied from\s+diffusers\.(\S+\.\S+)\s*($|\S.*$)''')
__A =re.compile(R'''^\s*(\S+)->(\S+)(\s+.*|$)''')
__A =re.compile(R'''<FILL\s+[^>]*>''')
def lowerCamelCase_ ( lowerCamelCase__ ):
lowerCamelCase_ = code.split("\n" )
lowerCamelCase_ = 0
while idx < len(lowerCamelCase__ ) and len(lines[idx] ) == 0:
idx += 1
if idx < len(lowerCamelCase__ ):
return re.search(r"^(\s*)\S" , lines[idx] ).groups()[0]
return ""
def lowerCamelCase_ ( lowerCamelCase__ ):
lowerCamelCase_ = len(get_indent(lowerCamelCase__ ) ) > 0
if has_indent:
lowerCamelCase_ = F'class Bla:\n{code}'
lowerCamelCase_ = black.Mode(target_versions={black.TargetVersion.PYaa} , line_length=1_1_9 , preview=lowerCamelCase__ )
lowerCamelCase_ = black.format_str(lowerCamelCase__ , mode=lowerCamelCase__ )
lowerCamelCase_ , lowerCamelCase_ = style_docstrings_in_code(lowerCamelCase__ )
return result[len("class Bla:\n" ) :] if has_indent else result
def lowerCamelCase_ ( lowerCamelCase__ , lowerCamelCase__=False ):
with open(lowerCamelCase__ , "r" , encoding="utf-8" , newline="\n" ) as f:
lowerCamelCase_ = f.readlines()
lowerCamelCase_ = []
lowerCamelCase_ = 0
# Not a for loop cause `lines` is going to change (if `overwrite=True`).
while line_index < len(lowerCamelCase__ ):
lowerCamelCase_ = _re_copy_warning.search(lines[line_index] )
if search is None:
line_index += 1
continue
# There is some copied code here, let's retrieve the original.
lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ = search.groups()
lowerCamelCase_ = find_code_in_diffusers(lowerCamelCase__ )
lowerCamelCase_ = get_indent(lowerCamelCase__ )
lowerCamelCase_ = line_index + 1 if indent == theoretical_indent else line_index + 2
lowerCamelCase_ = theoretical_indent
lowerCamelCase_ = start_index
# Loop to check the observed code, stop when indentation diminishes or if we see an End copy comment.
lowerCamelCase_ = True
while line_index < len(lowerCamelCase__ ) and should_continue:
line_index += 1
if line_index >= len(lowerCamelCase__ ):
break
lowerCamelCase_ = lines[line_index]
lowerCamelCase_ = _should_continue(lowerCamelCase__ , lowerCamelCase__ ) and re.search(F'^{indent}# End copy' , lowerCamelCase__ ) is None
# Clean up empty lines at the end (if any).
while len(lines[line_index - 1] ) <= 1:
line_index -= 1
lowerCamelCase_ = lines[start_index:line_index]
lowerCamelCase_ = "".join(lowerCamelCase__ )
# Remove any nested `Copied from` comments to avoid circular copies
lowerCamelCase_ = [line for line in theoretical_code.split("\n" ) if _re_copy_warning.search(lowerCamelCase__ ) is None]
lowerCamelCase_ = "\n".join(lowerCamelCase__ )
# Before comparing, use the `replace_pattern` on the original code.
if len(lowerCamelCase__ ) > 0:
lowerCamelCase_ = replace_pattern.replace("with" , "" ).split("," )
lowerCamelCase_ = [_re_replace_pattern.search(lowerCamelCase__ ) for p in patterns]
for pattern in patterns:
if pattern is None:
continue
lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ = pattern.groups()
lowerCamelCase_ = re.sub(lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ )
if option.strip() == "all-casing":
lowerCamelCase_ = re.sub(obja.lower() , obja.lower() , lowerCamelCase__ )
lowerCamelCase_ = re.sub(obja.upper() , obja.upper() , lowerCamelCase__ )
# Blackify after replacement. To be able to do that, we need the header (class or function definition)
# from the previous line
lowerCamelCase_ = blackify(lines[start_index - 1] + theoretical_code )
lowerCamelCase_ = theoretical_code[len(lines[start_index - 1] ) :]
# Test for a diff and act accordingly.
if observed_code != theoretical_code:
diffs.append([object_name, start_index] )
if overwrite:
lowerCamelCase_ = lines[:start_index] + [theoretical_code] + lines[line_index:]
lowerCamelCase_ = start_index + 1
if overwrite and len(lowerCamelCase__ ) > 0:
# Warn the user a file has been modified.
print(F'Detected changes, rewriting {filename}.' )
with open(lowerCamelCase__ , "w" , encoding="utf-8" , newline="\n" ) as f:
f.writelines(lowerCamelCase__ )
return diffs
def lowerCamelCase_ ( lowerCamelCase__ = False ):
lowerCamelCase_ = glob.glob(os.path.join(lowerCamelCase__ , "**/*.py" ) , recursive=lowerCamelCase__ )
lowerCamelCase_ = []
for filename in all_files:
lowerCamelCase_ = is_copy_consistent(lowerCamelCase__ , lowerCamelCase__ )
diffs += [F'- {filename}: copy does not match {d[0]} at line {d[1]}' for d in new_diffs]
if not overwrite and len(lowerCamelCase__ ) > 0:
lowerCamelCase_ = "\n".join(lowerCamelCase__ )
raise Exception(
"Found the following copy inconsistencies:\n"
+ diff
+ "\nRun `make fix-copies` or `python utils/check_copies.py --fix_and_overwrite` to fix them." )
if __name__ == "__main__":
__A =argparse.ArgumentParser()
parser.add_argument('''--fix_and_overwrite''', action='''store_true''', help='''Whether to fix inconsistencies.''')
__A =parser.parse_args()
check_copies(args.fix_and_overwrite)
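# Hedged demo of the `# Copied from` marker matched above; the pattern is copied
# verbatim from this file, the sample line is made up for illustration.
import re as _re
_copy_re = _re.compile(r"^(\s*)#\s*Copied from\s+diffusers\.(\S+\.\S+)\s*($|\S.*$)")
_m = _copy_re.search("    # Copied from diffusers.models.attention.Attention with Attention->CrossAttention")
assert _m is not None
assert _m.group(2) == "models.attention.Attention"
assert _m.group(3) == "with Attention->CrossAttention"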
| 47 |
import copy
import re
class _SCREAMING_SNAKE_CASE :
lowerCAmelCase__ = 'hp'
lowerCAmelCase__ = {}
lowerCAmelCase__ = None
@classmethod
def SCREAMING_SNAKE_CASE_( cls , lowercase , lowercase ) -> Tuple:
lowerCamelCase_ = prefix
lowerCamelCase_ = defaults
cls.build_naming_info()
@staticmethod
def SCREAMING_SNAKE_CASE_( lowercase , lowercase ) -> Optional[Any]:
if len(lowercase ) == 0:
return ""
lowerCamelCase_ = None
if any(char.isdigit() for char in word ):
raise Exception(f'Parameters should not contain numbers: \'{word}\' contains a number' )
if word in info["short_word"]:
return info["short_word"][word]
for prefix_len in range(1 , len(lowercase ) + 1 ):
lowerCamelCase_ = word[:prefix_len]
if prefix in info["reverse_short_word"]:
continue
else:
lowerCamelCase_ = prefix
break
if short_word is None:
# Paranoid fallback
def int_to_alphabetic(lowercase ):
lowerCamelCase_ = ""
while integer != 0:
lowerCamelCase_ = chr(ord("A" ) + integer % 10 ) + s
integer //= 10
return s
lowerCamelCase_ = 0
while True:
lowerCamelCase_ = word + "#" + int_to_alphabetic(lowercase )
if sword in info["reverse_short_word"]:
continue
else:
lowerCamelCase_ = sword
break
lowerCamelCase_ = short_word
lowerCamelCase_ = word
return short_word
@staticmethod
def SCREAMING_SNAKE_CASE_( lowercase , lowercase ) -> int:
lowerCamelCase_ = param_name.split("_" )
lowerCamelCase_ = [TrialShortNamer.shortname_for_word(lowercase , lowercase ) for word in words]
# We try to create a separatorless short name, but if there is a collision we have to fall back
# to a separated short name
lowerCamelCase_ = ["", "_"]
for separator in separators:
lowerCamelCase_ = separator.join(lowercase )
if shortname not in info["reverse_short_param"]:
lowerCamelCase_ = shortname
lowerCamelCase_ = param_name
return shortname
return param_name
@staticmethod
def SCREAMING_SNAKE_CASE_( lowercase , lowercase ) -> Optional[Any]:
lowerCamelCase_ = TrialShortNamer.shortname_for_key(lowercase , lowercase )
lowerCamelCase_ = short_name
lowerCamelCase_ = param_name
@classmethod
def SCREAMING_SNAKE_CASE_( cls ) -> Dict:
if cls.NAMING_INFO is not None:
return
lowerCamelCase_ = {
"short_word": {},
"reverse_short_word": {},
"short_param": {},
"reverse_short_param": {},
}
lowerCamelCase_ = list(cls.DEFAULTS.keys() )
for k in field_keys:
cls.add_new_param_name(lowercase , lowercase )
lowerCamelCase_ = info
@classmethod
def SCREAMING_SNAKE_CASE_( cls , lowercase ) -> Optional[int]:
cls.build_naming_info()
assert cls.PREFIX is not None
lowerCamelCase_ = [copy.copy(cls.PREFIX )]
for k, v in params.items():
if k not in cls.DEFAULTS:
raise Exception(f'You should provide a default value for the param name {k} with value {v}' )
if v == cls.DEFAULTS[k]:
# The default value is not added to the name
continue
lowerCamelCase_ = cls.NAMING_INFO["short_param"][k]
if isinstance(lowercase , lowercase ):
lowerCamelCase_ = 1 if v else 0
lowerCamelCase_ = "" if isinstance(lowercase , (int, float) ) else "-"
lowerCamelCase_ = f'{key}{sep}{v}'
name.append(lowercase )
return "_".join(lowercase )
@classmethod
def SCREAMING_SNAKE_CASE_( cls , lowercase ) -> List[Any]:
lowerCamelCase_ = repr[len(cls.PREFIX ) + 1 :]
if repr == "":
lowerCamelCase_ = []
else:
lowerCamelCase_ = repr.split("_" )
lowerCamelCase_ = {}
for value in values:
if "-" in value:
lowerCamelCase_ , lowerCamelCase_ = value.split("-" )
else:
lowerCamelCase_ = re.sub("[0-9.]" , "" , lowercase )
lowerCamelCase_ = float(re.sub("[^0-9.]" , "" , lowercase ) )
lowerCamelCase_ = cls.NAMING_INFO["reverse_short_param"][p_k]
lowerCamelCase_ = p_v
for k in cls.DEFAULTS:
if k not in parameters:
lowerCamelCase_ = cls.DEFAULTS[k]
return parameters
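# Hedged standalone sketch of the shortest-unambiguous-prefix idea implemented by
# shortname_for_word above (inputs are made up for illustration):
def _shortest_unused_prefix(word, used_prefixes):
    for n in range(1, len(word) + 1):
        if word[:n] not in used_prefixes:
            return word[:n]
    return word
assert _shortest_unused_prefix("learning", {"l", "le"}) == "lea"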
| 47 | 1 |
"""simple docstring"""
from ....configuration_utils import PretrainedConfig
from ....utils import logging
lowerCamelCase_ : str = logging.get_logger(__name__)
lowerCamelCase_ : Any = {
"""CarlCochet/trajectory-transformer-halfcheetah-medium-v2""": (
"""https://huggingface.co/CarlCochet/trajectory-transformer-halfcheetah-medium-v2/resolve/main/config.json"""
),
# See all TrajectoryTransformer models at https://huggingface.co/models?filter=trajectory_transformer
}
class __A ( _SCREAMING_SNAKE_CASE ):
"""simple docstring"""
__lowerCAmelCase = "trajectory_transformer"
__lowerCAmelCase = ["past_key_values"]
__lowerCAmelCase = {
"hidden_size": "n_embd",
"num_attention_heads": "n_head",
"num_hidden_layers": "n_layer",
}
def __init__( self , __A=100 , __A=5 , __A=1 , __A=1 , __A=249 , __A=6 , __A=17 , __A=25 , __A=4 , __A=4 , __A=128 , __A=0.1 , __A=0.1 , __A=0.1 , __A=0.0_006 , __A=512 , __A=0.02 , __A=1E-1_2 , __A=1 , __A=True , __A=1 , __A=5_0256 , __A=5_0256 , **__A , ) -> List[Any]:
a =vocab_size
a =action_weight
a =reward_weight
a =value_weight
a =max_position_embeddings
a =block_size
a =action_dim
a =observation_dim
a =transition_dim
a =learning_rate
a =n_layer
a =n_head
a =n_embd
a =embd_pdrop
a =attn_pdrop
a =resid_pdrop
a =initializer_range
a =layer_norm_eps
a =kaiming_initializer_range
a =use_cache
super().__init__(pad_token_id=__A , bos_token_id=__A , eos_token_id=__A , **__A )
| 81 |
"""simple docstring"""
from typing import List, Optional, Union
import numpy as np
from ...feature_extraction_sequence_utils import SequenceFeatureExtractor
from ...feature_extraction_utils import BatchFeature
from ...utils import PaddingStrategy, TensorType, logging
UpperCAmelCase = logging.get_logger(__name__)
class UpperCAmelCase_ ( _lowercase):
snake_case__ = ['''input_values''', '''padding_mask''']
def __init__( self : Optional[Any] , __UpperCamelCase : int = 1 , __UpperCamelCase : int = 2_4000 , __UpperCamelCase : float = 0.0 , __UpperCamelCase : float = None , __UpperCamelCase : float = None , **__UpperCamelCase : Optional[Any] , ) -> Optional[int]:
super().__init__(feature_size=__UpperCamelCase , sampling_rate=__UpperCamelCase , padding_value=__UpperCamelCase , **__UpperCamelCase )
_UpperCamelCase = chunk_length_s
_UpperCamelCase = overlap
@property
def _UpperCamelCase ( self : Optional[int] ) -> Optional[int]:
if self.chunk_length_s is None:
return None
else:
return int(self.chunk_length_s * self.sampling_rate )
@property
def _UpperCamelCase ( self : Union[str, Any] ) -> Optional[int]:
if self.chunk_length_s is None or self.overlap is None:
return None
else:
return max(1 , int((1.0 - self.overlap) * self.chunk_length ) )
def __call__( self : Union[str, Any] , __UpperCamelCase : Union[np.ndarray, List[float], List[np.ndarray], List[List[float]]] , __UpperCamelCase : Optional[Union[bool, str, PaddingStrategy]] = None , __UpperCamelCase : Optional[bool] = False , __UpperCamelCase : Optional[int] = None , __UpperCamelCase : Optional[Union[str, TensorType]] = None , __UpperCamelCase : Optional[int] = None , ) -> BatchFeature:
if sampling_rate is not None:
if sampling_rate != self.sampling_rate:
raise ValueError(
F'''The model corresponding to this feature extractor: {self} was trained using a sampling rate of'''
F''' {self.sampling_rate}. Please make sure that the provided audio input was sampled with'''
F''' {self.sampling_rate} and not {sampling_rate}.''' )
else:
logger.warning(
'''It is strongly recommended to pass the `sampling_rate` argument to this function. '''
'''Failing to do so can result in silent errors that might be hard to debug.''' )
if padding and truncation:
raise ValueError('''Both padding and truncation were set. Make sure you only set one.''' )
elif padding is None:
# by default let's pad the inputs
_UpperCamelCase = True
_UpperCamelCase = bool(
isinstance(__UpperCamelCase , (list, tuple) ) and (isinstance(raw_audio[0] , (np.ndarray, tuple, list) )) )
if is_batched:
_UpperCamelCase = [np.asarray(__UpperCamelCase , dtype=np.floataa ).T for audio in raw_audio]
elif not is_batched and not isinstance(__UpperCamelCase , np.ndarray ):
_UpperCamelCase = np.asarray(__UpperCamelCase , dtype=np.floataa )
elif isinstance(__UpperCamelCase , np.ndarray ) and raw_audio.dtype is np.dtype(np.floataa ):
_UpperCamelCase = raw_audio.astype(np.floataa )
# always return batch
if not is_batched:
_UpperCamelCase = [np.asarray(__UpperCamelCase ).T]
# verify inputs are valid
for idx, example in enumerate(__UpperCamelCase ):
if example.ndim > 2:
raise ValueError(F'''Expected input shape (channels, length) but got shape {example.shape}''' )
if self.feature_size == 1 and example.ndim != 1:
raise ValueError(F'''Expected mono audio but example has {example.shape[-1]} channels''' )
if self.feature_size == 2 and example.shape[-1] != 2:
raise ValueError(F'''Expected stereo audio but example has {example.shape[-1]} channels''' )
_UpperCamelCase = None
_UpperCamelCase = BatchFeature({'''input_values''': raw_audio} )
if self.chunk_stride is not None and self.chunk_length is not None and max_length is None:
if truncation:
_UpperCamelCase = min(array.shape[0] for array in raw_audio )
_UpperCamelCase = int(np.floor(max_length / self.chunk_stride ) )
_UpperCamelCase = (nb_step - 1) * self.chunk_stride + self.chunk_length
elif padding:
_UpperCamelCase = max(array.shape[0] for array in raw_audio )
_UpperCamelCase = int(np.ceil(max_length / self.chunk_stride ) )
_UpperCamelCase = (nb_step - 1) * self.chunk_stride + self.chunk_length
_UpperCamelCase = '''max_length'''
else:
_UpperCamelCase = input_values
# normal padding on batch
if padded_inputs is None:
_UpperCamelCase = self.pad(
__UpperCamelCase , max_length=__UpperCamelCase , truncation=__UpperCamelCase , padding=__UpperCamelCase , return_attention_mask=__UpperCamelCase , )
if padding:
_UpperCamelCase = padded_inputs.pop('''attention_mask''' )
_UpperCamelCase = []
for example in padded_inputs.pop('''input_values''' ):
if self.feature_size == 1:
_UpperCamelCase = example[..., None]
input_values.append(example.T )
_UpperCamelCase = input_values
if return_tensors is not None:
_UpperCamelCase = padded_inputs.convert_to_tensors(__UpperCamelCase )
return padded_inputs
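# Hedged numeric check of the chunk_length / chunk_stride properties above
# (sampling_rate, chunk_length_s and overlap values are assumed for illustration):
_sr, _chunk_s, _overlap = 24_000, 1.0, 0.5
_chunk_length = int(_chunk_s * _sr)
_chunk_stride = max(1, int((1.0 - _overlap) * _chunk_length))
assert (_chunk_length, _chunk_stride) == (24_000, 12_000)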
| 256 | 0 |
import unittest
from transformers import is_torch_available
from transformers.testing_utils import require_torch
if is_torch_available():
import torch
from transformers.activations import gelu_new, gelu_python, get_activation
@require_torch
class __a ( unittest.TestCase ):
def UpperCAmelCase__ ( self ) -> Any:
"""simple docstring"""
_UpperCAmelCase = torch.tensor([-100, -1, -0.1, 0, 0.1, 1.0, 100] )
_UpperCAmelCase = get_activation('gelu' )
self.assertTrue(torch.allclose(gelu_python(_SCREAMING_SNAKE_CASE ) , torch_builtin(_SCREAMING_SNAKE_CASE ) ) )
self.assertFalse(torch.allclose(gelu_python(_SCREAMING_SNAKE_CASE ) , gelu_new(_SCREAMING_SNAKE_CASE ) ) )
def UpperCAmelCase__ ( self ) -> Tuple:
"""simple docstring"""
_UpperCAmelCase = torch.tensor([-100, -1, -0.1, 0, 0.1, 1.0, 100] )
_UpperCAmelCase = get_activation('gelu' )
_UpperCAmelCase = get_activation('gelu_10' )
_UpperCAmelCase = torch_builtin(_SCREAMING_SNAKE_CASE )
_UpperCAmelCase = geluaa(_SCREAMING_SNAKE_CASE )
_UpperCAmelCase = torch.where(y_gelu_aa < 10.0 , 1 , 0 )
self.assertTrue(torch.max(_SCREAMING_SNAKE_CASE ).item() == 10.0 )
self.assertTrue(torch.allclose(y_gelu * clipped_mask , y_gelu_aa * clipped_mask ) )
def UpperCAmelCase__ ( self ) -> Optional[Any]:
"""simple docstring"""
get_activation('gelu' )
get_activation('gelu_10' )
get_activation('gelu_fast' )
get_activation('gelu_new' )
get_activation('gelu_python' )
get_activation('gelu_pytorch_tanh' )
get_activation('linear' )
get_activation('mish' )
get_activation('quick_gelu' )
get_activation('relu' )
get_activation('sigmoid' )
get_activation('silu' )
get_activation('swish' )
get_activation('tanh' )
with self.assertRaises(_SCREAMING_SNAKE_CASE ):
get_activation('bogus' )
with self.assertRaises(_SCREAMING_SNAKE_CASE ):
get_activation(_SCREAMING_SNAKE_CASE )
def UpperCAmelCase__ ( self ) -> Optional[Any]:
"""simple docstring"""
_UpperCAmelCase = get_activation('gelu' )
_UpperCAmelCase = 1
_UpperCAmelCase = get_activation('gelu' )
self.assertEqual(acta.a , 1 )
with self.assertRaises(_SCREAMING_SNAKE_CASE ):
_UpperCAmelCase = acta.a
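# Hedged standalone sketch of the clipping behaviour tested above, assuming
# "gelu_10" means gelu capped at 10:
import torch as _torch
_x = _torch.tensor([-100.0, 0.0, 100.0])
_capped = _torch.clamp(_torch.nn.functional.gelu(_x), max=10.0)
assert float(_capped.max()) <= 10.0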
| 185 |
import math
import random
def lowerCAmelCase__ ( a__: float , a__: bool = False ) -> float:
'''simple docstring'''
if deriv:
return value * (1 - value)
return 1 / (1 + math.exp(-value ))
# Initial Value
lowerCAmelCase__ :Optional[Any] = 0.02
def lowerCAmelCase__ ( a__: int , a__: int ) -> float:
'''simple docstring'''
_UpperCAmelCase = float(2 * (random.randint(1 , 1_0_0 )) - 1 )
for _ in range(a__ ):
# Forward propagation
_UpperCAmelCase = sigmoid_function(INITIAL_VALUE * weight )
# How much did we miss?
_UpperCAmelCase = (expected / 1_0_0) - layer_a
# Error delta
_UpperCAmelCase = layer_1_error * sigmoid_function(a__ , a__ )
# Update weight
weight += INITIAL_VALUE * layer_1_delta
return layer_a * 1_0_0
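# Hedged worked step (weight and expected value are assumed, not from the original):
# one pass of the training loop above with weight = 1.0 and expected = 50.
import math as _math
_w, _expected = 1.0, 50
_layer = 1 / (1 + _math.exp(-0.02 * _w))  # forward propagation
_delta = ((_expected / 100) - _layer) * _layer * (1 - _layer)  # error * sigmoid'
_w += 0.02 * _delta  # weight update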
if __name__ == "__main__":
import doctest
doctest.testmod()
lowerCAmelCase__ :List[Any] = int(input('''Expected value: '''))
lowerCAmelCase__ :Any = int(input('''Number of propagations: '''))
print(forward_propagation(expected, number_propagations))
| 185 | 1 |
import unittest
import torch
from diffusers import DDIMScheduler, DDPMScheduler, UNetaDModel
from diffusers.training_utils import set_seed
from diffusers.utils.testing_utils import slow
__lowerCAmelCase : int = False
class UpperCAmelCase_ ( unittest.TestCase ):
'''simple docstring'''
def _lowercase ( self : Union[str, Any] , UpperCamelCase__ : List[Any]=32 ) -> List[str]:
"""simple docstring"""
set_seed(0 )
__magic_name__ = UNetaDModel(sample_size=UpperCamelCase__ , in_channels=3 , out_channels=3 )
__magic_name__ = torch.optim.SGD(model.parameters() , lr=0.0001 )
return model, optimizer
@slow
def _lowercase ( self : Any ) -> List[Any]:
"""simple docstring"""
__magic_name__ = """cpu""" # ensure full determinism without setting the CUBLAS_WORKSPACE_CONFIG env variable
__magic_name__ = DDPMScheduler(
num_train_timesteps=1000 , beta_start=0.0001 , beta_end=0.02 , beta_schedule="""linear""" , clip_sample=UpperCamelCase__ , )
__magic_name__ = DDIMScheduler(
num_train_timesteps=1000 , beta_start=0.0001 , beta_end=0.02 , beta_schedule="""linear""" , clip_sample=UpperCamelCase__ , )
assert ddpm_scheduler.config.num_train_timesteps == ddim_scheduler.config.num_train_timesteps
# shared batches for DDPM and DDIM
set_seed(0 )
__magic_name__ = [torch.randn((4, 3, 32, 32) ).clip(-1 , 1 ).to(UpperCamelCase__ ) for _ in range(4 )]
__magic_name__ = [torch.randn((4, 3, 32, 32) ).to(UpperCamelCase__ ) for _ in range(4 )]
__magic_name__ = [torch.randint(0 , 1000 , (4,) ).long().to(UpperCamelCase__ ) for _ in range(4 )]
# train with a DDPM scheduler
__magic_name__ , __magic_name__ = self.get_model_optimizer(resolution=32 )
model.train().to(UpperCamelCase__ )
for i in range(4 ):
optimizer.zero_grad()
__magic_name__ = ddpm_scheduler.add_noise(clean_images[i] , noise[i] , timesteps[i] )
__magic_name__ = model(UpperCamelCase__ , timesteps[i] ).sample
__magic_name__ = torch.nn.functional.mse_loss(UpperCamelCase__ , noise[i] )
loss.backward()
optimizer.step()
del model, optimizer
# recreate the model and optimizer, and retry with DDIM
__magic_name__ , __magic_name__ = self.get_model_optimizer(resolution=32 )
model.train().to(UpperCamelCase__ )
for i in range(4 ):
optimizer.zero_grad()
__magic_name__ = ddim_scheduler.add_noise(clean_images[i] , noise[i] , timesteps[i] )
__magic_name__ = model(UpperCamelCase__ , timesteps[i] ).sample
__magic_name__ = torch.nn.functional.mse_loss(UpperCamelCase__ , noise[i] )
loss.backward()
optimizer.step()
del model, optimizer
self.assertTrue(torch.allclose(UpperCamelCase__ , UpperCamelCase__ , atol=1E-5 ) )
self.assertTrue(torch.allclose(UpperCamelCase__ , UpperCamelCase__ , atol=1E-5 ) )
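# Hedged standalone sketch (standard DDPM forward process, not the scheduler's
# actual code) of what add_noise computes under the linear beta schedule
# configured above: x_t = sqrt(a_bar_t) * x_0 + sqrt(1 - a_bar_t) * noise.
import torch as _torch
_betas = _torch.linspace(0.0001, 0.02, 1000)
_alphas_cumprod = _torch.cumprod(1.0 - _betas, dim=0)
def _add_noise_sketch(x0, noise, t):
    return _alphas_cumprod[t] ** 0.5 * x0 + (1.0 - _alphas_cumprod[t]) ** 0.5 * noise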
| 88 |
'''simple docstring'''
import warnings
from ...utils import logging
from .image_processing_deit import DeiTImageProcessor
UpperCAmelCase_ = logging.get_logger(__name__)
class lowerCAmelCase_ ( lowerCamelCase_ ):
'''simple docstring'''
def __init__( self : List[str] , *_UpperCAmelCase : Optional[int] , **_UpperCAmelCase : Optional[Any] ):
"""simple docstring"""
warnings.warn(
"""The class DeiTFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please"""
""" use DeiTImageProcessor instead.""" , _UpperCAmelCase , )
super().__init__(*_UpperCAmelCase , **_UpperCAmelCase )
| 346 | 0 |
from sklearn.metrics import fa_score, matthews_corrcoef
import datasets
from .record_evaluation import evaluate as evaluate_record
lowerCAmelCase = '\\n@article{wang2019superglue,\n title={SuperGLUE: A Stickier Benchmark for General-Purpose Language Understanding Systems},\n author={Wang, Alex and Pruksachatkun, Yada and Nangia, Nikita and Singh, Amanpreet and Michael, Julian and Hill, Felix and Levy, Omer and Bowman, Samuel R},\n journal={arXiv preprint arXiv:1905.00537},\n year={2019}\n}\n'
lowerCAmelCase = '\\nSuperGLUE (https://super.gluebenchmark.com/) is a new benchmark styled after\nGLUE with a new set of more difficult language understanding tasks, improved\nresources, and a new public leaderboard.\n'
lowerCAmelCase = '\nCompute SuperGLUE evaluation metric associated to each SuperGLUE dataset.\nArgs:\n predictions: list of predictions to score. Depending on the SuperGlUE subset:\n - for \'record\': list of question-answer dictionaries with the following keys:\n - \'idx\': index of the question as specified by the dataset\n - \'prediction_text\': the predicted answer text\n - for \'multirc\': list of question-answer dictionaries with the following keys:\n - \'idx\': index of the question-answer pair as specified by the dataset\n - \'prediction\': the predicted answer label\n - otherwise: list of predicted labels\n references: list of reference labels. Depending on the SuperGLUE subset:\n - for \'record\': list of question-answers dictionaries with the following keys:\n - \'idx\': index of the question as specified by the dataset\n - \'answers\': list of possible answers\n - otherwise: list of reference labels\nReturns: depending on the SuperGLUE subset:\n - for \'record\':\n - \'exact_match\': Exact match between answer and gold answer\n - \'f1\': F1 score\n - for \'multirc\':\n - \'exact_match\': Exact match between answer and gold answer\n - \'f1_m\': Per-question macro-F1 score\n - \'f1_a\': Average F1 score over all answers\n - for \'axb\':\n \'matthews_correlation\': Matthew Correlation\n - for \'cb\':\n - \'accuracy\': Accuracy\n - \'f1\': F1 score\n - for all others:\n - \'accuracy\': Accuracy\nExamples:\n\n >>> super_glue_metric = datasets.load_metric(\'super_glue\', \'copa\') # any of [\"copa\", \"rte\", \"wic\", \"wsc\", \"wsc.fixed\", \"boolq\", \"axg\"]\n >>> predictions = [0, 1]\n >>> references = [0, 1]\n >>> results = super_glue_metric.compute(predictions=predictions, references=references)\n >>> print(results)\n {\'accuracy\': 1.0}\n\n >>> super_glue_metric = datasets.load_metric(\'super_glue\', \'cb\')\n >>> predictions = [0, 1]\n >>> references = [0, 1]\n >>> results = super_glue_metric.compute(predictions=predictions, references=references)\n >>> print(results)\n {\'accuracy\': 1.0, \'f1\': 1.0}\n\n >>> super_glue_metric = datasets.load_metric(\'super_glue\', \'record\')\n >>> predictions = [{\'idx\': {\'passage\': 0, \'query\': 0}, \'prediction_text\': \'answer\'}]\n >>> references = [{\'idx\': {\'passage\': 0, \'query\': 0}, \'answers\': [\'answer\', \'another_answer\']}]\n >>> results = super_glue_metric.compute(predictions=predictions, references=references)\n >>> print(results)\n {\'exact_match\': 1.0, \'f1\': 1.0}\n\n >>> super_glue_metric = datasets.load_metric(\'super_glue\', \'multirc\')\n >>> predictions = [{\'idx\': {\'answer\': 0, \'paragraph\': 0, \'question\': 0}, \'prediction\': 0}, {\'idx\': {\'answer\': 1, \'paragraph\': 2, \'question\': 3}, \'prediction\': 1}]\n >>> references = [0, 1]\n >>> results = super_glue_metric.compute(predictions=predictions, references=references)\n >>> print(results)\n {\'exact_match\': 1.0, \'f1_m\': 1.0, \'f1_a\': 1.0}\n\n >>> super_glue_metric = datasets.load_metric(\'super_glue\', \'axb\')\n >>> references = [0, 1]\n >>> predictions = [0, 1]\n >>> results = super_glue_metric.compute(predictions=predictions, references=references)\n >>> print(results)\n {\'matthews_correlation\': 1.0}\n'
def _a ( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ):
"""simple docstring"""
return float((preds == labels).mean() )
def _a ( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE="binary" ):
"""simple docstring"""
lowercase__ = simple_accuracy(lowerCamelCase__ , lowerCamelCase__ )
lowercase__ = float(fa_score(y_true=lowerCamelCase__ , y_pred=lowerCamelCase__ , average=lowerCamelCase__ ) )
return {
"accuracy": acc,
"f1": fa,
}
def _a ( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ):
"""simple docstring"""
lowercase__ = {}
for id_pred, label in zip(lowerCamelCase__ , lowerCamelCase__ ):
lowercase__ = f'{id_pred["idx"]["paragraph"]}-{id_pred["idx"]["question"]}'
lowercase__ = id_pred['''prediction''']
if question_id in question_map:
question_map[question_id].append((pred, label) )
else:
lowercase__ = [(pred, label)]
lowercase__ = [], []
for question, preds_labels in question_map.items():
lowercase__ = zip(*lowerCamelCase__ )
lowercase__ = fa_score(y_true=lowerCamelCase__ , y_pred=lowerCamelCase__ , average='''macro''' )
fas.append(lowerCamelCase__ )
lowercase__ = int(sum(pred == label for pred, label in preds_labels ) == len(lowerCamelCase__ ) )
ems.append(lowerCamelCase__ )
lowercase__ = float(sum(lowerCamelCase__ ) / len(lowerCamelCase__ ) )
lowercase__ = sum(lowerCamelCase__ ) / len(lowerCamelCase__ )
lowercase__ = float(fa_score(y_true=lowerCamelCase__ , y_pred=[id_pred['''prediction'''] for id_pred in ids_preds] ) )
return {"exact_match": em, "f1_m": fa_m, "f1_a": fa_a}
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class _a ( datasets.Metric ):
def lowerCamelCase_ ( self: Tuple ) -> Optional[int]:
"""simple docstring"""
if self.config_name not in [
"boolq",
"cb",
"copa",
"multirc",
"record",
"rte",
"wic",
"wsc",
"wsc.fixed",
"axb",
"axg",
]:
raise KeyError(
'''You should supply a configuration name selected in '''
'''["boolq", "cb", "copa", "multirc", "record", "rte", "wic", "wsc", "wsc.fixed", "axb", "axg",]''' )
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(self._get_feature_types() ) , codebase_urls=[] , reference_urls=[] , format='''numpy''' if not self.config_name == '''record''' and not self.config_name == '''multirc''' else None , )
def lowerCamelCase_ ( self: int ) -> int:
"""simple docstring"""
if self.config_name == "record":
return {
"predictions": {
"idx": {
"passage": datasets.Value('''int64''' ),
"query": datasets.Value('''int64''' ),
},
"prediction_text": datasets.Value('''string''' ),
},
"references": {
"idx": {
"passage": datasets.Value('''int64''' ),
"query": datasets.Value('''int64''' ),
},
"answers": datasets.Sequence(datasets.Value('''string''' ) ),
},
}
elif self.config_name == "multirc":
return {
"predictions": {
"idx": {
"answer": datasets.Value('''int64''' ),
"paragraph": datasets.Value('''int64''' ),
"question": datasets.Value('''int64''' ),
},
"prediction": datasets.Value('''int64''' ),
},
"references": datasets.Value('''int64''' ),
}
else:
return {
"predictions": datasets.Value('''int64''' ),
"references": datasets.Value('''int64''' ),
}
def lowerCamelCase_ ( self: str , UpperCamelCase_: Tuple , UpperCamelCase_: Dict ) -> Any:
"""simple docstring"""
if self.config_name == "axb":
return {"matthews_correlation": matthews_corrcoef(_lowerCamelCase , _lowerCamelCase )}
elif self.config_name == "cb":
return acc_and_fa(_lowerCamelCase , _lowerCamelCase , fa_avg='''macro''' )
elif self.config_name == "record":
lowercase__ = [
{
'''qas''': [
{'''id''': ref['''idx''']['''query'''], '''answers''': [{'''text''': ans} for ans in ref['''answers''']]}
for ref in references
]
}
]
lowercase__ = {pred['''idx''']['''query''']: pred['''prediction_text'''] for pred in predictions}
return evaluate_record(_lowerCamelCase , _lowerCamelCase )[0]
elif self.config_name == "multirc":
return evaluate_multirc(_lowerCamelCase , _lowerCamelCase )
elif self.config_name in ["copa", "rte", "wic", "wsc", "wsc.fixed", "boolq", "axg"]:
return {"accuracy": simple_accuracy(_lowerCamelCase , _lowerCamelCase )}
else:
raise KeyError(
'''You should supply a configuration name selected in '''
'''["boolq", "cb", "copa", "multirc", "record", "rte", "wic", "wsc", "wsc.fixed", "axb", "axg",]''' )
| 354 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
lowerCAmelCase = {
'configuration_luke': ['LUKE_PRETRAINED_CONFIG_ARCHIVE_MAP', 'LukeConfig'],
'tokenization_luke': ['LukeTokenizer'],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCAmelCase = [
'LUKE_PRETRAINED_MODEL_ARCHIVE_LIST',
'LukeForEntityClassification',
'LukeForEntityPairClassification',
'LukeForEntitySpanClassification',
'LukeForMultipleChoice',
'LukeForQuestionAnswering',
'LukeForSequenceClassification',
'LukeForTokenClassification',
'LukeForMaskedLM',
'LukeModel',
'LukePreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_luke import LUKE_PRETRAINED_CONFIG_ARCHIVE_MAP, LukeConfig
from .tokenization_luke import LukeTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_luke import (
LUKE_PRETRAINED_MODEL_ARCHIVE_LIST,
LukeForEntityClassification,
LukeForEntityPairClassification,
LukeForEntitySpanClassification,
LukeForMaskedLM,
LukeForMultipleChoice,
LukeForQuestionAnswering,
LukeForSequenceClassification,
LukeForTokenClassification,
LukeModel,
LukePreTrainedModel,
)
else:
import sys
lowerCAmelCase = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
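# Hedged standalone sketch (PEP 562 module-level __getattr__, not the actual
# _LazyModule internals) of deferring an import until first attribute access;
# the mapping below is an assumption for illustration:
import importlib as _importlib
_lazy_targets = {"LukeConfig": "transformers.models.luke.configuration_luke"}
def __getattr__(name):
    if name in _lazy_targets:
        return getattr(_importlib.import_module(_lazy_targets[name]), name)
    raise AttributeError(name)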
| 93 | 0 |
'''simple docstring'''
import argparse
import json
import os
import fairseq
import torch
from fairseq.data import Dictionary
from transformers import (
UniSpeechConfig,
UniSpeechForCTC,
UniSpeechForPreTraining,
WavaVecaFeatureExtractor,
WavaVecaPhonemeCTCTokenizer,
WavaVecaProcessor,
logging,
)
logging.set_verbosity_info()
a : Union[str, Any] = logging.get_logger(__name__)
a : Union[str, Any] = {
"post_extract_proj": "feature_projection.projection",
"encoder.pos_conv.0": "encoder.pos_conv_embed.conv",
"self_attn.k_proj": "encoder.layers.*.attention.k_proj",
"self_attn.v_proj": "encoder.layers.*.attention.v_proj",
"self_attn.q_proj": "encoder.layers.*.attention.q_proj",
"self_attn.out_proj": "encoder.layers.*.attention.out_proj",
"self_attn_layer_norm": "encoder.layers.*.layer_norm",
"fc1": "encoder.layers.*.feed_forward.intermediate_dense",
"fc2": "encoder.layers.*.feed_forward.output_dense",
"final_layer_norm": "encoder.layers.*.final_layer_norm",
"encoder.layer_norm": "encoder.layer_norm",
"w2v_model.layer_norm": "feature_projection.layer_norm",
"quantizer.weight_proj": "quantizer.weight_proj",
"quantizer.vars": "quantizer.codevectors",
"project_q": "project_q",
"final_proj": "project_hid",
"w2v_encoder.proj": "ctc_proj",
"mask_emb": "masked_spec_embed",
}
a : List[Any] = [
"ctc_proj",
"quantizer.weight_proj",
"quantizer.codevectors",
"project_q",
"project_hid",
]
def lowercase ( __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ ):
'''simple docstring'''
for attribute in key.split("." ):
if is_finetuned:
if attribute in ["quantizer", "project_q", "project_hid"]:
# those layers are only relevant for pretraining and should be dropped
return
if attribute == "ctc_proj":
# we should rename `ctc_proj` to `lm_head` for fine-tuned phoneme models
UpperCAmelCase : Tuple = "lm_head"
UpperCAmelCase : List[Any] = getattr(_A , _A )
if weight_type is not None:
UpperCAmelCase : List[Any] = getattr(_A , _A ).shape
else:
UpperCAmelCase : Optional[int] = hf_pointer.shape
assert hf_shape == value.shape, (
F"Shape of hf {key + '.' + weight_type if weight_type is not None else ''} is {hf_shape}, but should be"
F" {value.shape} for {full_name}"
)
if weight_type == "weight":
UpperCAmelCase : Any = value
elif weight_type == "weight_g":
UpperCAmelCase : List[Any] = value
elif weight_type == "weight_v":
UpperCAmelCase : Any = value
elif weight_type == "bias":
UpperCAmelCase : int = value
else:
UpperCAmelCase : List[Any] = value
logger.info(F"{key + '.' + weight_type if weight_type is not None else ''} was initialized from {full_name}." )
def lowercase ( __magic_name__ , __magic_name__ , __magic_name__ ):
'''simple docstring'''
UpperCAmelCase : int = []
UpperCAmelCase : Any = fairseq_model.state_dict()
UpperCAmelCase : List[Any] = hf_model.unispeech.feature_extractor
for name, value in fairseq_dict.items():
UpperCAmelCase : int = False
if "conv_layers" in name:
load_conv_layer(
_A , _A , _A , _A , hf_model.config.feat_extract_norm == "group" , )
UpperCAmelCase : Optional[Any] = True
else:
for key, mapped_key in MAPPING.items():
UpperCAmelCase : Union[str, Any] = "unispeech." + mapped_key if mapped_key not in TOP_LEVEL_KEYS else mapped_key
if key in name or key.split("w2v_model." )[-1] == name.split("." )[0]:
UpperCAmelCase : Union[str, Any] = True
if "*" in mapped_key:
UpperCAmelCase : Dict = name.split(_A )[0].split("." )[-2]
UpperCAmelCase : Union[str, Any] = mapped_key.replace("*" , _A )
if "weight_g" in name:
UpperCAmelCase : Tuple = "weight_g"
elif "weight_v" in name:
UpperCAmelCase : Optional[Any] = "weight_v"
elif "bias" in name:
UpperCAmelCase : Dict = "bias"
elif "weight" in name:
# TODO: don't match quantizer.weight_proj
UpperCAmelCase : int = "weight"
else:
UpperCAmelCase : Optional[int] = None
set_recursively(_A , _A , _A , _A , _A , _A )
continue
if not is_used:
unused_weights.append(_A )
logger.warning(F"Unused weights: {unused_weights}" )
def lowercase ( __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ ):
'''simple docstring'''
UpperCAmelCase : int = full_name.split("conv_layers." )[-1]
UpperCAmelCase : Union[str, Any] = name.split("." )
UpperCAmelCase : Tuple = int(items[0] )
UpperCAmelCase : Any = int(items[1] )
if type_id == 0:
if "bias" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].conv.bias.data.shape, (
F"{full_name} has size {value.shape}, but"
F" {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found."
)
UpperCAmelCase : Dict = value
logger.info(F"Feat extract conv layer {layer_id} was initialized from {full_name}." )
elif "weight" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].conv.weight.data.shape, (
F"{full_name} has size {value.shape}, but"
F" {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found."
)
UpperCAmelCase : Union[str, Any] = value
logger.info(F"Feat extract conv layer {layer_id} was initialized from {full_name}." )
elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
if "bias" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape, (
F"{full_name} has size {value.shape}, but {feature_extractor[layer_id].layer_norm.bias.data.shape} was"
" found."
)
UpperCAmelCase : Dict = value
logger.info(F"Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}." )
elif "weight" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape, (
F"{full_name} has size {value.shape}, but"
F" {feature_extractor[layer_id].layer_norm.weight.data.shape} was found."
)
UpperCAmelCase : Optional[int] = value
logger.info(F"Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}." )
else:
unused_weights.append(_A )
@torch.no_grad()
def lowercase ( __magic_name__ , __magic_name__ , __magic_name__=None , __magic_name__=None , __magic_name__=True ):
'''simple docstring'''
if config_path is not None:
UpperCAmelCase : List[str] = UniSpeechConfig.from_pretrained(_A )
else:
UpperCAmelCase : Optional[Any] = UniSpeechConfig()
if is_finetuned:
if dict_path:
UpperCAmelCase : List[Any] = Dictionary.load_from_json(_A )
# important change bos & pad token id since CTC symbol is <pad> and
# not <s> as in fairseq
UpperCAmelCase : Any = target_dict.pad_index
UpperCAmelCase : Tuple = target_dict.bos_index
UpperCAmelCase : Tuple = target_dict.eos_index
UpperCAmelCase : Any = len(target_dict.symbols )
UpperCAmelCase : Dict = os.path.join(_A , "vocab.json" )
if not os.path.isdir(_A ):
logger.error("--pytorch_dump_folder_path ({}) should be a directory".format(_A ) )
return
os.makedirs(_A , exist_ok=_A )
UpperCAmelCase : Optional[int] = target_dict.indices
# fairseq has the <pad> and <s> switched
UpperCAmelCase : Any = 42
UpperCAmelCase : str = 43
with open(_A , "w" , encoding="utf-8" ) as vocab_handle:
json.dump(_A , _A )
UpperCAmelCase : str = WavaVecaPhonemeCTCTokenizer(
_A , unk_token=target_dict.unk_word , pad_token=target_dict.pad_word , bos_token=target_dict.bos_word , eos_token=target_dict.eos_word , word_delimiter_token="|" , do_lower_case=_A , )
UpperCAmelCase : Tuple = True if config.feat_extract_norm == "layer" else False
UpperCAmelCase : str = WavaVecaFeatureExtractor(
feature_size=1 , sampling_rate=1_6000 , padding_value=0 , do_normalize=_A , return_attention_mask=_A , )
UpperCAmelCase : Union[str, Any] = WavaVecaProcessor(feature_extractor=_A , tokenizer=_A )
processor.save_pretrained(_A )
UpperCAmelCase : int = UniSpeechForCTC(_A )
else:
UpperCAmelCase : List[Any] = UniSpeechForPreTraining(_A )
if is_finetuned:
UpperCAmelCase , UpperCAmelCase , UpperCAmelCase : List[str] = fairseq.checkpoint_utils.load_model_ensemble_and_task(
[checkpoint_path] , arg_overrides={"data": "/".join(dict_path.split("/" )[:-1] ), "w2v_path": checkpoint_path} )
else:
UpperCAmelCase , UpperCAmelCase , UpperCAmelCase : Any = fairseq.checkpoint_utils.load_model_ensemble_and_task([checkpoint_path] )
UpperCAmelCase : List[str] = model[0].eval()
recursively_load_weights(_A , _A , _A )
hf_unispeech.save_pretrained(_A )
if __name__ == "__main__":
a : str = argparse.ArgumentParser()
parser.add_argument("--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.")
parser.add_argument("--checkpoint_path", default=None, type=str, help="Path to fairseq checkpoint")
parser.add_argument("--dict_path", default=None, type=str, help="Path to dict of fine-tuned model")
parser.add_argument("--config_path", default=None, type=str, help="Path to hf config.json of model to convert")
parser.add_argument(
"--not_finetuned", action="store_true", help="Whether the model to convert is a fine-tuned model or not"
)
a : str = parser.parse_args()
convert_unispeech_checkpoint(
args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path, args.dict_path, not args.not_finetuned
)
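# Hedged example invocation (script name and paths are placeholders, not from the
# original):
#   python convert_unispeech_checkpoint.py \
#       --checkpoint_path /path/to/checkpoint.pt \
#       --dict_path /path/to/dict.ltr.txt \
#       --config_path /path/to/config.json \
#       --pytorch_dump_folder_path ./unispeech-converted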
| 311 |
import os
import tempfile
import unittest
from transformers import FlaubertConfig, is_torch_available
from transformers.testing_utils import require_torch, require_torch_gpu, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
FlaubertForMultipleChoice,
FlaubertForQuestionAnswering,
FlaubertForQuestionAnsweringSimple,
FlaubertForSequenceClassification,
FlaubertForTokenClassification,
FlaubertModel,
FlaubertWithLMHeadModel,
)
from transformers.models.flaubert.modeling_flaubert import FLAUBERT_PRETRAINED_MODEL_ARCHIVE_LIST
class __magic_name__ ( lowerCamelCase__ ):
'''simple docstring'''
def __init__( self, lowercase_, lowercase_=13, lowercase_=7, lowercase_=True, lowercase_=True, lowercase_=True, lowercase_=True, lowercase_=True, lowercase_=False, lowercase_=False, lowercase_=False, lowercase_=2, lowercase_=99, lowercase_=0, lowercase_=32, lowercase_=5, lowercase_=4, lowercase_=0.1, lowercase_=0.1, lowercase_=512, lowercase_=12, lowercase_=2, lowercase_=0.02, lowercase_=3, lowercase_=4, lowercase_="last", lowercase_=None, lowercase_=None, ) -> List[Any]:
"""simple docstring"""
a__ =parent
a__ =batch_size
a__ =seq_length
a__ =is_training
a__ =use_input_lengths
a__ =use_token_type_ids
a__ =use_labels
a__ =gelu_activation
a__ =sinusoidal_embeddings
a__ =causal
a__ =asm
a__ =n_langs
a__ =vocab_size
a__ =n_special
a__ =hidden_size
a__ =num_hidden_layers
a__ =num_attention_heads
a__ =hidden_dropout_prob
a__ =attention_probs_dropout_prob
a__ =max_position_embeddings
a__ =type_vocab_size
a__ =type_sequence_label_size
a__ =initializer_range
a__ =num_labels
a__ =num_choices
a__ =summary_type
a__ =use_proj
a__ =scope
def _UpperCAmelCase ( self ) -> Any:
"""simple docstring"""
a__ =ids_tensor([self.batch_size, self.seq_length], self.vocab_size )
a__ =random_attention_mask([self.batch_size, self.seq_length] )
a__ =None
if self.use_input_lengths:
a__ =(
ids_tensor([self.batch_size], vocab_size=2 ) + self.seq_length - 2
) # small variation of seq_length
a__ =None
if self.use_token_type_ids:
a__ =ids_tensor([self.batch_size, self.seq_length], self.n_langs )
a__ =None
a__ =None
a__ =None
if self.use_labels:
a__ =ids_tensor([self.batch_size], self.type_sequence_label_size )
a__ =ids_tensor([self.batch_size, self.seq_length], self.num_labels )
a__ =ids_tensor([self.batch_size], 2 ).float()
a__ =ids_tensor([self.batch_size], self.num_choices )
a__ =self.get_config()
return (
config,
input_ids,
token_type_ids,
input_lengths,
sequence_labels,
token_labels,
is_impossible_labels,
choice_labels,
input_mask,
)
def _UpperCAmelCase ( self ) -> Any:
"""simple docstring"""
return FlaubertConfig(
vocab_size=self.vocab_size, n_special=self.n_special, emb_dim=self.hidden_size, n_layers=self.num_hidden_layers, n_heads=self.num_attention_heads, dropout=self.hidden_dropout_prob, attention_dropout=self.attention_probs_dropout_prob, gelu_activation=self.gelu_activation, sinusoidal_embeddings=self.sinusoidal_embeddings, asm=self.asm, causal=self.causal, n_langs=self.n_langs, max_position_embeddings=self.max_position_embeddings, initializer_range=self.initializer_range, summary_type=self.summary_type, use_proj=self.use_proj, )
def _UpperCAmelCase ( self, lowercase_, lowercase_, lowercase_, lowercase_, lowercase_, lowercase_, lowercase_, lowercase_, lowercase_, ) -> Dict:
"""simple docstring"""
a__ =FlaubertModel(config=lowercase_ )
model.to(lowercase_ )
model.eval()
a__ =model(lowercase_, lengths=lowercase_, langs=lowercase_ )
a__ =model(lowercase_, langs=lowercase_ )
a__ =model(lowercase_ )
self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size) )
def _UpperCAmelCase ( self, lowercase_, lowercase_, lowercase_, lowercase_, lowercase_, lowercase_, lowercase_, lowercase_, lowercase_, ) -> str:
"""simple docstring"""
a__ =FlaubertWithLMHeadModel(lowercase_ )
model.to(lowercase_ )
model.eval()
a__ =model(lowercase_, token_type_ids=lowercase_, labels=lowercase_ )
self.parent.assertEqual(result.loss.shape, () )
self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size) )
def _UpperCAmelCase ( self, lowercase_, lowercase_, lowercase_, lowercase_, lowercase_, lowercase_, lowercase_, lowercase_, lowercase_, ) -> Dict:
"""simple docstring"""
a__ =FlaubertForQuestionAnsweringSimple(lowercase_ )
model.to(lowercase_ )
model.eval()
a__ =model(lowercase_ )
a__ =model(lowercase_, start_positions=lowercase_, end_positions=lowercase_ )
self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length) )
def _UpperCAmelCase ( self, lowercase_, lowercase_, lowercase_, lowercase_, lowercase_, lowercase_, lowercase_, lowercase_, lowercase_, ) -> Optional[Any]:
"""simple docstring"""
a__ =FlaubertForQuestionAnswering(lowercase_ )
model.to(lowercase_ )
model.eval()
a__ =model(lowercase_ )
a__ =model(
lowercase_, start_positions=lowercase_, end_positions=lowercase_, cls_index=lowercase_, is_impossible=lowercase_, p_mask=lowercase_, )
a__ =model(
lowercase_, start_positions=lowercase_, end_positions=lowercase_, cls_index=lowercase_, is_impossible=lowercase_, )
((a__), ) =result_with_labels.to_tuple()
a__ =model(lowercase_, start_positions=lowercase_, end_positions=lowercase_ )
((a__), ) =result_with_labels.to_tuple()
self.parent.assertEqual(result_with_labels.loss.shape, () )
self.parent.assertEqual(result.start_top_log_probs.shape, (self.batch_size, model.config.start_n_top) )
self.parent.assertEqual(result.start_top_index.shape, (self.batch_size, model.config.start_n_top) )
self.parent.assertEqual(
result.end_top_log_probs.shape, (self.batch_size, model.config.start_n_top * model.config.end_n_top) )
self.parent.assertEqual(
result.end_top_index.shape, (self.batch_size, model.config.start_n_top * model.config.end_n_top) )
self.parent.assertEqual(result.cls_logits.shape, (self.batch_size,) )
def _UpperCAmelCase ( self, lowercase_, lowercase_, lowercase_, lowercase_, lowercase_, lowercase_, lowercase_, lowercase_, lowercase_, ) -> Optional[Any]:
"""simple docstring"""
a__ =FlaubertForSequenceClassification(lowercase_ )
model.to(lowercase_ )
model.eval()
a__ =model(lowercase_ )
a__ =model(lowercase_, labels=lowercase_ )
self.parent.assertEqual(result.loss.shape, () )
self.parent.assertEqual(result.logits.shape, (self.batch_size, self.type_sequence_label_size) )
def _UpperCAmelCase ( self, lowercase_, lowercase_, lowercase_, lowercase_, lowercase_, lowercase_, lowercase_, lowercase_, lowercase_, ) -> Optional[int]:
"""simple docstring"""
a__ =self.num_labels
a__ =FlaubertForTokenClassification(lowercase_ )
model.to(lowercase_ )
model.eval()
a__ =model(lowercase_, attention_mask=lowercase_, labels=lowercase_ )
self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels) )
def _UpperCAmelCase ( self, lowercase_, lowercase_, lowercase_, lowercase_, lowercase_, lowercase_, lowercase_, lowercase_, lowercase_, ) -> Dict:
"""simple docstring"""
a__ =self.num_choices
a__ =FlaubertForMultipleChoice(config=lowercase_ )
model.to(lowercase_ )
model.eval()
a__ =input_ids.unsqueeze(1 ).expand(-1, self.num_choices, -1 ).contiguous()
a__ =token_type_ids.unsqueeze(1 ).expand(-1, self.num_choices, -1 ).contiguous()
a__ =input_mask.unsqueeze(1 ).expand(-1, self.num_choices, -1 ).contiguous()
a__ =model(
lowercase_, attention_mask=lowercase_, token_type_ids=lowercase_, labels=lowercase_, )
self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_choices) )
def _UpperCAmelCase ( self ) -> Dict:
"""simple docstring"""
a__ =self.prepare_config_and_inputs()
(
a__ ,
a__ ,
a__ ,
a__ ,
a__ ,
a__ ,
a__ ,
a__ ,
a__ ,
) =config_and_inputs
a__ ={
'''input_ids''': input_ids,
'''token_type_ids''': token_type_ids,
'''lengths''': input_lengths,
'''attention_mask''': input_mask,
}
return config, inputs_dict
@require_torch
class __magic_name__ ( lowerCamelCase__ , lowerCamelCase__ , unittest.TestCase ):
'''simple docstring'''
lowerCamelCase__ : str = (
(
FlaubertModel,
FlaubertWithLMHeadModel,
FlaubertForQuestionAnswering,
FlaubertForQuestionAnsweringSimple,
FlaubertForSequenceClassification,
FlaubertForTokenClassification,
FlaubertForMultipleChoice,
)
if is_torch_available()
else ()
)
lowerCamelCase__ : Dict = (
{
'feature-extraction': FlaubertModel,
'fill-mask': FlaubertWithLMHeadModel,
'question-answering': FlaubertForQuestionAnsweringSimple,
'text-classification': FlaubertForSequenceClassification,
'token-classification': FlaubertForTokenClassification,
'zero-shot': FlaubertForSequenceClassification,
}
if is_torch_available()
else {}
)
def _UpperCAmelCase ( self, lowercase_, lowercase_, lowercase_, lowercase_, lowercase_ ) -> str:
"""simple docstring"""
if (
pipeline_test_casse_name == "QAPipelineTests"
and tokenizer_name is not None
and not tokenizer_name.endswith('''Fast''' )
):
# `QAPipelineTests` fails for a few models when the slower tokenizer are used.
# (The slower tokenizers were never used for pipeline tests before the pipeline testing rework)
# TODO: check (and possibly fix) the `QAPipelineTests` with slower tokenizer
return True
return False
def _UpperCAmelCase ( self, lowercase_, lowercase_, lowercase_=False ) -> str:
"""simple docstring"""
a__ =super()._prepare_for_class(lowercase_, lowercase_, return_labels=lowercase_ )
if return_labels:
if model_class.__name__ == "FlaubertForQuestionAnswering":
a__ =torch.zeros(
self.model_tester.batch_size, dtype=torch.long, device=lowercase_ )
a__ =torch.zeros(
self.model_tester.batch_size, dtype=torch.long, device=lowercase_ )
return inputs_dict
def _UpperCAmelCase ( self ) -> Optional[int]:
"""simple docstring"""
a__ =FlaubertModelTester(self )
a__ =ConfigTester(self, config_class=lowercase_, emb_dim=37 )
def _UpperCAmelCase ( self ) -> Optional[Any]:
"""simple docstring"""
self.config_tester.run_common_tests()
def _UpperCAmelCase ( self ) -> Any:
"""simple docstring"""
a__ =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_flaubert_model(*lowercase_ )
def _UpperCAmelCase ( self ) -> str:
"""simple docstring"""
a__ =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_flaubert_lm_head(*lowercase_ )
def _UpperCAmelCase ( self ) -> Dict:
"""simple docstring"""
a__ =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_flaubert_simple_qa(*lowercase_ )
def _UpperCAmelCase ( self ) -> Dict:
"""simple docstring"""
a__ =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_flaubert_qa(*lowercase_ )
def _UpperCAmelCase ( self ) -> Any:
"""simple docstring"""
a__ =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_flaubert_sequence_classif(*lowercase_ )
def _UpperCAmelCase ( self ) -> Any:
"""simple docstring"""
a__ =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_flaubert_token_classif(*lowercase_ )
def _UpperCAmelCase ( self ) -> Tuple:
"""simple docstring"""
a__ =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_flaubert_multiple_choice(*lowercase_ )
@slow
def _UpperCAmelCase ( self ) -> Tuple:
"""simple docstring"""
for model_name in FLAUBERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
a__ =FlaubertModel.from_pretrained(lowercase_ )
self.assertIsNotNone(lowercase_ )
@slow
@require_torch_gpu
def _UpperCAmelCase ( self ) -> int:
"""simple docstring"""
a__, a__ =self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
# FlauBertForMultipleChoice behaves incorrectly in JIT environments.
if model_class == FlaubertForMultipleChoice:
return
a__ =True
a__ =model_class(config=lowercase_ )
a__ =self._prepare_for_class(lowercase_, lowercase_ )
a__ =torch.jit.trace(
lowercase_, (inputs_dict['''input_ids'''].to('''cpu''' ), inputs_dict['''attention_mask'''].to('''cpu''' )) )
with tempfile.TemporaryDirectory() as tmp:
torch.jit.save(lowercase_, os.path.join(lowercase_, '''traced_model.pt''' ) )
a__ =torch.jit.load(os.path.join(lowercase_, '''traced_model.pt''' ), map_location=lowercase_ )
loaded(inputs_dict['''input_ids'''].to(lowercase_ ), inputs_dict['''attention_mask'''].to(lowercase_ ) )
@require_torch
class FlaubertModelIntegrationTest(unittest.TestCase):
    @slow
    def test_inference_no_head_absolute_embedding(self):
        model = FlaubertModel.from_pretrained("flaubert/flaubert_base_cased")
        input_ids = torch.tensor([[0, 345, 232, 328, 740, 140, 1695, 69, 6078, 1588, 2]])
        with torch.no_grad():
            output = model(input_ids)[0]
        expected_shape = torch.Size((1, 11, 768))
        self.assertEqual(output.shape, expected_shape)
        expected_slice = torch.tensor(
            [[[-2.6251, -1.4298, -0.0227], [-2.8510, -1.6387, 0.2258], [-2.8114, -1.1832, -0.3066]]]
        )

        self.assertTrue(torch.allclose(output[:, :3, :3], expected_slice, atol=1e-4))
| 188 | 0 |
from collections import OrderedDict
from typing import Any, List, Mapping, Optional
from ... import PreTrainedTokenizer, TensorType, is_torch_available
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfigWithPast, PatchingSpec
from ...utils import logging
logger = logging.get_logger(__name__)

CODEGEN_PRETRAINED_CONFIG_ARCHIVE_MAP = {
'Salesforce/codegen-350M-nl': 'https://huggingface.co/Salesforce/codegen-350M-nl/resolve/main/config.json',
'Salesforce/codegen-350M-multi': 'https://huggingface.co/Salesforce/codegen-350M-multi/resolve/main/config.json',
'Salesforce/codegen-350M-mono': 'https://huggingface.co/Salesforce/codegen-350M-mono/resolve/main/config.json',
'Salesforce/codegen-2B-nl': 'https://huggingface.co/Salesforce/codegen-2B-nl/resolve/main/config.json',
'Salesforce/codegen-2B-multi': 'https://huggingface.co/Salesforce/codegen-2B-multi/resolve/main/config.json',
'Salesforce/codegen-2B-mono': 'https://huggingface.co/Salesforce/codegen-2B-mono/resolve/main/config.json',
'Salesforce/codegen-6B-nl': 'https://huggingface.co/Salesforce/codegen-6B-nl/resolve/main/config.json',
'Salesforce/codegen-6B-multi': 'https://huggingface.co/Salesforce/codegen-6B-multi/resolve/main/config.json',
'Salesforce/codegen-6B-mono': 'https://huggingface.co/Salesforce/codegen-6B-mono/resolve/main/config.json',
'Salesforce/codegen-16B-nl': 'https://huggingface.co/Salesforce/codegen-16B-nl/resolve/main/config.json',
'Salesforce/codegen-16B-multi': 'https://huggingface.co/Salesforce/codegen-16B-multi/resolve/main/config.json',
'Salesforce/codegen-16B-mono': 'https://huggingface.co/Salesforce/codegen-16B-mono/resolve/main/config.json',
}
class CodeGenConfig(PretrainedConfig):
    """Configuration class to store the configuration of a CodeGen model."""

    model_type = "codegen"
    attribute_map = {
        "max_position_embeddings": "n_positions",
        "hidden_size": "n_embd",
        "num_attention_heads": "n_head",
        "num_hidden_layers": "n_layer",
    }

    def __init__(
        self,
        vocab_size=50400,
        n_positions=2048,
        n_ctx=2048,
        n_embd=4096,
        n_layer=28,
        n_head=16,
        rotary_dim=64,
        n_inner=None,
        activation_function="gelu_new",
        resid_pdrop=0.0,
        embd_pdrop=0.0,
        attn_pdrop=0.0,
        layer_norm_epsilon=1e-5,
        initializer_range=0.02,
        use_cache=True,
        bos_token_id=50256,
        eos_token_id=50256,
        tie_word_embeddings=False,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.n_ctx = n_ctx
        self.n_positions = n_positions
        self.n_embd = n_embd
        self.n_layer = n_layer
        self.n_head = n_head
        self.n_inner = n_inner
        self.rotary_dim = rotary_dim
        self.activation_function = activation_function
        self.resid_pdrop = resid_pdrop
        self.embd_pdrop = embd_pdrop
        self.attn_pdrop = attn_pdrop
        self.layer_norm_epsilon = layer_norm_epsilon
        self.initializer_range = initializer_range
        self.use_cache = use_cache

        self.bos_token_id = bos_token_id
        self.eos_token_id = eos_token_id

        super().__init__(
            bos_token_id=bos_token_id, eos_token_id=eos_token_id, tie_word_embeddings=tie_word_embeddings, **kwargs
        )


class CodeGenOnnxConfig(OnnxConfigWithPast):
    def __init__(
        self,
        config: PretrainedConfig,
        task: str = "default",
        patching_specs: List[PatchingSpec] = None,
        use_past: bool = False,
    ):
        super().__init__(config, task=task, patching_specs=patching_specs, use_past=use_past)
        if not getattr(self._config, "pad_token_id", None):
            # TODO: how to do that better?
            self._config.pad_token_id = 0

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        common_inputs = OrderedDict({"input_ids": {0: "batch", 1: "sequence"}})
        if self.use_past:
            self.fill_with_past_key_values_(common_inputs, direction="inputs")
            common_inputs["attention_mask"] = {0: "batch", 1: "past_sequence + sequence"}
        else:
            common_inputs["attention_mask"] = {0: "batch", 1: "sequence"}

        return common_inputs

    @property
    def num_layers(self) -> int:
        return self._config.n_layer

    @property
    def num_attention_heads(self) -> int:
        return self._config.n_head

    def generate_dummy_inputs(
        self,
        tokenizer: PreTrainedTokenizer,
        batch_size: int = -1,
        seq_length: int = -1,
        is_pair: bool = False,
        framework: Optional[TensorType] = None,
    ) -> Mapping[str, Any]:
        common_inputs = super(OnnxConfigWithPast, self).generate_dummy_inputs(
            tokenizer, batch_size=batch_size, seq_length=seq_length, is_pair=is_pair, framework=framework
        )

        # We need to order the input in the way they appears in the forward()
        ordered_inputs = OrderedDict({"input_ids": common_inputs["input_ids"]})

        # Need to add the past_keys
        if self.use_past:
            if not is_torch_available():
                raise ValueError("Cannot generate dummy past_keys inputs without PyTorch installed.")
            else:
                import torch

                batch, seqlen = common_inputs["input_ids"].shape
                # Not using the same length for past_key_values
                past_key_values_length = seqlen + 2
                past_shape = (
                    batch,
                    self.num_attention_heads,
                    past_key_values_length,
                    self._config.hidden_size // self.num_attention_heads,
                )
                ordered_inputs["past_key_values"] = [
                    (torch.zeros(past_shape), torch.zeros(past_shape)) for _ in range(self.num_layers)
                ]

        ordered_inputs["attention_mask"] = common_inputs["attention_mask"]
        if self.use_past:
            mask_dtype = ordered_inputs["attention_mask"].dtype
            ordered_inputs["attention_mask"] = torch.cat(
                [ordered_inputs["attention_mask"], torch.ones(batch, past_key_values_length, dtype=mask_dtype)], dim=1
            )

        return ordered_inputs

    @property
    def default_onnx_opset(self) -> int:
        return 13
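
# --- Illustrative usage sketch, not part of the upstream module. The checkpoint
# name and the tiny config values are assumptions; any CodeGen tokenizer and any
# consistent n_embd/n_head pair would do. Requires PyTorch because use_past=True. ---
def _dummy_inputs_demo():
    from transformers import AutoTokenizer

    tok = AutoTokenizer.from_pretrained("Salesforce/codegen-350M-mono")
    onnx_config = CodeGenOnnxConfig(CodeGenConfig(n_layer=2, n_head=4, n_embd=64), use_past=True)
    dummy = onnx_config.generate_dummy_inputs(tok, batch_size=2, seq_length=8, framework=TensorType.PYTORCH)
    # Ordering mirrors the model forward(): input_ids first, then the per-layer
    # past_key_values pairs, then attention_mask extended by the past length (seqlen + 2).
    return list(dummy.keys())  # ['input_ids', 'past_key_values', 'attention_mask']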
| 361 |
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from timm import create_model
from timm.data import resolve_data_config
from timm.data.transforms_factory import create_transform
from transformers import BitConfig, BitForImageClassification, BitImageProcessor
from transformers.image_utils import PILImageResampling
from transformers.utils import logging
logging.set_verbosity_info()
lowerCAmelCase__ = logging.get_logger(__name__)
def get_config(model_name):
    repo_id = "huggingface/label-files"
    filename = "imagenet-1k-id2label.json"
    id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
    id2label = {int(k): v for k, v in id2label.items()}
    label2id = {v: k for k, v in id2label.items()}

    conv_layer = "std_conv" if "bit" in model_name else False

    # note that when using BiT as backbone for ViT-hybrid checkpoints,
    # one needs to additionally set config.layer_type = "bottleneck", config.stem_type = "same",
    # config.conv_layer = "std_conv_same"
    config = BitConfig(
        conv_layer=conv_layer,
        num_labels=1000,
        id2label=id2label,
        label2id=label2id,
    )

    return config


def rename_key(name):
    if "stem.conv" in name:
        name = name.replace("stem.conv", "bit.embedder.convolution")
    if "blocks" in name:
        name = name.replace("blocks", "layers")
    if "head.fc" in name:
        name = name.replace("head.fc", "classifier.1")
    if name.startswith("norm"):
        name = "bit." + name
    if "bit" not in name and "classifier" not in name:
        name = "bit.encoder." + name

    return name


def prepare_img():
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    im = Image.open(requests.get(url, stream=True).raw)
    return im
@torch.no_grad()
def convert_bit_checkpoint(model_name, pytorch_dump_folder_path, push_to_hub=False):
    """
    Copy/paste/tweak model's weights to our BiT structure.
    """
    # define default BiT configuration
    config = get_config(model_name)

    # load original model from timm
    timm_model = create_model(model_name, pretrained=True)
    timm_model.eval()

    # load state_dict of original model
    state_dict = timm_model.state_dict()
    for key in state_dict.copy().keys():
        val = state_dict.pop(key)
        state_dict[rename_key(key)] = val.squeeze() if "head" in key else val

    # load HuggingFace model
    model = BitForImageClassification(config)
    model.eval()
    model.load_state_dict(state_dict)

    # create image processor
    transform = create_transform(**resolve_data_config({}, model=timm_model))
    timm_transforms = transform.transforms

    pillow_resamplings = {
        "bilinear": PILImageResampling.BILINEAR,
        "bicubic": PILImageResampling.BICUBIC,
        "nearest": PILImageResampling.NEAREST,
    }

    processor = BitImageProcessor(
        do_resize=True,
        size={"shortest_edge": timm_transforms[0].size},
        resample=pillow_resamplings[timm_transforms[0].interpolation.value],
        do_center_crop=True,
        crop_size={"height": timm_transforms[1].size[0], "width": timm_transforms[1].size[1]},
        do_normalize=True,
        image_mean=timm_transforms[-1].mean.tolist(),
        image_std=timm_transforms[-1].std.tolist(),
    )

    image = prepare_img()
    timm_pixel_values = transform(image).unsqueeze(0)
    pixel_values = processor(image, return_tensors="pt").pixel_values

    # verify pixel values
    assert torch.allclose(timm_pixel_values, pixel_values)

    # verify logits
    with torch.no_grad():
        outputs = model(pixel_values)
        logits = outputs.logits

    print("Logits:", logits[0, :3])
    print("Predicted class:", model.config.id2label[logits.argmax(-1).item()])
    timm_logits = timm_model(pixel_values)
    assert timm_logits.shape == outputs.logits.shape
    assert torch.allclose(timm_logits, outputs.logits, atol=1e-3)
    print("Looks ok!")

    if pytorch_dump_folder_path is not None:
        Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
        print(f"Saving model {model_name} and processor to {pytorch_dump_folder_path}")
        model.save_pretrained(pytorch_dump_folder_path)
        processor.save_pretrained(pytorch_dump_folder_path)

    if push_to_hub:
        print(f"Pushing model {model_name} and processor to the hub")
        model.push_to_hub(f"ybelkada/{model_name}")
        processor.push_to_hub(f"ybelkada/{model_name}")
if __name__ == "__main__":
lowerCAmelCase__ = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--model_name',
default='resnetv2_50x1_bitm',
type=str,
help='Name of the BiT timm model you\'d like to convert.',
)
parser.add_argument(
'--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model directory.'
)
parser.add_argument(
'--push_to_hub',
action='store_true',
help='Whether to push the model to the hub.',
)
lowerCAmelCase__ = parser.parse_args()
convert_bit_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
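
# Example invocation (illustrative; needs `timm`, plus network access for the
# checkpoint and the COCO test image; the model name is the parser default above):
#   python convert_bit_to_pytorch.py --model_name resnetv2_50x1_bitm \
#       --pytorch_dump_folder_path ./resnetv2_50x1_bitm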
| 119 | 0 |
'''simple docstring'''
import dataclasses
import re
import string
from typing import Any, Dict, Iterator, List, Mapping, Optional, Sequence, Tuple
import numpy as np
from . import residue_constants
FeatureDict = Mapping[str, np.ndarray]
ModelOutput = Mapping[str, Any]  # Is a nested dict.
PICO_TO_ANGSTROM = 0.01


@dataclasses.dataclass(frozen=True)
class Protein:
    """Protein structure representation."""
    # Cartesian coordinates of atoms in angstroms. The atom types correspond to
    # residue_constants.atom_types, i.e. the first three are N, CA, CB.
    atom_positions: np.ndarray  # [num_res, num_atom_type, 3]

    # Amino-acid type for each residue represented as an integer between 0 and
    # 20, where 20 is 'X'.
    aatype: np.ndarray  # [num_res]

    # Binary float mask to indicate presence of a particular atom. 1.0 if an atom
    # is present and 0.0 if not. This should be used for loss masking.
    atom_mask: np.ndarray  # [num_res, num_atom_type]

    # Residue index as used in PDB. It is not necessarily continuous or 0-indexed.
    residue_index: np.ndarray  # [num_res]

    # B-factors, or temperature factors, of each residue (in sq. angstroms units),
    # representing the displacement of the residue from its ground truth mean
    # value.
    b_factors: np.ndarray  # [num_res, num_atom_type]

    # Chain indices for multi-chain predictions
    chain_index: Optional[np.ndarray] = None

    # Optional remark about the protein. Included as a comment in output PDB
    # files
    remark: Optional[str] = None

    # Templates used to generate this protein (prediction-only)
    parents: Optional[Sequence[str]] = None

    # Chain corresponding to each parent
    parents_chain_index: Optional[Sequence[int]] = None
def from_proteinnet_string(proteinnet_str: str) -> Protein:
    tag_re = r"(\[[A-Z]+\]\n)"
    tags = [tag.strip() for tag in re.split(tag_re, proteinnet_str) if len(tag) > 0]
    groups = zip(tags[0::2], [l.split("\n") for l in tags[1::2]])

    atoms = ["N", "CA", "C"]
    aatype = None
    atom_positions = None
    atom_mask = None
    for g in groups:
        if "[PRIMARY]" == g[0]:
            seq = g[1][0].strip()
            for i in range(len(seq)):
                if seq[i] not in residue_constants.restypes:
                    seq = seq[:i] + "X" + seq[i + 1 :]  # strings are immutable, so rebuild
            aatype = np.array(
                [residue_constants.restype_order.get(res_symbol, residue_constants.restype_num) for res_symbol in seq]
            )
        elif "[TERTIARY]" == g[0]:
            tertiary = []
            for axis in range(3):
                tertiary.append(list(map(float, g[1][axis].split())))
            tertiary_np = np.array(tertiary)
            atom_positions = np.zeros((len(tertiary[0]) // 3, residue_constants.atom_type_num, 3)).astype(np.float32)
            for i, atom in enumerate(atoms):
                atom_positions[:, residue_constants.atom_order[atom], :] = np.transpose(tertiary_np[:, i::3])
            atom_positions *= PICO_TO_ANGSTROM
        elif "[MASK]" == g[0]:
            mask = np.array(list(map({"-": 0, "+": 1}.get, g[1][0].strip())))
            atom_mask = np.zeros(
                (
                    len(mask),
                    residue_constants.atom_type_num,
                )
            ).astype(np.float32)
            for i, atom in enumerate(atoms):
                atom_mask[:, residue_constants.atom_order[atom]] = 1
            atom_mask *= mask[..., None]

    assert aatype is not None

    return Protein(
        atom_positions=atom_positions,
        atom_mask=atom_mask,
        aatype=aatype,
        residue_index=np.arange(len(aatype)),
        b_factors=None,
    )
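
# Format note (illustrative, not in the upstream file): each [TERTIARY] axis row
# interleaves the N, CA and C coordinates per residue, which is why the parser
# above slices `tertiary_np[:, i::3]` per backbone atom and then rescales from
# picometers to angstroms via PICO_TO_ANGSTROM (1 angstrom = 100 pm).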
def get_pdb_headers(prot: Protein, chain_id: int = 0) -> List[str]:
    pdb_headers: List[str] = []

    remark = prot.remark
    if remark is not None:
        pdb_headers.append(f"REMARK {remark}")

    parents = prot.parents
    parents_chain_index = prot.parents_chain_index
    if parents is not None and parents_chain_index is not None:
        parents = [p for i, p in zip(parents_chain_index, parents) if i == chain_id]

    if parents is None or len(parents) == 0:
        parents = ["N/A"]

    pdb_headers.append(f"PARENT {' '.join(parents)}")

    return pdb_headers
def add_pdb_headers(prot: Protein, pdb_str: str) -> str:
    """Add pdb headers to an existing PDB string. Useful during multi-chain
    recycling
    """
    out_pdb_lines: List[str] = []
    lines = pdb_str.split("\n")

    remark = prot.remark
    if remark is not None:
        out_pdb_lines.append(f"REMARK {remark}")

    parents_per_chain: List[List[str]]
    if prot.parents is not None and len(prot.parents) > 0:
        parents_per_chain = []
        if prot.parents_chain_index is not None:
            parent_dict: Dict[str, List[str]] = {}
            for p, i in zip(prot.parents, prot.parents_chain_index):
                parent_dict.setdefault(str(i), [])
                parent_dict[str(i)].append(p)

            max_idx = max([int(chain_idx) for chain_idx in parent_dict])
            for i in range(max_idx + 1):
                chain_parents = parent_dict.get(str(i), ["N/A"])
                parents_per_chain.append(chain_parents)
        else:
            parents_per_chain.append(list(prot.parents))
    else:
        parents_per_chain = [["N/A"]]

    def make_parent_line(p: Sequence[str]) -> str:
        return f"PARENT {' '.join(p)}"

    out_pdb_lines.append(make_parent_line(parents_per_chain[0]))

    chain_counter = 0
    for i, l in enumerate(lines):
        if "PARENT" not in l and "REMARK" not in l:
            out_pdb_lines.append(l)
        if "TER" in l and "END" not in lines[i + 1]:
            chain_counter += 1
            if not chain_counter >= len(parents_per_chain):
                chain_parents = parents_per_chain[chain_counter]
            else:
                chain_parents = ["N/A"]

            out_pdb_lines.append(make_parent_line(chain_parents))

    return "\n".join(out_pdb_lines)
def to_pdb(prot: Protein) -> str:
    """Converts a `Protein` instance to a PDB string."""
    restypes = residue_constants.restypes + ["X"]

    def res_1to3(r: int) -> str:
        return residue_constants.restype_1to3.get(restypes[r], "UNK")

    atom_types = residue_constants.atom_types

    pdb_lines: List[str] = []

    atom_mask = prot.atom_mask
    aatype = prot.aatype
    atom_positions = prot.atom_positions
    residue_index = prot.residue_index.astype(np.int32)
    b_factors = prot.b_factors
    chain_index = prot.chain_index

    if np.any(aatype > residue_constants.restype_num):
        raise ValueError("Invalid aatypes.")

    headers = get_pdb_headers(prot)
    if len(headers) > 0:
        pdb_lines.extend(headers)

    n = aatype.shape[0]
    atom_index = 1
    prev_chain_index = 0
    chain_tags = string.ascii_uppercase
    chain_tag = None
    # Add all atom sites.
    for i in range(n):
        res_name_3 = res_1to3(aatype[i])
        for atom_name, pos, mask, b_factor in zip(atom_types, atom_positions[i], atom_mask[i], b_factors[i]):
            if mask < 0.5:
                continue

            record_type = "ATOM"
            name = atom_name if len(atom_name) == 4 else f" {atom_name}"
            alt_loc = ""
            insertion_code = ""
            occupancy = 1.00
            element = atom_name[0]  # Protein supports only C, N, O, S, this works.
            charge = ""

            chain_tag = "A"
            if chain_index is not None:
                chain_tag = chain_tags[chain_index[i]]

            # PDB is a columnar format, every space matters here!
            atom_line = (
                f"{record_type:<6}{atom_index:>5} {name:<4}{alt_loc:>1}"
                f"{res_name_3:>3} {chain_tag:>1}"
                f"{residue_index[i]:>4}{insertion_code:>1}   "
                f"{pos[0]:>8.3f}{pos[1]:>8.3f}{pos[2]:>8.3f}"
                f"{occupancy:>6.2f}{b_factor:>6.2f}          "
                f"{element:>2}{charge:>2}"
            )
            pdb_lines.append(atom_line)
            atom_index += 1

        should_terminate = i == n - 1
        if chain_index is not None:
            if i != n - 1 and chain_index[i + 1] != prev_chain_index:
                should_terminate = True
                prev_chain_index = chain_index[i + 1]

        if should_terminate:
            # Close the chain.
            chain_end = "TER"
            chain_termination_line = (
                f"{chain_end:<6}{atom_index:>5}      {res_1to3(aatype[i]):>3} {chain_tag:>1}{residue_index[i]:>4}"
            )
            pdb_lines.append(chain_termination_line)
            atom_index += 1

            if i != n - 1:
                # "prev" is a misnomer here. This happens at the beginning of
                # each new chain.
                pdb_lines.extend(get_pdb_headers(prot, prev_chain_index))

    pdb_lines.append("END")
    pdb_lines.append("")
    return "\n".join(pdb_lines)
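
# Standalone illustration (not part of the upstream module) of the fixed-width
# ATOM record built in to_pdb above: each field occupies an exact column range
# per the PDB specification, so every space in the f-string is load-bearing.
def _atom_record_format_demo() -> str:
    pos, occupancy, b_factor = (11.104, 6.134, 1.711), 1.00, 0.00
    return (
        f"{'ATOM':<6}{1:>5} {' CA ':<4}{'':>1}"
        f"{'GLY':>3} {'A':>1}"
        f"{1:>4}{'':>1}   "
        f"{pos[0]:>8.3f}{pos[1]:>8.3f}{pos[2]:>8.3f}"
        f"{occupancy:>6.2f}{b_factor:>6.2f}          "
        f"{'C':>2}{'':>2}"
    )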
def ideal_atom_mask(prot: Protein) -> np.ndarray:
    """Computes an ideal atom mask from the amino acid sequence."""
    return residue_constants.STANDARD_ATOM_MASK[prot.aatype]


def from_prediction(
    features: FeatureDict,
    result: ModelOutput,
    b_factors: Optional[np.ndarray] = None,
    chain_index: Optional[np.ndarray] = None,
    remark: Optional[str] = None,
    parents: Optional[Sequence[str]] = None,
    parents_chain_index: Optional[Sequence[int]] = None,
) -> Protein:
    """Assembles a protein from a prediction."""
    return Protein(
        aatype=features["aatype"],
        atom_positions=result["final_atom_positions"],
        atom_mask=result["final_atom_mask"],
        residue_index=features["residue_index"] + 1,
        b_factors=b_factors if b_factors is not None else np.zeros_like(result["final_atom_mask"]),
        chain_index=chain_index,
        remark=remark,
        parents=parents,
        parents_chain_index=parents_chain_index,
    )
| 47 |
'''simple docstring'''
import unittest
import numpy as np
import torch
from diffusers import VersatileDiffusionImageVariationPipeline
from diffusers.utils.testing_utils import load_image, require_torch_gpu, slow, torch_device
lowerCamelCase : Optional[int] = False
class A__ ( unittest.TestCase ):
pass
@slow
@require_torch_gpu
class VersatileDiffusionImageVariationPipelineIntegrationTests(unittest.TestCase):
    def test_inference_image_variations(self):
        pipe = VersatileDiffusionImageVariationPipeline.from_pretrained("shi-labs/versatile-diffusion")
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)

        image_prompt = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/versatile_diffusion/benz.jpg"
        )
        generator = torch.manual_seed(0)
        image = pipe(
            image=image_prompt,
            generator=generator,
            guidance_scale=7.5,
            num_inference_steps=50,
            output_type="numpy",
        ).images

        image_slice = image[0, 253:256, 253:256, -1]

        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array([0.0441, 0.0469, 0.0507, 0.0575, 0.0632, 0.0650, 0.0865, 0.0909, 0.0945])

        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
| 47 | 1 |
'''simple docstring'''
import argparse
import csv
import logging
import os
import random
import numpy as np
import torch
from torch.utils.data import DataLoader, RandomSampler, SequentialSampler, TensorDataset
from tqdm import tqdm, trange
from transformers import (
CONFIG_NAME,
WEIGHTS_NAME,
AdamW,
OpenAIGPTDoubleHeadsModel,
OpenAIGPTTokenizer,
get_linear_schedule_with_warmup,
)
logging.basicConfig(
format="%(asctime)s - %(levelname)s - %(name)s - %(message)s", datefmt="%m/%d/%Y %H:%M:%S", level=logging.INFO
)
logger = logging.getLogger(__name__)
def accuracy(out, labels):
    outputs = np.argmax(out, axis=1)
    return np.sum(outputs == labels)


def load_rocstories_dataset(dataset_path):
    """Output a list of tuples(story, 1st continuation, 2nd continuation, label)"""
    with open(dataset_path, encoding="utf_8") as f:
        f = csv.reader(f)
        output = []
        next(f)  # skip the first line
        for line in tqdm(f):
            output.append((" ".join(line[1:5]), line[5], line[6], int(line[-1]) - 1))
    return output
def pre_process_datasets(encoded_datasets, input_len, cap_length, start_token, delimiter_token, clf_token):
    """Pre-process datasets containing lists of tuples(story, 1st continuation, 2nd continuation, label)

    To Transformer inputs of shape (n_batch, n_alternative, length) comprising for each batch, continuation:
    input_ids[batch, alternative, :] = [start_token] + story[:cap_length] + [delimiter_token] + cont1[:cap_length] + [clf_token]
    """
    tensor_datasets = []
    for dataset in encoded_datasets:
        n_batch = len(dataset)
        input_ids = np.zeros((n_batch, 2, input_len), dtype=np.int64)
        mc_token_ids = np.zeros((n_batch, 2), dtype=np.int64)
        lm_labels = np.full((n_batch, 2, input_len), fill_value=-100, dtype=np.int64)
        mc_labels = np.zeros((n_batch,), dtype=np.int64)
        for (
            i,
            (story, cont1, cont2, mc_label),
        ) in enumerate(dataset):
            with_cont1 = [start_token] + story[:cap_length] + [delimiter_token] + cont1[:cap_length] + [clf_token]
            with_cont2 = [start_token] + story[:cap_length] + [delimiter_token] + cont2[:cap_length] + [clf_token]
            input_ids[i, 0, : len(with_cont1)] = with_cont1
            input_ids[i, 1, : len(with_cont2)] = with_cont2
            mc_token_ids[i, 0] = len(with_cont1) - 1
            mc_token_ids[i, 1] = len(with_cont2) - 1
            lm_labels[i, 0, : len(with_cont1)] = with_cont1
            lm_labels[i, 1, : len(with_cont2)] = with_cont2
            mc_labels[i] = mc_label
        all_inputs = (input_ids, mc_token_ids, lm_labels, mc_labels)
        tensor_datasets.append(tuple(torch.tensor(t) for t in all_inputs))
    return tensor_datasets
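
# Shape note (illustrative, not in the original script): every dataset becomes four
# tensors whose leading dims are (n_batch, 2) -- one row per candidate continuation --
# and mc_token_ids marks the clf_token position that OpenAIGPTDoubleHeadsModel pools
# for its multiple-choice head.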
def main():
'''simple docstring'''
snake_case_ = argparse.ArgumentParser()
parser.add_argument("--model_name" , type=snake_case , default="openai-gpt" , help="pretrained model name" )
parser.add_argument("--do_train" , action="store_true" , help="Whether to run training." )
parser.add_argument("--do_eval" , action="store_true" , help="Whether to run eval on the dev set." )
parser.add_argument(
"--output_dir" , default=snake_case , type=snake_case , required=snake_case , help="The output directory where the model predictions and checkpoints will be written." , )
parser.add_argument("--train_dataset" , type=snake_case , default="" )
parser.add_argument("--eval_dataset" , type=snake_case , default="" )
parser.add_argument("--seed" , type=snake_case , default=4_2 )
parser.add_argument("--num_train_epochs" , type=snake_case , default=3 )
parser.add_argument("--train_batch_size" , type=snake_case , default=8 )
parser.add_argument("--eval_batch_size" , type=snake_case , default=1_6 )
parser.add_argument("--adam_epsilon" , default=1e-8 , type=snake_case , help="Epsilon for Adam optimizer." )
parser.add_argument("--max_grad_norm" , type=snake_case , default=1 )
parser.add_argument(
"--max_steps" , default=-1 , type=snake_case , help=(
"If > 0: set total number of training steps to perform. Override num_train_epochs."
) , )
parser.add_argument(
"--gradient_accumulation_steps" , type=snake_case , default=1 , help="Number of updates steps to accumulate before performing a backward/update pass." , )
parser.add_argument("--learning_rate" , type=snake_case , default=6.25e-5 )
parser.add_argument("--warmup_steps" , default=0 , type=snake_case , help="Linear warmup over warmup_steps." )
parser.add_argument("--lr_schedule" , type=snake_case , default="warmup_linear" )
parser.add_argument("--weight_decay" , type=snake_case , default=0.01 )
parser.add_argument("--lm_coef" , type=snake_case , default=0.9 )
parser.add_argument("--n_valid" , type=snake_case , default=3_7_4 )
parser.add_argument("--server_ip" , type=snake_case , default="" , help="Can be used for distant debugging." )
parser.add_argument("--server_port" , type=snake_case , default="" , help="Can be used for distant debugging." )
snake_case_ = parser.parse_args()
print(snake_case )
if args.server_ip and args.server_port:
# Distant debugging - see https://code.visualstudio.com/docs/python/debugging#_attach-to-a-local-script
import ptvsd
print("Waiting for debugger attach" )
ptvsd.enable_attach(address=(args.server_ip, args.server_port) , redirect_output=snake_case )
ptvsd.wait_for_attach()
random.seed(args.seed )
np.random.seed(args.seed )
torch.manual_seed(args.seed )
torch.cuda.manual_seed_all(args.seed )
snake_case_ = torch.device("cuda" if torch.cuda.is_available() else "cpu" )
snake_case_ = torch.cuda.device_count()
logger.info("device: {}, n_gpu {}".format(snake_case , snake_case ) )
if not args.do_train and not args.do_eval:
raise ValueError("At least one of `do_train` or `do_eval` must be True." )
if not os.path.exists(args.output_dir ):
os.makedirs(args.output_dir )
# Load tokenizer and model
# This loading functions also add new tokens and embeddings called `special tokens`
# These new embeddings will be fine-tuned on the RocStories dataset
snake_case_ = ["_start_", "_delimiter_", "_classify_"]
snake_case_ = OpenAIGPTTokenizer.from_pretrained(args.model_name )
tokenizer.add_tokens(snake_case )
snake_case_ = tokenizer.convert_tokens_to_ids(snake_case )
snake_case_ = OpenAIGPTDoubleHeadsModel.from_pretrained(args.model_name )
model.resize_token_embeddings(len(snake_case ) )
model.to(snake_case )
# Load and encode the datasets
    def tokenize_and_encode(obj):
        """Tokenize and encode a nested object"""
        if isinstance(obj, str):
            return tokenizer.convert_tokens_to_ids(tokenizer.tokenize(obj))
        elif isinstance(obj, int):
            return obj
        return [tokenize_and_encode(o) for o in obj]
logger.info("Encoding dataset..." )
snake_case_ = load_rocstories_dataset(args.train_dataset )
snake_case_ = load_rocstories_dataset(args.eval_dataset )
snake_case_ = (train_dataset, eval_dataset)
snake_case_ = tokenize_and_encode(snake_case )
# Compute the max input length for the Transformer
snake_case_ = model.config.n_positions // 2 - 2
snake_case_ = max(
len(story[:max_length] ) + max(len(conta[:max_length] ) , len(conta[:max_length] ) ) + 3
for dataset in encoded_datasets
for story, conta, conta, _ in dataset )
snake_case_ = min(snake_case , model.config.n_positions ) # Max size of input for the pre-trained model
# Prepare inputs tensors and dataloaders
snake_case_ = pre_process_datasets(snake_case , snake_case , snake_case , *snake_case )
snake_case_ , snake_case_ = tensor_datasets[0], tensor_datasets[1]
snake_case_ = TensorDataset(*snake_case )
snake_case_ = RandomSampler(snake_case )
snake_case_ = DataLoader(snake_case , sampler=snake_case , batch_size=args.train_batch_size )
snake_case_ = TensorDataset(*snake_case )
snake_case_ = SequentialSampler(snake_case )
snake_case_ = DataLoader(snake_case , sampler=snake_case , batch_size=args.eval_batch_size )
# Prepare optimizer
if args.do_train:
if args.max_steps > 0:
snake_case_ = args.max_steps
snake_case_ = args.max_steps // (len(snake_case ) // args.gradient_accumulation_steps) + 1
else:
snake_case_ = len(snake_case ) // args.gradient_accumulation_steps * args.num_train_epochs
snake_case_ = list(model.named_parameters() )
snake_case_ = ["bias", "LayerNorm.bias", "LayerNorm.weight"]
snake_case_ = [
{
"params": [p for n, p in param_optimizer if not any(nd in n for nd in no_decay )],
"weight_decay": args.weight_decay,
},
{"params": [p for n, p in param_optimizer if any(nd in n for nd in no_decay )], "weight_decay": 0.0},
]
snake_case_ = AdamW(snake_case , lr=args.learning_rate , eps=args.adam_epsilon )
snake_case_ = get_linear_schedule_with_warmup(
snake_case , num_warmup_steps=args.warmup_steps , num_training_steps=snake_case )
if args.do_train:
snake_case_ , snake_case_ , snake_case_ = 0, 0, None
model.train()
for _ in trange(int(args.num_train_epochs ) , desc="Epoch" ):
snake_case_ = 0
snake_case_ = 0
snake_case_ = tqdm(snake_case , desc="Training" )
for step, batch in enumerate(snake_case ):
snake_case_ = tuple(t.to(snake_case ) for t in batch )
snake_case_ , snake_case_ , snake_case_ , snake_case_ = batch
snake_case_ = model(snake_case , mc_token_ids=snake_case , lm_labels=snake_case , mc_labels=snake_case )
snake_case_ = args.lm_coef * losses[0] + losses[1]
loss.backward()
optimizer.step()
scheduler.step()
optimizer.zero_grad()
tr_loss += loss.item()
snake_case_ = (
loss.item() if exp_average_loss is None else 0.7 * exp_average_loss + 0.3 * loss.item()
)
nb_tr_steps += 1
snake_case_ = "Training loss: {:.2e} lr: {:.2e}".format(snake_case , scheduler.get_lr()[0] )
# Save a trained model
if args.do_train:
# Save a trained model, configuration and tokenizer
snake_case_ = model.module if hasattr(snake_case , "module" ) else model # Only save the model itself
# If we save using the predefined names, we can load using `from_pretrained`
snake_case_ = os.path.join(args.output_dir , snake_case )
snake_case_ = os.path.join(args.output_dir , snake_case )
torch.save(model_to_save.state_dict() , snake_case )
model_to_save.config.to_json_file(snake_case )
tokenizer.save_vocabulary(args.output_dir )
# Load a trained model and vocabulary that you have fine-tuned
snake_case_ = OpenAIGPTDoubleHeadsModel.from_pretrained(args.output_dir )
snake_case_ = OpenAIGPTTokenizer.from_pretrained(args.output_dir )
model.to(snake_case )
if args.do_eval:
model.eval()
snake_case_ , snake_case_ = 0, 0
snake_case_ , snake_case_ = 0, 0
for batch in tqdm(snake_case , desc="Evaluating" ):
snake_case_ = tuple(t.to(snake_case ) for t in batch )
snake_case_ , snake_case_ , snake_case_ , snake_case_ = batch
with torch.no_grad():
snake_case_ , snake_case_ , snake_case_ , snake_case_ = model(
snake_case , mc_token_ids=snake_case , lm_labels=snake_case , mc_labels=snake_case )
snake_case_ = mc_logits.detach().cpu().numpy()
snake_case_ = mc_labels.to("cpu" ).numpy()
snake_case_ = accuracy(snake_case , snake_case )
eval_loss += mc_loss.mean().item()
eval_accuracy += tmp_eval_accuracy
nb_eval_examples += input_ids.size(0 )
nb_eval_steps += 1
snake_case_ = eval_loss / nb_eval_steps
snake_case_ = eval_accuracy / nb_eval_examples
snake_case_ = tr_loss / nb_tr_steps if args.do_train else None
snake_case_ = {"eval_loss": eval_loss, "eval_accuracy": eval_accuracy, "train_loss": train_loss}
snake_case_ = os.path.join(args.output_dir , "eval_results.txt" )
with open(snake_case , "w" ) as writer:
logger.info("***** Eval results *****" )
for key in sorted(result.keys() ):
logger.info(" %s = %s" , snake_case , str(result[key] ) )
writer.write("%s = %s\n" % (key, str(result[key] )) )
if __name__ == "__main__":
main()
| 92 |
'''simple docstring'''
import unittest
import numpy as np
import torch
from diffusers import VersatileDiffusionImageVariationPipeline
from diffusers.utils.testing_utils import load_image, require_torch_gpu, slow, torch_device
_SCREAMING_SNAKE_CASE : Any = False
class _snake_case ( unittest.TestCase ):
pass
@slow
@require_torch_gpu
class VersatileDiffusionImageVariationPipelineIntegrationTests(unittest.TestCase):
    def test_inference_image_variations(self):
        pipe = VersatileDiffusionImageVariationPipeline.from_pretrained("shi-labs/versatile-diffusion")
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)

        image_prompt = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/versatile_diffusion/benz.jpg"
        )
        generator = torch.manual_seed(0)
        image = pipe(
            image=image_prompt,
            generator=generator,
            guidance_scale=7.5,
            num_inference_steps=50,
            output_type="numpy",
        ).images

        image_slice = image[0, 253:256, 253:256, -1]

        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array([0.0441, 0.0469, 0.0507, 0.0575, 0.0632, 0.0650, 0.0865, 0.0909, 0.0945])

        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
| 92 | 1 |
'''simple docstring'''
import unittest
from transformers import PegasusConfig, PegasusTokenizer, is_flax_available
from transformers.testing_utils import require_flax, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor
if is_flax_available():
import os
# The slow tests are often failing with OOM error on GPU
# This makes JAX allocate exactly what is needed on demand, and deallocate memory that is no longer needed
# but will be slower as stated here https://jax.readthedocs.io/en/latest/gpu_memory_allocation.html
    os.environ["XLA_PYTHON_CLIENT_ALLOCATOR"] = "platform"
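    # Related knobs (illustrative, not in the original test): instead of switching
    # the allocator, JAX's preallocation can be disabled or capped, e.g.
    #   os.environ["XLA_PYTHON_CLIENT_PREALLOCATE"] = "false"
    #   os.environ["XLA_PYTHON_CLIENT_MEM_FRACTION"] = "0.5"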
import jax
import jax.numpy as jnp
import numpy as np
from transformers import FlaxPegasusForConditionalGeneration, FlaxPegasusModel
@require_flax
class FlaxPegasusModelTester:
    config_cls = PegasusConfig
    config_updates = {}
    hidden_act = "gelu"
    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_labels=False,
        vocab_size=99,
        hidden_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=20,
        eos_token_id=2,
        pad_token_id=1,
        bos_token_id=0,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.eos_token_id = eos_token_id
        self.pad_token_id = pad_token_id
        self.bos_token_id = bos_token_id
    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length - 1], self.vocab_size).clip(3, self.vocab_size)
        eos_tensor = np.expand_dims(np.array([self.eos_token_id] * self.batch_size), 1)
        input_ids = np.concatenate([input_ids, eos_tensor], axis=1)

        decoder_input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        config = self.config_cls(
            vocab_size=self.vocab_size,
            d_model=self.hidden_size,
            encoder_layers=self.num_hidden_layers,
            decoder_layers=self.num_hidden_layers,
            encoder_attention_heads=self.num_attention_heads,
            decoder_attention_heads=self.num_attention_heads,
            encoder_ffn_dim=self.intermediate_size,
            decoder_ffn_dim=self.intermediate_size,
            dropout=self.hidden_dropout_prob,
            attention_dropout=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            eos_token_ids=[2],
            bos_token_id=self.bos_token_id,
            pad_token_id=self.pad_token_id,
            decoder_start_token_id=self.pad_token_id,
            **self.config_updates,
        )
        inputs_dict = prepare_pegasus_inputs_dict(config, input_ids, decoder_input_ids)
        return config, inputs_dict
def lowercase_ ( self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) -> int:
__lowerCamelCase : Union[str, Any] = 20
__lowerCamelCase : Optional[int] = model_class_name(SCREAMING_SNAKE_CASE_ )
__lowerCamelCase : Optional[int] = model.encode(inputs_dict['input_ids'] )
__lowerCamelCase , __lowerCamelCase : List[str] = (
inputs_dict['decoder_input_ids'],
inputs_dict['decoder_attention_mask'],
)
__lowerCamelCase : Union[str, Any] = model.init_cache(decoder_input_ids.shape[0] , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
__lowerCamelCase : int = jnp.ones((decoder_input_ids.shape[0], max_decoder_length) , dtype='i4' )
__lowerCamelCase : Optional[int] = jnp.broadcast_to(
jnp.arange(decoder_input_ids.shape[-1] - 1 )[None, :] , (decoder_input_ids.shape[0], decoder_input_ids.shape[-1] - 1) , )
__lowerCamelCase : str = model.decode(
decoder_input_ids[:, :-1] , SCREAMING_SNAKE_CASE_ , decoder_attention_mask=SCREAMING_SNAKE_CASE_ , past_key_values=SCREAMING_SNAKE_CASE_ , decoder_position_ids=SCREAMING_SNAKE_CASE_ , )
__lowerCamelCase : Optional[Any] = jnp.array(decoder_input_ids.shape[0] * [[decoder_input_ids.shape[-1] - 1]] , dtype='i4' )
__lowerCamelCase : Optional[Any] = model.decode(
decoder_input_ids[:, -1:] , SCREAMING_SNAKE_CASE_ , decoder_attention_mask=SCREAMING_SNAKE_CASE_ , past_key_values=outputs_cache.past_key_values , decoder_position_ids=SCREAMING_SNAKE_CASE_ , )
__lowerCamelCase : List[Any] = model.decode(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
__lowerCamelCase : Union[str, Any] = np.max(np.abs((outputs_cache_next[0][:, -1, :5] - outputs[0][:, -1, :5]) ) )
self.parent.assertTrue(diff < 1E-3 , msg=f'Max diff is {diff}' )
def lowercase_ ( self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) -> Dict:
__lowerCamelCase : List[str] = 20
__lowerCamelCase : List[Any] = model_class_name(SCREAMING_SNAKE_CASE_ )
__lowerCamelCase : List[str] = model.encode(inputs_dict['input_ids'] )
__lowerCamelCase , __lowerCamelCase : Dict = (
inputs_dict['decoder_input_ids'],
inputs_dict['decoder_attention_mask'],
)
__lowerCamelCase : List[str] = jnp.concatenate(
[
decoder_attention_mask,
jnp.zeros((decoder_attention_mask.shape[0], max_decoder_length - decoder_attention_mask.shape[1]) ),
] , axis=-1 , )
__lowerCamelCase : int = model.init_cache(decoder_input_ids.shape[0] , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
__lowerCamelCase : Optional[Any] = jnp.broadcast_to(
jnp.arange(decoder_input_ids.shape[-1] - 1 )[None, :] , (decoder_input_ids.shape[0], decoder_input_ids.shape[-1] - 1) , )
__lowerCamelCase : Dict = model.decode(
decoder_input_ids[:, :-1] , SCREAMING_SNAKE_CASE_ , decoder_attention_mask=SCREAMING_SNAKE_CASE_ , past_key_values=SCREAMING_SNAKE_CASE_ , decoder_position_ids=SCREAMING_SNAKE_CASE_ , )
__lowerCamelCase : Any = jnp.array(decoder_input_ids.shape[0] * [[decoder_input_ids.shape[-1] - 1]] , dtype='i4' )
__lowerCamelCase : List[str] = model.decode(
decoder_input_ids[:, -1:] , SCREAMING_SNAKE_CASE_ , past_key_values=outputs_cache.past_key_values , decoder_attention_mask=SCREAMING_SNAKE_CASE_ , decoder_position_ids=SCREAMING_SNAKE_CASE_ , )
__lowerCamelCase : List[str] = model.decode(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , decoder_attention_mask=SCREAMING_SNAKE_CASE_ )
__lowerCamelCase : List[Any] = np.max(np.abs((outputs_cache_next[0][:, -1, :5] - outputs[0][:, -1, :5]) ) )
self.parent.assertTrue(diff < 1E-3 , msg=f'Max diff is {diff}' )
def prepare_pegasus_inputs_dict(
    config,
    input_ids,
    decoder_input_ids,
    attention_mask=None,
    decoder_attention_mask=None,
):
    if attention_mask is None:
        attention_mask = np.not_equal(input_ids, config.pad_token_id).astype(np.int8)
    if decoder_attention_mask is None:
        decoder_attention_mask = np.concatenate(
            [
                np.ones(decoder_input_ids[:, :1].shape, dtype=np.int8),
                np.not_equal(decoder_input_ids[:, 1:], config.pad_token_id).astype(np.int8),
            ],
            axis=-1,
        )
    return {
        "input_ids": input_ids,
        "decoder_input_ids": decoder_input_ids,
        "attention_mask": attention_mask,
        "decoder_attention_mask": decoder_attention_mask,
    }
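
# Tiny worked example (illustrative, not in the original test): with pad_token_id=1,
# a decoder row [0, 5, 1] yields mask [1, 1, 0] -- the first position is always
# attended via the np.ones prefix, and trailing pads are masked out.
def _pegasus_mask_demo():
    cfg = PegasusConfig(pad_token_id=1)
    ids = np.array([[0, 5, 1]])
    return prepare_pegasus_inputs_dict(cfg, ids, ids)["decoder_attention_mask"]  # [[1, 1, 0]]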
@require_flax
class FlaxPegasusModelTest(FlaxModelTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            FlaxPegasusForConditionalGeneration,
            FlaxPegasusModel,
        )
        if is_flax_available()
        else ()
    )
    all_generative_model_classes = (FlaxPegasusForConditionalGeneration,) if is_flax_available() else ()
    is_encoder_decoder = True
    test_pruning = False
    test_head_masking = False
    test_onnx = False
def lowercase_ ( self ) -> Union[str, Any]:
__lowerCamelCase : Optional[Any] = FlaxPegasusModelTester(self )
__lowerCamelCase : Optional[int] = ConfigTester(self , config_class=SCREAMING_SNAKE_CASE_ )
def lowercase_ ( self ) -> Union[str, Any]:
self.config_tester.run_common_tests()
def lowercase_ ( self ) -> List[str]:
__lowerCamelCase , __lowerCamelCase : List[str] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
self.model_tester.check_use_cache_forward(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
def lowercase_ ( self ) -> Dict:
__lowerCamelCase , __lowerCamelCase : Optional[Any] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
self.model_tester.check_use_cache_forward_with_attn_mask(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
def lowercase_ ( self ) -> List[str]:
__lowerCamelCase , __lowerCamelCase : Any = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
with self.subTest(model_class.__name__ ):
__lowerCamelCase : int = self._prepare_for_class(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
__lowerCamelCase : Tuple = model_class(SCREAMING_SNAKE_CASE_ )
@jax.jit
def encode_jitted(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_=None , **SCREAMING_SNAKE_CASE_ ):
return model.encode(input_ids=SCREAMING_SNAKE_CASE_ , attention_mask=SCREAMING_SNAKE_CASE_ )
with self.subTest('JIT Enabled' ):
__lowerCamelCase : int = encode_jitted(**SCREAMING_SNAKE_CASE_ ).to_tuple()
with self.subTest('JIT Disabled' ):
with jax.disable_jit():
__lowerCamelCase : Any = encode_jitted(**SCREAMING_SNAKE_CASE_ ).to_tuple()
self.assertEqual(len(SCREAMING_SNAKE_CASE_ ) , len(SCREAMING_SNAKE_CASE_ ) )
for jitted_output, output in zip(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ):
self.assertEqual(jitted_output.shape , output.shape )
def lowercase_ ( self ) -> Tuple:
__lowerCamelCase , __lowerCamelCase : Optional[Any] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
with self.subTest(model_class.__name__ ):
__lowerCamelCase : Any = model_class(SCREAMING_SNAKE_CASE_ )
__lowerCamelCase : List[Any] = model.encode(inputs_dict['input_ids'] , inputs_dict['attention_mask'] )
__lowerCamelCase : Dict = {
'decoder_input_ids': inputs_dict['decoder_input_ids'],
'decoder_attention_mask': inputs_dict['decoder_attention_mask'],
'encoder_outputs': encoder_outputs,
}
@jax.jit
def decode_jitted(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ):
return model.decode(
decoder_input_ids=SCREAMING_SNAKE_CASE_ , decoder_attention_mask=SCREAMING_SNAKE_CASE_ , encoder_outputs=SCREAMING_SNAKE_CASE_ , )
with self.subTest('JIT Enabled' ):
__lowerCamelCase : Dict = decode_jitted(**SCREAMING_SNAKE_CASE_ ).to_tuple()
with self.subTest('JIT Disabled' ):
with jax.disable_jit():
__lowerCamelCase : str = decode_jitted(**SCREAMING_SNAKE_CASE_ ).to_tuple()
self.assertEqual(len(SCREAMING_SNAKE_CASE_ ) , len(SCREAMING_SNAKE_CASE_ ) )
for jitted_output, output in zip(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ):
self.assertEqual(jitted_output.shape , output.shape )
@slow
def lowercase_ ( self ) -> List[Any]:
for model_class_name in self.all_model_classes:
__lowerCamelCase : List[str] = model_class_name.from_pretrained('google/pegasus-large' , from_pt=SCREAMING_SNAKE_CASE_ )
__lowerCamelCase : List[str] = np.ones((1, 1) )
__lowerCamelCase : Optional[int] = model(SCREAMING_SNAKE_CASE_ )
self.assertIsNotNone(SCREAMING_SNAKE_CASE_ )
@slow
def lowercase_ ( self ) -> Optional[Any]:
__lowerCamelCase : Dict = FlaxPegasusForConditionalGeneration.from_pretrained('google/pegasus-xsum' )
__lowerCamelCase : Union[str, Any] = PegasusTokenizer.from_pretrained('google/pegasus-xsum' )
__lowerCamelCase : Dict = [
' PG&E stated it scheduled the blackouts in response to forecasts for high winds amid dry conditions. The aim is to reduce the risk of wildfires. Nearly 800 thousand customers were scheduled to be affected by the shutoffs which were expected to last through at least midday tomorrow.',
' The London trio are up for best UK act and best album, as well as getting two nominations in the best song category."We got told like this morning \'Oh I think you\'re nominated\'", said Dappy."And I was like \'Oh yeah, which one?\' And now we\'ve got nominated for four awards. I mean, wow!"Bandmate Fazer added: "We thought it\'s best of us to come down and mingle with everyone and say hello to the cameras. And now we find we\'ve got four nominations."The band have two shots at the best song prize, getting the nod for their Tynchy Stryder collaboration Number One, and single Strong Again.Their album Uncle B will also go up against records by the likes of Beyonce and Kanye West.N-Dubz picked up the best newcomer Mobo in 2007, but female member Tulisa said they wouldn\'t be too disappointed if they didn\'t win this time around."At the end of the day we\'re grateful to be where we are in our careers."If it don\'t happen then it don\'t happen - live to fight another day and keep on making albums and hits for the fans."Dappy also revealed they could be performing live several times on the night.The group will be doing Number One and also a possible rendition of the War Child single, I Got Soul.The charity song is a re-working of The Killers\' All These Things That I\'ve Done and is set to feature artists like Chipmunk, Ironik and Pixie Lott.This year\'s Mobos will be held outside of London for the first time, in Glasgow on 30 September.N-Dubz said they were looking forward to performing for their Scottish fans and boasted about their recent shows north of the border."We just done Edinburgh the other day," said Dappy."We smashed up an N-Dubz show over there. We done Aberdeen about three or four months ago - we smashed up that show over there! Everywhere we go we smash it up!" ',
]
__lowerCamelCase : List[str] = [
'California\'s largest electricity provider has turned off power to hundreds of thousands of customers.',
'Pop group N-Dubz have revealed they were surprised to get four nominations for this year\'s Mobo Awards.',
]
__lowerCamelCase : str = tokenizer(SCREAMING_SNAKE_CASE_ , return_tensors='np' , truncation=SCREAMING_SNAKE_CASE_ , max_length=5_12 , padding=SCREAMING_SNAKE_CASE_ )
__lowerCamelCase : str = model.generate(**SCREAMING_SNAKE_CASE_ , num_beams=2 ).sequences
__lowerCamelCase : str = tokenizer.batch_decode(SCREAMING_SNAKE_CASE_ , skip_special_tokens=SCREAMING_SNAKE_CASE_ )
assert tgt_text == decoded
| 185 |
'''simple docstring'''
import unittest
from transformers import is_torch_available, is_vision_available
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
if is_torch_available():
import torch
from transformers import AutoModelForImageClassification
if is_vision_available():
from transformers import AutoImageProcessor
@require_torch
@require_vision
class DiTIntegrationTest(unittest.TestCase):
    @slow
    def test_for_image_classification(self):
        image_processor = AutoImageProcessor.from_pretrained("microsoft/dit-base-finetuned-rvlcdip")
        model = AutoModelForImageClassification.from_pretrained("microsoft/dit-base-finetuned-rvlcdip")
        model.to(torch_device)

        from datasets import load_dataset

        dataset = load_dataset("nielsr/rvlcdip-demo")

        image = dataset["train"][0]["image"].convert("RGB")

        inputs = image_processor(image, return_tensors="pt").to(torch_device)

        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)
            logits = outputs.logits

        expected_shape = torch.Size((1, 16))
        self.assertEqual(logits.shape, expected_shape)

        expected_slice = torch.tensor(
            [-0.4158, -0.4092, -0.4347],
            device=torch_device,
            dtype=torch.float,
        )
        self.assertTrue(torch.allclose(logits[0, :3], expected_slice, atol=1e-4))
| 185 | 1 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
A : str = {
"configuration_bloom": ["BLOOM_PRETRAINED_CONFIG_ARCHIVE_MAP", "BloomConfig", "BloomOnnxConfig"],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
A : Union[str, Any] = ["BloomTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
A : Optional[int] = [
"BLOOM_PRETRAINED_MODEL_ARCHIVE_LIST",
"BloomForCausalLM",
"BloomModel",
"BloomPreTrainedModel",
"BloomForSequenceClassification",
"BloomForTokenClassification",
"BloomForQuestionAnswering",
]
if TYPE_CHECKING:
from .configuration_bloom import BLOOM_PRETRAINED_CONFIG_ARCHIVE_MAP, BloomConfig, BloomOnnxConfig
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_bloom_fast import BloomTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_bloom import (
BLOOM_PRETRAINED_MODEL_ARCHIVE_LIST,
BloomForCausalLM,
BloomForQuestionAnswering,
BloomForSequenceClassification,
BloomForTokenClassification,
BloomModel,
BloomPreTrainedModel,
)
else:
import sys
A : int = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
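
# Illustrative note (not part of the module): thanks to the _LazyModule indirection
# above, importing the package itself is cheap -- heavy backends only load on the
# first attribute access, e.g.
#   from transformers.models.bloom import BloomConfig        # no torch import yet
#   from transformers.models.bloom import BloomForCausalLM   # pulls in torch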
| 259 |
"""simple docstring"""
from collections.abc import Callable
def _lowerCamelCase ( _UpperCamelCase , _UpperCamelCase , _UpperCamelCase ):
'''simple docstring'''
__lowerCAmelCase = a
__lowerCAmelCase = b
if function(_UpperCamelCase ) == 0: # one of the a or b is a root for the function
return a
elif function(_UpperCamelCase ) == 0:
return b
elif (
function(_UpperCamelCase ) * function(_UpperCamelCase ) > 0
): # if none of these are root and they are both positive or negative,
# then this algorithm can't find the root
raise ValueError("could not find root in given interval." )
else:
__lowerCAmelCase = start + (end - start) / 2.0
while abs(start - mid ) > 10**-7: # until precisely equals to 10^-7
if function(_UpperCamelCase ) == 0:
return mid
elif function(_UpperCamelCase ) * function(_UpperCamelCase ) < 0:
__lowerCAmelCase = mid
else:
__lowerCAmelCase = mid
__lowerCAmelCase = start + (end - start) / 2.0
return mid
def _lowerCamelCase ( _UpperCamelCase ):
'''simple docstring'''
return x**3 - 2 * x - 5
if __name__ == "__main__":
print(bisection(f, 1, 1_0_0_0))
import doctest
doctest.testmod()
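
# Worked check (illustrative, not in the original file): f(2) = -1 and f(3) = 16,
# so a sign change brackets the real root of x**3 - 2*x - 5, and bisection
# converges to ~2.0945515 on that interval.
def _bisection_demo() -> float:
    return bisection(f, 2, 3)  # ~2.0945515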
| 259 | 1 |
'''simple docstring'''
from collections import OrderedDict
from typing import Any, List, Mapping, Optional
from ... import PreTrainedTokenizer, TensorType, is_torch_available
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfigWithPast, PatchingSpec
from ...utils import logging
logger = logging.get_logger(__name__)

CODEGEN_PRETRAINED_CONFIG_ARCHIVE_MAP = {
'Salesforce/codegen-350M-nl': 'https://huggingface.co/Salesforce/codegen-350M-nl/resolve/main/config.json',
'Salesforce/codegen-350M-multi': 'https://huggingface.co/Salesforce/codegen-350M-multi/resolve/main/config.json',
'Salesforce/codegen-350M-mono': 'https://huggingface.co/Salesforce/codegen-350M-mono/resolve/main/config.json',
'Salesforce/codegen-2B-nl': 'https://huggingface.co/Salesforce/codegen-2B-nl/resolve/main/config.json',
'Salesforce/codegen-2B-multi': 'https://huggingface.co/Salesforce/codegen-2B-multi/resolve/main/config.json',
'Salesforce/codegen-2B-mono': 'https://huggingface.co/Salesforce/codegen-2B-mono/resolve/main/config.json',
'Salesforce/codegen-6B-nl': 'https://huggingface.co/Salesforce/codegen-6B-nl/resolve/main/config.json',
'Salesforce/codegen-6B-multi': 'https://huggingface.co/Salesforce/codegen-6B-multi/resolve/main/config.json',
'Salesforce/codegen-6B-mono': 'https://huggingface.co/Salesforce/codegen-6B-mono/resolve/main/config.json',
'Salesforce/codegen-16B-nl': 'https://huggingface.co/Salesforce/codegen-16B-nl/resolve/main/config.json',
'Salesforce/codegen-16B-multi': 'https://huggingface.co/Salesforce/codegen-16B-multi/resolve/main/config.json',
'Salesforce/codegen-16B-mono': 'https://huggingface.co/Salesforce/codegen-16B-mono/resolve/main/config.json',
}
class __lowerCAmelCase (lowercase_ ):
'''simple docstring'''
lowerCAmelCase__ : Any = """codegen"""
lowerCAmelCase__ : Union[str, Any] = {
"""max_position_embeddings""": """n_positions""",
"""hidden_size""": """n_embd""",
"""num_attention_heads""": """n_head""",
"""num_hidden_layers""": """n_layer""",
}
def __init__(self : Any , UpperCamelCase : List[Any]=50400 , UpperCamelCase : Optional[Any]=2048 , UpperCamelCase : List[Any]=2048 , UpperCamelCase : Optional[int]=4096 , UpperCamelCase : Union[str, Any]=28 , UpperCamelCase : Optional[Any]=16 , UpperCamelCase : Dict=64 , UpperCamelCase : Tuple=None , UpperCamelCase : Optional[int]="gelu_new" , UpperCamelCase : str=0.0 , UpperCamelCase : Dict=0.0 , UpperCamelCase : Tuple=0.0 , UpperCamelCase : int=1E-5 , UpperCamelCase : str=0.02 , UpperCamelCase : Union[str, Any]=True , UpperCamelCase : Optional[Any]=50256 , UpperCamelCase : Dict=50256 , UpperCamelCase : int=False , **UpperCamelCase : Optional[int] , ):
'''simple docstring'''
lowercase__ = vocab_size
lowercase__ = n_ctx
lowercase__ = n_positions
lowercase__ = n_embd
lowercase__ = n_layer
lowercase__ = n_head
lowercase__ = n_inner
lowercase__ = rotary_dim
lowercase__ = activation_function
lowercase__ = resid_pdrop
lowercase__ = embd_pdrop
lowercase__ = attn_pdrop
lowercase__ = layer_norm_epsilon
lowercase__ = initializer_range
lowercase__ = use_cache
lowercase__ = bos_token_id
lowercase__ = eos_token_id
super().__init__(
bos_token_id=UpperCamelCase , eos_token_id=UpperCamelCase , tie_word_embeddings=UpperCamelCase , **UpperCamelCase )
class __lowerCAmelCase (lowercase_ ):
'''simple docstring'''
def __init__(self : Union[str, Any] , UpperCamelCase : PretrainedConfig , UpperCamelCase : str = "default" , UpperCamelCase : List[PatchingSpec] = None , UpperCamelCase : bool = False , ):
'''simple docstring'''
super().__init__(UpperCamelCase , task=UpperCamelCase , patching_specs=UpperCamelCase , use_past=UpperCamelCase )
if not getattr(self._config , '''pad_token_id''' , UpperCamelCase ):
# TODO: how to do that better?
lowercase__ = 0
@property
def UpperCamelCase__ (self : List[str] ):
'''simple docstring'''
lowercase__ = OrderedDict({'''input_ids''': {0: '''batch''', 1: '''sequence'''}} )
if self.use_past:
self.fill_with_past_key_values_(UpperCamelCase , direction='''inputs''' )
lowercase__ = {0: '''batch''', 1: '''past_sequence + sequence'''}
else:
lowercase__ = {0: '''batch''', 1: '''sequence'''}
return common_inputs
@property
def UpperCamelCase__ (self : Any ):
'''simple docstring'''
return self._config.n_layer
@property
def UpperCamelCase__ (self : List[Any] ):
'''simple docstring'''
return self._config.n_head
    def generate_dummy_inputs(self, tokenizer: PreTrainedTokenizer, batch_size: int = -1, seq_length: int = -1, is_pair: bool = False, framework: Optional[TensorType] = None, ):
        '''simple docstring'''
        common_inputs = super(OnnxConfigWithPast, self).generate_dummy_inputs(
            tokenizer, batch_size=batch_size, seq_length=seq_length, is_pair=is_pair, framework=framework )
        # We need to order the inputs in the way they appear in the forward()
        ordered_inputs = OrderedDict({'''input_ids''': common_inputs['''input_ids''']})
        # Need to add the past_keys
        if self.use_past:
            if not is_torch_available():
                raise ValueError('''Cannot generate dummy past_keys inputs without PyTorch installed.''')
            else:
                import torch

                batch, seqlen = common_inputs['''input_ids'''].shape
                # Not using the same length for past_key_values
                past_key_values_length = seqlen + 2
                past_shape = (
                    batch,
                    self.num_attention_heads,
                    past_key_values_length,
                    self._config.hidden_size // self.num_attention_heads,
                )
                ordered_inputs['''past_key_values'''] = [
                    (torch.zeros(past_shape), torch.zeros(past_shape)) for _ in range(self.num_layers)
                ]
        ordered_inputs['''attention_mask'''] = common_inputs['''attention_mask''']
        if self.use_past:
            mask_dtype = ordered_inputs['''attention_mask'''].dtype
            ordered_inputs['''attention_mask'''] = torch.cat(
                [ordered_inputs['''attention_mask'''], torch.ones(batch, past_key_values_length, dtype=mask_dtype)], dim=1 )
        return ordered_inputs
    @property
    def default_onnx_opset(self):
        '''simple docstring'''
        return 13
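# --- Usage sketch (editor's addition; illustrative, using the classes defined above):
#
#   cfg = CodeGenConfig(n_layer=2, n_head=4, n_embd=64)
#   onnx_cfg = CodeGenOnnxConfig(cfg)
#   print(onnx_cfg.inputs)   # OrderedDict of dynamic axes for "input_ids" / "attention_mask"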
| 2 |
'''simple docstring'''
import unittest
from knapsack import greedy_knapsack as kp
class TestGreedyKnapsack(unittest.TestCase):
    def test_sorted(self):
        """simple docstring"""
        profit = [10, 20, 30, 40, 50, 60]
        weight = [2, 4, 6, 8, 10, 12]
        max_weight = 1_00
        self.assertEqual(kp.calc_profit(profit, weight, max_weight), 2_10)

    def test_negative_max_weight(self):
        """simple docstring"""
        self.assertRaisesRegex(ValueError, '''max_weight must greater than zero.''')

    def test_negative_weight_value(self):
        """simple docstring"""
        self.assertRaisesRegex(ValueError, '''Weight can not be negative.''')

    def test_negative_profit_value(self):
        """simple docstring"""
        self.assertRaisesRegex(ValueError, '''Profit can not be negative.''')

    def test_null_max_weight(self):
        """simple docstring"""
        self.assertRaisesRegex(ValueError, '''max_weight must greater than zero.''')

    def test_unequal_list_length(self):
        """simple docstring"""
        self.assertRaisesRegex(
            ValueError, '''The length of profit and weight must be same.''')
if __name__ == "__main__":
unittest.main()
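# --- Worked example (editor's addition; mirrors the data in test_sorted above):
# with max_weight=100 >= total weight 42, the greedy knapsack takes every item,
# so the profit is 10 + 20 + 30 + 40 + 50 + 60 = 210.
#
#   kp.calc_profit([10, 20, 30, 40, 50, 60], [2, 4, 6, 8, 10, 12], 100)  # -> 210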
| 93 | 0 |
import unittest
import numpy as np
import timeout_decorator # noqa
from transformers import BlenderbotConfig, is_flax_available
from transformers.testing_utils import jax_device, require_flax, slow
from ...generation.test_flax_utils import FlaxGenerationTesterMixin
from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor
if is_flax_available():
import os
# The slow tests are often failing with OOM error on GPU
# This makes JAX allocate exactly what is needed on demand, and deallocate memory that is no longer needed
# but will be slower as stated here https://jax.readthedocs.io/en/latest/gpu_memory_allocation.html
_SCREAMING_SNAKE_CASE = """platform"""
import jax
import jax.numpy as jnp
from transformers import BlenderbotTokenizer
from transformers.models.blenderbot.modeling_flax_blenderbot import (
FlaxBlenderbotForConditionalGeneration,
FlaxBlenderbotModel,
shift_tokens_right,
)
def prepare_blenderbot_inputs_dict(config, input_ids, decoder_input_ids, attention_mask=None, decoder_attention_mask=None, head_mask=None, decoder_head_mask=None, cross_attn_head_mask=None, ):
if attention_mask is None:
snake_case_ : Dict = np.where(input_ids != config.pad_token_id , 1 , 0 )
if decoder_attention_mask is None:
snake_case_ : List[str] = np.where(decoder_input_ids != config.pad_token_id , 1 , 0 )
if head_mask is None:
snake_case_ : List[str] = np.ones((config.encoder_layers, config.encoder_attention_heads) )
if decoder_head_mask is None:
snake_case_ : str = np.ones((config.decoder_layers, config.decoder_attention_heads) )
if cross_attn_head_mask is None:
snake_case_ : Optional[Any] = np.ones((config.decoder_layers, config.decoder_attention_heads) )
return {
"input_ids": input_ids,
"decoder_input_ids": decoder_input_ids,
"attention_mask": attention_mask,
"decoder_attention_mask": attention_mask,
}
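# Quick illustration (editor's addition): with config.pad_token_id == 1, an
# input_ids row [5, 7, 1] yields attention_mask [1, 1, 0] from the np.where above.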
class FlaxBlenderbotModelTester:
    def __init__(self, parent, batch_size=13, seq_length=7, is_training=True, use_labels=False, vocab_size=99, hidden_size=16, num_hidden_layers=2, num_attention_heads=4, intermediate_size=4, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=32, eos_token_id=2, pad_token_id=1, bos_token_id=0, initializer_range=0.0_2, ):
        """simple docstring"""
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.eos_token_id = eos_token_id
        self.pad_token_id = pad_token_id
        self.bos_token_id = bos_token_id
        self.initializer_range = initializer_range
    def prepare_config_and_inputs(self):
        """simple docstring"""
        input_ids = np.clip(ids_tensor([self.batch_size, self.seq_length - 1], self.vocab_size), 3, self.vocab_size)
        input_ids = np.concatenate((input_ids, 2 * np.ones((self.batch_size, 1), dtype=np.int64)), -1)
        decoder_input_ids = shift_tokens_right(input_ids, 1, 2)
        config = BlenderbotConfig(
            vocab_size=self.vocab_size, d_model=self.hidden_size, encoder_layers=self.num_hidden_layers, decoder_layers=self.num_hidden_layers, encoder_attention_heads=self.num_attention_heads, decoder_attention_heads=self.num_attention_heads, encoder_ffn_dim=self.intermediate_size, decoder_ffn_dim=self.intermediate_size, dropout=self.hidden_dropout_prob, attention_dropout=self.attention_probs_dropout_prob, max_position_embeddings=self.max_position_embeddings, eos_token_id=self.eos_token_id, bos_token_id=self.bos_token_id, pad_token_id=self.pad_token_id, initializer_range=self.initializer_range, use_cache=True, )  # use_cache value assumed; the original value was obfuscated
        inputs_dict = prepare_blenderbot_inputs_dict(config, input_ids, decoder_input_ids)
        return config, inputs_dict
    def prepare_config_and_inputs_for_common(self):
        """simple docstring"""
        config, inputs_dict = self.prepare_config_and_inputs()
        return config, inputs_dict
    def check_use_cache_forward(self, model_class_name, config, inputs_dict):
        """simple docstring"""
        max_decoder_length = 20
        model = model_class_name(config)
        encoder_outputs = model.encode(inputs_dict['input_ids'])
        decoder_input_ids, decoder_attention_mask = (
            inputs_dict['decoder_input_ids'],
            inputs_dict['decoder_attention_mask'],
        )
        past_key_values = model.init_cache(decoder_input_ids.shape[0], max_decoder_length, encoder_outputs)
        decoder_attention_mask = jnp.ones((decoder_input_ids.shape[0], max_decoder_length), dtype='i4')
        decoder_position_ids = jnp.broadcast_to(
            jnp.arange(decoder_input_ids.shape[-1] - 1)[None, :], (decoder_input_ids.shape[0], decoder_input_ids.shape[-1] - 1), )
        outputs_cache = model.decode(
            decoder_input_ids[:, :-1], encoder_outputs, decoder_attention_mask=decoder_attention_mask, past_key_values=past_key_values, decoder_position_ids=decoder_position_ids, )
        decoder_position_ids = jnp.array(decoder_input_ids.shape[0] * [[decoder_input_ids.shape[-1] - 1]], dtype='i4')
        outputs_cache_next = model.decode(
            decoder_input_ids[:, -1:], encoder_outputs, decoder_attention_mask=decoder_attention_mask, past_key_values=outputs_cache.past_key_values, decoder_position_ids=decoder_position_ids, )
        outputs = model.decode(decoder_input_ids, encoder_outputs)
        diff = np.max(np.abs((outputs_cache_next[0][:, -1, :5] - outputs[0][:, -1, :5])))
        self.parent.assertTrue(diff < 1E-3, msg=F"""Max diff is {diff}""")
    def check_use_cache_forward_with_attn_mask(self, model_class_name, config, inputs_dict):
        """simple docstring"""
        max_decoder_length = 20
        model = model_class_name(config)
        encoder_outputs = model.encode(inputs_dict['input_ids'])
        decoder_input_ids, decoder_attention_mask = (
            inputs_dict['decoder_input_ids'],
            inputs_dict['decoder_attention_mask'],
        )
        decoder_attention_mask_cache = jnp.concatenate(
            [
                decoder_attention_mask,
                jnp.zeros((decoder_attention_mask.shape[0], max_decoder_length - decoder_attention_mask.shape[1])),
            ], axis=-1, )
        past_key_values = model.init_cache(decoder_input_ids.shape[0], max_decoder_length, encoder_outputs)
        decoder_position_ids = jnp.broadcast_to(
            jnp.arange(decoder_input_ids.shape[-1] - 1)[None, :], (decoder_input_ids.shape[0], decoder_input_ids.shape[-1] - 1), )
        outputs_cache = model.decode(
            decoder_input_ids[:, :-1], encoder_outputs, decoder_attention_mask=decoder_attention_mask_cache, past_key_values=past_key_values, decoder_position_ids=decoder_position_ids, )
        decoder_position_ids = jnp.array(decoder_input_ids.shape[0] * [[decoder_input_ids.shape[-1] - 1]], dtype='i4')
        outputs_cache_next = model.decode(
            decoder_input_ids[:, -1:], encoder_outputs, past_key_values=outputs_cache.past_key_values, decoder_attention_mask=decoder_attention_mask_cache, decoder_position_ids=decoder_position_ids, )
        outputs = model.decode(decoder_input_ids, encoder_outputs, decoder_attention_mask=decoder_attention_mask)
        diff = np.max(np.abs((outputs_cache_next[0][:, -1, :5] - outputs[0][:, -1, :5])))
        self.parent.assertTrue(diff < 1E-3, msg=F"""Max diff is {diff}""")
@require_flax
class FlaxBlenderbotHeadTests(unittest.TestCase):
    vocab_size = 99
    def _get_config_and_data(self):
        """simple docstring"""
        input_ids = np.array(
[
[71, 82, 18, 33, 46, 91, 2],
[68, 34, 26, 58, 30, 82, 2],
[5, 97, 17, 39, 94, 40, 2],
[76, 83, 94, 25, 70, 78, 2],
[87, 59, 41, 35, 48, 66, 2],
[55, 13, 16, 58, 5, 2, 1], # note padding
[64, 27, 31, 51, 12, 75, 2],
[52, 64, 86, 17, 83, 39, 2],
[48, 61, 9, 24, 71, 82, 2],
[26, 1, 60, 48, 22, 13, 2],
[21, 5, 62, 28, 14, 76, 2],
[45, 98, 37, 86, 59, 48, 2],
[70, 70, 50, 9, 28, 0, 2],
            ], dtype=np.int64, )
        batch_size = input_ids.shape[0]
        config = BlenderbotConfig(
            vocab_size=self.vocab_size, d_model=24, encoder_layers=2, decoder_layers=2, encoder_attention_heads=2, decoder_attention_heads=2, encoder_ffn_dim=32, decoder_ffn_dim=32, max_position_embeddings=48, eos_token_id=2, pad_token_id=1, bos_token_id=0, )
        return config, input_ids, batch_size
    def test_lm_forward(self):
        """simple docstring"""
        config, input_ids, batch_size = self._get_config_and_data()
        lm_model = FlaxBlenderbotForConditionalGeneration(config)
        outputs = lm_model(input_ids=input_ids)
        expected_shape = (batch_size, input_ids.shape[1], config.vocab_size)
        self.assertEqual(outputs['logits'].shape, expected_shape)
    def test_lm_uneven_forward(self):
        """simple docstring"""
        config = BlenderbotConfig(
            vocab_size=self.vocab_size, d_model=14, encoder_layers=2, decoder_layers=2, encoder_attention_heads=2, decoder_attention_heads=2, encoder_ffn_dim=8, decoder_ffn_dim=8, max_position_embeddings=48, )
        lm_model = FlaxBlenderbotForConditionalGeneration(config)
        context = np.array([[71, 82, 18, 33, 46, 91, 2], [68, 34, 26, 58, 30, 2, 1]], dtype=np.int64)
        summary = np.array([[82, 71, 82, 18, 2], [58, 68, 2, 1, 1]], dtype=np.int64)
        outputs = lm_model(input_ids=context, decoder_input_ids=summary)
        expected_shape = (*summary.shape, config.vocab_size)
        self.assertEqual(outputs['logits'].shape, expected_shape)
    def test_shift_tokens_right(self):
        """simple docstring"""
        input_ids = np.array([[71, 82, 18, 33, 2, 1, 1], [68, 34, 26, 58, 30, 82, 2]], dtype=np.int64)
        shifted = shift_tokens_right(input_ids, 1, 2)
        n_pad_before = np.equal(input_ids, 1).astype(np.float32).sum()
        n_pad_after = np.equal(shifted, 1).astype(np.float32).sum()
        self.assertEqual(shifted.shape, input_ids.shape)
        self.assertEqual(n_pad_after, n_pad_before - 1)
        self.assertTrue(np.equal(shifted[:, 0], 2).all())
@require_flax
class FlaxBlenderbotModelTest(FlaxModelTesterMixin, unittest.TestCase, FlaxGenerationTesterMixin):
    is_encoder_decoder = True
    all_model_classes = (
        (
            FlaxBlenderbotModel,
            FlaxBlenderbotForConditionalGeneration,
        )
        if is_flax_available()
        else ()
    )
    all_generative_model_classes = (FlaxBlenderbotForConditionalGeneration,) if is_flax_available() else ()
    def setUp(self):
        """simple docstring"""
        self.model_tester = FlaxBlenderbotModelTester(self)
    def test_use_cache_forward(self):
        """simple docstring"""
        config, inputs_dict = self.model_tester.prepare_config_and_inputs()
        for model_class in self.all_model_classes:
            self.model_tester.check_use_cache_forward(model_class, config, inputs_dict)
    def test_use_cache_forward_with_attn_mask(self):
        """simple docstring"""
        config, inputs_dict = self.model_tester.prepare_config_and_inputs()
        for model_class in self.all_model_classes:
            self.model_tester.check_use_cache_forward_with_attn_mask(model_class, config, inputs_dict)
    def test_encode(self):
        """simple docstring"""
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            with self.subTest(model_class.__name__):
                prepared_inputs_dict = self._prepare_for_class(inputs_dict, model_class)
                model = model_class(config)

                @jax.jit
                def encode_jitted(input_ids, attention_mask=None, **kwargs):
                    return model.encode(input_ids=input_ids, attention_mask=attention_mask)

                with self.subTest('JIT Enabled'):
                    jitted_outputs = encode_jitted(**prepared_inputs_dict).to_tuple()
                with self.subTest('JIT Disabled'):
                    with jax.disable_jit():
                        outputs = encode_jitted(**prepared_inputs_dict).to_tuple()
                self.assertEqual(len(outputs), len(jitted_outputs))
                for jitted_output, output in zip(jitted_outputs, outputs):
                    self.assertEqual(jitted_output.shape, output.shape)
    def test_decode(self):
        """simple docstring"""
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            with self.subTest(model_class.__name__):
                model = model_class(config)
                encoder_outputs = model.encode(inputs_dict['input_ids'], inputs_dict['attention_mask'])
                prepared_inputs_dict = {
                    'decoder_input_ids': inputs_dict['decoder_input_ids'],
                    'decoder_attention_mask': inputs_dict['decoder_attention_mask'],
                    'encoder_outputs': encoder_outputs,
                }

                @jax.jit
                def decode_jitted(decoder_input_ids, decoder_attention_mask, encoder_outputs):
                    return model.decode(
                        decoder_input_ids=decoder_input_ids, decoder_attention_mask=decoder_attention_mask, encoder_outputs=encoder_outputs, )

                with self.subTest('JIT Enabled'):
                    jitted_outputs = decode_jitted(**prepared_inputs_dict).to_tuple()
                with self.subTest('JIT Disabled'):
                    with jax.disable_jit():
                        outputs = decode_jitted(**prepared_inputs_dict).to_tuple()
                self.assertEqual(len(outputs), len(jitted_outputs))
                for jitted_output, output in zip(jitted_outputs, outputs):
                    self.assertEqual(jitted_output.shape, output.shape)
    @slow
    def test_model_from_pretrained(self):
        """simple docstring"""
        for model_class_name in self.all_model_classes:
            model = model_class_name.from_pretrained('facebook/blenderbot-400M-distill')
            # FlaxBlenderbotForSequenceClassification expects eos token in input_ids
            input_ids = np.ones((1, 1)) * model.config.eos_token_id
            outputs = model(input_ids)
            self.assertIsNotNone(outputs)
    @unittest.skipUnless(jax_device != 'cpu', '3B test too slow on CPU.')
    @slow
    def test_generation_from_short_input_same_as_parlai_3B(self):
        """simple docstring"""
        generation_kwargs = {'num_beams': 1, 'early_stopping': True, 'min_length': 15, 'max_length': 25}
        tokenizer_kwargs = {'skip_special_tokens': True, 'clean_up_tokenization_spaces': True}
        model = FlaxBlenderbotForConditionalGeneration.from_pretrained('facebook/blenderbot-3B', from_pt=True)
        tokenizer = BlenderbotTokenizer.from_pretrained('facebook/blenderbot-3B')
        src_text = ['Sam']
        model_inputs = tokenizer(src_text, return_tensors='jax')
        generated_ids = model.generate(**model_inputs, **generation_kwargs)
        tgt_text = 'Sam is a great name. It means "sun" in Gaelic.'
        generated_txt = tokenizer.batch_decode(generated_ids, **tokenizer_kwargs)
        assert generated_txt[0].strip() == tgt_text
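# --- Editor's note (illustrative sketch of the decoding loop the cache tests
# above exercise; names follow the Flax seq2seq API used in this file):
#
#   encoder_outputs = model.encode(input_ids)
#   past = model.init_cache(batch_size, max_decoder_length, encoder_outputs)
#   step = model.decode(tokens_so_far, encoder_outputs, past_key_values=past, ...)
#
# The assertions check that cached, token-by-token decoding matches a full
# uncached forward pass to within 1e-3.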
| 88 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available
_import_structure = {
"""configuration_xlm""": ["""XLM_PRETRAINED_CONFIG_ARCHIVE_MAP""", """XLMConfig""", """XLMOnnxConfig"""],
"""tokenization_xlm""": ["""XLMTokenizer"""],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_xlm"] = [
"""XLM_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""XLMForMultipleChoice""",
"""XLMForQuestionAnswering""",
"""XLMForQuestionAnsweringSimple""",
"""XLMForSequenceClassification""",
"""XLMForTokenClassification""",
"""XLMModel""",
"""XLMPreTrainedModel""",
"""XLMWithLMHeadModel""",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_xlm"] = [
"""TF_XLM_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""TFXLMForMultipleChoice""",
"""TFXLMForQuestionAnsweringSimple""",
"""TFXLMForSequenceClassification""",
"""TFXLMForTokenClassification""",
"""TFXLMMainLayer""",
"""TFXLMModel""",
"""TFXLMPreTrainedModel""",
"""TFXLMWithLMHeadModel""",
]
if TYPE_CHECKING:
from .configuration_xlm import XLM_PRETRAINED_CONFIG_ARCHIVE_MAP, XLMConfig, XLMOnnxConfig
from .tokenization_xlm import XLMTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_xlm import (
XLM_PRETRAINED_MODEL_ARCHIVE_LIST,
XLMForMultipleChoice,
XLMForQuestionAnswering,
XLMForQuestionAnsweringSimple,
XLMForSequenceClassification,
XLMForTokenClassification,
XLMModel,
XLMPreTrainedModel,
XLMWithLMHeadModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_xlm import (
TF_XLM_PRETRAINED_MODEL_ARCHIVE_LIST,
TFXLMForMultipleChoice,
TFXLMForQuestionAnsweringSimple,
TFXLMForSequenceClassification,
TFXLMForTokenClassification,
TFXLMMainLayer,
TFXLMModel,
TFXLMPreTrainedModel,
TFXLMWithLMHeadModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
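# --- Minimal sketch of the lazy-import mechanism relied on above (editor's
# addition; simplified and hypothetical, not transformers' actual implementation).
# Attribute access triggers the real submodule import, so importing the package
# stays cheap until a symbol such as `XLMModel` is actually touched:
#
#   import importlib, types
#
#   class LazyModule(types.ModuleType):
#       def __init__(self, name, import_structure):
#           super().__init__(name)
#           self._import_structure = import_structure
#
#       def __getattr__(self, item):
#           for submodule, names in self._import_structure.items():
#               if item in names:
#                   mod = importlib.import_module("." + submodule, self.__name__)
#                   return getattr(mod, item)
#           raise AttributeError(item)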
| 88 | 1 |
from __future__ import annotations
import copy
import tempfile
import unittest
from transformers import CONFIG_MAPPING, AutoConfig, BertConfig, GPTaConfig, TaConfig, TapasConfig, is_tf_available
from transformers.testing_utils import (
DUMMY_UNKNOWN_IDENTIFIER,
SMALL_MODEL_IDENTIFIER,
RequestCounter,
require_tensorflow_probability,
require_tf,
slow,
)
from ..bert.test_modeling_bert import BertModelTester
if is_tf_available():
from transformers import (
TFAutoModel,
TFAutoModelForCausalLM,
TFAutoModelForMaskedLM,
TFAutoModelForPreTraining,
TFAutoModelForQuestionAnswering,
TFAutoModelForSeqaSeqLM,
TFAutoModelForSequenceClassification,
TFAutoModelForTableQuestionAnswering,
TFAutoModelForTokenClassification,
TFAutoModelWithLMHead,
TFBertForMaskedLM,
TFBertForPreTraining,
TFBertForQuestionAnswering,
TFBertForSequenceClassification,
TFBertModel,
TFFunnelBaseModel,
TFFunnelModel,
TFGPTaLMHeadModel,
TFRobertaForMaskedLM,
TFTaForConditionalGeneration,
TFTapasForQuestionAnswering,
)
from transformers.models.auto.modeling_tf_auto import (
TF_MODEL_FOR_CAUSAL_LM_MAPPING,
TF_MODEL_FOR_MASKED_LM_MAPPING,
TF_MODEL_FOR_PRETRAINING_MAPPING,
TF_MODEL_FOR_QUESTION_ANSWERING_MAPPING,
TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING,
TF_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING,
TF_MODEL_MAPPING,
)
from transformers.models.bert.modeling_tf_bert import TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST
from transformers.models.gpta.modeling_tf_gpta import TF_GPT2_PRETRAINED_MODEL_ARCHIVE_LIST
from transformers.models.ta.modeling_tf_ta import TF_T5_PRETRAINED_MODEL_ARCHIVE_LIST
from transformers.models.tapas.modeling_tf_tapas import TF_TAPAS_PRETRAINED_MODEL_ARCHIVE_LIST
class NewModelConfig(BertConfig):
    """simple docstring"""

    model_type = "new-model"


if is_tf_available():

    class TFNewModel(TFBertModel):
        """simple docstring"""

        config_class = NewModelConfig
@require_tf
class TFAutoModelTest(unittest.TestCase):
    """simple docstring"""
    @slow
    def test_model_from_pretrained(self):
        '''simple docstring'''
        model_name = 'bert-base-cased'
        config = AutoConfig.from_pretrained(model_name)
        self.assertIsNotNone(config)
        self.assertIsInstance(config, BertConfig)
        model = TFAutoModel.from_pretrained(model_name)
        self.assertIsNotNone(model)
        self.assertIsInstance(model, TFBertModel)
    @slow
    def test_model_for_pretraining_from_pretrained(self):
        '''simple docstring'''
        model_name = 'bert-base-cased'
        config = AutoConfig.from_pretrained(model_name)
        self.assertIsNotNone(config)
        self.assertIsInstance(config, BertConfig)
        model = TFAutoModelForPreTraining.from_pretrained(model_name)
        self.assertIsNotNone(model)
        self.assertIsInstance(model, TFBertForPreTraining)
    @slow
    def test_model_for_causal_lm(self):
        '''simple docstring'''
        for model_name in TF_GPT2_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            config = AutoConfig.from_pretrained(model_name)
            self.assertIsNotNone(config)
            self.assertIsInstance(config, GPTaConfig)
            model = TFAutoModelForCausalLM.from_pretrained(model_name)
            model, loading_info = TFAutoModelForCausalLM.from_pretrained(model_name, output_loading_info=True)
            self.assertIsNotNone(model)
            self.assertIsInstance(model, TFGPTaLMHeadModel)
    @slow
    def test_lmhead_model_from_pretrained(self):
        '''simple docstring'''
        for model_name in TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            config = AutoConfig.from_pretrained(model_name)
            self.assertIsNotNone(config)
            self.assertIsInstance(config, BertConfig)
            model = TFAutoModelWithLMHead.from_pretrained(model_name)
            self.assertIsNotNone(model)
            self.assertIsInstance(model, TFBertForMaskedLM)
    @slow
    def test_model_for_masked_lm(self):
        '''simple docstring'''
        for model_name in TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            config = AutoConfig.from_pretrained(model_name)
            self.assertIsNotNone(config)
            self.assertIsInstance(config, BertConfig)
            model = TFAutoModelForMaskedLM.from_pretrained(model_name)
            model, loading_info = TFAutoModelForMaskedLM.from_pretrained(model_name, output_loading_info=True)
            self.assertIsNotNone(model)
            self.assertIsInstance(model, TFBertForMaskedLM)
    @slow
    def test_model_for_encoder_decoder_lm(self):
        '''simple docstring'''
        for model_name in TF_T5_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            config = AutoConfig.from_pretrained(model_name)
            self.assertIsNotNone(config)
            self.assertIsInstance(config, TaConfig)
            model = TFAutoModelForSeqaSeqLM.from_pretrained(model_name)
            model, loading_info = TFAutoModelForSeqaSeqLM.from_pretrained(model_name, output_loading_info=True)
            self.assertIsNotNone(model)
            self.assertIsInstance(model, TFTaForConditionalGeneration)
    @slow
    def test_sequence_classification_model_from_pretrained(self):
        '''simple docstring'''
        for model_name in ["bert-base-uncased"]:
            config = AutoConfig.from_pretrained(model_name)
            self.assertIsNotNone(config)
            self.assertIsInstance(config, BertConfig)
            model = TFAutoModelForSequenceClassification.from_pretrained(model_name)
            self.assertIsNotNone(model)
            self.assertIsInstance(model, TFBertForSequenceClassification)
    @slow
    def test_question_answering_model_from_pretrained(self):
        '''simple docstring'''
        for model_name in ["bert-base-uncased"]:
            config = AutoConfig.from_pretrained(model_name)
            self.assertIsNotNone(config)
            self.assertIsInstance(config, BertConfig)
            model = TFAutoModelForQuestionAnswering.from_pretrained(model_name)
            self.assertIsNotNone(model)
            self.assertIsInstance(model, TFBertForQuestionAnswering)
    @slow
    @require_tensorflow_probability
    def test_table_question_answering_model_from_pretrained(self):
        '''simple docstring'''
        for model_name in TF_TAPAS_PRETRAINED_MODEL_ARCHIVE_LIST[5:6]:
            config = AutoConfig.from_pretrained(model_name)
            self.assertIsNotNone(config)
            self.assertIsInstance(config, TapasConfig)
            model = TFAutoModelForTableQuestionAnswering.from_pretrained(model_name)
            model, loading_info = TFAutoModelForTableQuestionAnswering.from_pretrained(
                model_name, output_loading_info=True )
            self.assertIsNotNone(model)
            self.assertIsInstance(model, TFTapasForQuestionAnswering)
    def test_from_pretrained_identifier(self):
        '''simple docstring'''
        model = TFAutoModelWithLMHead.from_pretrained(SMALL_MODEL_IDENTIFIER)
        self.assertIsInstance(model, TFBertForMaskedLM)
        self.assertEqual(model.num_parameters(), 14_410)
        self.assertEqual(model.num_parameters(only_trainable=True), 14_410)

    def test_from_identifier_from_model_type(self):
        '''simple docstring'''
        model = TFAutoModelWithLMHead.from_pretrained(DUMMY_UNKNOWN_IDENTIFIER)
        self.assertIsInstance(model, TFRobertaForMaskedLM)
        self.assertEqual(model.num_parameters(), 14_410)
        self.assertEqual(model.num_parameters(only_trainable=True), 14_410)
    def test_from_pretrained_with_tuple_values(self):
        '''simple docstring'''
        # For the auto model mapping, FunnelConfig has two models: FunnelModel and FunnelBaseModel
        model = TFAutoModel.from_pretrained("sgugger/funnel-random-tiny")
        self.assertIsInstance(model, TFFunnelModel)

        config = copy.deepcopy(model.config)
        config.architectures = ['FunnelBaseModel']
        model = TFAutoModel.from_config(config)
        self.assertIsInstance(model, TFFunnelBaseModel)

        with tempfile.TemporaryDirectory() as tmp_dir:
            model.save_pretrained(tmp_dir)
            model = TFAutoModel.from_pretrained(tmp_dir)
            self.assertIsInstance(model, TFFunnelBaseModel)
    def test_new_model_registration(self):
        '''simple docstring'''
        try:
            AutoConfig.register("new-model", NewModelConfig)

            auto_classes = [
                TFAutoModel,
                TFAutoModelForCausalLM,
                TFAutoModelForMaskedLM,
                TFAutoModelForPreTraining,
                TFAutoModelForQuestionAnswering,
                TFAutoModelForSequenceClassification,
                TFAutoModelForTokenClassification,
            ]

            for auto_class in auto_classes:
                with self.subTest(auto_class.__name__):
                    # Wrong config class will raise an error
                    with self.assertRaises(ValueError):
                        auto_class.register(BertConfig, TFNewModel)
                    auto_class.register(NewModelConfig, TFNewModel)
                    # Trying to register something existing in the Transformers library will raise an error
                    with self.assertRaises(ValueError):
                        auto_class.register(BertConfig, TFBertModel)

                    # Now that the config is registered, it can be used as any other config with the auto-API
                    tiny_config = BertModelTester(self).get_config()
                    config = NewModelConfig(**tiny_config.to_dict())
                    model = auto_class.from_config(config)
                    self.assertIsInstance(model, TFNewModel)

                    with tempfile.TemporaryDirectory() as tmp_dir:
                        model.save_pretrained(tmp_dir)
                        new_model = auto_class.from_pretrained(tmp_dir)
                        self.assertIsInstance(new_model, TFNewModel)
finally:
if "new-model" in CONFIG_MAPPING._extra_content:
del CONFIG_MAPPING._extra_content["new-model"]
for mapping in (
TF_MODEL_MAPPING,
TF_MODEL_FOR_PRETRAINING_MAPPING,
TF_MODEL_FOR_QUESTION_ANSWERING_MAPPING,
TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING,
TF_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING,
TF_MODEL_FOR_CAUSAL_LM_MAPPING,
TF_MODEL_FOR_MASKED_LM_MAPPING,
):
if NewModelConfig in mapping._extra_content:
del mapping._extra_content[NewModelConfig]
    def test_repo_not_found(self):
        '''simple docstring'''
        with self.assertRaisesRegex(
            EnvironmentError, "bert-base is not a local folder and is not a valid model identifier" ):
            model = TFAutoModel.from_pretrained("bert-base")

    def test_revision_not_found(self):
        '''simple docstring'''
        with self.assertRaisesRegex(
            EnvironmentError, R"aaaaaa is not a valid git identifier \(branch name, tag name or commit id\)" ):
            model = TFAutoModel.from_pretrained(DUMMY_UNKNOWN_IDENTIFIER, revision="aaaaaa")

    def test_model_file_not_found(self):
        '''simple docstring'''
        with self.assertRaisesRegex(
            EnvironmentError, "hf-internal-testing/config-no-model does not appear to have a file named pytorch_model.bin", ):
            model = TFAutoModel.from_pretrained("hf-internal-testing/config-no-model")

    def test_model_from_pt_suggestion(self):
        '''simple docstring'''
        with self.assertRaisesRegex(EnvironmentError, "Use `from_pt=True` to load this model" ):
            model = TFAutoModel.from_pretrained("hf-internal-testing/tiny-bert-pt-only")
    def test_cached_model_has_minimum_calls_to_head(self):
        '''simple docstring'''
        # Make sure we have cached the model.
        _ = TFAutoModel.from_pretrained("hf-internal-testing/tiny-random-bert")
        with RequestCounter() as counter:
            _ = TFAutoModel.from_pretrained("hf-internal-testing/tiny-random-bert")
        self.assertEqual(counter.get_request_count, 0)
        self.assertEqual(counter.head_request_count, 1)
        self.assertEqual(counter.other_request_count, 0)

        # With a sharded checkpoint
        _ = TFAutoModel.from_pretrained("ArthurZ/tiny-random-bert-sharded")
        with RequestCounter() as counter:
            _ = TFAutoModel.from_pretrained("ArthurZ/tiny-random-bert-sharded")
self.assertEqual(counter.get_request_count , 0 )
self.assertEqual(counter.head_request_count , 1 )
self.assertEqual(counter.other_request_count , 0 )
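# --- Registration sketch (editor's addition; mirrors test_new_model_registration
# above, using the classes defined in this file):
#
#   AutoConfig.register("new-model", NewModelConfig)
#   TFAutoModel.register(NewModelConfig, TFNewModel)
#   model = TFAutoModel.from_config(NewModelConfig())   # -> a TFNewModel instance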
| 300 |
import os
import sys
import warnings
from dataclasses import dataclass, field
from io import BytesIO
from typing import TYPE_CHECKING, Any, ClassVar, Dict, List, Optional, Union
import numpy as np
import pyarrow as pa
from .. import config
from ..download.streaming_download_manager import xopen
from ..table import array_cast
from ..utils.file_utils import is_local_path
from ..utils.py_utils import first_non_null_value, no_op_if_value_is_null, string_to_dict
if TYPE_CHECKING:
import PIL.Image
from .features import FeatureType
_IMAGE_COMPRESSION_FORMATS = None
_NATIVE_BYTEORDER = '''<''' if sys.byteorder == '''little''' else '''>'''
# Origin: https://github.com/python-pillow/Pillow/blob/698951e19e19972aeed56df686868f1329981c12/src/PIL/Image.py#L3126 minus "|i1", whose values are not preserved correctly when saving and loading an image
_VALID_IMAGE_ARRAY_DTYPES = [
np.dtype('''|b1'''),
np.dtype('''|u1'''),
np.dtype('''<u2'''),
np.dtype('''>u2'''),
np.dtype('''<i2'''),
np.dtype('''>i2'''),
np.dtype('''<u4'''),
np.dtype('''>u4'''),
np.dtype('''<i4'''),
np.dtype('''>i4'''),
np.dtype('''<f4'''),
np.dtype('''>f4'''),
np.dtype('''<f8'''),
np.dtype('''>f8'''),
]
@dataclass
class Image:
    decode: bool = True
    id: Optional[str] = None
    # Automatically constructed
    dtype: ClassVar[str] = "PIL.Image.Image"
    pa_type: ClassVar[Any] = pa.struct({"bytes": pa.binary(), "path": pa.string()})
    _type: str = field(default="Image", init=False, repr=False)
def __call__( self ) -> Any:
return self.pa_type
    def encode_example(self, value) -> dict:
        if config.PIL_AVAILABLE:
            import PIL.Image
        else:
            raise ImportError('To support encoding images, please install \'Pillow\'.' )
        if isinstance(value, list):
            value = np.array(value)

        if isinstance(value, str):
            return {"path": value, "bytes": None}
        elif isinstance(value, bytes):
            return {"path": None, "bytes": value}
        elif isinstance(value, np.ndarray):
            # convert the image array to PNG/TIFF bytes
            return encode_np_array(value)
        elif isinstance(value, PIL.Image.Image):
            # convert the PIL image to bytes (default format is PNG/TIFF)
            return encode_pil_image(value)
elif value.get('path' ) is not None and os.path.isfile(value['path'] ):
# we set "bytes": None to not duplicate the data if they're already available locally
return {"bytes": None, "path": value.get('path' )}
elif value.get('bytes' ) is not None or value.get('path' ) is not None:
# store the image bytes, and path is used to infer the image format using the file extension
return {"bytes": value.get('bytes' ), "path": value.get('path' )}
else:
raise ValueError(
F"""An image sample should have one of 'path' or 'bytes' but they are missing or None in {value}.""" )
    def decode_example(self, value, token_per_repo_id=None) -> "PIL.Image.Image":
        if not self.decode:
            raise RuntimeError('Decoding is disabled for this feature. Please use Image(decode=True) instead.' )
        if config.PIL_AVAILABLE:
            import PIL.Image
        else:
            raise ImportError('To support decoding images, please install \'Pillow\'.' )
        if token_per_repo_id is None:
            token_per_repo_id = {}
        path, bytes_ = value['path'], value['bytes']
        if bytes_ is None:
            if path is None:
                raise ValueError(F"""An image should have one of 'path' or 'bytes' but both are None in {value}.""" )
            else:
                if is_local_path(path):
                    image = PIL.Image.open(path)
                else:
                    source_url = path.split('::' )[-1]
                    try:
                        repo_id = string_to_dict(source_url, config.HUB_DATASETS_URL)['repo_id']
                        use_auth_token = token_per_repo_id.get(repo_id)
                    except ValueError:
                        use_auth_token = None
                    with xopen(path, 'rb', use_auth_token=use_auth_token) as f:
                        bytes_ = BytesIO(f.read() )
                    image = PIL.Image.open(bytes_)
        else:
            image = PIL.Image.open(BytesIO(bytes_) )
        image.load()  # to avoid "Too many open files" errors
        return image
    def flatten(self) -> Union["FeatureType", Dict[str, "FeatureType"]]:
from .features import Value
return (
self
if self.decode
else {
"bytes": Value('binary' ),
"path": Value('string' ),
}
)
    def cast_storage(self, storage) -> pa.StructArray:
        if pa.types.is_string(storage.type):
            bytes_array = pa.array([None] * len(storage), type=pa.binary() )
            storage = pa.StructArray.from_arrays([bytes_array, storage], ['bytes', 'path'], mask=storage.is_null() )
        elif pa.types.is_binary(storage.type):
            path_array = pa.array([None] * len(storage), type=pa.string() )
            storage = pa.StructArray.from_arrays([storage, path_array], ['bytes', 'path'], mask=storage.is_null() )
        elif pa.types.is_struct(storage.type):
            if storage.type.get_field_index('bytes' ) >= 0:
                bytes_array = storage.field('bytes' )
            else:
                bytes_array = pa.array([None] * len(storage), type=pa.binary() )
            if storage.type.get_field_index('path' ) >= 0:
                path_array = storage.field('path' )
            else:
                path_array = pa.array([None] * len(storage), type=pa.string() )
            storage = pa.StructArray.from_arrays([bytes_array, path_array], ['bytes', 'path'], mask=storage.is_null() )
        elif pa.types.is_list(storage.type):
            bytes_array = pa.array(
                [encode_np_array(np.array(arr) )['bytes'] if arr is not None else None for arr in storage.to_pylist()], type=pa.binary(), )
            path_array = pa.array([None] * len(storage), type=pa.string() )
            storage = pa.StructArray.from_arrays(
                [bytes_array, path_array], ['bytes', 'path'], mask=bytes_array.is_null() )
        return array_cast(storage, self.pa_type)
    def embed_storage(self, storage) -> pa.StructArray:
        @no_op_if_value_is_null
        def path_to_bytes(path):
            with xopen(path, 'rb' ) as f:
                bytes_ = f.read()
            return bytes_

        bytes_array = pa.array(
            [
                (path_to_bytes(x['path'] ) if x['bytes'] is None else x['bytes']) if x is not None else None
                for x in storage.to_pylist()
            ], type=pa.binary(), )
        path_array = pa.array(
            [os.path.basename(path) if path is not None else None for path in storage.field('path' ).to_pylist()], type=pa.string(), )
        storage = pa.StructArray.from_arrays([bytes_array, path_array], ['bytes', 'path'], mask=bytes_array.is_null() )
        return array_cast(storage, self.pa_type)
def list_image_compression_formats() -> List[str]:
    if config.PIL_AVAILABLE:
        import PIL.Image
    else:
        raise ImportError('To support encoding images, please install \'Pillow\'.' )
    global _IMAGE_COMPRESSION_FORMATS
    if _IMAGE_COMPRESSION_FORMATS is None:
        PIL.Image.init()
        _IMAGE_COMPRESSION_FORMATS = list(set(PIL.Image.OPEN.keys() ) & set(PIL.Image.SAVE.keys() ) )
    return _IMAGE_COMPRESSION_FORMATS
def UpperCamelCase ( snake_case__ : "PIL.Image.Image" ) -> bytes:
UpperCamelCase : Any = BytesIO()
if image.format in list_image_compression_formats():
UpperCamelCase : Tuple = image.format
else:
UpperCamelCase : List[str] = 'PNG' if image.mode in ['1', 'L', 'LA', 'RGB', 'RGBA'] else 'TIFF'
image.save(snake_case__ , format=snake_case__ )
return buffer.getvalue()
def UpperCamelCase ( snake_case__ : "PIL.Image.Image" ) -> dict:
if hasattr(snake_case__ , 'filename' ) and image.filename != "":
return {"path": image.filename, "bytes": None}
else:
return {"path": None, "bytes": image_to_bytes(snake_case__ )}
def encode_np_array(array: np.ndarray) -> dict:
    if config.PIL_AVAILABLE:
        import PIL.Image
    else:
        raise ImportError('To support encoding images, please install \'Pillow\'.' )
    dtype = array.dtype
    dtype_byteorder = dtype.byteorder if dtype.byteorder != '=' else _NATIVE_BYTEORDER
    dtype_kind = dtype.kind
    dtype_itemsize = dtype.itemsize
    dest_dtype = None

    # Multi-channel array case (only np.dtype("|u1") is allowed)
    if array.shape[2:]:
        dest_dtype = np.dtype('|u1' )
        if dtype_kind not in ["u", "i"]:
            raise TypeError(
                F"""Unsupported array dtype {dtype} for image encoding. Only {dest_dtype} is supported for multi-channel arrays.""" )
        if dtype is not dest_dtype:
            warnings.warn(F"""Downcasting array dtype {dtype} to {dest_dtype} to be compatible with 'Pillow'""" )
    # Exact match
    elif dtype in _VALID_IMAGE_ARRAY_DTYPES:
        dest_dtype = dtype
    else:  # Downcast the type within the kind (np.can_cast(from_type, to_type, casting="same_kind") doesn't behave as expected, so do it manually)
        while dtype_itemsize >= 1:
            dest_dtype_str = dtype_byteorder + dtype_kind + str(dtype_itemsize)
            if np.dtype(dest_dtype_str) in _VALID_IMAGE_ARRAY_DTYPES:
                dest_dtype = np.dtype(dest_dtype_str)
                warnings.warn(F"""Downcasting array dtype {dtype} to {dest_dtype} to be compatible with 'Pillow'""" )
                break
            else:
                dtype_itemsize //= 2
        if dest_dtype is None:
            raise TypeError(
                F"""Cannot convert dtype {dtype} to a valid image dtype. Valid image dtypes: {_VALID_IMAGE_ARRAY_DTYPES}""" )

    image = PIL.Image.fromarray(array.astype(dest_dtype) )
    return {"path": None, "bytes": image_to_bytes(image)}
def objects_to_list_of_image_dicts(
    objs: Union[List[str], List[dict], List[np.ndarray], List["PIL.Image.Image"]]
) -> List[dict]:
    if config.PIL_AVAILABLE:
        import PIL.Image
    else:
        raise ImportError('To support encoding images, please install \'Pillow\'.' )
    if objs:
        _, obj = first_non_null_value(objs)
        if isinstance(obj, str):
            return [{"path": obj, "bytes": None} if obj is not None else None for obj in objs]
        if isinstance(obj, np.ndarray):
            obj_to_image_dict_func = no_op_if_value_is_null(encode_np_array)
            return [obj_to_image_dict_func(obj) for obj in objs]
        elif isinstance(obj, PIL.Image.Image):
            obj_to_image_dict_func = no_op_if_value_is_null(encode_pil_image)
            return [obj_to_image_dict_func(obj) for obj in objs]
        else:
            return objs
    else:
        return objs
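# --- Usage sketch (editor's addition; illustrative, assumes Pillow is installed
# and uses the names defined above):
#
#   feat = Image()
#   enc = feat.encode_example("path/to/img.png")   # -> {"path": "...", "bytes": None}
#   pil_img = feat.decode_example(enc)             # -> a PIL.Image.Image instance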
| 119 | 0 |
'''simple docstring'''
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
_lowerCamelCase : Dict = logging.get_logger(__name__)
_lowerCamelCase : Dict = {
'google/mobilenet_v1_1.0_224': 'https://huggingface.co/google/mobilenet_v1_1.0_224/resolve/main/config.json',
'google/mobilenet_v1_0.75_192': 'https://huggingface.co/google/mobilenet_v1_0.75_192/resolve/main/config.json',
# See all MobileNetV1 models at https://huggingface.co/models?filter=mobilenet_v1
}
class MobileNetV1Config(PretrainedConfig):
    '''simple docstring'''

    model_type = """mobilenet_v1"""

    def __init__(self, num_channels=3, image_size=224, depth_multiplier=1.0, min_depth=8, hidden_act="relu6", tf_padding=True, classifier_dropout_prob=0.999, initializer_range=0.02, layer_norm_eps=0.001, **kwargs, ):
        super().__init__(**kwargs)

        if depth_multiplier <= 0:
            raise ValueError("""depth_multiplier must be greater than zero.""" )

        self.num_channels = num_channels
        self.image_size = image_size
        self.depth_multiplier = depth_multiplier
        self.min_depth = min_depth
        self.hidden_act = hidden_act
        self.tf_padding = tf_padding
        self.classifier_dropout_prob = classifier_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps


class MobileNetV1OnnxConfig(OnnxConfig):
    '''simple docstring'''

    torch_onnx_minimum_version = version.parse('''1.11''')

    @property
    def inputs(self):
        return OrderedDict([("""pixel_values""", {0: """batch"""})] )

    @property
    def outputs(self):
        if self.task == "image-classification":
            return OrderedDict([("""logits""", {0: """batch"""})] )
        else:
            return OrderedDict([("""last_hidden_state""", {0: """batch"""}), ("""pooler_output""", {0: """batch"""})] )

    @property
    def atol_for_validation(self):
        return 1e-4
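# --- Worked example of the depth_multiplier idea (editor's addition; a simplified
# sketch, not the model's exact rounding rule — MobileNet implementations usually
# also round channel counts to a hardware-friendly multiple):
#
#   def scaled_channels(channels, depth_multiplier, min_depth=8):
#       return max(int(channels * depth_multiplier), min_depth)
#
#   scaled_channels(64, 0.75)  # -> 48, matching the 0.75-width checkpoints above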
| 364 |
'''simple docstring'''
def validate_initial_digits(credit_card_number: str) -> bool:
    """simple docstring"""
    return credit_card_number.startswith(("""34""", """35""", """37""", """4""", """5""", """6""") )
def luhn_validation(credit_card_number: str) -> bool:
    """simple docstring"""
    cc_number = credit_card_number
    total = 0
    half_len = len(cc_number) - 2
    for i in range(half_len, -1, -2):
        # double the value of every second digit
        digit = int(cc_number[i])
        digit *= 2
        # If doubling of a number results in a two digit number
        # i.e. greater than 9 (e.g., 6 × 2 = 12),
        # then add the digits of the product (e.g., 12: 1 + 2 = 3, 16: 1 + 6 = 7),
        # to get a single digit number.
        if digit > 9:
            digit %= 10
            digit += 1
        cc_number = cc_number[:i] + str(digit) + cc_number[i + 1 :]
        total += digit
    # Sum up the remaining digits
    for i in range(len(cc_number) - 1, -1, -2):
        total += int(cc_number[i])
    return total % 10 == 0
def validate_credit_card_number(credit_card_number: str) -> bool:
    """simple docstring"""
    error_message = f"""{credit_card_number} is an invalid credit card number because"""
    if not credit_card_number.isdigit():
        print(f"""{error_message} it has nonnumerical characters.""" )
        return False
    if not 13 <= len(credit_card_number) <= 16:
        print(f"""{error_message} of its length.""" )
        return False
    if not validate_initial_digits(credit_card_number):
        print(f"""{error_message} of its first two digits.""" )
        return False
    if not luhn_validation(credit_card_number):
        print(f"""{error_message} it fails the Luhn check.""" )
        return False
    print(f"""{credit_card_number} is a valid credit card number.""" )
    return True
if __name__ == "__main__":
import doctest
doctest.testmod()
validate_credit_card_number('4111111111111111')
validate_credit_card_number('32323')
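    # Worked Luhn example (editor's addition, illustrative): in luhn_validation("59"),
    # the 5 doubles to 10 -> 1 + 0 = 1; adding the untouched trailing 9 gives 10,
    # and 10 % 10 == 0, so "59" passes the Luhn step in isolation (it is still
    # rejected overall for its length and leading digits).
    validate_credit_card_number('59')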
| 337 | 0 |
# Copyright 2023 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
"""configuration_xmod""": [
"""XMOD_PRETRAINED_CONFIG_ARCHIVE_MAP""",
"""XmodConfig""",
"""XmodOnnxConfig""",
],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_xmod"] = [
"""XMOD_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""XmodForCausalLM""",
"""XmodForMaskedLM""",
"""XmodForMultipleChoice""",
"""XmodForQuestionAnswering""",
"""XmodForSequenceClassification""",
"""XmodForTokenClassification""",
"""XmodModel""",
"""XmodPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_xmod import XMOD_PRETRAINED_CONFIG_ARCHIVE_MAP, XmodConfig, XmodOnnxConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_xmod import (
XMOD_PRETRAINED_MODEL_ARCHIVE_LIST,
XmodForCausalLM,
XmodForMaskedLM,
XmodForMultipleChoice,
XmodForQuestionAnswering,
XmodForSequenceClassification,
XmodForTokenClassification,
XmodModel,
XmodPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 92 |
import unittest
from transformers import CamembertTokenizer, CamembertTokenizerFast
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow
from transformers.utils import is_torch_available
from ...test_tokenization_common import TokenizerTesterMixin
SAMPLE_VOCAB = get_tests_dir("""fixtures/test_sentencepiece.model""")
SAMPLE_BPE_VOCAB = get_tests_dir("""fixtures/test_sentencepiece_bpe.model""")
FRAMEWORK = """pt""" if is_torch_available() else """tf"""
@require_sentencepiece
@require_tokenizers
class CamembertTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = CamembertTokenizer
    rust_tokenizer_class = CamembertTokenizerFast
    test_rust_tokenizer = True
    test_sentencepiece = True

    def setUp(self):
        """simple docstring"""
        super().setUp()

        # We have a SentencePiece fixture for testing
        tokenizer = CamembertTokenizer(SAMPLE_VOCAB)
        tokenizer.save_pretrained(self.tmpdirname)
    def test_convert_token_and_id(self):
        """simple docstring"""
        token = "<pad>"
        token_id = 1

        self.assertEqual(self.get_tokenizer()._convert_token_to_id(token), token_id)
        self.assertEqual(self.get_tokenizer()._convert_id_to_token(token_id), token)

    def test_get_vocab(self):
        """simple docstring"""
        vocab_keys = list(self.get_tokenizer().get_vocab().keys() )

        self.assertEqual(vocab_keys[0], "<s>NOTUSED")
        self.assertEqual(vocab_keys[1], "<pad>")
        self.assertEqual(vocab_keys[-1], "<mask>")
        self.assertEqual(len(vocab_keys), 1_0_0_4)

    def test_vocab_size(self):
        """simple docstring"""
        self.assertEqual(self.get_tokenizer().vocab_size, 1_0_0_5)
    def test_rust_and_python_bpe_tokenizers(self):
        """simple docstring"""
        tokenizer = CamembertTokenizer(SAMPLE_BPE_VOCAB)
        tokenizer.save_pretrained(self.tmpdirname)
        rust_tokenizer = CamembertTokenizerFast.from_pretrained(self.tmpdirname)

        sequence = "I was born in 92000, and this is falsé."

        ids = tokenizer.encode(sequence)
        rust_ids = rust_tokenizer.encode(sequence)
        self.assertListEqual(ids, rust_ids)

        ids = tokenizer.encode(sequence, add_special_tokens=False)
        rust_ids = rust_tokenizer.encode(sequence, add_special_tokens=False)
        self.assertListEqual(ids, rust_ids)

        # <unk> tokens are not the same for `rust` as for `slow`.
        # Because spm gives back raw token instead of `unk` in EncodeAsPieces
        # tokens = tokenizer.tokenize(sequence)
        tokens = tokenizer.convert_ids_to_tokens(ids)
        rust_tokens = rust_tokenizer.tokenize(sequence)
        self.assertListEqual(tokens, rust_tokens)
    def test_rust_and_python_full_tokenizers(self):
        """simple docstring"""
        if not self.test_rust_tokenizer:
            return

        tokenizer = self.get_tokenizer()
        rust_tokenizer = self.get_rust_tokenizer()

        sequence = "I was born in 92000, and this is falsé."

        tokens = tokenizer.tokenize(sequence)
        rust_tokens = rust_tokenizer.tokenize(sequence)
        self.assertListEqual(tokens, rust_tokens)

        ids = tokenizer.encode(sequence, add_special_tokens=False)
        rust_ids = rust_tokenizer.encode(sequence, add_special_tokens=False)
        self.assertListEqual(ids, rust_ids)

        rust_tokenizer = self.get_rust_tokenizer()
        ids = tokenizer.encode(sequence)
        rust_ids = rust_tokenizer.encode(sequence)
        self.assertListEqual(ids, rust_ids)
    @slow
    def test_tokenizer_integration(self):
        """simple docstring"""
        # fmt: off
__lowerCAmelCase = {"input_ids": [[5, 5_4, 7_1_9_6, 2_9_7, 3_0, 2_3, 7_7_6, 1_8, 1_1, 3_2_1_5, 3_7_0_5, 8_2_5_2, 2_2, 3_1_6_4, 1_1_8_1, 2_1_1_6, 2_9, 1_6, 8_1_3, 2_5, 7_9_1, 3_3_1_4, 2_0, 3_4_4_6, 3_8, 2_7_5_7_5, 1_2_0, 6, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [5, 4_6_8, 1_7, 1_1, 9_0_8_8, 2_0, 1_5_1_7, 8, 2_2_8_0_4, 1_8_8_1_8, 1_0, 3_8, 6_2_9, 6_0_7, 6_0_7, 1_4_2, 1_9, 7_1_9_6, 8_6_7, 5_6, 1_0_3_2_6, 2_4, 2_2_6_7, 2_0, 4_1_6, 5_0_7_2, 1_5_6_1_2, 2_3_3, 7_3_4, 7, 2_3_9_9, 2_7, 1_6, 3_0_1_5, 1_6_4_9, 7, 2_4, 2_0, 4_3_3_8, 2_3_9_9, 2_7, 1_3, 3_4_0_0, 1_4, 1_3, 6_1_8_9, 8, 9_3_0, 9, 6]], "attention_mask": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]]} # noqa: E501
# fmt: on
        # camembert is a french model. So we also use french texts.
        sequences = [
            "Le transformeur est un modèle d'apprentissage profond introduit en 2017, "
            "utilisé principalement dans le domaine du traitement automatique des langues (TAL).",
            "À l'instar des réseaux de neurones récurrents (RNN), les transformeurs sont conçus "
            "pour gérer des données séquentielles, telles que le langage naturel, pour des tâches "
            "telles que la traduction et la synthèse de texte.",
        ]
        # `__lowerCAmelCase` below is the fmt-off expected-encoding literal defined above.
        self.tokenizer_integration_test_util(
            expected_encoding=__lowerCAmelCase, model_name="camembert-base", revision="3a0641d9a1aeb7e848a74299e7e4c4bca216b4cf", sequences=sequences, )
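# --- Parity-check sketch (editor's addition; the pattern the tests above rely on):
#
#   slow = CamembertTokenizer(SAMPLE_VOCAB)
#   fast = CamembertTokenizerFast.from_pretrained(dir_with_saved_slow_tokenizer)
#   assert slow.encode(text) == fast.encode(text)   # same ids from either backend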
| 92 | 1 |
"""simple docstring"""
from dataclasses import dataclass, field
from typing import ClassVar, Dict
from ..features import Features, Value
from .base import TaskTemplate
@dataclass(frozen=True)
class Summarization(TaskTemplate):
    # `task` is not a ClassVar since we want it to be part of the `asdict` output for JSON serialization
    task: str = field(default="""summarization""", metadata={"""include_in_asdict_even_if_is_default""": True})
    input_schema: ClassVar[Features] = Features({"""text""": Value("""string""" )})
    label_schema: ClassVar[Features] = Features({"""summary""": Value("""string""" )})
    text_column: str = "text"
    summary_column: str = "summary"

    @property
    def column_mapping(self) -> Dict[str, str]:
        return {self.text_column: "text", self.summary_column: "summary"}
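# --- Usage sketch (editor's addition; illustrative): the template standardizes
# dataset column names, e.g.
#
#   Summarization(text_column="article", summary_column="highlights").column_mapping
#   # -> {"article": "text", "highlights": "summary"}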
| 79 |
"""simple docstring"""
# limitations under the License.
# NOTE: This file is deprecated and will be removed in a future version.
# It only exists so that temporarily `from diffusers.pipelines import DiffusionPipeline` works
from .pipelines import DiffusionPipeline, ImagePipelineOutput # noqa: F401
from .utils import deprecate
deprecate(
'''pipelines_utils''',
'''0.22.0''',
'''Importing `DiffusionPipeline` or `ImagePipelineOutput` from diffusers.pipeline_utils is deprecated. Please import from diffusers.pipelines.pipeline_utils instead.''',
standard_warn=False,
stacklevel=3,
)
| 79 | 1 |
import copy
import inspect
import unittest
from transformers import PretrainedConfig, SwiftFormerConfig
from transformers.testing_utils import (
require_torch,
require_vision,
slow,
torch_device,
)
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import SwiftFormerForImageClassification, SwiftFormerModel
from transformers.models.swiftformer.modeling_swiftformer import SWIFTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import ViTImageProcessor
class SwiftFormerModelTester:
    """simple docstring"""

    def __init__(self, parent, batch_size=13, num_channels=3, is_training=True, use_labels=True, hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, image_size=224, num_labels=1000, layer_depths=[3, 3, 6, 4], embed_dims=[48, 56, 112, 220], ):
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.num_labels = num_labels
        self.image_size = image_size
        self.layer_depths = layer_depths
        self.embed_dims = embed_dims

    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])

        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.num_labels)

        config = self.get_config()
        return config, pixel_values, labels

    def get_config(self):
        return SwiftFormerConfig(
            depths=self.layer_depths, embed_dims=self.embed_dims, mlp_ratio=4, downsamples=[True, True, True, True], hidden_act='''gelu''', num_labels=self.num_labels, down_patch_size=3, down_stride=2, down_pad=1, drop_rate=0.0, drop_path_rate=0.0, use_layer_scale=True, layer_scale_init_value=1e-5, )

    def create_and_check_model(self, config, pixel_values, labels):
        model = SwiftFormerModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.embed_dims[-1], 7, 7))

    def create_and_check_for_image_classification(self, config, pixel_values, labels):
        config.num_labels = self.num_labels
        model = SwiftFormerForImageClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values, labels=labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

        model = SwiftFormerForImageClassification(config)
        model.to(torch_device)
        model.eval()

        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])
        result = model(pixel_values)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def prepare_config_and_inputs_for_common(self):
        (config, pixel_values, labels) = self.prepare_config_and_inputs()
        inputs_dict = {'''pixel_values''': pixel_values}
        return config, inputs_dict
@require_torch
class SwiftFormerModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (SwiftFormerModel, SwiftFormerForImageClassification) if is_torch_available() else ()
    pipeline_model_mapping = (
        {"feature-extraction": SwiftFormerModel, "image-classification": SwiftFormerForImageClassification}
        if is_torch_available()
        else {}
    )
    fx_compatible = False
    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False
    has_attentions = False
    def setUp(self):
        self.model_tester = SwiftFormerModelTester(self)
        self.config_tester = ConfigTester(
            self,
            config_class=SwiftFormerConfig,
            has_text_modality=False,
            hidden_size=37,
            num_attention_heads=12,
            num_hidden_layers=12,
        )

    def test_config(self):
        self.config_tester.run_common_tests()
@unittest.skip(reason='''SwiftFormer does not use inputs_embeds''' )
    def test_inputs_embeds(self):
        pass
    def test_model_common_attributes(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            x = model.get_output_embeddings()
            self.assertTrue(x is None or isinstance(x, nn.Linear))
    def test_forward_signature(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]

            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)
    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)
    def test_for_image_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*config_and_inputs)
@slow
    def test_model_from_pretrained(self):
        for model_name in SWIFTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = SwiftFormerModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
@unittest.skip(reason='''SwiftFormer does not output attentions''' )
    def test_attention_outputs(self):
        pass
    def test_hidden_states_output(self):
        def check_hidden_states_output(inputs_dict, config, model_class):
            model = model_class(config)
            model.to(torch_device)
            model.eval()

            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))

            hidden_states = outputs.hidden_states

            expected_num_stages = 8
            self.assertEqual(len(hidden_states), expected_num_stages)  # TODO

            # SwiftFormer's feature maps are of shape (batch_size, embed_dims, height, width)
            # with the width and height being successively divided by 2, after every 2 blocks
            for i in range(len(hidden_states)):
self.assertEqual(
hidden_states[i].shape , torch.Size(
[
self.model_tester.batch_size,
self.model_tester.embed_dims[i // 2],
(self.model_tester.image_size // 4) // 2 ** (i // 2),
(self.model_tester.image_size // 4) // 2 ** (i // 2),
] ) , )
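            # For the default tester settings (image_size=224, so 224 // 4 = 56), the
            # eight feature maps checked above come out 56x56, 56x56, 28x28, 28x28,
            # 14x14, 14x14, 7x7 and 7x7, with channel counts following
            # embed_dims = [48, 56, 112, 220].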
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            inputs_dict["output_hidden_states"] = True
            check_hidden_states_output(inputs_dict, config, model_class)

            # check that output_hidden_states also work using config
            del inputs_dict["output_hidden_states"]
            config.output_hidden_states = True

            check_hidden_states_output(inputs_dict, config, model_class)
    def test_initialization(self):
        def _config_zero_init(config):
            configs_no_init = copy.deepcopy(config)
            for key in configs_no_init.__dict__.keys():
                if "_range" in key or "_std" in key or "initializer_factor" in key or "layer_scale" in key:
                    setattr(configs_no_init, key, 1e-10)
                if isinstance(getattr(configs_no_init, key, None), PretrainedConfig):
                    no_init_subconfig = _config_zero_init(getattr(configs_no_init, key))
                    setattr(configs_no_init, key, no_init_subconfig)
            return configs_no_init

        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        configs_no_init = _config_zero_init(config)
        for model_class in self.all_model_classes:
            model = model_class(config=configs_no_init)
            for name, param in model.named_parameters():
                if param.requires_grad:
                    self.assertIn(
                        ((param.data.mean() * 1e9) / 1e9).round().item(),
                        [0.0, 1.0],
                        msg=F'''Parameter {name} of model {model_class} seems not properly initialized''',
                    )
@unittest.skip('''Will be fixed soon by reducing the size of the model used for common tests.''' )
    def test_model_is_small(self):
        pass
def prepare_img():
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image
@require_torch
@require_vision
class SwiftFormerModelIntegrationTest(unittest.TestCase):
    @cached_property
    def default_image_processor(self):
        return ViTImageProcessor.from_pretrained("MBZUAI/swiftformer-xs") if is_vision_available() else None

    @slow
    def test_inference_image_classification_head(self):
        model = SwiftFormerForImageClassification.from_pretrained("MBZUAI/swiftformer-xs").to(torch_device)
        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="pt").to(torch_device)

        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)

        # verify the logits
        expected_shape = torch.Size((1, 1000))
        self.assertEqual(outputs.logits.shape, expected_shape)
        expected_slice = torch.tensor([[-2.1703e00, 2.1107e00, -2.0811e00]]).to(torch_device)
        self.assertTrue(torch.allclose(outputs.logits[0, :3], expected_slice, atol=1e-4))
| 259 |
import math


def solution(n: int = 100) -> int:
    """
    Project Euler problem 6: the difference between the square of the sum and
    the sum of the squares of the first n natural numbers.
    """
    sum_of_squares = sum(i * i for i in range(1, n + 1))
    square_of_sum = int(math.pow(sum(range(1, n + 1)), 2))
    return square_of_sum - sum_of_squares


if __name__ == "__main__":
    print(f"{solution() = }")
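# Quick sanity check, assuming the function above: for n = 10 the sum of the
# squares is 385 and the square of the sum is 3025, so solution(10) == 2640;
# the default n = 100 yields 25164150 (the Project Euler 6 answer).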
| 259 | 1 |
"""simple docstring"""
from __future__ import annotations
lowerCamelCase__ = []
def lowercase__ ( lowercase_ ,lowercase_ ,lowercase_ ) -> bool:
"""simple docstring"""
for i in range(len(lowercase_ ) ):
if board[row][i] == 1:
return False
for i in range(len(lowercase_ ) ):
if board[i][column] == 1:
return False
for i, j in zip(range(lowercase_ ,-1 ,-1 ) ,range(lowercase_ ,-1 ,-1 ) ):
if board[i][j] == 1:
return False
for i, j in zip(range(lowercase_ ,-1 ,-1 ) ,range(lowercase_ ,len(lowercase_ ) ) ):
if board[i][j] == 1:
return False
return True
def lowercase__ ( lowercase_ ,lowercase_ ) -> bool:
"""simple docstring"""
if row >= len(lowercase_ ):
solution.append(lowercase_ )
printboard(lowercase_ )
print()
return True
for i in range(len(lowercase_ ) ):
if is_safe(lowercase_ ,lowercase_ ,lowercase_ ):
_UpperCamelCase : Optional[int] = 1
solve(lowercase_ ,row + 1 )
_UpperCamelCase : Dict = 0
return False
def lowercase__ ( lowercase_ ) -> None:
"""simple docstring"""
for i in range(len(lowercase_ ) ):
for j in range(len(lowercase_ ) ):
if board[i][j] == 1:
print("Q" ,end=" " )
else:
print("." ,end=" " )
print()
# n=int(input("The no. of queens"))
lowerCamelCase__ = 8
lowerCamelCase__ = [[0 for i in range(n)] for j in range(n)]
solve(board, 0)
print("The total no. of solutions are :", len(solution))
| 369 |
"""simple docstring"""
import torch
from transformers import AutoModel
class __SCREAMING_SNAKE_CASE ( torch.nn.Module ):
'''simple docstring'''
def __init__( self : Dict , __a : Tuple="sayef/fsner-bert-base-uncased" ) -> Dict:
super(__a , self ).__init__()
_UpperCamelCase : Optional[Any] = AutoModel.from_pretrained(__a , return_dict=__a )
_UpperCamelCase : str = torch.nn.CosineSimilarity(3 , 1e-0_8 )
_UpperCamelCase : List[str] = torch.nn.Softmax(dim=1 )
def __SCREAMING_SNAKE_CASE ( self : int , **__a : Tuple ) -> Optional[Any]:
return self.bert(**__a ).last_hidden_state
def __SCREAMING_SNAKE_CASE ( self : List[str] , __a : Optional[Any] ) -> Optional[int]:
return token_embeddings.sum(2 , keepdim=__a )
def __SCREAMING_SNAKE_CASE ( self : str , __a : Any , __a : List[Any] , __a : Tuple=1 ) -> List[Any]:
return self.softmax(T * self.cos(__a , __a ) )
def __SCREAMING_SNAKE_CASE ( self : Union[str, Any] , __a : List[str] , __a : Dict ) -> Union[str, Any]:
_UpperCamelCase : str = W_supports["sizes"].tolist()
_UpperCamelCase : Any = W_supports["start_token_id"].item()
_UpperCamelCase : Optional[Any] = W_supports["end_token_id"].item()
del W_supports["sizes"]
del W_supports["start_token_id"]
del W_supports["end_token_id"]
_UpperCamelCase : str = self.BERT(**__a )
_UpperCamelCase : int = self.BERT(**__a )
_UpperCamelCase : int = None
_UpperCamelCase : Optional[int] = None
_UpperCamelCase : List[Any] = W_supports["input_ids"] == start_token_id
_UpperCamelCase : Optional[int] = W_supports["input_ids"] == end_token_id
for i, size in enumerate(__a ):
if i == 0:
_UpperCamelCase : Dict = 0
else:
_UpperCamelCase : Any = support_sizes[i - 1]
_UpperCamelCase : Dict = S[s : s + size][start_token_masks[s : s + size]]
_UpperCamelCase : Optional[int] = S[s : s + size][end_token_masks[s : s + size]]
_UpperCamelCase : List[Any] = torch.matmul(q[i] , s_start.T ).sum(1 ).softmax(0 )
_UpperCamelCase : Any = torch.matmul(q[i] , s_end.T ).sum(1 ).softmax(0 )
if p_starts is not None:
_UpperCamelCase : Any = torch.vstack((p_starts, p_start) )
_UpperCamelCase : Any = torch.vstack((p_ends, p_end) )
else:
_UpperCamelCase : Optional[Any] = p_start
_UpperCamelCase : str = p_end
return p_starts, p_ends
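# Rough shape sketch, assuming a BERT-base encoder (hidden size 768): q is
# (num_queries, seq_len, 768) and S is (num_support_examples, seq_len, 768).
# For each query i, torch.matmul(q[i], s_start.T) dot-scores every query token
# against the support set's entity-start embeddings; summing over the support
# axis and applying a softmax over query positions yields per-token
# probabilities of being an entity start (and likewise for entity ends).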
| 310 | 0 |
from decimal import Decimal, getcontext
from math import ceil, factorial


def pi(precision: int) -> str:
    """
    Compute pi to `precision` digits using the Chudnovsky algorithm.
    """
    if not isinstance(precision, int):
        raise TypeError("Undefined for non-integers")
    elif precision < 1:
        raise ValueError("Undefined for non-natural numbers")

    getcontext().prec = precision
    iterations = ceil(precision / 14)  # each term adds roughly 14 digits
    constant_term = 426880 * Decimal(10005).sqrt()
    exponential_term = 1
    linear_term = 13591409
    partial_sum = Decimal(linear_term)
    for k in range(1, iterations):
        multinomial_term = factorial(6 * k) // (factorial(3 * k) * factorial(k) ** 3)
        linear_term += 545140134
        exponential_term *= -262537412640768000
        partial_sum += Decimal(multinomial_term * linear_term) / exponential_term
    return str(constant_term / partial_sum)[:-1]


if __name__ == "__main__":
    n = 50
    print(f"The first {n} digits of pi is: {pi(n)}")
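# The series implemented above is Chudnovsky's:
#   1/pi = 12 * sum_{k>=0} (-1)^k (6k)! (13591409 + 545140134 k)
#                          / ((3k)! (k!)^3 * 640320^(3k + 3/2))
# Each term contributes roughly 14 extra correct digits, hence the
# ceil(precision / 14) iterations. Note that 426880 * sqrt(10005) equals
# 640320^(3/2) / 12, folding the leading constant into `constant_term`, and
# 640320^3 = 262537412640768000 is the factor in `exponential_term`.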
| 88 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available
_import_structure = {
'configuration_xlm': ['XLM_PRETRAINED_CONFIG_ARCHIVE_MAP', 'XLMConfig', 'XLMOnnxConfig'],
'tokenization_xlm': ['XLMTokenizer'],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_xlm"] = [
'XLM_PRETRAINED_MODEL_ARCHIVE_LIST',
'XLMForMultipleChoice',
'XLMForQuestionAnswering',
'XLMForQuestionAnsweringSimple',
'XLMForSequenceClassification',
'XLMForTokenClassification',
'XLMModel',
'XLMPreTrainedModel',
'XLMWithLMHeadModel',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_xlm"] = [
'TF_XLM_PRETRAINED_MODEL_ARCHIVE_LIST',
'TFXLMForMultipleChoice',
'TFXLMForQuestionAnsweringSimple',
'TFXLMForSequenceClassification',
'TFXLMForTokenClassification',
'TFXLMMainLayer',
'TFXLMModel',
'TFXLMPreTrainedModel',
'TFXLMWithLMHeadModel',
]
if TYPE_CHECKING:
from .configuration_xlm import XLM_PRETRAINED_CONFIG_ARCHIVE_MAP, XLMConfig, XLMOnnxConfig
from .tokenization_xlm import XLMTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_xlm import (
XLM_PRETRAINED_MODEL_ARCHIVE_LIST,
XLMForMultipleChoice,
XLMForQuestionAnswering,
XLMForQuestionAnsweringSimple,
XLMForSequenceClassification,
XLMForTokenClassification,
XLMModel,
XLMPreTrainedModel,
XLMWithLMHeadModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_xlm import (
TF_XLM_PRETRAINED_MODEL_ARCHIVE_LIST,
TFXLMForMultipleChoice,
TFXLMForQuestionAnsweringSimple,
TFXLMForSequenceClassification,
TFXLMForTokenClassification,
TFXLMMainLayer,
TFXLMModel,
TFXLMPreTrainedModel,
TFXLMWithLMHeadModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
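# Note on the pattern above: _LazyModule defers the heavy torch/TensorFlow
# imports until one of the names listed in _import_structure is first
# accessed, keeping `import transformers` fast, while the TYPE_CHECKING
# branch lets static type checkers and IDEs see the full public API.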
| 88 | 1 |
from __future__ import annotations

from scipy.special import comb  # type: ignore


class BezierCurve:
    """
    Bezier curves over 2D control points in the xy plane.
    """

    def __init__(self, list_of_points: list[tuple[float, float]]):
        self.list_of_points = list_of_points
        # Degree determines the flexibility of the curve.
        # Degree = 1 will produce a straight line.
        self.degree = len(list_of_points) - 1

    def basis_function(self, t: float) -> list[float]:
        """Bernstein basis: the weight of each control point at time t."""
        assert 0 <= t <= 1, "Time t must be between 0 and 1."
        output_values: list[float] = []
        for i in range(len(self.list_of_points)):
            # basis function for each i
            output_values.append(
                comb(self.degree, i) * ((1 - t) ** (self.degree - i)) * (t**i)
            )
        # the basis must sum up to 1 for it to produce a valid Bezier curve.
        assert round(sum(output_values), 5) == 1
        return output_values

    def bezier_curve_function(self, t: float) -> tuple[float, float]:
        """The point on the Bezier curve at time t."""
        assert 0 <= t <= 1, "Time t must be between 0 and 1."
        basis_function = self.basis_function(t)
        x = 0.0
        y = 0.0
        for i in range(len(self.list_of_points)):
            # For all points, sum up the product of i-th basis function and i-th point.
            x += basis_function[i] * self.list_of_points[i][0]
            y += basis_function[i] * self.list_of_points[i][1]
        return (x, y)

    def plot_curve(self, step_size: float = 0.01):
        """Plot the curve and its control points with matplotlib."""
        from matplotlib import pyplot as plt  # type: ignore

        to_plot_x: list[float] = []  # x coordinates of points to plot
        to_plot_y: list[float] = []  # y coordinates of points to plot

        t = 0.0
        while t <= 1:
            value = self.bezier_curve_function(t)
            to_plot_x.append(value[0])
            to_plot_y.append(value[1])
            t += step_size

        x = [i[0] for i in self.list_of_points]
        y = [i[1] for i in self.list_of_points]

        plt.plot(to_plot_x, to_plot_y, color="blue", label="Curve of Degree " + str(self.degree))
        plt.scatter(x, y, color="red", label="Control Points")
        plt.legend()
        plt.show()


if __name__ == "__main__":
    import doctest

    doctest.testmod()

    BezierCurve([(1, 2), (3, 5)]).plot_curve()  # degree 1
    BezierCurve([(0, 0), (5, 5), (5, 0)]).plot_curve()  # degree 2
    BezierCurve([(0, 0), (5, 5), (5, 0), (2.5, -2.5)]).plot_curve()  # degree 3
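# Worked example for the class above: for the degree-2 curve with control
# points (0, 0), (5, 5), (5, 0), the Bernstein basis at t = 0.5 is
# [0.25, 0.5, 0.25] (summing to 1), so bezier_curve_function(0.5) returns
# (0.25 * 0 + 0.5 * 5 + 0.25 * 5, 0.25 * 0 + 0.5 * 5 + 0.25 * 0) = (3.75, 2.5).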
| 357 |
import gc
import random
import unittest
import numpy as np
import torch
from transformers import XLMRobertaTokenizer
from diffusers import (
AltDiffusionImgaImgPipeline,
AutoencoderKL,
PNDMScheduler,
UNetaDConditionModel,
)
from diffusers.image_processor import VaeImageProcessor
from diffusers.pipelines.alt_diffusion.modeling_roberta_series import (
RobertaSeriesConfig,
RobertaSeriesModelWithTransformation,
)
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
enable_full_determinism()
class __lowerCamelCase ( unittest.TestCase ):
"""simple docstring"""
    def tearDown(self ) -> Any:
'''simple docstring'''
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
@property
    def dummy_image(self ) -> Dict:
'''simple docstring'''
        batch_size = 1
        num_channels = 3
        sizes = (32, 32)
        image = floats_tensor((batch_size, num_channels) + sizes, rng=random.Random(0)).to(torch_device)
        return image
@property
    def dummy_cond_unet(self ) -> List[str]:
'''simple docstring'''
torch.manual_seed(0 )
lowercase_ = UNetaDConditionModel(
block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=("DownBlock2D", "CrossAttnDownBlock2D") , up_block_types=("CrossAttnUpBlock2D", "UpBlock2D") , cross_attention_dim=32 , )
return model
@property
    def dummy_vae(self ) -> str:
'''simple docstring'''
torch.manual_seed(0 )
lowercase_ = AutoencoderKL(
block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"] , up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"] , latent_channels=4 , )
return model
@property
    def dummy_text_encoder(self ) -> Dict:
'''simple docstring'''
torch.manual_seed(0 )
lowercase_ = RobertaSeriesConfig(
hidden_size=32 , project_dim=32 , intermediate_size=37 , layer_norm_eps=1e-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=5006 , )
return RobertaSeriesModelWithTransformation(UpperCAmelCase )
@property
    def dummy_extractor(self ) -> Dict:
'''simple docstring'''
def extract(*UpperCAmelCase , **UpperCAmelCase ):
            class Out:
"""simple docstring"""
def __init__( self ) -> List[Any]:
'''simple docstring'''
                    self.pixel_values = torch.ones([0])

                def to(self, device):
                    self.pixel_values.to(device)
                    return self
return Out()
return extract
def A__ ( self ) -> str:
'''simple docstring'''
lowercase_ = "cpu" # ensure determinism for the device-dependent torch.Generator
lowercase_ = self.dummy_cond_unet
lowercase_ = PNDMScheduler(skip_prk_steps=UpperCAmelCase )
lowercase_ = self.dummy_vae
lowercase_ = self.dummy_text_encoder
lowercase_ = XLMRobertaTokenizer.from_pretrained("hf-internal-testing/tiny-xlm-roberta" )
lowercase_ = 77
lowercase_ = self.dummy_image.to(UpperCAmelCase )
lowercase_ = init_image / 2 + 0.5
# make sure here that pndm scheduler skips prk
lowercase_ = AltDiffusionImgaImgPipeline(
unet=UpperCAmelCase , scheduler=UpperCAmelCase , vae=UpperCAmelCase , text_encoder=UpperCAmelCase , tokenizer=UpperCAmelCase , safety_checker=UpperCAmelCase , feature_extractor=self.dummy_extractor , )
lowercase_ = VaeImageProcessor(vae_scale_factor=alt_pipe.vae_scale_factor , do_normalize=UpperCAmelCase )
lowercase_ = alt_pipe.to(UpperCAmelCase )
alt_pipe.set_progress_bar_config(disable=UpperCAmelCase )
lowercase_ = "A painting of a squirrel eating a burger"
lowercase_ = torch.Generator(device=UpperCAmelCase ).manual_seed(0 )
lowercase_ = alt_pipe(
[prompt] , generator=UpperCAmelCase , guidance_scale=6.0 , num_inference_steps=2 , output_type="np" , image=UpperCAmelCase , )
lowercase_ = output.images
lowercase_ = torch.Generator(device=UpperCAmelCase ).manual_seed(0 )
lowercase_ = alt_pipe(
[prompt] , generator=UpperCAmelCase , guidance_scale=6.0 , num_inference_steps=2 , output_type="np" , image=UpperCAmelCase , return_dict=UpperCAmelCase , )[0]
lowercase_ = image[0, -3:, -3:, -1]
lowercase_ = image_from_tuple[0, -3:, -3:, -1]
assert image.shape == (1, 32, 32, 3)
lowercase_ = np.array([0.4427, 0.3731, 0.4249, 0.4941, 0.4546, 0.4148, 0.4193, 0.4666, 0.4499] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 5e-3
assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 5e-3
@unittest.skipIf(torch_device != "cuda" , "This test requires a GPU" )
def A__ ( self ) -> str:
'''simple docstring'''
lowercase_ = self.dummy_cond_unet
lowercase_ = PNDMScheduler(skip_prk_steps=UpperCAmelCase )
lowercase_ = self.dummy_vae
lowercase_ = self.dummy_text_encoder
lowercase_ = XLMRobertaTokenizer.from_pretrained("hf-internal-testing/tiny-xlm-roberta" )
lowercase_ = 77
lowercase_ = self.dummy_image.to(UpperCAmelCase )
# put models in fp16
lowercase_ = unet.half()
lowercase_ = vae.half()
lowercase_ = bert.half()
# make sure here that pndm scheduler skips prk
lowercase_ = AltDiffusionImgaImgPipeline(
unet=UpperCAmelCase , scheduler=UpperCAmelCase , vae=UpperCAmelCase , text_encoder=UpperCAmelCase , tokenizer=UpperCAmelCase , safety_checker=UpperCAmelCase , feature_extractor=self.dummy_extractor , )
lowercase_ = VaeImageProcessor(vae_scale_factor=alt_pipe.vae_scale_factor , do_normalize=UpperCAmelCase )
lowercase_ = alt_pipe.to(UpperCAmelCase )
alt_pipe.set_progress_bar_config(disable=UpperCAmelCase )
lowercase_ = "A painting of a squirrel eating a burger"
lowercase_ = torch.manual_seed(0 )
lowercase_ = alt_pipe(
[prompt] , generator=UpperCAmelCase , num_inference_steps=2 , output_type="np" , image=UpperCAmelCase , ).images
assert image.shape == (1, 32, 32, 3)
@unittest.skipIf(torch_device != "cuda" , "This test requires a GPU" )
def A__ ( self ) -> List[Any]:
'''simple docstring'''
lowercase_ = load_image(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
"/img2img/sketch-mountains-input.jpg" )
# resize to resolution that is divisible by 8 but not 16 or 32
lowercase_ = init_image.resize((760, 504) )
lowercase_ = "BAAI/AltDiffusion"
lowercase_ = AltDiffusionImgaImgPipeline.from_pretrained(
UpperCAmelCase , safety_checker=UpperCAmelCase , )
pipe.to(UpperCAmelCase )
pipe.set_progress_bar_config(disable=UpperCAmelCase )
pipe.enable_attention_slicing()
lowercase_ = "A fantasy landscape, trending on artstation"
lowercase_ = torch.manual_seed(0 )
lowercase_ = pipe(
prompt=UpperCAmelCase , image=UpperCAmelCase , strength=0.75 , guidance_scale=7.5 , generator=UpperCAmelCase , output_type="np" , )
lowercase_ = output.images[0]
lowercase_ = image[255:258, 383:386, -1]
assert image.shape == (504, 760, 3)
lowercase_ = np.array([0.9358, 0.9397, 0.9599, 0.9901, 1.0000, 1.0000, 0.9882, 1.0000, 1.0000] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
@slow
@require_torch_gpu
class __lowerCamelCase ( unittest.TestCase ):
"""simple docstring"""
    def tearDown(self ) -> Tuple:
'''simple docstring'''
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def A__ ( self ) -> List[str]:
'''simple docstring'''
lowercase_ = load_image(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
"/img2img/sketch-mountains-input.jpg" )
lowercase_ = init_image.resize((768, 512) )
lowercase_ = load_numpy(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/img2img/fantasy_landscape_alt.npy" )
lowercase_ = "BAAI/AltDiffusion"
lowercase_ = AltDiffusionImgaImgPipeline.from_pretrained(
UpperCAmelCase , safety_checker=UpperCAmelCase , )
pipe.to(UpperCAmelCase )
pipe.set_progress_bar_config(disable=UpperCAmelCase )
pipe.enable_attention_slicing()
lowercase_ = "A fantasy landscape, trending on artstation"
lowercase_ = torch.manual_seed(0 )
lowercase_ = pipe(
prompt=UpperCAmelCase , image=UpperCAmelCase , strength=0.75 , guidance_scale=7.5 , generator=UpperCAmelCase , output_type="np" , )
lowercase_ = output.images[0]
assert image.shape == (512, 768, 3)
# img2img is flaky across GPUs even in fp32, so using MAE here
assert np.abs(expected_image - image ).max() < 1e-2
| 297 | 0 |
import pickle
import shutil
import tempfile
import unittest
from transformers import SPIECE_UNDERLINE, XLMRobertaTokenizer, XLMRobertaTokenizerFast
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow
from transformers.utils import cached_property
from ...test_tokenization_common import TokenizerTesterMixin
SAMPLE_VOCAB = get_tests_dir("fixtures/test_sentencepiece.model")
@require_sentencepiece
@require_tokenizers
class XLMRobertaTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = XLMRobertaTokenizer
    rust_tokenizer_class = XLMRobertaTokenizerFast
    test_rust_tokenizer = True
    test_sentencepiece = True
    def setUp(self):
        super().setUp()

        # We have a SentencePiece fixture for testing
        tokenizer = XLMRobertaTokenizer(SAMPLE_VOCAB, keep_accents=True)
        tokenizer.save_pretrained(self.tmpdirname)
    def test_convert_token_and_id(self):
        """Test ``_convert_token_to_id`` and ``_convert_id_to_token``."""
        token = "<pad>"
        token_id = 1

        self.assertEqual(self.get_tokenizer()._convert_token_to_id(token), token_id)
        self.assertEqual(self.get_tokenizer()._convert_id_to_token(token_id), token)
    def test_get_vocab(self):
        vocab_keys = list(self.get_tokenizer().get_vocab().keys())

        self.assertEqual(vocab_keys[0], "<s>")
        self.assertEqual(vocab_keys[1], "<pad>")
        self.assertEqual(vocab_keys[-1], "<mask>")
        self.assertEqual(len(vocab_keys), 1002)
    def test_vocab_size(self):
        self.assertEqual(self.get_tokenizer().vocab_size, 1002)
    def test_full_tokenizer(self):
        tokenizer = XLMRobertaTokenizer(SAMPLE_VOCAB, keep_accents=True)

        tokens = tokenizer.tokenize("This is a test")
        self.assertListEqual(tokens, ["▁This", "▁is", "▁a", "▁t", "est"])
        self.assertListEqual(
            tokenizer.convert_tokens_to_ids(tokens), [value + tokenizer.fairseq_offset for value in [285, 46, 10, 170, 382]], )
        tokens = tokenizer.tokenize("I was born in 92000, and this is falsé.")
        self.assertListEqual(
            tokens, [
SPIECE_UNDERLINE + '''I''',
SPIECE_UNDERLINE + '''was''',
SPIECE_UNDERLINE + '''b''',
'''or''',
'''n''',
SPIECE_UNDERLINE + '''in''',
SPIECE_UNDERLINE + '''''',
'''9''',
'''2''',
'''0''',
'''0''',
'''0''',
''',''',
SPIECE_UNDERLINE + '''and''',
SPIECE_UNDERLINE + '''this''',
SPIECE_UNDERLINE + '''is''',
SPIECE_UNDERLINE + '''f''',
'''al''',
'''s''',
'''é''',
'''.''',
] , )
        ids = tokenizer.convert_tokens_to_ids(tokens)
        self.assertListEqual(
            ids, [
value + tokenizer.fairseq_offset
for value in [8, 21, 84, 55, 24, 19, 7, 2, 602, 347, 347, 347, 3, 12, 66, 46, 72, 80, 6, 2, 4]
# ^ unk: 2 + 1 = 3 unk: 2 + 1 = 3 ^
] , )
        back_tokens = tokenizer.convert_ids_to_tokens(ids)
        self.assertListEqual(
            back_tokens, [
SPIECE_UNDERLINE + '''I''',
SPIECE_UNDERLINE + '''was''',
SPIECE_UNDERLINE + '''b''',
'''or''',
'''n''',
SPIECE_UNDERLINE + '''in''',
SPIECE_UNDERLINE + '''''',
'''<unk>''',
'''2''',
'''0''',
'''0''',
'''0''',
''',''',
SPIECE_UNDERLINE + '''and''',
SPIECE_UNDERLINE + '''this''',
SPIECE_UNDERLINE + '''is''',
SPIECE_UNDERLINE + '''f''',
'''al''',
'''s''',
'''<unk>''',
'''.''',
] , )
    def test_save_pretrained(self):
        if not self.test_slow_tokenizer:
            # as we don't have a slow version, we can't compare the outputs between slow and fast versions
            return

        self.tokenizers_list[0] = (self.rust_tokenizer_class, "hf-internal-testing/tiny-xlm-roberta", {})
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(F'''{tokenizer.__class__.__name__} ({pretrained_name})''' ):
                tokenizer_r = self.rust_tokenizer_class.from_pretrained(pretrained_name, **kwargs)
                tokenizer_p = self.tokenizer_class.from_pretrained(pretrained_name, **kwargs)

                tmpdirname2 = tempfile.mkdtemp()

                tokenizer_r_files = tokenizer_r.save_pretrained(tmpdirname2)
                tokenizer_p_files = tokenizer_p.save_pretrained(tmpdirname2)

                # Checks it save with the same files + the tokenizer.json file for the fast one
                self.assertTrue(any("tokenizer.json" in f for f in tokenizer_r_files))
                tokenizer_r_files = tuple(f for f in tokenizer_r_files if "tokenizer.json" not in f)
                self.assertSequenceEqual(tokenizer_r_files, tokenizer_p_files)

                # Checks everything loads correctly in the same way
                tokenizer_rp = tokenizer_r.from_pretrained(tmpdirname2)
                tokenizer_pp = tokenizer_p.from_pretrained(tmpdirname2)

                # Check special tokens are set accordingly on Rust and Python
                for key in tokenizer_pp.special_tokens_map:
                    self.assertTrue(hasattr(tokenizer_pp, key))
                    # self.assertEqual(getattr(tokenizer_rp, key), getattr(tokenizer_pp, key))
                    # self.assertEqual(getattr(tokenizer_rp, key + "_id"), getattr(tokenizer_pp, key + "_id"))

                shutil.rmtree(tmpdirname2)

                # Save tokenizer rust, legacy_format=True
                tmpdirname2 = tempfile.mkdtemp()

                tokenizer_r_files = tokenizer_r.save_pretrained(tmpdirname2, legacy_format=True)
                tokenizer_p_files = tokenizer_p.save_pretrained(tmpdirname2)

                # Checks it save with the same files
                self.assertSequenceEqual(tokenizer_r_files, tokenizer_p_files)

                # Checks everything loads correctly in the same way
                tokenizer_rp = tokenizer_r.from_pretrained(tmpdirname2)
                tokenizer_pp = tokenizer_p.from_pretrained(tmpdirname2)

                # Check special tokens are set accordingly on Rust and Python
                for key in tokenizer_pp.special_tokens_map:
                    self.assertTrue(hasattr(tokenizer_pp, key))

                shutil.rmtree(tmpdirname2)

                # Save tokenizer rust, legacy_format=False
                tmpdirname2 = tempfile.mkdtemp()

                tokenizer_r_files = tokenizer_r.save_pretrained(tmpdirname2, legacy_format=False)
                tokenizer_p_files = tokenizer_p.save_pretrained(tmpdirname2)

                # Checks it saved the tokenizer.json file
                self.assertTrue(any("tokenizer.json" in f for f in tokenizer_r_files))

                # Checks everything loads correctly in the same way
                tokenizer_rp = tokenizer_r.from_pretrained(tmpdirname2)
                tokenizer_pp = tokenizer_p.from_pretrained(tmpdirname2)

                # Check special tokens are set accordingly on Rust and Python
                for key in tokenizer_pp.special_tokens_map:
                    self.assertTrue(hasattr(tokenizer_pp, key))

                shutil.rmtree(tmpdirname2)
@cached_property
    def big_tokenizer(self):
        return XLMRobertaTokenizer.from_pretrained("xlm-roberta-base")
    def test_picklable_without_disk(self):
        with tempfile.NamedTemporaryFile() as f:
            shutil.copyfile(SAMPLE_VOCAB, f.name)
            tokenizer = XLMRobertaTokenizer(f.name, keep_accents=True)
            pickled_tokenizer = pickle.dumps(tokenizer)
        pickle.loads(pickled_tokenizer)
    def test_rust_and_python_full_tokenizers(self):
        if not self.test_rust_tokenizer:
            return

        tokenizer = self.get_tokenizer()
        rust_tokenizer = self.get_rust_tokenizer()

        sequence = "I was born in 92000, and this is falsé."

        tokens = tokenizer.tokenize(sequence)
        rust_tokens = rust_tokenizer.tokenize(sequence)
        self.assertListEqual(tokens, rust_tokens)

        ids = tokenizer.encode(sequence, add_special_tokens=False)
        rust_ids = rust_tokenizer.encode(sequence, add_special_tokens=False)
        self.assertListEqual(ids, rust_ids)

        rust_tokenizer = self.get_rust_tokenizer()
        ids = tokenizer.encode(sequence)
        rust_ids = rust_tokenizer.encode(sequence)
        self.assertListEqual(ids, rust_ids)
@slow
    def test_tokenization_base_easy_symbols(self):
        symbols = "Hello World!"
        original_tokenizer_encodings = [0, 3_5378, 6661, 38, 2]
        # xlmr = torch.hub.load('pytorch/fairseq', 'xlmr.base') # xlmr.large has same tokenizer
        # xlmr.eval()
        # xlmr.encode(symbols)

        self.assertListEqual(original_tokenizer_encodings, self.big_tokenizer.encode(symbols))
@slow
    def test_tokenization_base_hard_symbols(self):
        symbols = (
'''This is a very long text with a lot of weird characters, such as: . , ~ ? ( ) " [ ] ! : - . Also we will'''
''' add words that should not exsist and be tokenized to <unk>, such as saoneuhaoesuth'''
)
        original_tokenizer_encodings = [
0,
3293,
83,
10,
4552,
4989,
7986,
678,
10,
5915,
111,
17_9459,
12_4850,
4,
6044,
237,
12,
6,
5,
6,
4,
6780,
705,
15,
1388,
44,
378,
1_0114,
711,
152,
20,
6,
5,
2_2376,
642,
1221,
1_5190,
3_4153,
450,
5608,
959,
1119,
5_7702,
136,
186,
47,
1098,
2_9367,
47,
# 4426, # What fairseq tokenizes from "<unk>": "_<"
# 3678, # What fairseq tokenizes from "<unk>": "unk"
# 2740, # What fairseq tokenizes from "<unk>": ">"
3, # What we tokenize from "<unk>": "<unk>"
6, # Residue from the tokenization: an extra sentencepiece underline
4,
6044,
237,
6284,
5_0901,
528,
31,
90,
34,
927,
2,
]
# xlmr = torch.hub.load('pytorch/fairseq', 'xlmr.base') # xlmr.large has same tokenizer
# xlmr.eval()
# xlmr.encode(symbols)
        self.assertListEqual(original_tokenizer_encodings, self.big_tokenizer.encode(symbols))
@slow
def SCREAMING_SNAKE_CASE__ ( self ):
# fmt: off
a :Optional[Any] = {'''input_ids''': [[0, 1_1062, 8_2772, 7, 15, 8_2772, 538, 5_1529, 237, 1_7198, 1290, 206, 9, 21_5175, 1314, 136, 1_7198, 1290, 206, 9, 5_6359, 42, 12_2009, 9, 1_6466, 16, 8_7344, 4537, 9, 4717, 7_8381, 6, 15_9958, 7, 15, 2_4480, 618, 4, 527, 2_2693, 5428, 4, 2777, 2_4480, 9874, 4, 4_3523, 594, 4, 803, 1_8392, 3_3189, 18, 4, 4_3523, 2_4447, 1_2399, 100, 2_4955, 8_3658, 9626, 14_4057, 15, 839, 2_2335, 16, 136, 2_4955, 8_3658, 8_3479, 15, 3_9102, 724, 16, 678, 645, 2789, 1328, 4589, 42, 12_2009, 11_5774, 23, 805, 1328, 4_6876, 7, 136, 5_3894, 1940, 4_2227, 4_1159, 1_7721, 823, 425, 4, 2_7512, 9_8722, 206, 136, 5531, 4970, 919, 1_7336, 5, 2], [0, 2_0080, 618, 83, 8_2775, 47, 479, 9, 1517, 73, 5_3894, 333, 8_0581, 11_0117, 1_8811, 5256, 1295, 51, 15_2526, 297, 7986, 390, 12_4416, 538, 3_5431, 214, 98, 1_5044, 2_5737, 136, 7108, 4_3701, 23, 756, 13_5355, 7, 5, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [0, 581, 6_3773, 11_9455, 6, 14_7797, 8_8203, 7, 645, 70, 21, 3285, 1_0269, 5, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]], '''attention_mask''': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501
# fmt: on
self.tokenizer_integration_test_util(
expected_encoding=_lowerCamelCase , model_name='''xlm-roberta-base''' , revision='''d9d8a8ea5eb94b1c6654ae9249df7793cd2933d3''' , )
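# Background for the id offsets used throughout these tests:
# XLMRobertaTokenizer shifts every SentencePiece id by fairseq_offset = 1 so
# that the special tokens match fairseq's dictionary order (<s> = 0,
# <pad> = 1, </s> = 2, <unk> = 3), which is why unknown pieces map to id 3 in
# the "hard symbols" expectations above.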
| 94 |
import math


class Graph:
    def __init__(self, n=0):  # a graph with Node 0,1,...,N-1
        self.n = n
        self.w = [
            [math.inf for j in range(0, n)] for i in range(0, n)
        ]  # adjacency matrix for weight
        self.dp = [
            [math.inf for j in range(0, n)] for i in range(0, n)
        ]  # dp[i][j] stores minimum distance from i to j

    def add_edge(self, u, v, w):
        self.dp[u][v] = w

    def floyd_warshall(self):
        for k in range(0, self.n):
            for i in range(0, self.n):
                for j in range(0, self.n):
                    self.dp[i][j] = min(self.dp[i][j], self.dp[i][k] + self.dp[k][j])

    def show_min(self, u, v):
        return self.dp[u][v]


if __name__ == "__main__":
    graph = Graph(5)
    graph.add_edge(0, 2, 9)
    graph.add_edge(0, 4, 10)
    graph.add_edge(1, 3, 5)
    graph.add_edge(2, 3, 7)
    graph.add_edge(3, 0, 10)
    graph.add_edge(3, 1, 2)
    graph.add_edge(3, 2, 1)
    graph.add_edge(3, 4, 6)
    graph.add_edge(4, 1, 3)
    graph.add_edge(4, 2, 4)
    graph.add_edge(4, 3, 9)
    graph.floyd_warshall()
    print(graph.show_min(1, 4))
    print(graph.show_min(0, 3))
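# With the eleven weighted edges above, the two queries print 11 (the path
# 1 -> 3 -> 4, cost 5 + 6) and 16 (the path 0 -> 2 -> 3, cost 9 + 7). The
# triple loop in floyd_warshall runs in O(n^3) time over the O(n^2) dp matrix.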
| 337 | 0 |
import unittest
import numpy as np
from transformers.testing_utils import require_flax, require_tf, require_torch
from transformers.utils import (
expand_dims,
flatten_dict,
is_flax_available,
is_tf_available,
is_torch_available,
reshape,
squeeze,
transpose,
)
if is_flax_available():
import jax.numpy as jnp
if is_tf_available():
import tensorflow as tf
if is_torch_available():
import torch
class SCREAMING_SNAKE_CASE__ ( unittest.TestCase ):
"""simple docstring"""
def __lowerCamelCase ( self ) -> Optional[Any]:
'''simple docstring'''
        input_dict = {
"task_specific_params": {
"summarization": {"length_penalty": 1.0, "max_length": 1_28, "min_length": 12, "num_beams": 4},
"summarization_cnn": {"length_penalty": 2.0, "max_length": 1_42, "min_length": 56, "num_beams": 4},
"summarization_xsum": {"length_penalty": 1.0, "max_length": 62, "min_length": 11, "num_beams": 6},
}
}
        expected_dict = {
"task_specific_params.summarization.length_penalty": 1.0,
"task_specific_params.summarization.max_length": 1_28,
"task_specific_params.summarization.min_length": 12,
"task_specific_params.summarization.num_beams": 4,
"task_specific_params.summarization_cnn.length_penalty": 2.0,
"task_specific_params.summarization_cnn.max_length": 1_42,
"task_specific_params.summarization_cnn.min_length": 56,
"task_specific_params.summarization_cnn.num_beams": 4,
"task_specific_params.summarization_xsum.length_penalty": 1.0,
"task_specific_params.summarization_xsum.max_length": 62,
"task_specific_params.summarization_xsum.min_length": 11,
"task_specific_params.summarization_xsum.num_beams": 6,
}
        self.assertEqual(flatten_dict(input_dict), expected_dict)
def __lowerCamelCase ( self ) -> Dict:
'''simple docstring'''
__UpperCamelCase : Tuple = np.random.randn(3 , 4 )
self.assertTrue(np.allclose(transpose(__UpperCamelCase ) , x.transpose() ) )
__UpperCamelCase : Optional[Any] = np.random.randn(3 , 4 , 5 )
self.assertTrue(np.allclose(transpose(__UpperCamelCase , axes=(1, 2, 0) ) , x.transpose((1, 2, 0) ) ) )
@require_torch
def __lowerCamelCase ( self ) -> List[Any]:
'''simple docstring'''
__UpperCamelCase : Optional[Any] = np.random.randn(3 , 4 )
__UpperCamelCase : Optional[Any] = torch.tensor(__UpperCamelCase )
self.assertTrue(np.allclose(transpose(__UpperCamelCase ) , transpose(__UpperCamelCase ).numpy() ) )
__UpperCamelCase : str = np.random.randn(3 , 4 , 5 )
__UpperCamelCase : str = torch.tensor(__UpperCamelCase )
self.assertTrue(np.allclose(transpose(__UpperCamelCase , axes=(1, 2, 0) ) , transpose(__UpperCamelCase , axes=(1, 2, 0) ).numpy() ) )
@require_tf
def __lowerCamelCase ( self ) -> List[Any]:
'''simple docstring'''
__UpperCamelCase : Optional[int] = np.random.randn(3 , 4 )
__UpperCamelCase : Optional[int] = tf.constant(__UpperCamelCase )
self.assertTrue(np.allclose(transpose(__UpperCamelCase ) , transpose(__UpperCamelCase ).numpy() ) )
__UpperCamelCase : List[Any] = np.random.randn(3 , 4 , 5 )
__UpperCamelCase : List[Any] = tf.constant(__UpperCamelCase )
self.assertTrue(np.allclose(transpose(__UpperCamelCase , axes=(1, 2, 0) ) , transpose(__UpperCamelCase , axes=(1, 2, 0) ).numpy() ) )
@require_flax
def __lowerCamelCase ( self ) -> Union[str, Any]:
'''simple docstring'''
__UpperCamelCase : Any = np.random.randn(3 , 4 )
__UpperCamelCase : Optional[int] = jnp.array(__UpperCamelCase )
self.assertTrue(np.allclose(transpose(__UpperCamelCase ) , np.asarray(transpose(__UpperCamelCase ) ) ) )
__UpperCamelCase : Tuple = np.random.randn(3 , 4 , 5 )
__UpperCamelCase : Any = jnp.array(__UpperCamelCase )
self.assertTrue(np.allclose(transpose(__UpperCamelCase , axes=(1, 2, 0) ) , np.asarray(transpose(__UpperCamelCase , axes=(1, 2, 0) ) ) ) )
def __lowerCamelCase ( self ) -> Optional[int]:
'''simple docstring'''
__UpperCamelCase : List[str] = np.random.randn(3 , 4 )
self.assertTrue(np.allclose(reshape(__UpperCamelCase , (4, 3) ) , np.reshape(__UpperCamelCase , (4, 3) ) ) )
__UpperCamelCase : Tuple = np.random.randn(3 , 4 , 5 )
self.assertTrue(np.allclose(reshape(__UpperCamelCase , (12, 5) ) , np.reshape(__UpperCamelCase , (12, 5) ) ) )
@require_torch
def __lowerCamelCase ( self ) -> Optional[int]:
'''simple docstring'''
__UpperCamelCase : Optional[Any] = np.random.randn(3 , 4 )
__UpperCamelCase : Optional[Any] = torch.tensor(__UpperCamelCase )
self.assertTrue(np.allclose(reshape(__UpperCamelCase , (4, 3) ) , reshape(__UpperCamelCase , (4, 3) ).numpy() ) )
__UpperCamelCase : str = np.random.randn(3 , 4 , 5 )
__UpperCamelCase : str = torch.tensor(__UpperCamelCase )
self.assertTrue(np.allclose(reshape(__UpperCamelCase , (12, 5) ) , reshape(__UpperCamelCase , (12, 5) ).numpy() ) )
@require_tf
def __lowerCamelCase ( self ) -> List[Any]:
'''simple docstring'''
__UpperCamelCase : Optional[Any] = np.random.randn(3 , 4 )
__UpperCamelCase : Tuple = tf.constant(__UpperCamelCase )
self.assertTrue(np.allclose(reshape(__UpperCamelCase , (4, 3) ) , reshape(__UpperCamelCase , (4, 3) ).numpy() ) )
__UpperCamelCase : int = np.random.randn(3 , 4 , 5 )
__UpperCamelCase : Dict = tf.constant(__UpperCamelCase )
self.assertTrue(np.allclose(reshape(__UpperCamelCase , (12, 5) ) , reshape(__UpperCamelCase , (12, 5) ).numpy() ) )
@require_flax
def __lowerCamelCase ( self ) -> List[Any]:
'''simple docstring'''
__UpperCamelCase : str = np.random.randn(3 , 4 )
__UpperCamelCase : Optional[int] = jnp.array(__UpperCamelCase )
self.assertTrue(np.allclose(reshape(__UpperCamelCase , (4, 3) ) , np.asarray(reshape(__UpperCamelCase , (4, 3) ) ) ) )
__UpperCamelCase : Union[str, Any] = np.random.randn(3 , 4 , 5 )
__UpperCamelCase : Dict = jnp.array(__UpperCamelCase )
self.assertTrue(np.allclose(reshape(__UpperCamelCase , (12, 5) ) , np.asarray(reshape(__UpperCamelCase , (12, 5) ) ) ) )
def __lowerCamelCase ( self ) -> List[str]:
'''simple docstring'''
__UpperCamelCase : Optional[int] = np.random.randn(1 , 3 , 4 )
self.assertTrue(np.allclose(squeeze(__UpperCamelCase ) , np.squeeze(__UpperCamelCase ) ) )
__UpperCamelCase : Union[str, Any] = np.random.randn(1 , 4 , 1 , 5 )
self.assertTrue(np.allclose(squeeze(__UpperCamelCase , axis=2 ) , np.squeeze(__UpperCamelCase , axis=2 ) ) )
@require_torch
def __lowerCamelCase ( self ) -> Optional[Any]:
'''simple docstring'''
__UpperCamelCase : List[str] = np.random.randn(1 , 3 , 4 )
__UpperCamelCase : Tuple = torch.tensor(__UpperCamelCase )
self.assertTrue(np.allclose(squeeze(__UpperCamelCase ) , squeeze(__UpperCamelCase ).numpy() ) )
__UpperCamelCase : str = np.random.randn(1 , 4 , 1 , 5 )
__UpperCamelCase : Tuple = torch.tensor(__UpperCamelCase )
self.assertTrue(np.allclose(squeeze(__UpperCamelCase , axis=2 ) , squeeze(__UpperCamelCase , axis=2 ).numpy() ) )
@require_tf
def __lowerCamelCase ( self ) -> Optional[int]:
'''simple docstring'''
__UpperCamelCase : Tuple = np.random.randn(1 , 3 , 4 )
__UpperCamelCase : Tuple = tf.constant(__UpperCamelCase )
self.assertTrue(np.allclose(squeeze(__UpperCamelCase ) , squeeze(__UpperCamelCase ).numpy() ) )
__UpperCamelCase : Union[str, Any] = np.random.randn(1 , 4 , 1 , 5 )
__UpperCamelCase : List[str] = tf.constant(__UpperCamelCase )
self.assertTrue(np.allclose(squeeze(__UpperCamelCase , axis=2 ) , squeeze(__UpperCamelCase , axis=2 ).numpy() ) )
@require_flax
def __lowerCamelCase ( self ) -> str:
'''simple docstring'''
__UpperCamelCase : str = np.random.randn(1 , 3 , 4 )
__UpperCamelCase : Optional[int] = jnp.array(__UpperCamelCase )
self.assertTrue(np.allclose(squeeze(__UpperCamelCase ) , np.asarray(squeeze(__UpperCamelCase ) ) ) )
__UpperCamelCase : Union[str, Any] = np.random.randn(1 , 4 , 1 , 5 )
__UpperCamelCase : str = jnp.array(__UpperCamelCase )
self.assertTrue(np.allclose(squeeze(__UpperCamelCase , axis=2 ) , np.asarray(squeeze(__UpperCamelCase , axis=2 ) ) ) )
def __lowerCamelCase ( self ) -> List[Any]:
'''simple docstring'''
__UpperCamelCase : Tuple = np.random.randn(3 , 4 )
self.assertTrue(np.allclose(expand_dims(__UpperCamelCase , axis=1 ) , np.expand_dims(__UpperCamelCase , axis=1 ) ) )
@require_torch
def __lowerCamelCase ( self ) -> Any:
'''simple docstring'''
__UpperCamelCase : Union[str, Any] = np.random.randn(3 , 4 )
__UpperCamelCase : Optional[int] = torch.tensor(__UpperCamelCase )
self.assertTrue(np.allclose(expand_dims(__UpperCamelCase , axis=1 ) , expand_dims(__UpperCamelCase , axis=1 ).numpy() ) )
@require_tf
def __lowerCamelCase ( self ) -> Optional[int]:
'''simple docstring'''
__UpperCamelCase : Optional[Any] = np.random.randn(3 , 4 )
__UpperCamelCase : Optional[Any] = tf.constant(__UpperCamelCase )
self.assertTrue(np.allclose(expand_dims(__UpperCamelCase , axis=1 ) , expand_dims(__UpperCamelCase , axis=1 ).numpy() ) )
@require_flax
def __lowerCamelCase ( self ) -> str:
'''simple docstring'''
__UpperCamelCase : str = np.random.randn(3 , 4 )
__UpperCamelCase : str = jnp.array(__UpperCamelCase )
        self.assertTrue(np.allclose(expand_dims(__UpperCamelCase , axis=1 ) , np.asarray(expand_dims(__UpperCamelCase , axis=1 ) ) ) )
| 171 |
from __future__ import annotations

from fractions import Fraction


def is_digit_cancelling(num: int, den: int) -> bool:
    return (
        num != den and num % 10 == den // 10 and (num // 10) / (den % 10) == num / den
    )


def fraction_list(digit_len: int) -> list[str]:
    solutions = []
    den = 11
    last_digit = int("1" + "0" * digit_len)
    for num in range(den, last_digit):
        while den <= 99:
            if (num != den) and (num % 10 == den // 10) and (den % 10 != 0):
                if is_digit_cancelling(num, den):
                    solutions.append(f"{num}/{den}")
            den += 1
        num += 1
        den = 10
    return solutions


def solution(n: int = 2) -> int:
    """
    Project Euler problem 33: return the denominator, in lowest terms, of the
    product of the non-trivial digit-cancelling fractions.
    """
    result = 1.0
    for fraction in fraction_list(n):
        frac = Fraction(fraction)
        result *= frac.denominator / frac.numerator
    return int(result)
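# For reference, assuming the functions above: the four non-trivial two-digit
# digit-cancelling fractions are 16/64, 19/95, 26/65 and 49/98; their product
# reduces to 1/100, so solution() returns 100.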
if __name__ == "__main__":
    print(solution())
| 171 | 1 |