repo_id | file_path | content | __index_level_0__ |
---|---|---|---|
hf_public_repos/tokenizers/tokenizers/src/models | hf_public_repos/tokenizers/tokenizers/src/models/wordpiece/mod.rs | //! [WordPiece](https://static.googleusercontent.com/media/research.google.com/en//pubs/archive/37842.pdf)
//! model.
use crate::models::bpe::BPE;
use crate::tokenizer::{Model, Result, Token};
use std::{
borrow::Cow,
collections::HashMap,
fs::File,
io::prelude::*,
io::{BufRead, BufReader},
path::{Path, PathBuf},
};
mod serialization;
mod trainer;
pub use trainer::*;
#[derive(thiserror::Error, Debug)]
pub enum Error {
#[error("WordPiece error: Missing [UNK] token from the vocabulary")]
MissingUnkToken,
}
type Vocab = HashMap<String, u32>;
type VocabR = HashMap<u32, String>;
struct Config {
files: Option<String>,
vocab: Vocab,
unk_token: String,
continuing_subword_prefix: String,
max_input_chars_per_word: usize,
}
/// A `WordPieceBuilder` can be used to create a `WordPiece` model with a custom configuration.
pub struct WordPieceBuilder {
config: Config,
}
impl Default for WordPieceBuilder {
fn default() -> Self {
Self {
config: Config {
files: None,
vocab: HashMap::new(),
unk_token: String::from("[UNK]"),
continuing_subword_prefix: String::from("##"),
max_input_chars_per_word: 100,
},
}
}
}
impl WordPieceBuilder {
/// Construct a new `WordPieceBuilder`.
pub fn new() -> Self {
Self::default()
}
/// Set the input vocabulary file.
#[must_use]
pub fn files(mut self, vocab: String) -> Self {
self.config.files = Some(vocab);
self
}
/// Set the vocab (token -> ID) mapping.
#[must_use]
pub fn vocab(mut self, vocab: Vocab) -> Self {
self.config.vocab = vocab;
self
}
/// Set the `UNK` token for the vocab.
#[must_use]
pub fn unk_token(mut self, unk_token: String) -> Self {
self.config.unk_token = unk_token;
self
}
/// Set the prefix for continuing subwords.
#[must_use]
pub fn continuing_subword_prefix(mut self, continuing_subword_prefix: String) -> Self {
self.config.continuing_subword_prefix = continuing_subword_prefix;
self
}
/// Set the maximum number of input characters per word.
#[must_use]
pub fn max_input_chars_per_word(mut self, max_input_chars_per_word: usize) -> Self {
self.config.max_input_chars_per_word = max_input_chars_per_word;
self
}
/// Constructs a `WordPiece` model that uses the `WordPieceBuilder`'s configuration.
pub fn build(mut self) -> Result<WordPiece> {
if let Some(vocab) = self.config.files {
self.config.vocab = WordPiece::read_file(&vocab)?;
}
let vocab_r = self
.config
.vocab
.iter()
.map(|(key, val)| (*val, key.to_owned()))
.collect();
Ok(WordPiece {
vocab: self.config.vocab,
vocab_r,
unk_token: self.config.unk_token,
continuing_subword_prefix: self.config.continuing_subword_prefix,
max_input_chars_per_word: self.config.max_input_chars_per_word,
})
}
}
/// A
/// [WordPiece](https://static.googleusercontent.com/media/research.google.com/en//pubs/archive/37842.pdf)
/// model.
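///
/// # Examples
///
/// A minimal sketch of the greedy longest-match-first tokenization. The tiny vocab
/// below is illustrative only and not part of the original crate.
///
/// ```
/// use std::collections::HashMap;
/// use tokenizers::models::wordpiece::WordPiece;
/// use tokenizers::tokenizer::Model;
///
/// // Illustrative vocab: the ids are arbitrary for this example.
/// let vocab: HashMap<String, u32> = [("[UNK]", 0), ("un", 1), ("##aff", 2), ("##able", 3)]
///     .iter()
///     .map(|(k, v)| (k.to_string(), *v))
///     .collect();
/// let model = WordPiece::builder().vocab(vocab).build().unwrap();
///
/// // "unaffable" is split greedily into the longest pieces found in the vocab.
/// let tokens = model.tokenize("unaffable").unwrap();
/// let values: Vec<_> = tokens.iter().map(|t| t.value.as_str()).collect();
/// assert_eq!(values, ["un", "##aff", "##able"]);
/// ```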
#[derive(Clone, PartialEq, Eq)]
pub struct WordPiece {
vocab: Vocab,
vocab_r: VocabR,
pub unk_token: String,
pub continuing_subword_prefix: String,
pub max_input_chars_per_word: usize,
}
impl std::fmt::Debug for WordPiece {
fn fmt(&self, fmt: &mut std::fmt::Formatter) -> std::fmt::Result {
fmt.debug_struct("WordPiece")
.field("unk_token", &self.unk_token)
.field("continuing_subword_prefix", &self.continuing_subword_prefix)
.field("max_input_chars_per_word", &self.max_input_chars_per_word)
.field("vocab", &self.vocab.len())
.finish()
}
}
impl Default for WordPiece {
fn default() -> Self {
Self {
vocab: HashMap::new(),
vocab_r: HashMap::new(),
unk_token: String::from("[UNK]"),
continuing_subword_prefix: String::from("##"),
max_input_chars_per_word: 100,
}
}
}
impl WordPiece {
/// Get a `WordPieceBuilder`.
pub fn builder() -> WordPieceBuilder {
WordPieceBuilder::new()
}
/// Read the given file to extract the vocab
pub fn read_file(vocab: &str) -> Result<Vocab> {
let file = File::open(vocab)?;
let file = BufReader::new(file);
let mut vocab = HashMap::new();
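// One token per line: its id is the 0-based index of the line.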
for (index, line) in file.lines().enumerate() {
let line = line?;
vocab.insert(line.trim_end().to_owned(), index as u32);
}
Ok(vocab)
}
/// Initialize a `WordPiece` model from a vocab mapping file.
pub fn from_file(vocab: &str) -> WordPieceBuilder {
WordPiece::builder().files(vocab.to_owned())
}
/// Create a `WordPiece` model from a `BPE` model.
pub fn from_bpe(bpe: &BPE) -> Self {
let mut wp = Self::builder().vocab(bpe.get_vocab()).build().unwrap();
if let Some(unk) = bpe.get_unk_token() {
wp.unk_token = unk.to_owned();
}
if let Some(prefix) = bpe.get_continuing_subword_prefix() {
wp.continuing_subword_prefix = prefix.to_owned();
}
wp
}
}
impl Model for WordPiece {
type Trainer = WordPieceTrainer;
fn get_vocab(&self) -> HashMap<String, u32> {
self.vocab.clone()
}
fn get_vocab_size(&self) -> usize {
self.vocab.len()
}
fn tokenize(&self, sequence: &str) -> Result<Vec<Token>> {
let char_len = sequence.chars().count();
if char_len > self.max_input_chars_per_word {
return Ok(vec![Token {
value: self.unk_token.clone(),
id: *self
.vocab
.get(&self.unk_token)
.ok_or(Error::MissingUnkToken)?,
offsets: (0, sequence.len()),
}]);
}
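// Greedy longest-match-first: at each position, try the longest remaining substring,
// shrinking it until a vocab entry is found (pieces after the first one get the
// continuing subword prefix). If no piece matches, the whole word is mapped to the
// unknown token.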
let mut is_bad = false;
let mut start = 0;
let mut sub_tokens: Vec<Token> = vec![];
while start < sequence.len() {
let mut end = sequence.len();
let mut cur_str = None;
while start < end {
let mut substr: Cow<str> = Cow::Borrowed(&sequence[start..end]);
if start > 0 {
substr = Cow::Owned(format!("{}{}", self.continuing_subword_prefix, substr));
}
if self.vocab.contains_key(substr.as_ref()) {
cur_str = Some(Token {
id: self.vocab[substr.as_ref()],
value: substr.to_string(),
offsets: (start, end),
});
break;
}
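// No vocab entry for this candidate: drop its last character (by UTF-8 byte length)
// and retry with a shorter substring.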
end -= substr.chars().last().map_or(1, |c| c.len_utf8());
}
if cur_str.is_none() {
is_bad = true;
break;
}
sub_tokens.push(cur_str.unwrap());
start = end;
}
if is_bad {
Ok(vec![Token {
value: self.unk_token.clone(),
id: *self
.vocab
.get(&self.unk_token)
.ok_or(Error::MissingUnkToken)?,
offsets: (0, sequence.len()),
}])
} else {
Ok(sub_tokens)
}
}
fn token_to_id(&self, token: &str) -> Option<u32> {
self.vocab.get(token).copied()
}
fn id_to_token(&self, id: u32) -> Option<String> {
self.vocab_r.get(&id).cloned()
}
fn save(&self, folder: &Path, name: Option<&str>) -> Result<Vec<PathBuf>> {
let vocab_file_name = match name {
Some(name) => format!("{}-vocab.txt", name),
None => "vocab.txt".to_string(),
};
// Write vocab.txt
let vocab_path: PathBuf = [folder, Path::new(vocab_file_name.as_str())]
.iter()
.collect();
let mut vocab_file = File::create(&vocab_path)?;
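// Sort tokens by id so that each token's line index matches its id, which is the
// mapping `read_file` reconstructs when loading the vocab back.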
let mut vocab: Vec<(&String, &u32)> = self.vocab.iter().collect();
vocab.sort_unstable_by_key(|k| *k.1);
vocab_file.write_all(
&vocab
.into_iter()
.flat_map(|(token, _)| format!("{}\n", token).as_bytes().to_owned())
.collect::<Vec<_>>()[..],
)?;
Ok(vec![vocab_path])
}
fn get_trainer(&self) -> Self::Trainer {
WordPieceTrainer::builder().build()
}
}
#[cfg(test)]
mod tests {
use super::*;
#[test]
fn test_error_display() {
assert!(format!("{}", Error::MissingUnkToken).contains("Missing [UNK] token"));
}
}
| 0 |
hf_public_repos/tokenizers/tokenizers/src/models | hf_public_repos/tokenizers/tokenizers/src/models/wordpiece/trainer.rs | use super::WordPiece;
use crate::models::bpe::{BpeTrainer, BpeTrainerBuilder, BPE};
use crate::tokenizer::{AddedToken, Result, Trainer};
use serde::{Deserialize, Serialize};
use std::collections::HashSet;
/// A `WordPieceTrainerBuilder` can be used to create a `WordPieceTrainer` with a custom
/// configuration.
pub struct WordPieceTrainerBuilder {
bpe_trainer_builder: BpeTrainerBuilder,
}
impl Default for WordPieceTrainerBuilder {
fn default() -> Self {
Self {
bpe_trainer_builder: BpeTrainerBuilder::new().continuing_subword_prefix("##".into()),
}
}
}
impl WordPieceTrainerBuilder {
/// Constructs a new `WordPieceTrainerBuilder`
pub fn new() -> Self {
Self::default()
}
/// Set the expected minimum frequency
#[must_use]
pub fn min_frequency(mut self, frequency: u32) -> Self {
self.bpe_trainer_builder = self.bpe_trainer_builder.min_frequency(frequency);
self
}
/// Set the vocabulary size
#[must_use]
pub fn vocab_size(mut self, size: usize) -> Self {
self.bpe_trainer_builder = self.bpe_trainer_builder.vocab_size(size);
self
}
/// Set whether to show progress
#[must_use]
pub fn show_progress(mut self, show: bool) -> Self {
self.bpe_trainer_builder = self.bpe_trainer_builder.show_progress(show);
self
}
/// Set the special tokens
#[must_use]
pub fn special_tokens(mut self, tokens: Vec<AddedToken>) -> Self {
self.bpe_trainer_builder = self.bpe_trainer_builder.special_tokens(tokens);
self
}
/// Set the limit on the alphabet size
#[must_use]
pub fn limit_alphabet(mut self, limit: usize) -> Self {
self.bpe_trainer_builder = self.bpe_trainer_builder.limit_alphabet(limit);
self
}
/// Set the initial alphabet
#[must_use]
pub fn initial_alphabet(mut self, alphabet: HashSet<char>) -> Self {
self.bpe_trainer_builder = self.bpe_trainer_builder.initial_alphabet(alphabet);
self
}
/// Set the continuing_subword_prefix
#[must_use]
pub fn continuing_subword_prefix(mut self, prefix: String) -> Self {
self.bpe_trainer_builder = self.bpe_trainer_builder.continuing_subword_prefix(prefix);
self
}
/// Set the end_of_word_suffix
#[must_use]
pub fn end_of_word_suffix(mut self, suffix: String) -> Self {
self.bpe_trainer_builder = self.bpe_trainer_builder.end_of_word_suffix(suffix);
self
}
/// Constructs the final `WordPieceTrainer`
pub fn build(self) -> WordPieceTrainer {
let bpe_trainer = self.bpe_trainer_builder.build();
WordPieceTrainer { bpe_trainer }
}
}
/// Trains a `WordPiece` model.
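///
/// # Examples
///
/// A short sketch mirroring the `BpeTrainer` doc example elsewhere in this crate;
/// the input sequences are illustrative.
///
/// ```
/// use tokenizers::tokenizer::Trainer;
/// use tokenizers::models::wordpiece::{WordPiece, WordPieceTrainer};
///
/// let sequences = vec!["Hello", "World"];
///
/// let mut trainer = WordPieceTrainer::builder().build();
/// trainer.feed(sequences.iter(), |s| Ok(vec![s.to_owned()]));
///
/// let mut model = WordPiece::default();
/// let special_tokens = trainer.train(&mut model).unwrap();
/// ```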
#[derive(Default, Clone, Deserialize, Serialize)]
pub struct WordPieceTrainer {
bpe_trainer: BpeTrainer,
}
impl WordPieceTrainer {
pub fn min_frequency(&self) -> u32 {
self.bpe_trainer.min_frequency
}
pub fn set_min_frequency(&mut self, freq: u32) {
self.bpe_trainer.min_frequency = freq;
}
pub fn vocab_size(&self) -> usize {
self.bpe_trainer.vocab_size
}
pub fn set_vocab_size(&mut self, size: usize) {
self.bpe_trainer.vocab_size = size;
}
pub fn show_progress(&self) -> bool {
self.bpe_trainer.show_progress
}
pub fn set_show_progress(&mut self, show_progress: bool) {
self.bpe_trainer.show_progress = show_progress;
}
pub fn special_tokens(&self) -> &[AddedToken] {
&self.bpe_trainer.special_tokens
}
pub fn set_special_tokens(&mut self, special_tokens: Vec<AddedToken>) {
self.bpe_trainer.special_tokens = special_tokens;
}
pub fn limit_alphabet(&self) -> Option<usize> {
self.bpe_trainer.limit_alphabet
}
pub fn set_limit_alphabet(&mut self, limit: Option<usize>) {
self.bpe_trainer.limit_alphabet = limit;
}
pub fn initial_alphabet(&self) -> &HashSet<char> {
&self.bpe_trainer.initial_alphabet
}
pub fn set_initial_alphabet(&mut self, alphabet: HashSet<char>) {
self.bpe_trainer.initial_alphabet = alphabet;
}
pub fn continuing_subword_prefix(&self) -> &Option<String> {
&self.bpe_trainer.continuing_subword_prefix
}
pub fn set_continuing_subword_prefix(&mut self, prefix: Option<String>) {
self.bpe_trainer.continuing_subword_prefix = prefix;
}
pub fn end_of_word_suffix(&self) -> &Option<String> {
&self.bpe_trainer.end_of_word_suffix
}
pub fn set_end_of_word_suffix(&mut self, suffix: Option<String>) {
self.bpe_trainer.end_of_word_suffix = suffix;
}
pub fn builder() -> WordPieceTrainerBuilder {
WordPieceTrainerBuilder::default()
}
pub fn train(&self, model: &mut WordPiece) -> Result<Vec<AddedToken>> {
let mut bpe = BPE::default();
let special_tokens = self.bpe_trainer.train(&mut bpe)?;
let new_wordpiece = WordPiece::from_bpe(&bpe);
// Transfer the vocab
model.vocab = new_wordpiece.vocab;
model.vocab_r = new_wordpiece.vocab_r;
// The continuing_subword_prefix is the only other option to be overridden by the trainer
model.continuing_subword_prefix = new_wordpiece.continuing_subword_prefix;
Ok(special_tokens)
}
}
impl Trainer for WordPieceTrainer {
type Model = WordPiece;
fn train(&self, model: &mut WordPiece) -> Result<Vec<AddedToken>> {
self.train(model)
}
fn should_show_progress(&self) -> bool {
self.bpe_trainer.should_show_progress()
}
fn feed<I, S, F>(&mut self, iterator: I, process: F) -> Result<()>
where
I: Iterator<Item = S> + Send,
S: AsRef<str> + Send,
F: Fn(&str) -> Result<Vec<String>> + Sync,
{
self.bpe_trainer.feed(iterator, process)
}
}
| 0 |
hf_public_repos/tokenizers/tokenizers/src/models | hf_public_repos/tokenizers/tokenizers/src/models/wordpiece/serialization.rs | use super::{super::OrderedVocabIter, WordPiece, WordPieceBuilder};
use serde::{
de::{MapAccess, Visitor},
ser::SerializeStruct,
Deserialize, Deserializer, Serialize, Serializer,
};
use std::collections::HashSet;
impl Serialize for WordPiece {
fn serialize<S>(&self, serializer: S) -> Result<S::Ok, S::Error>
where
S: Serializer,
{
let mut model = serializer.serialize_struct("WordPiece", 5)?;
// Small fields first
model.serialize_field("type", "WordPiece")?;
model.serialize_field("unk_token", &self.unk_token)?;
model.serialize_field("continuing_subword_prefix", &self.continuing_subword_prefix)?;
model.serialize_field("max_input_chars_per_word", &self.max_input_chars_per_word)?;
// Then large ones
let ordered_vocab = OrderedVocabIter::new(&self.vocab_r);
model.serialize_field("vocab", &ordered_vocab)?;
model.end()
}
}
impl<'de> Deserialize<'de> for WordPiece {
fn deserialize<D>(deserializer: D) -> Result<Self, D::Error>
where
D: Deserializer<'de>,
{
deserializer.deserialize_struct(
"WordPiece",
&[
"type",
"unk_token",
"continuing_subword_prefix",
"max_input_chars_per_word",
"vocab",
],
WordPieceVisitor,
)
}
}
struct WordPieceVisitor;
impl<'de> Visitor<'de> for WordPieceVisitor {
type Value = WordPiece;
fn expecting(&self, fmt: &mut std::fmt::Formatter) -> std::fmt::Result {
write!(fmt, "struct WordPiece")
}
fn visit_map<V>(self, mut map: V) -> std::result::Result<Self::Value, V::Error>
where
V: MapAccess<'de>,
{
let mut builder = WordPieceBuilder::new();
let mut missing_fields = vec![
// for backward compatibility, the "type" field is not mandatory
"unk_token",
"continuing_subword_prefix",
"max_input_chars_per_word",
"vocab",
]
.into_iter()
.collect::<HashSet<_>>();
while let Some(key) = map.next_key::<String>()? {
match key.as_ref() {
"unk_token" => builder = builder.unk_token(map.next_value()?),
"continuing_subword_prefix" => {
builder = builder.continuing_subword_prefix(map.next_value()?)
}
"max_input_chars_per_word" => {
builder = builder.max_input_chars_per_word(map.next_value()?)
}
"vocab" => builder = builder.vocab(map.next_value()?),
"type" => match map.next_value()? {
"WordPiece" => {}
u => {
return Err(serde::de::Error::invalid_value(
serde::de::Unexpected::Str(u),
&"WordPiece",
))
}
},
_ => {}
}
missing_fields.remove::<str>(&key);
}
if !missing_fields.is_empty() {
Err(serde::de::Error::missing_field(
missing_fields.iter().next().unwrap(),
))
} else {
Ok(builder.build().map_err(serde::de::Error::custom)?)
}
}
}
#[cfg(test)]
mod tests {
use super::*;
#[test]
fn serde() {
let wp = WordPiece::default();
let wp_s = "{\
\"type\":\"WordPiece\",\
\"unk_token\":\"[UNK]\",\
\"continuing_subword_prefix\":\"##\",\
\"max_input_chars_per_word\":100,\
\"vocab\":{}\
}";
assert_eq!(serde_json::to_string(&wp).unwrap(), wp_s);
assert_eq!(serde_json::from_str::<WordPiece>(wp_s).unwrap(), wp);
}
#[test]
fn deserialization_should_fail() {
let missing_unk = "{\
\"type\":\"WordPiece\",\
\"continuing_subword_prefix\":\"##\",\
\"max_input_chars_per_word\":100,\
\"vocab\":{}\
}";
assert!(serde_json::from_str::<WordPiece>(missing_unk)
.unwrap_err()
.to_string()
.starts_with("missing field `unk_token`"));
let wrong_type = "{\
\"type\":\"WordLevel\",\
\"unk_token\":\"[UNK]\",\
\"vocab\":{}\
}";
assert!(serde_json::from_str::<WordPiece>(wrong_type)
.unwrap_err()
.to_string()
.starts_with("invalid value: string \"WordLevel\", expected WordPiece"));
}
}
| 0 |
hf_public_repos/tokenizers/tokenizers/src/models | hf_public_repos/tokenizers/tokenizers/src/models/bpe/mod.rs | //! [Byte Pair Encoding](https://www.aclweb.org/anthology/P16-1162/) model.
use std::{iter, mem};
mod model;
mod serialization;
pub mod trainer;
mod word;
type Pair = (u32, u32);
/// Errors that can be encountered while using or constructing a `BPE` model.
#[derive(thiserror::Error, Debug)]
pub enum Error {
/// An error encountered mainly while reading files.
#[error("IoError: {0}")]
Io(#[from] std::io::Error),
/// An error forwarded from Serde, while parsing JSON
#[error("JsonError: {0}")]
JsonError(#[from] serde_json::Error),
/// When the vocab.json file is in the wrong format
#[error("Bad vocabulary json file")]
BadVocabulary,
/// When the merges.txt file is in the wrong format. This error holds the line
/// number of the line that caused the error.
#[error("Merges text file invalid at line {0}")]
BadMerges(usize),
/// If a token found in the merges is not in the vocab
#[error("Token `{0}` out of vocabulary")]
MergeTokenOutOfVocabulary(String),
/// If the provided unk token is out of vocabulary
#[error("Unk token `{0}` not found in the vocabulary")]
UnkTokenOutOfVocabulary(String),
/// Dropout not between 0 and 1.
#[error("Dropout should be between 0 and 1")]
InvalidDropout,
}
/// Provides access to the `FirstLastIterator` for any Iterator
pub(crate) trait WithFirstLastIterator: Iterator + Sized {
fn with_first_and_last(self) -> FirstLastIterator<Self>;
}
impl<I> WithFirstLastIterator for I
where
I: Iterator,
{
fn with_first_and_last(self) -> FirstLastIterator<Self> {
FirstLastIterator {
first: true,
iter: self.peekable(),
}
}
}
/// Provides information about whether an item is the first and/or the last of the iterator
pub(crate) struct FirstLastIterator<I>
where
I: Iterator,
{
first: bool,
iter: iter::Peekable<I>,
}
impl<I> Iterator for FirstLastIterator<I>
where
I: Iterator,
{
/// (is_first, is_last, item)
type Item = (bool, bool, I::Item);
fn next(&mut self) -> Option<Self::Item> {
let first = mem::replace(&mut self.first, false);
self.iter
.next()
.map(|e| (first, self.iter.peek().is_none(), e))
}
}
// Re-export
pub use model::*;
pub use trainer::*;
use word::*;
| 0 |
hf_public_repos/tokenizers/tokenizers/src/models | hf_public_repos/tokenizers/tokenizers/src/models/bpe/word.rs | use super::Pair;
use rand::{thread_rng, Rng};
use std::cmp::Ordering;
use std::collections::{BinaryHeap, HashMap};
#[derive(Debug, Eq)]
struct Merge {
pos: usize,
rank: u32,
new_id: u32,
}
impl PartialEq for Merge {
fn eq(&self, other: &Self) -> bool {
self.rank == other.rank && self.pos == other.pos
}
}
impl PartialOrd for Merge {
fn partial_cmp(&self, other: &Self) -> Option<Ordering> {
// By manually implementing this, we make the containing BinaryHeap a
// min-heap, ordered first on the rank and then on the pos
Some(self.cmp(other))
}
}
impl Ord for Merge {
fn cmp(&self, other: &Self) -> Ordering {
if self.rank != other.rank {
other.rank.cmp(&self.rank)
} else {
other.pos.cmp(&self.pos)
}
}
}
#[derive(Debug, Clone, Copy)]
struct Symbol {
c: u32,
prev: isize,
next: isize,
len: usize,
}
impl Symbol {
/// Merges the current Symbol with the other one.
/// In order to update prev/next, we consider Self to be the Symbol on the left,
/// and other to be the next one on the right.
pub fn merge_with(&mut self, other: &Self, new_c: u32) {
self.c = new_c;
self.len += other.len;
self.next = other.next;
}
}
#[derive(Clone, Default)]
pub(super) struct Word {
symbols: Vec<Symbol>,
}
impl std::fmt::Debug for Word {
fn fmt(&self, fmt: &mut std::fmt::Formatter) -> std::fmt::Result {
fmt.debug_struct("Word")
.field(
"chars",
&self
.symbols
.iter()
.map(|s| s.c.to_string())
.collect::<Vec<_>>()
.join(" "),
)
.field("symbols", &self.symbols)
.finish()
}
}
impl Word {
pub(super) fn new() -> Self {
Word { symbols: vec![] }
}
pub(super) fn with_capacity(capacity: usize) -> Self {
Self {
symbols: Vec::with_capacity(capacity),
}
}
pub(super) fn add(&mut self, c: u32, byte_len: usize) {
let (prev, next) = {
let len = self.symbols.len() as isize;
if let Some(last) = self.symbols.last_mut() {
// Update `next` on the previous one
last.next = len;
(len - 1, -1)
} else {
(-1, -1)
}
};
self.symbols.push(Symbol {
c,
prev,
next,
len: byte_len,
});
}
pub(super) fn merge(
&mut self,
c1: u32,
c2: u32,
replacement: u32,
max_length: usize,
) -> Vec<(Pair, i32)> {
let mut changes: Vec<(Pair, i32)> = vec![];
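// Scan the symbols left to right, replace every (c1, c2) occurrence with
// `replacement`, and record the pair-count deltas the trainer needs to keep
// its global counts in sync.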
let mut i = 0;
loop {
if i >= self.symbols.len() {
break;
}
// Found a pair
if self.symbols[i].c == c1 && i + 1 < self.symbols.len() && self.symbols[i + 1].c == c2
{
let first = self.symbols[i];
let second = self.symbols[i + 1];
// Remove in place
let new_s = Symbol {
c: replacement,
prev: first.prev,
next: second.next,
len: first.len + second.len,
};
// If there are other characters before the pair
if i > 0 {
changes.push(((self.symbols[i - 1].c, first.c), -1));
if self.symbols[i - 1].len + new_s.len < max_length {
changes.push(((self.symbols[i - 1].c, replacement), 1));
}
}
self.symbols.insert(i, new_s); // Insert replacement before first char of pair
self.symbols.remove(i + 1); // Remove first char of pair
self.symbols.remove(i + 1); // And then the second
// If there are other characters after the pair
if i < self.symbols.len() - 1 {
changes.push(((second.c, self.symbols[i + 1].c), -1));
if self.symbols[i + 1].len + new_s.len < max_length {
changes.push(((replacement, self.symbols[i + 1].c), 1));
}
}
}
i += 1;
}
changes
}
pub(super) fn merge_all(&mut self, merges: &HashMap<Pair, (u32, u32)>, dropout: Option<f32>) {
let mut queue = BinaryHeap::with_capacity(self.symbols.len());
let mut skip = Vec::with_capacity(queue.len());
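// Seed the queue with every adjacent pair that has a known merge, keyed by its
// rank so that the lowest-rank (highest-priority) merge is popped first.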
queue.extend(
self.symbols
.windows(2)
.enumerate()
.filter_map(|(index, window)| {
let pair = (window[0].c, window[1].c);
merges.get(&pair).map(|m| Merge {
pos: index,
rank: m.0,
new_id: m.1,
})
}),
);
while let Some(top) = queue.pop() {
if dropout
.map(|d| thread_rng().gen::<f32>() < d)
.unwrap_or(false)
{
skip.push(top);
} else {
// Re-insert the skipped elements
queue.extend(skip.drain(..));
if self.symbols[top.pos].len == 0 {
continue;
}
// Do nothing if we are the last symbol
if self.symbols[top.pos].next == -1 {
continue;
}
let next_pos = self.symbols[top.pos].next as usize;
let right = self.symbols[next_pos];
// Make sure we are not processing an expired queue entry
let target_new_pair = (self.symbols[top.pos].c, right.c);
if !merges
.get(&target_new_pair)
.map_or(false, |(_, new_id)| *new_id == top.new_id)
{
continue;
}
// Otherwise, let's merge
self.symbols[top.pos].merge_with(&right, top.new_id);
// Tag the right part as removed
self.symbols[next_pos].len = 0;
// Update `prev` on the new `next` to the current pos
if right.next > -1 && (right.next as usize) < self.symbols.len() {
self.symbols[right.next as usize].prev = top.pos as isize;
}
// Insert the new pair formed with the previous symbol
let current = &self.symbols[top.pos];
if current.prev >= 0 {
let prev = current.prev as usize;
let prev_symbol = self.symbols[prev];
let new_pair = (prev_symbol.c, current.c);
if let Some((rank, new_id)) = merges.get(&new_pair) {
queue.push(Merge {
pos: current.prev as usize,
rank: *rank,
new_id: *new_id,
});
}
}
// Insert the new pair formed with the next symbol
let next = current.next as usize;
if next < self.symbols.len() {
let next_symbol = self.symbols[next];
let new_pair = (current.c, next_symbol.c);
if let Some((rank, new_id)) = merges.get(&new_pair) {
queue.push(Merge {
pos: top.pos,
rank: *rank,
new_id: *new_id,
});
}
}
}
}
// Filter out the removed symbols
self.symbols.retain(|s| s.len != 0);
}
pub(super) fn get_chars(&self) -> Vec<u32> {
self.symbols.iter().map(|s| s.c).collect()
}
pub(super) fn get_chars_iter(&self) -> impl Iterator<Item = u32> + '_ {
self.symbols.iter().map(|s| s.c)
}
pub(super) fn get_offsets_iter(&self) -> impl Iterator<Item = (usize, usize)> + '_ {
let mut pos = 0;
self.symbols.iter().map(move |symbol| {
let new_pos = pos + symbol.len;
let offset = (pos, new_pos);
pos = new_pos;
offset
})
}
}
#[cfg(test)]
mod tests {
use super::*;
#[test]
fn test_merge() {
// Let's say we have the word 'hello' and a word-to-id vocab that looks
// like this: {'h': 0, 'e': 1, 'l': 2, 'o': 3}.
let mut word = Word::new();
word.add(0, 1); // 'h'
word.add(1, 1); // 'e'
word.add(2, 1); // 'l'
word.add(2, 1); // 'l'
word.add(3, 1); // 'o'
// We're going to perform a merge on the pair ('l', 'l') ~= (2, 2). Let's
// say that 'll' has the ID of 4 in the updated word-to-id vocab.
let changes = word.merge(2, 2, 4, usize::MAX);
// So the word should now look like this:
assert_eq!(
word.get_chars(),
&[
0u32, // 'h'
1u32, // 'e'
4u32, // 'll'
3u32, // 'o'
]
);
// The return value `changes` will be used to update the pair counts during
// training. This merge affects the counts for the pairs
// ('e', 'l') ~= (1, 2),
// ('e', 'll') ~= (1, 4),
// ('l', 'o') ~= (2, 3), and
// ('ll', 'o') ~= (4, 3).
// So the changes should reflect that:
assert_eq!(
changes,
&[
((1u32, 2u32), -1i32), // count for ('e', 'l') should be decreased by 1.
((1u32, 4u32), 1i32), // count for ('e', 'll') should be increased by 1.
((2u32, 3u32), -1i32), // count for ('l', 'o') should be decreased by 1.
((4u32, 3u32), 1i32), // count for ('ll', 'o') should be increased by 1.
]
);
}
#[test]
fn test_merge_max_length() {
// Let's say we have the word 'hello' and a word-to-id vocab that looks
// like this: {'h': 0, 'e': 1, 'l': 2, 'o': 3}.
let mut word = Word::new();
word.add(0, 1); // 'h'
word.add(1, 1); // 'e'
word.add(2, 1); // 'l'
word.add(2, 1); // 'l'
word.add(3, 1); // 'o'
// We're going to perform a merge on the pair ('l', 'l') ~= (2, 2). Let's
// say that 'll' has the ID of 4 in the updated word-to-id vocab.
let changes = word.merge(2, 2, 4, 2);
assert_eq!(
word.get_chars(),
&[
0u32, // 'h'
1u32, // 'e'
4u32, // 'll'
3u32, // 'o'
]
);
assert_eq!(
changes,
&[
((1u32, 2u32), -1i32), // count for ('e', 'l') should be decreased by 1.
// ((1u32, 4u32), 1i32), Missing since this would be larger than 2
((2u32, 3u32), -1i32), // count for ('l', 'o') should be decreased by 1.
// ((4u32, 3u32), 1i32), Missing since this would be larger than 2
]
);
}
}
| 0 |
hf_public_repos/tokenizers/tokenizers/src/models | hf_public_repos/tokenizers/tokenizers/src/models/bpe/trainer.rs | #![allow(clippy::map_entry)]
use super::{Pair, WithFirstLastIterator, Word, BPE};
use crate::parallelism::*;
use crate::tokenizer::{AddedToken, Result, Trainer};
use crate::utils::progress::{ProgressBar, ProgressStyle};
use serde::{Deserialize, Serialize};
use std::cmp::Ordering;
use std::collections::{BinaryHeap, HashMap, HashSet};
#[derive(Debug, Eq)]
struct Merge {
pair: Pair,
count: u32,
pos: HashSet<usize>,
}
impl PartialEq for Merge {
fn eq(&self, other: &Self) -> bool {
self.count == other.count && self.pair == other.pair
}
}
impl PartialOrd for Merge {
fn partial_cmp(&self, other: &Self) -> Option<Ordering> {
Some(self.cmp(other))
}
}
impl Ord for Merge {
fn cmp(&self, other: &Self) -> Ordering {
if self.count != other.count {
self.count.cmp(&other.count)
} else {
// Here we want ascending order
other.pair.cmp(&self.pair)
}
}
}
struct Config {
min_frequency: u32,
vocab_size: usize,
show_progress: bool,
special_tokens: Vec<AddedToken>,
limit_alphabet: Option<usize>,
initial_alphabet: HashSet<char>,
continuing_subword_prefix: Option<String>,
end_of_word_suffix: Option<String>,
max_token_length: Option<usize>,
}
/// A `BpeTrainerBuilder` can be used to create a `BpeTrainer` with a custom
/// configuration.
pub struct BpeTrainerBuilder {
config: Config,
}
impl Default for BpeTrainerBuilder {
fn default() -> Self {
Self {
config: Config {
min_frequency: 0,
vocab_size: 30000,
show_progress: true,
special_tokens: vec![],
limit_alphabet: None,
initial_alphabet: HashSet::new(),
continuing_subword_prefix: None,
end_of_word_suffix: None,
max_token_length: None,
},
}
}
}
impl BpeTrainerBuilder {
/// Constructs a new `BpeTrainerBuilder`
pub fn new() -> Self {
Self::default()
}
/// Set the expected minimum frequency
#[must_use]
pub fn min_frequency(mut self, frequency: u32) -> Self {
self.config.min_frequency = frequency;
self
}
/// Set the vocabulary size
#[must_use]
pub fn vocab_size(mut self, size: usize) -> Self {
self.config.vocab_size = size;
self
}
/// Set whether to show progress
#[must_use]
pub fn show_progress(mut self, show: bool) -> Self {
self.config.show_progress = show;
self
}
/// Set the special tokens
#[must_use]
pub fn special_tokens(mut self, tokens: Vec<AddedToken>) -> Self {
self.config.special_tokens = tokens;
self
}
/// Set the limit on the alphabet size
#[must_use]
pub fn limit_alphabet(mut self, limit: usize) -> Self {
self.config.limit_alphabet = Some(limit);
self
}
/// Set the initial alphabet
#[must_use]
pub fn initial_alphabet(mut self, alphabet: HashSet<char>) -> Self {
self.config.initial_alphabet = alphabet;
self
}
/// Set the continuing_subword_prefix
#[must_use]
pub fn continuing_subword_prefix(mut self, prefix: String) -> Self {
self.config.continuing_subword_prefix = Some(prefix);
self
}
/// Set the end_of_word_suffix
#[must_use]
pub fn end_of_word_suffix(mut self, suffix: String) -> Self {
self.config.end_of_word_suffix = Some(suffix);
self
}
/// Set max_token_length
#[must_use]
pub fn max_token_length(mut self, max_token_length: Option<usize>) -> Self {
self.config.max_token_length = max_token_length;
self
}
/// Constructs the final BpeTrainer
pub fn build(self) -> BpeTrainer {
BpeTrainer {
min_frequency: self.config.min_frequency,
vocab_size: self.config.vocab_size,
show_progress: self.config.show_progress,
special_tokens: self.config.special_tokens,
limit_alphabet: self.config.limit_alphabet,
initial_alphabet: self.config.initial_alphabet,
continuing_subword_prefix: self.config.continuing_subword_prefix,
end_of_word_suffix: self.config.end_of_word_suffix,
max_token_length: self.config.max_token_length,
words: HashMap::new(),
}
}
}
/// In charge of training a `BPE` model
///
/// # Examples
///
/// ```
/// use tokenizers::tokenizer::Trainer;
/// use tokenizers::models::bpe::{BPE, BpeTrainer};
///
/// let sequences = vec![ "Hello", "World" ];
///
/// let mut trainer = BpeTrainer::default();
/// trainer.feed(sequences.iter(), |s| Ok(vec![s.to_owned()]));
///
/// let mut model = BPE::default();
/// let special_tokens = trainer.train(&mut model).unwrap();
/// ```
#[non_exhaustive]
#[derive(Debug, Clone, PartialEq, Serialize, Deserialize, Eq)]
pub struct BpeTrainer {
/// The minimum frequency a pair must have to produce a merge operation
pub min_frequency: u32,
/// The target vocabulary size
pub vocab_size: usize,
/// Whether to show progress while training
pub show_progress: bool,
/// A list of special tokens that the model should know of
pub special_tokens: Vec<AddedToken>,
/// An optional limit on the number of initial tokens that can be kept before computing merges
pub limit_alphabet: Option<usize>,
/// The initial alphabet we absolutely want to include. This lets us cover
/// some characters that are not necessarily in the training set
pub initial_alphabet: HashSet<char>,
/// An optional prefix to use on any subword that exists only behind another one
pub continuing_subword_prefix: Option<String>,
/// An optional suffix to characterize an end-of-word subword
pub end_of_word_suffix: Option<String>,
/// An optional parameter to limit the max length of any single token
pub max_token_length: Option<usize>,
words: HashMap<String, u32>,
}
impl Default for BpeTrainer {
fn default() -> Self {
Self::builder().build()
}
}
impl BpeTrainer {
pub fn new(min_frequency: u32, vocab_size: usize) -> Self {
Self {
min_frequency,
vocab_size,
..Default::default()
}
}
pub fn builder() -> BpeTrainerBuilder {
BpeTrainerBuilder::new()
}
/// Setup a progress bar if asked to show progress
fn setup_progress(&self) -> Option<ProgressBar> {
if self.show_progress {
let p = ProgressBar::new(0);
p.set_style(
ProgressStyle::default_bar()
.template("[{elapsed_precise}] {msg:<30!} {wide_bar} {pos:<9!}/{len:>9!}")
.expect("Invalid progress template"),
);
Some(p)
} else {
None
}
}
/// Set the progress bar in the finish state
fn finalize_progress(&self, p: &Option<ProgressBar>, final_len: usize) {
if let Some(p) = p {
p.set_length(final_len as u64);
p.finish();
println!();
}
}
/// Update the progress bar with the new provided length and message
fn update_progress(&self, p: &Option<ProgressBar>, len: usize, message: &'static str) {
if let Some(p) = p {
p.set_message(message);
p.set_length(len as u64);
p.reset();
}
}
/// Add the provided special tokens to the initial vocabulary
fn add_special_tokens(&self, w2id: &mut HashMap<String, u32>, id2w: &mut Vec<String>) {
for token in &self.special_tokens {
if !w2id.contains_key(&token.content) {
id2w.push(token.content.to_owned());
w2id.insert(token.content.to_owned(), (id2w.len() - 1) as u32);
}
}
}
/// Compute the initial alphabet and limit it if relevant
fn compute_alphabet(
&self,
wc: &HashMap<String, u32>,
w2id: &mut HashMap<String, u32>,
id2w: &mut Vec<String>,
) {
// Compute the alphabet from seen words
let mut alphabet: HashMap<char, usize> = HashMap::new();
for (word, count) in wc {
for c in word.chars() {
alphabet
.entry(c)
.and_modify(|cnt| *cnt += *count as usize)
.or_insert(*count as usize);
}
}
// Also include anything from the provided initial alphabet
for c in &self.initial_alphabet {
alphabet
.entry(*c)
.and_modify(|cnt| *cnt = std::usize::MAX)
.or_insert(std::usize::MAX);
}
let mut kept = alphabet.iter().collect::<Vec<_>>();
// Compute the number of chars to remove from the alphabet
// If `limit_alphabet < initial_alphabet.len()`, some of these initial characters
// will be removed
let to_remove = self
.limit_alphabet
.map(|limit| {
if alphabet.len() > limit {
alphabet.len() - limit
} else {
0
}
})
.unwrap_or(0);
// Remove the unwanted chars
if to_remove > 0 {
kept.sort_unstable_by_key(|k| *k.1);
kept.drain(..to_remove);
}
// Keep the initial alphabet (sorted for determinism)
kept.sort_unstable_by_key(|k| (*k.0) as u32);
kept.into_iter().for_each(|(c, _)| {
let s = c.to_string();
if !w2id.contains_key(&s) {
id2w.push(s.clone());
w2id.insert(s, (id2w.len() - 1) as u32);
}
});
}
/// Tokenize words and add subwords to the vocabulary when relevant
fn tokenize_words(
&self,
wc: &HashMap<String, u32>,
w2id: &mut HashMap<String, u32>,
id2w: &mut Vec<String>,
p: &Option<ProgressBar>,
) -> (Vec<Word>, Vec<u32>) {
let mut words: Vec<Word> = Vec::with_capacity(wc.len());
let mut counts: Vec<u32> = Vec::with_capacity(wc.len());
for (word, count) in wc {
let mut current_word = Word::new();
counts.push(*count);
for (is_first, is_last, c) in word.chars().with_first_and_last() {
let mut s = c.to_string();
if w2id.contains_key(&s) {
// Found the initial char in the authorized alphabet
// Add the `continuing_subword_prefix` if relevant
if !is_first {
if let Some(prefix) = &self.continuing_subword_prefix {
s = format!("{}{}", prefix, s);
}
}
// Add the `end_of_word_suffix` if relevant
if is_last {
if let Some(suffix) = &self.end_of_word_suffix {
s = format!("{}{}", s, suffix);
}
}
// Insert the newly formed string if necessary
if !w2id.contains_key(&s) {
id2w.push(s.clone());
w2id.insert(s.clone(), (id2w.len() - 1) as u32);
}
current_word.add(w2id[&s], 1); // We do not care about the len here
}
}
words.push(current_word);
if let Some(p) = p {
p.inc(1);
}
}
(words, counts)
}
fn count_pairs(
&self,
words: &[Word],
counts: &[u32],
p: &Option<ProgressBar>,
) -> (HashMap<Pair, i32>, HashMap<Pair, HashSet<usize>>) {
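// Count pairs per word in parallel, also recording for each pair the set of word
// indices it appears in, then reduce all per-word maps into global ones.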
words
.maybe_par_iter()
.enumerate()
.map(|(i, word)| {
let mut pair_counts = HashMap::new();
let mut where_to_update: HashMap<Pair, HashSet<usize>> = HashMap::new();
for window in word.get_chars().windows(2) {
let cur_pair: Pair = (window[0], window[1]);
// Initialize pair_counts and where_to_update for this pair if we just saw it
if !pair_counts.contains_key(&cur_pair) {
pair_counts.insert(cur_pair, 0);
}
// Then update counts
let count = counts[i];
where_to_update
.entry(cur_pair)
.and_modify(|h| {
h.insert(i);
})
.or_insert_with(|| {
let mut h = HashSet::new();
h.insert(i);
h
});
*pair_counts.get_mut(&cur_pair).unwrap() += count as i32;
}
if let Some(p) = &p {
p.inc(1);
}
(pair_counts, where_to_update)
})
.reduce(
|| (HashMap::new(), HashMap::new()),
|(mut pair_counts, mut where_to_update), (pc, wtu)| {
for (k, v) in pc {
pair_counts.entry(k).and_modify(|c| *c += v).or_insert(v);
}
for (k, v) in wtu {
where_to_update
.entry(k)
.and_modify(|set| *set = set.union(&v).copied().collect())
.or_insert(v);
}
(pair_counts, where_to_update)
},
)
}
pub fn do_train(
&self,
word_counts: &HashMap<String, u32>,
model: &mut BPE,
) -> Result<Vec<AddedToken>> {
let mut word_to_id: HashMap<String, u32> = HashMap::with_capacity(self.vocab_size);
let mut id_to_word: Vec<String> = Vec::with_capacity(self.vocab_size);
let max_token_length: usize = self.max_token_length.unwrap_or(usize::MAX);
let progress = self.setup_progress();
//
// 1. Add all special tokens to the vocabulary
//
self.add_special_tokens(&mut word_to_id, &mut id_to_word);
//
// 2. Compute the initial alphabet
//
self.compute_alphabet(word_counts, &mut word_to_id, &mut id_to_word);
//
// 3. Tokenize words
//
self.update_progress(&progress, word_counts.len(), "Tokenize words");
let (words, counts) =
self.tokenize_words(word_counts, &mut word_to_id, &mut id_to_word, &progress);
self.finalize_progress(&progress, words.len());
//
// 4. Count pairs in words
//
self.update_progress(&progress, words.len(), "Count pairs");
let (mut pair_counts, mut where_to_update) = self.count_pairs(&words, &counts, &progress);
// Insert them in the queue
let mut queue = BinaryHeap::with_capacity(pair_counts.len());
where_to_update.drain().for_each(|(pair, pos)| {
let count = pair_counts[&pair];
if count > 0 {
queue.push(Merge {
pair,
count: count as u32,
pos,
});
}
});
self.finalize_progress(&progress, words.len());
//
// 5. Do merges
//
self.update_progress(&progress, self.vocab_size, "Compute merges");
let mut merges: Vec<(Pair, u32)> = vec![];
loop {
// Stop as soon as we have a big enough vocabulary
if word_to_id.len() >= self.vocab_size {
break;
}
if queue.is_empty() {
break;
}
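// Lazily refresh stale entries: counts may have changed since this pair was pushed,
// so if the stored count no longer matches, update it and re-push instead of merging.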
let mut top = queue.pop().unwrap();
if top.count != pair_counts[&top.pair] as u32 {
top.count = pair_counts[&top.pair] as u32;
queue.push(top);
continue;
}
if top.count < 1 || self.min_frequency > top.count {
break;
}
let part_a = &id_to_word[top.pair.0 as usize];
let mut part_b = id_to_word[top.pair.1 as usize].to_owned();
// Build new token
if let Some(prefix) = &self.continuing_subword_prefix {
if part_b.starts_with(prefix) {
let prefix_byte_len = prefix.chars().map(|c| c.len_utf8()).sum();
part_b = part_b[prefix_byte_len..].to_string();
}
}
let new_token = format!("{}{}", part_a, part_b);
// implement sentencepiece-like merge.
// if this code were to be merged, integrate a way in the python bindings to communicate this variable
// default should be 0/None to maintain previous behavior. 16 is the spm default.
// Insert new token if it does not already exist
let new_token_id = word_to_id
.get(&new_token)
.copied()
.unwrap_or(id_to_word.len() as u32);
if word_to_id.get(&new_token).is_none() {
id_to_word.push(new_token.clone());
word_to_id.insert(new_token.clone(), new_token_id);
}
merges.push((top.pair, new_token_id));
// Merge the new pair in all the words where it occurs
let changes = top
.pos
.maybe_par_iter()
.flat_map(|&i| {
let word = &words[i] as *const _ as *mut Word;
// We can merge each of these words in parallel here because each position
// can be there only once (HashSet). So this is safe.
unsafe {
// let word: &mut Word = &mut (*word);
(*word)
.merge(top.pair.0, top.pair.1, new_token_id, max_token_length)
.into_iter()
.map(|c| (c, i))
.collect::<Vec<_>>()
}
})
.collect::<Vec<_>>();
// Introduce new formed pairs
for ((pair, change), iw) in changes {
let count = change * counts[iw] as i32;
pair_counts
.entry(pair)
.and_modify(|c| *c += count)
.or_insert(count);
if change > 0 {
where_to_update
.entry(pair)
.and_modify(|h| {
h.insert(iw);
})
.or_insert_with(|| {
let mut h = HashSet::new();
h.insert(iw);
h
});
}
}
where_to_update.drain().for_each(|(pair, pos)| {
let count = pair_counts[&pair];
if count > 0 {
queue.push(Merge {
pair,
count: count as u32,
pos,
});
}
});
if let Some(p) = &progress {
p.inc(1);
}
}
self.finalize_progress(&progress, merges.len());
// Transfer new vocab & options to model
model.vocab = word_to_id;
model.vocab_r = model
.vocab
.iter()
.map(|(key, val)| (*val, key.to_owned()))
.collect();
model.merges = merges
.into_iter()
.enumerate()
.map(|(i, (pair, new_token_id))| (pair, (i as u32, new_token_id)))
.collect();
if let Some(prefix) = &self.continuing_subword_prefix {
model.continuing_subword_prefix = Some(prefix.to_owned());
} else {
model.continuing_subword_prefix = None;
}
if let Some(suffix) = &self.end_of_word_suffix {
model.end_of_word_suffix = Some(suffix.to_owned());
} else {
model.end_of_word_suffix = None;
}
Ok(self.special_tokens.clone())
}
}
impl Trainer for BpeTrainer {
type Model = BPE;
/// Train a BPE model
fn train(&self, model: &mut BPE) -> Result<Vec<AddedToken>> {
self.do_train(&self.words, model)
}
/// Whether we should show progress
fn should_show_progress(&self) -> bool {
self.show_progress
}
fn feed<I, S, F>(&mut self, iterator: I, process: F) -> Result<()>
where
I: Iterator<Item = S> + Send,
S: AsRef<str> + Send,
F: Fn(&str) -> Result<Vec<String>> + Sync,
{
let words: Result<HashMap<String, u32>> = iterator
.maybe_par_bridge()
.map(|sequence| {
let words = process(sequence.as_ref())?;
let mut map = HashMap::new();
for word in words {
map.entry(word).and_modify(|c| *c += 1).or_insert(1);
}
Ok(map)
})
.reduce(
|| Ok(HashMap::new()),
|acc, ws| {
let mut acc = acc?;
for (k, v) in ws? {
acc.entry(k).and_modify(|c| *c += v).or_insert(v);
}
Ok(acc)
},
);
self.words = words?;
Ok(())
}
}
#[cfg(test)]
mod tests {
use super::{BpeTrainer, Pair, BPE};
use std::collections::HashMap;
#[test]
fn test_train() {
let word_counts: HashMap<String, u32> = [
("roses".into(), 1),
("are".into(), 2),
("red".into(), 1),
("voilets".into(), 1),
("blue".into(), 1),
("BERT".into(), 1),
("is".into(), 2),
("big".into(), 1),
("and".into(), 1),
("so".into(), 1),
("GPT-2".into(), 1),
]
.iter()
.cloned()
.collect();
let trainer = BpeTrainer::builder()
.show_progress(false)
.min_frequency(2)
.build();
let mut model = BPE::default();
trainer.do_train(&word_counts, &mut model).unwrap();
// Vocab should contain all of the characters from the `word_counts` mapping
// as well as three merges: 're', 'are', and 'is'.
let expected_vocab: HashMap<String, u32> = [
("-".into(), 0),
("2".into(), 1),
("B".into(), 2),
("E".into(), 3),
("G".into(), 4),
("P".into(), 5),
("R".into(), 6),
("T".into(), 7),
("a".into(), 8),
("b".into(), 9),
("d".into(), 10),
("e".into(), 11),
("g".into(), 12),
("i".into(), 13),
("l".into(), 14),
("n".into(), 15),
("o".into(), 16),
("r".into(), 17),
("s".into(), 18),
("t".into(), 19),
("u".into(), 20),
("v".into(), 21),
("re".into(), 22),
("are".into(), 23),
("is".into(), 24),
]
.iter()
.cloned()
.collect();
assert_eq!(model.vocab, expected_vocab);
// The keys in `merges` are pairs of symbols, the values are tuples of (rank, id),
// where 'rank' determines the order in which this merge will be applied during
// tokenization, and 'id' is the vocab id of the symbol resulting from merging
// the pair of symbols in the corresponding key.
let expected_merges: HashMap<Pair, (u32, u32)> = [
((17, 11), (0, 22)), // 'r' + 'e' -> 're'
((8, 22), (1, 23)), // 'a' + 're' -> 'are'
((13, 18), (2, 24)), // 'i' + 's' -> 'is'
]
.iter()
.cloned()
.collect();
assert_eq!(model.merges, expected_merges);
}
#[test]
fn bpe_test_max_token_length_16() {
/* bpe_test_max_token_length series of tests test the max_token_length flag of bpetrainer
// this is the more robust version that only tests max length of learned tokens
// (pre) tokenizer settings or vocab can be easily modified when necessary
*/
let max_token_length = 16;
let long_word_counts: HashMap<String, u32> = [
("singlelongtokenwithoutcasechange", 2),
("singleLongTokenWithCamelCaseChange", 2),
("Longsingletokenwithpunctu@t!onwithin", 2),
("Anotherlongsingletokenwithnumberw1th1n", 2),
("짧은한글문자열짧은한", 2), // korean 10 char
("긴한글문자열긴한글문자열긴한글문", 2), // korean 16 char
("短字符串短字符串短字", 2), //simplified chinese 10 char
("长字符串长字符串长字符串长字符串", 2), // simp. chinese 16 char
("短い文字列短い文字列", 2), // japanese 10 char
("長い文字列長い文字列長い文字列長", 2), // japanese 16 char
("so", 2),
("GPT-2", 2),
]
.iter()
.map(|(key, value)| (key.to_string(), *value))
.collect();
let trainer = BpeTrainer::builder()
.max_token_length(Some(max_token_length))
.show_progress(false)
.min_frequency(0)
.build();
let mut model = BPE::default();
trainer.do_train(&long_word_counts, &mut model).unwrap();
let vocab = model.get_vocab();
for token in vocab.keys() {
assert!(
token.chars().count() <= max_token_length,
"token too long : {} , chars().count() = {}",
token,
token.chars().count()
)
}
}
#[test]
fn bpe_test_max_token_length_direct_assert() {
/* more direct version of bpe_test_max_token_length test
// directly compares tokens with known expected values.
// maybe unstable depending on specific settings or changes.
*/
let long_word_counts: HashMap<String, u32> = [
("sin", 2),
("Sin", 2),
("Lon", 2),
("Ano", 2),
("짧은한", 2),
("긴한글", 2),
("短字符", 2),
("长字符", 2),
("短い文", 2),
("長い文", 2),
("so", 2),
("GP", 2),
]
.iter()
.map(|(key, value)| (key.to_string(), *value))
.collect();
let trainer = BpeTrainer::builder()
.max_token_length(Some(2))
.show_progress(false)
.min_frequency(0)
.build();
let mut model = BPE::default();
trainer.do_train(&long_word_counts, &mut model).unwrap();
let trained_vocab: HashMap<String, u32> = model.get_vocab();
let expected_vocab: HashMap<String, u32> = [
("短", 12),
("n", 6),
("i", 5),
("s", 8),
("字符", 23),
("長", 14),
("긴", 17),
("い文", 22),
("L", 2),
("in", 21),
("o", 7),
("은한", 29),
("S", 4),
("P", 3),
("so", 27),
("符", 13),
("文", 11),
("字", 10),
("짧", 19),
("GP", 25),
("글", 16),
("G", 1),
("An", 24),
("长", 15),
("A", 0),
("Lo", 26),
("긴한", 28),
("い", 9),
("한", 20),
("은", 18),
]
.iter()
.cloned()
.map(|(k, v)| (k.to_string(), v))
.collect();
assert_eq!(trained_vocab, expected_vocab)
}
}
| 0 |
hf_public_repos/tokenizers/tokenizers/src/models | hf_public_repos/tokenizers/tokenizers/src/models/bpe/serialization.rs | use super::{super::OrderedVocabIter, convert_merges_to_hashmap, BpeBuilder, Pair, BPE};
use serde::{
de::{Error, MapAccess, Visitor},
ser::SerializeStruct,
Deserialize, Deserializer, Serialize, Serializer,
};
use std::collections::HashMap;
impl Serialize for BPE {
fn serialize<S>(&self, serializer: S) -> Result<S::Ok, S::Error>
where
S: Serializer,
{
let mut model = serializer.serialize_struct("BPE", 8)?;
// Start by small fields
model.serialize_field("type", "BPE")?;
model.serialize_field("dropout", &self.dropout)?;
model.serialize_field("unk_token", &self.unk_token)?;
model.serialize_field("continuing_subword_prefix", &self.continuing_subword_prefix)?;
model.serialize_field("end_of_word_suffix", &self.end_of_word_suffix)?;
model.serialize_field("fuse_unk", &self.fuse_unk)?;
model.serialize_field("byte_fallback", &self.byte_fallback)?;
// Then the large ones
let mut merges: Vec<(&Pair, &u32)> = self
.merges
.iter()
.map(|(pair, (rank, _))| (pair, rank))
.collect();
merges.sort_unstable_by_key(|k| *k.1);
let merges_str = merges
.into_iter()
.map(|(pair, _)| format!("{} {}", self.vocab_r[&pair.0], self.vocab_r[&pair.1]))
.collect::<Vec<_>>();
let ordered_vocab = OrderedVocabIter::new(&self.vocab_r);
model.serialize_field("vocab", &ordered_vocab)?;
model.serialize_field("merges", &merges_str)?;
model.end()
}
}
impl<'de> Deserialize<'de> for BPE {
fn deserialize<D>(deserializer: D) -> Result<Self, D::Error>
where
D: Deserializer<'de>,
{
deserializer.deserialize_struct(
"BPE",
&[
"type",
"dropout",
"unk_token",
"continuing_subword_prefix",
"end_of_word_suffix",
"fuse_unk",
"byte_fallback",
"vocab",
"merges",
],
BPEVisitor,
)
}
}
struct BPEVisitor;
impl<'de> Visitor<'de> for BPEVisitor {
type Value = BPE;
fn expecting(&self, fmt: &mut std::fmt::Formatter) -> std::fmt::Result {
write!(fmt, "struct BPE")
}
fn visit_map<V>(self, mut map: V) -> std::result::Result<Self::Value, V::Error>
where
V: MapAccess<'de>,
{
let mut builder = BpeBuilder::new();
let mut vocab: Option<HashMap<String, u32>> = None;
let mut merges: Option<Vec<String>> = None;
while let Some(key) = map.next_key::<String>()? {
match key.as_ref() {
"dropout" => {
if let Some(dropout) = map.next_value()? {
builder = builder.dropout(dropout);
}
}
"unk_token" => {
if let Some(unk) = map.next_value()? {
builder = builder.unk_token(unk);
}
}
"continuing_subword_prefix" => {
if let Some(prefix) = map.next_value()? {
builder = builder.continuing_subword_prefix(prefix);
}
}
"end_of_word_suffix" => {
if let Some(suffix) = map.next_value()? {
builder = builder.end_of_word_suffix(suffix);
}
}
"fuse_unk" => {
if let Some(fuse_unk) = map.next_value()? {
builder = builder.fuse_unk(fuse_unk);
}
}
"byte_fallback" => {
if let Some(byte_fallback) = map.next_value()? {
builder = builder.byte_fallback(byte_fallback);
}
}
"vocab" => vocab = Some(map.next_value()?),
"merges" => merges = Some(map.next_value()?),
"type" => match map.next_value()? {
"BPE" => {}
u => {
return Err(serde::de::Error::invalid_value(
serde::de::Unexpected::Str(u),
&"BPE",
))
}
},
_ => {}
}
}
if let (Some(vocab), Some(merges)) = (vocab, merges) {
let merges =
convert_merges_to_hashmap(merges.into_iter(), &vocab).map_err(Error::custom)?;
builder = builder.vocab_and_merges(vocab, merges);
Ok(builder.build().map_err(Error::custom)?)
} else {
Err(Error::custom("Missing vocab/merges"))
}
}
}
| 0 |
hf_public_repos/tokenizers/tokenizers/src/models | hf_public_repos/tokenizers/tokenizers/src/models/bpe/model.rs | use super::{super::OrderedVocabIter, trainer::BpeTrainer, Error, Pair, Word};
use crate::tokenizer::{Model, Result, Token};
use crate::utils::cache::{Cache, DEFAULT_CACHE_CAPACITY};
use crate::utils::iter::ResultShunt;
use serde_json::Value;
use std::borrow::Cow;
use std::{
collections::HashMap,
fs::File,
io::prelude::*,
io::{BufRead, BufReader},
path::{Path, PathBuf},
};
pub type Vocab = HashMap<String, u32>;
type VocabR = HashMap<u32, String>;
pub type MergeMap = HashMap<Pair, (u32, u32)>;
pub type Merges = Vec<(String, String)>;
struct Config {
files: Option<(String, String)>,
vocab: Vocab,
merges: Merges,
cache_capacity: usize,
dropout: Option<f32>,
unk_token: Option<String>,
continuing_subword_prefix: Option<String>,
end_of_word_suffix: Option<String>,
fuse_unk: bool,
byte_fallback: bool,
}
/// A `BpeBuilder` can be used to create a `BPE` model with a custom configuration.
pub struct BpeBuilder {
config: Config,
}
impl Default for BpeBuilder {
fn default() -> Self {
Self {
config: Config {
files: None,
vocab: HashMap::new(),
merges: vec![],
cache_capacity: DEFAULT_CACHE_CAPACITY,
dropout: None,
unk_token: None,
continuing_subword_prefix: None,
end_of_word_suffix: None,
fuse_unk: false,
byte_fallback: false,
},
}
}
}
impl BpeBuilder {
/// Constructs a new `BpeBuilder`.
pub fn new() -> Self {
Self::default()
}
/// Set the input files.
#[must_use]
pub fn files(mut self, vocab: String, merges: String) -> Self {
self.config.files = Some((vocab, merges));
self
}
/// Set the vocab (token -> ID) and merges mappings.
#[must_use]
pub fn vocab_and_merges(mut self, vocab: Vocab, merges: Merges) -> Self {
self.config.vocab = vocab;
self.config.merges = merges;
self
}
/// Set the cache's capacity. Set to 0 if you want to disable caching.
#[must_use]
pub fn cache_capacity(mut self, capacity: usize) -> Self {
self.config.cache_capacity = capacity;
self
}
/// Use [dropout](https://arxiv.org/abs/1910.13267) with the model.
#[must_use]
pub fn dropout(mut self, dropout: f32) -> Self {
self.config.dropout = Some(dropout);
self
}
/// Set the `UNK` token for the vocab.
#[must_use]
pub fn unk_token(mut self, unk_token: String) -> Self {
self.config.unk_token = Some(unk_token);
self
}
/// Set the `continuing_subword_prefix` option.
#[must_use]
pub fn continuing_subword_prefix(mut self, prefix: String) -> Self {
self.config.continuing_subword_prefix = Some(prefix);
self
}
/// Set the `end_of_word_suffix` option.
#[must_use]
pub fn end_of_word_suffix(mut self, suffix: String) -> Self {
self.config.end_of_word_suffix = Some(suffix);
self
}
/// Set the `fuse_unk` option.
#[must_use]
pub fn fuse_unk(mut self, fuse_unk: bool) -> Self {
self.config.fuse_unk = fuse_unk;
self
}
/// Set the `byte_fallback` option.
#[must_use]
pub fn byte_fallback(mut self, byte_fallback: bool) -> Self {
self.config.byte_fallback = byte_fallback;
self
}
/// Returns a `BPE` model that uses the `BpeBuilder`'s configuration.
pub fn build(mut self) -> Result<BPE> {
// Validate dropout.
if let Some(p) = self.config.dropout {
if p <= 0.0 || p > 1.0 {
return Err(Error::InvalidDropout.into());
}
}
// Read files if necessary
if let Some((vocab, merges)) = self.config.files {
let (v, m) = BPE::read_file(&vocab, &merges)?;
self.config.vocab = v;
self.config.merges = m;
}
let vocab_r = self
.config
.vocab
.iter()
.map(|(key, val)| (*val, key.to_owned()))
.collect();
let cache = match self.config.cache_capacity {
0 => None,
capacity => Some(Cache::new(capacity)),
};
let vocab = self.config.vocab;
let prefix_len = if let Some(prefix) = &self.config.continuing_subword_prefix {
prefix.len()
} else {
0
};
let merge_map: MergeMap = self
.config
.merges
.into_iter()
.enumerate()
.map(|(i, (a, b))| -> Result<(Pair, (u32, u32))> {
let a_id = vocab
.get(&a)
.ok_or_else(|| Error::MergeTokenOutOfVocabulary(a.to_owned()))?;
let b_id = vocab
.get(&b)
.ok_or_else(|| Error::MergeTokenOutOfVocabulary(b.to_owned()))?;
let new_token = format!("{}{}", a, &b[prefix_len..]);
let new_id = vocab
.get(&new_token)
.ok_or(Error::MergeTokenOutOfVocabulary(new_token))?;
Ok(((*a_id, *b_id), (i as u32, *new_id)))
})
.collect::<Result<MergeMap>>()?;
// merges.insert(pair, (rank as u32, *new_id));
Ok(BPE {
vocab,
vocab_r,
merges: merge_map,
cache,
dropout: self.config.dropout,
unk_token: self.config.unk_token,
continuing_subword_prefix: self.config.continuing_subword_prefix,
end_of_word_suffix: self.config.end_of_word_suffix,
fuse_unk: self.config.fuse_unk,
byte_fallback: self.config.byte_fallback,
})
}
}
/// A [Byte Pair Encoding](https://www.aclweb.org/anthology/P16-1162/) model.
#[derive(PartialEq)]
pub struct BPE {
/// The vocabulary assigns a number to each token.
pub(crate) vocab: Vocab,
/// Reversed vocabulary, to rebuild sentences.
pub(crate) vocab_r: VocabR,
/// Contains the mapping between Pairs and their (rank, new_id).
pub(crate) merges: MergeMap,
/// Contains the cache for optimizing the encoding step.
cache: Option<Cache<String, Word>>,
/// Dropout probability for merges. 0 = no dropout is the default. At 1.0, tokenization will
/// perform no merges, so the result will just be characters.
pub dropout: Option<f32>,
/// The unknown token to be used when we encounter an unknown char
pub unk_token: Option<String>,
/// An optional prefix to use on any subword that exists only behind another one
pub continuing_subword_prefix: Option<String>,
/// An optional suffix to characterize an end-of-word subword
pub end_of_word_suffix: Option<String>,
/// Whether multiple unk tokens get fused into a single one
pub fuse_unk: bool,
/// Byte fallback from sentencepiece: instead of UNK, use tokens like `"<0x00>"`
/// for each byte of the unknown piece
pub byte_fallback: bool,
}
impl std::fmt::Debug for BPE {
fn fmt(&self, fmt: &mut std::fmt::Formatter) -> std::fmt::Result {
fmt.debug_struct("BPE")
.field("dropout", &self.dropout)
.field("unk_token", &self.unk_token)
.field("continuing_subword_prefix", &self.continuing_subword_prefix)
.field("end_of_word_suffix", &self.end_of_word_suffix)
.field("fuse_unk", &self.fuse_unk)
.field("byte_fallback", &self.byte_fallback)
.field("vocab", &self.vocab.len())
.field("merges", &self.merges.len())
.finish()
}
}
impl Default for BPE {
fn default() -> Self {
Self::builder().build().unwrap()
}
}
impl Clone for BPE {
// `Clone` can't be derived because it's not implemented for `Cache`.
// To keep things simple when we clone, the new BPE will start with a fresh cache.
fn clone(&self) -> Self {
let fresh_cache = self.cache.as_ref().map(|cache| cache.fresh());
Self {
vocab: self.vocab.clone(),
vocab_r: self.vocab_r.clone(),
merges: self.merges.clone(),
cache: fresh_cache,
dropout: self.dropout,
unk_token: self.unk_token.clone(),
continuing_subword_prefix: self.continuing_subword_prefix.clone(),
end_of_word_suffix: self.end_of_word_suffix.clone(),
fuse_unk: self.fuse_unk,
byte_fallback: self.byte_fallback,
}
}
}
/// Converts the merges strings (for example from `merges.txt` file) with the format
/// "{pair_a} {pair_b}" into the format expected by the BPE struct
pub(crate) fn convert_merges_to_hashmap<I: Iterator<Item = String>>(
iter: I,
_vocab: &Vocab,
) -> Result<Merges> {
let mut merges = vec![];
let lines = iter.filter(|l| !l.starts_with("#version"));
for (rank, line) in lines.enumerate() {
let parts = line.split(' ').collect::<Vec<_>>();
if parts.len() != 2 {
return Err(Error::BadMerges(rank + 1).into());
}
merges.push((parts[0].to_string(), parts[1].to_string()));
}
Ok(merges)
}
impl BPE {
/// Initialize a `BpeBuilder`.
pub fn builder() -> BpeBuilder {
BpeBuilder::new()
}
/// Create a new BPE model with the given vocab and merges.
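    ///
    /// A minimal in-memory sketch (toy vocab with a single merge):
    ///
    /// ```
    /// use std::collections::HashMap;
    /// use tokenizers::models::bpe::BPE;
    /// use tokenizers::{Model, Token};
    ///
    /// let vocab: HashMap<String, u32> = vec![
    ///     ("a".to_string(), 0),
    ///     ("b".to_string(), 1),
    ///     ("ab".to_string(), 2),
    /// ]
    /// .into_iter()
    /// .collect();
    /// let merges = vec![("a".to_string(), "b".to_string())];
    /// let bpe = BPE::new(vocab, merges);
    ///
    /// let tokens = bpe.tokenize("ab").unwrap();
    /// assert_eq!(tokens, vec![Token::new(2, "ab".into(), (0, 2))]);
    /// ```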
pub fn new(vocab: Vocab, merges: Merges) -> Self {
Self::builder()
.vocab_and_merges(vocab, merges)
.build()
.unwrap()
}
    /// Initialize a `BpeBuilder` from vocab and merges files
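    ///
    /// A minimal sketch (the paths are placeholders, hence `no_run`):
    ///
    /// ```no_run
    /// use tokenizers::models::bpe::BPE;
    ///
    /// let bpe = BPE::from_file("path/to/vocab.json", "path/to/merges.txt")
    ///     .build()
    ///     .unwrap();
    /// ```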
pub fn from_file(vocab: &str, merges: &str) -> BpeBuilder {
Self::builder().files(vocab.to_owned(), merges.to_owned())
}
/// Read the given files to extract the vocab and merges
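    ///
    /// A small sketch (placeholder paths, hence `no_run`):
    ///
    /// ```no_run
    /// use tokenizers::models::bpe::BPE;
    ///
    /// let (vocab, merges) = BPE::read_file("path/to/vocab.json", "path/to/merges.txt").unwrap();
    /// println!("{} tokens, {} merges", vocab.len(), merges.len());
    /// ```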
pub fn read_file(vocab: &str, merges: &str) -> Result<(Vocab, Merges)> {
// Read vocab.json
let vocab_file = File::open(vocab)?;
let mut vocab_file = BufReader::new(vocab_file);
let mut buffer = String::new();
vocab_file.read_to_string(&mut buffer)?;
let json: Value = serde_json::from_str(&buffer)?;
let mut vocab = HashMap::new();
match json {
Value::Object(m) => {
for (token, id) in m {
if let Value::Number(id) = id {
let id = id.as_u64().ok_or(Error::BadVocabulary)? as u32;
vocab.insert(token, id);
}
}
}
_ => return Err(Box::new(Error::BadVocabulary)),
};
// Read merges file
let merge_file = File::open(merges)?;
let merge_file = BufReader::new(merge_file);
let merges = ResultShunt::process(merge_file.lines(), |iter| {
convert_merges_to_hashmap(iter, &vocab)
})??;
Ok((vocab, merges))
}
/// Reset the cache.
pub fn clear_cache(&self) {
if let Some(ref cache) = self.cache {
cache.clear()
}
}
pub fn get_vocab(&self) -> Vocab {
self.vocab.clone()
}
pub fn get_unk_token(&self) -> &Option<String> {
&self.unk_token
}
pub fn get_continuing_subword_prefix(&self) -> &Option<String> {
&self.continuing_subword_prefix
}
fn merge_word(&self, w: &str) -> Result<Word> {
let mut indices = w.char_indices().map(|(idx, _)| idx).peekable();
let mut word = Word::with_capacity(w.len());
let mut unk: Option<(u32, usize)> = None;
while let Some(i) = indices.next() {
let end = indices.peek();
let is_first = i == 0;
let is_last = end.is_none();
let mut s = if let Some(e) = end {
Cow::Borrowed(&w[i..*e])
} else {
Cow::Borrowed(&w[i..])
};
let byte_len = s.len();
// Add the `continuing_subword_prefix` if relevant
if !is_first {
if let Some(ref prefix) = self.continuing_subword_prefix {
s = format!("{}{}", prefix, s).into()
}
}
// Add the `end_of_word_suffix` if relevant
if is_last {
if let Some(ref suffix) = self.end_of_word_suffix {
s = format!("{}{}", s, suffix).into()
}
}
if let Some(id) = self.vocab.get(s.as_ref()) {
if let Some((unk_id, unk_len)) = unk {
word.add(unk_id, unk_len);
unk = None;
}
word.add(*id, byte_len);
} else {
if self.byte_fallback {
let tokens: Option<Vec<_>> = s
.bytes()
.map(|b| -> Option<&u32> {
let code = format!("<{:#04X}>", b);
self.vocab.get(&code)
})
.collect();
if let Some(tokens) = tokens {
for t in tokens {
word.add(*t, 1);
}
continue;
}
}
if let Some(unk_token) = &self.unk_token {
unk = match (unk, self.fuse_unk) {
(Some((unk_id, unk_len)), true) => {
// Fuse unk
Some((unk_id, unk_len + byte_len))
}
(Some((unk_id, unk_len)), false) => {
// Do not fuse unk, add the previous one
word.add(unk_id, unk_len);
Some((
*self.vocab.get(unk_token).ok_or_else(|| {
Error::UnkTokenOutOfVocabulary(unk_token.to_owned())
})?,
byte_len,
))
}
_ => Some((
*self.vocab.get(unk_token).ok_or_else(|| {
Error::UnkTokenOutOfVocabulary(unk_token.to_owned())
})?,
byte_len,
)),
};
}
}
}
if let Some((unk_id, unk_len)) = unk {
word.add(unk_id, unk_len);
}
word.merge_all(&self.merges, self.dropout);
Ok(word)
}
fn word_to_tokens<'a, 'b: 'a>(&'a self, word: &'b Word) -> impl Iterator<Item = Token> + 'a {
word.get_chars_iter()
.zip(word.get_offsets_iter())
.map(move |(id, offsets)| Token::new(id, self.vocab_r[&id].clone(), offsets))
}
fn tokenize_with_cache(&self, sequence: &str) -> Result<Vec<Token>> {
if let Some(ref hit) = self.cache.as_ref().and_then(|c| c.get(sequence)) {
Ok(self.word_to_tokens(hit).collect())
} else {
let word = self.merge_word(sequence)?;
let ret = self.word_to_tokens(&word).collect();
if let Some(ref cache) = self.cache {
cache.set(sequence.to_owned(), word);
}
Ok(ret)
}
}
}
impl Model for BPE {
type Trainer = BpeTrainer;
fn get_vocab(&self) -> HashMap<String, u32> {
self.vocab.clone()
}
fn get_vocab_size(&self) -> usize {
self.vocab.len()
}
fn tokenize(&self, sequence: &str) -> Result<Vec<Token>> {
if sequence.is_empty() {
return Ok(vec![]);
}
if self.dropout.is_none() {
self.tokenize_with_cache(sequence)
} else {
let word = self.merge_word(sequence)?;
Ok(self.word_to_tokens(&word).collect())
}
}
fn token_to_id(&self, token: &str) -> Option<u32> {
self.vocab.get(token).copied()
}
fn id_to_token(&self, id: u32) -> Option<String> {
self.vocab_r.get(&id).cloned()
}
fn save(&self, folder: &Path, name: Option<&str>) -> Result<Vec<PathBuf>> {
let vocab_file_name = match name {
Some(name) => format!("{}-vocab.json", name),
None => "vocab.json".to_string(),
};
// Write vocab.json
let vocab_path: PathBuf = [folder, Path::new(vocab_file_name.as_str())]
.iter()
.collect();
let mut vocab_file = File::create(&vocab_path)?;
let order_vocab_iter = OrderedVocabIter::new(&self.vocab_r);
let serialized = serde_json::to_string(&order_vocab_iter)?;
vocab_file.write_all(serialized.as_bytes())?;
// Write merges.txt
let merges_file_name = match name {
Some(name) => format!("{}-merges.txt", name),
None => "merges.txt".to_string(),
};
let merges_path: PathBuf = [folder, Path::new(merges_file_name.as_str())]
.iter()
.collect();
let mut merges_file = File::create(&merges_path)?;
let mut merges: Vec<(&Pair, &u32)> = self
.merges
.iter()
.map(|(pair, (rank, _))| (pair, rank))
.collect();
merges.sort_unstable_by_key(|k| *k.1);
merges_file.write_all(b"#version: 0.2\n")?;
merges_file.write_all(
&merges
.into_iter()
.flat_map(|(pair, _)| {
format!("{} {}\n", self.vocab_r[&pair.0], self.vocab_r[&pair.1]).into_bytes()
})
.collect::<Vec<_>>()[..],
)?;
Ok(vec![vocab_path, merges_path])
}
fn get_trainer(&self) -> BpeTrainer {
BpeTrainer::default()
}
}
#[cfg(test)]
mod tests {
use super::*;
use tempfile::NamedTempFile;
#[test]
fn test_ordered_vocab_iter() {
let vocab_r: VocabR = [
(0, "a".into()),
(1, "b".into()),
(2, "c".into()),
(3, "ab".into()),
]
.iter()
.cloned()
.collect();
let order_vocab_iter = OrderedVocabIter::new(&vocab_r);
let serialized = serde_json::to_string(&order_vocab_iter).unwrap();
assert_eq!(serialized, "{\"a\":0,\"b\":1,\"c\":2,\"ab\":3}");
}
#[test]
fn test_unk_not_fused() {
let vocab: Vocab = [("<unk>".into(), 0), ("a".into(), 1), ("b".into(), 2)]
.iter()
.cloned()
.collect();
let bpe = BpeBuilder::default()
.vocab_and_merges(vocab, vec![])
.unk_token("<unk>".to_string())
.build()
.unwrap();
let tokens = bpe.tokenize("c").unwrap();
assert_eq!(tokens, vec![Token::new(0u32, "<unk>".into(), (0, 1)),]);
let tokens = bpe.tokenize("cc").unwrap();
assert_eq!(
tokens,
vec![
Token::new(0u32, "<unk>".into(), (0, 1)),
Token::new(0u32, "<unk>".into(), (1, 2)),
]
);
let tokens = bpe.tokenize("accb").unwrap();
assert_eq!(
tokens,
vec![
Token::new(1u32, "a".into(), (0, 1)),
Token::new(0u32, "<unk>".into(), (1, 2)),
Token::new(0u32, "<unk>".into(), (2, 3)),
Token::new(2u32, "b".into(), (3, 4)),
]
);
}
#[test]
fn test_unk_get_fused() {
let vocab: Vocab = [("<unk>".into(), 0), ("a".into(), 1), ("b".into(), 2)]
.iter()
.cloned()
.collect();
let bpe = BpeBuilder::default()
.vocab_and_merges(vocab, vec![])
.unk_token("<unk>".to_string())
.fuse_unk(true)
.build()
.unwrap();
let tokens = bpe.tokenize("c").unwrap();
assert_eq!(tokens, vec![Token::new(0u32, "<unk>".into(), (0, 1)),]);
let tokens = bpe.tokenize("cc").unwrap();
assert_eq!(tokens, vec![Token::new(0u32, "<unk>".into(), (0, 2)),]);
let tokens = bpe.tokenize("accb").unwrap();
assert_eq!(
tokens,
vec![
Token::new(1u32, "a".into(), (0, 1)),
Token::new(0u32, "<unk>".into(), (1, 3)),
Token::new(2u32, "b".into(), (3, 4)),
]
);
}
#[test]
// Test tokenization. With dropout set to 0 tokenization is deterministic,
// so we know exactly what the result should be.
//
// To test this, we'll build a simple model to tokenize the word 'unrelated'.
fn test_tokenize_with_and_without_dropout() {
let vocab: Vocab = [
("u".into(), 0),
("n".into(), 1),
("r".into(), 2),
("e".into(), 3),
("l".into(), 4),
("a".into(), 5),
("t".into(), 6),
("d".into(), 7),
("re".into(), 8),
("at".into(), 9),
("ed".into(), 10),
("un".into(), 11),
("ated".into(), 12),
("rel".into(), 13),
("related".into(), 14),
("unrelated".into(), 15),
]
.iter()
.cloned()
.collect();
let merges: Merges = vec![
("r".to_string(), "e".to_string()),
("a".to_string(), "t".to_string()),
("e".to_string(), "d".to_string()),
("u".to_string(), "n".to_string()),
("at".to_string(), "ed".to_string()),
("re".to_string(), "l".to_string()),
("rel".to_string(), "ated".to_string()),
("un".to_string(), "related".to_string()),
];
let mut bpe = BPE::new(vocab, merges);
// With no dropout:
let tokens = bpe.tokenize("unrelated").unwrap();
assert_eq!(tokens, vec![Token::new(15u32, "unrelated".into(), (0, 9))]);
// Now set dropout to 1.0. Result should be no merges performed.
bpe.dropout = Some(1.0);
let tokens = bpe.tokenize("unrelated").unwrap();
assert_eq!(
tokens,
vec![
Token::new(0u32, "u".into(), (0, 1)),
Token::new(1u32, "n".into(), (1, 2)),
Token::new(2u32, "r".into(), (2, 3)),
Token::new(3u32, "e".into(), (3, 4)),
Token::new(4u32, "l".into(), (4, 5)),
Token::new(5u32, "a".into(), (5, 6)),
Token::new(6u32, "t".into(), (6, 7)),
Token::new(3u32, "e".into(), (7, 8)),
Token::new(7u32, "d".into(), (8, 9)),
]
);
// Now try with dropout between 0 and 1.
bpe.dropout = Some(0.5);
let tokens = bpe.tokenize("unrelated").unwrap();
assert!(!tokens.is_empty() && tokens.len() <= 9);
}
#[test]
// Ensure `BPE::from_file` works as expected.
fn test_bpe_from_file() {
// Set up vocab file.
let mut vocab_file = NamedTempFile::new().unwrap();
vocab_file
.write_all(b"{\"a\": 0, \"b\": 1, \"c\": 2, \"ab\": 3}")
.unwrap();
// Set up merges file.
let mut merges_file = NamedTempFile::new().unwrap();
merges_file.write_all(b"#version: 0.2\na b").unwrap();
// Make sure we can instantiate a BPE model from the files.
let builder = BPE::from_file(
vocab_file.path().to_str().unwrap(),
merges_file.path().to_str().unwrap(),
);
let bpe = builder.build().unwrap();
// Check merges.
assert_eq!(bpe.merges.get(&(0, 1)).unwrap(), &(0u32, 3u32));
// Check vocab.
assert_eq!(bpe.vocab.get("a").unwrap(), &0u32);
assert_eq!(bpe.vocab.get("b").unwrap(), &1u32);
assert_eq!(bpe.vocab.get("c").unwrap(), &2u32);
assert_eq!(bpe.vocab.get("ab").unwrap(), &3u32);
}
#[test]
// Ensure `BPE::from_file` works as expected.
fn test_bpe_with_continuing_subword_prefix() {
let vocab: Vocab = vec![
("a".to_string(), 0),
("##b".to_string(), 1),
("##c".to_string(), 2),
("ab".to_string(), 3),
("abc".to_string(), 4),
]
.into_iter()
.collect();
let merges = vec![
("a".to_string(), "##b".to_string()),
("ab".to_string(), "##c".to_string()),
];
let bpe = BPE::builder()
.vocab_and_merges(vocab, merges)
.unk_token("[UNK]".to_string())
.continuing_subword_prefix("##".to_string())
.build()
.unwrap();
let res = bpe.tokenize("ab");
assert_eq!(
res.unwrap(),
vec![Token {
id: 3,
value: "ab".to_string(),
offsets: (0, 2)
}]
);
let res = bpe.tokenize("abc");
assert_eq!(
res.unwrap(),
vec![Token {
id: 4,
value: "abc".to_string(),
offsets: (0, 3)
}]
);
}
#[test]
// Ensure `MergeTokenOutOfVocabulary` error is returned when it should be.
fn test_bpe_from_file_merge_token_oov() {
// Set up vocab file.
let mut vocab_file = NamedTempFile::new().unwrap();
vocab_file
.write_all(b"{\"a\": 0, \"b\": 1, \"c\": 2, \"ab\": 3}")
.unwrap();
// Set up merges file.
let mut merges_file = NamedTempFile::new().unwrap();
merges_file.write_all(b"#version: 0.2\na b\na d").unwrap();
// Ensure the result of BPE::from_file is a MergeTokenOutOfVocabulary error.
match BPE::from_file(
vocab_file.path().to_str().unwrap(),
merges_file.path().to_str().unwrap(),
)
.build()
{
Ok(_) => unreachable!(),
Err(err) => match err.downcast_ref::<Error>() {
Some(Error::MergeTokenOutOfVocabulary(token)) => {
assert_eq!(*token, String::from("d"))
}
_ => unreachable!(),
},
}
}
#[test]
// Ensure `BadMerges` error is returned when there is an invalid line in the
// merges.txt file.
fn test_bpe_from_file_bad_merges() {
// Set up vocab file.
let mut vocab_file = NamedTempFile::new().unwrap();
vocab_file
.write_all("{\"a\": 0, \"b\": 1, \"c\": 2, \"ab\": 3}".as_bytes())
.unwrap();
// Set up merges file with a bad line.
let mut merges_file = NamedTempFile::new().unwrap();
merges_file.write_all(b"#version: 0.2\na b\nc").unwrap();
// Ensure the result of BPE::from_file is a BadMerges error.
match BPE::from_file(
vocab_file.path().to_str().unwrap(),
merges_file.path().to_str().unwrap(),
)
.build()
{
Ok(_) => unreachable!(),
Err(err) => match err.downcast_ref::<Error>() {
Some(Error::BadMerges(line)) => assert_eq!(*line, 2),
_ => unreachable!(),
},
}
}
#[test]
fn test_bpe_byte_fallback() {
// 0x61 == 'a' in bytes
let vocab: Vocab = [("<unk>".into(), 0), ("<0x61>".into(), 1)]
.iter()
.cloned()
.collect();
let bpe = BpeBuilder::default()
.vocab_and_merges(vocab, vec![])
.unk_token("<unk>".to_string())
.byte_fallback(true)
.build()
.unwrap();
let tokens = bpe.tokenize("c").unwrap();
assert_eq!(tokens, vec![Token::new(0u32, "<unk>".into(), (0, 1)),]);
let tokens = bpe.tokenize("a").unwrap();
assert_eq!(tokens, vec![Token::new(1u32, "<0x61>".into(), (0, 1)),]);
}
#[test]
fn test_bpe_byte_fallback_newline() {
// 0x0A == '\n' in bytes
let vocab: Vocab = [("<unk>".into(), 0), ("<0x0A>".into(), 1)]
.iter()
.cloned()
.collect();
let bpe = BpeBuilder::default()
.vocab_and_merges(vocab, vec![])
.unk_token("<unk>".to_string())
.byte_fallback(true)
.build()
.unwrap();
let tokens = bpe.tokenize("\n").unwrap();
assert_eq!(tokens, vec![Token::new(1u32, "<0x0A>".into(), (0, 1)),]);
}
}
| 0 |
hf_public_repos/tokenizers/tokenizers/src | hf_public_repos/tokenizers/tokenizers/src/decoders/sequence.rs | use crate::decoders::DecoderWrapper;
use crate::tokenizer::{Decoder, Result};
use crate::utils::macro_rules_attribute;
use serde::{Deserialize, Serialize};
#[derive(Clone, Debug)]
#[macro_rules_attribute(impl_serde_type!)]
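/// A [`Decoder`] that chains several decoders, feeding the output of each one into the next.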
pub struct Sequence {
decoders: Vec<DecoderWrapper>,
}
impl Sequence {
pub fn new(decoders: Vec<DecoderWrapper>) -> Self {
Self { decoders }
}
}
impl Decoder for Sequence {
fn decode_chain(&self, mut tokens: Vec<String>) -> Result<Vec<String>> {
for decoder in &self.decoders {
tokens = decoder.decode_chain(tokens)?;
}
Ok(tokens)
}
}
#[cfg(test)]
mod tests {
use super::*;
use crate::decoders::ctc::CTC;
use crate::pre_tokenizers::metaspace::Metaspace;
#[test]
fn sequence_basic() {
let decoders = vec![
DecoderWrapper::CTC(CTC::default()),
DecoderWrapper::Metaspace(Metaspace::default()),
];
let decoder = Sequence::new(decoders);
let tokens: Vec<String> = vec!["▁", "▁", "H", "H", "i", "i", "▁", "y", "o", "u"]
.into_iter()
.map(|s| s.to_string())
.collect();
let out_tokens = decoder.decode(tokens).unwrap();
assert_eq!(out_tokens, "Hi you");
}
}
| 0 |
hf_public_repos/tokenizers/tokenizers/src | hf_public_repos/tokenizers/tokenizers/src/decoders/fuse.rs | use crate::tokenizer::{Decoder, Result};
use monostate::MustBe;
use serde::{Deserialize, Serialize};
#[derive(Clone, Debug, Serialize, Deserialize, Default)]
/// Fuse simply fuses all tokens into one big string.
/// It's usually the last decoding step anyway, but this
/// decoder exists in case some decoders need to happen after that
/// step.
#[non_exhaustive]
pub struct Fuse {
#[serde(rename = "type")]
type_: MustBe!("Fuse"),
}
impl Fuse {
pub fn new() -> Self {
Self {
type_: MustBe!("Fuse"),
}
}
}
impl Decoder for Fuse {
fn decode_chain(&self, tokens: Vec<String>) -> Result<Vec<String>> {
let new_string = tokens.join("");
Ok(vec![new_string])
}
}
#[cfg(test)]
mod tests {
use super::*;
#[test]
fn decode() {
let decoder = Fuse::new();
let res = decoder
.decode_chain(vec!["Hey".into(), " friend!".into()])
.unwrap();
assert_eq!(res, vec!["Hey friend!"]);
}
}
| 0 |
hf_public_repos/tokenizers/tokenizers/src | hf_public_repos/tokenizers/tokenizers/src/decoders/mod.rs | pub mod bpe;
pub mod byte_fallback;
pub mod ctc;
pub mod fuse;
pub mod sequence;
pub mod strip;
pub mod wordpiece;
// Re-export these as decoders
pub use super::pre_tokenizers::byte_level;
pub use super::pre_tokenizers::metaspace;
use serde::{Deserialize, Serialize};
use crate::decoders::bpe::BPEDecoder;
use crate::decoders::byte_fallback::ByteFallback;
use crate::decoders::ctc::CTC;
use crate::decoders::fuse::Fuse;
use crate::decoders::sequence::Sequence;
use crate::decoders::strip::Strip;
use crate::decoders::wordpiece::WordPiece;
use crate::normalizers::replace::Replace;
use crate::pre_tokenizers::byte_level::ByteLevel;
use crate::pre_tokenizers::metaspace::Metaspace;
use crate::{Decoder, Result};
#[derive(Serialize, Deserialize, Clone, Debug)]
#[serde(untagged)]
pub enum DecoderWrapper {
BPE(BPEDecoder),
ByteLevel(ByteLevel),
WordPiece(WordPiece),
Metaspace(Metaspace),
CTC(CTC),
Sequence(Sequence),
Replace(Replace),
Fuse(Fuse),
Strip(Strip),
ByteFallback(ByteFallback),
}
impl Decoder for DecoderWrapper {
fn decode_chain(&self, tokens: Vec<String>) -> Result<Vec<String>> {
match self {
Self::BPE(bpe) => bpe.decode_chain(tokens),
Self::ByteLevel(bl) => bl.decode_chain(tokens),
Self::Metaspace(ms) => ms.decode_chain(tokens),
Self::WordPiece(wp) => wp.decode_chain(tokens),
Self::CTC(ctc) => ctc.decode_chain(tokens),
Self::Sequence(seq) => seq.decode_chain(tokens),
Self::Replace(seq) => seq.decode_chain(tokens),
Self::ByteFallback(bf) => bf.decode_chain(tokens),
Self::Strip(bf) => bf.decode_chain(tokens),
Self::Fuse(bf) => bf.decode_chain(tokens),
}
}
}
impl_enum_from!(BPEDecoder, DecoderWrapper, BPE);
impl_enum_from!(ByteLevel, DecoderWrapper, ByteLevel);
impl_enum_from!(ByteFallback, DecoderWrapper, ByteFallback);
impl_enum_from!(Fuse, DecoderWrapper, Fuse);
impl_enum_from!(Strip, DecoderWrapper, Strip);
impl_enum_from!(Metaspace, DecoderWrapper, Metaspace);
impl_enum_from!(WordPiece, DecoderWrapper, WordPiece);
impl_enum_from!(CTC, DecoderWrapper, CTC);
impl_enum_from!(Sequence, DecoderWrapper, Sequence);
impl_enum_from!(Replace, DecoderWrapper, Replace);
#[cfg(test)]
mod tests {
use super::*;
#[test]
fn decoder_serialization() {
let json = r#"{"type":"Sequence","decoders":[{"type":"ByteFallback"},{"type":"Metaspace","replacement":"▁","add_prefix_space":true,"prepend_scheme":"always"}]}"#;
let decoder: DecoderWrapper = serde_json::from_str(json).unwrap();
let serialized = serde_json::to_string(&decoder).unwrap();
assert_eq!(serialized, json);
}
#[test]
fn decoder_serialization_other_no_arg() {
let json = r#"{"type":"Sequence","decoders":[{"type":"Fuse"},{"type":"Metaspace","replacement":"▁","add_prefix_space":true,"prepend_scheme":"always"}]}"#;
let decoder: DecoderWrapper = serde_json::from_str(json).unwrap();
let serialized = serde_json::to_string(&decoder).unwrap();
assert_eq!(serialized, json);
}
#[test]
fn decoder_serialization_no_decode() {
let json = r#"{"type":"Sequence","decoders":[{},{"type":"Metaspace","replacement":"▁","add_prefix_space":true,"prepend_scheme":"always"}]}"#;
assert!(serde_json::from_str::<DecoderWrapper>(json).is_err());
}
}
| 0 |
hf_public_repos/tokenizers/tokenizers/src | hf_public_repos/tokenizers/tokenizers/src/decoders/ctc.rs | use crate::decoders::wordpiece;
use crate::tokenizer::{Decoder, Result};
use itertools::Itertools;
use serde::{Deserialize, Serialize};
#[derive(Debug, Clone, Serialize, Deserialize)]
/// The CTC (Connectionist Temporal Classification) decoder takes care
/// of sanitizing a list of input tokens.
/// Due to alignment, the output of some models can come
/// with duplicated tokens.
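///
/// A small usage sketch (mirrors the unit tests below):
///
/// ```
/// use tokenizers::decoders::ctc::CTC;
/// use tokenizers::Decoder;
///
/// let decoder = CTC::default();
/// let tokens: Vec<String> = "<pad> h h e e <pad> l l <pad> l o"
///     .split(' ')
///     .map(|s| s.to_string())
///     .collect();
/// assert_eq!(decoder.decode(tokens).unwrap(), "hello");
/// ```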
#[serde(tag = "type")]
#[non_exhaustive]
pub struct CTC {
/// The pad token used by CTC to delimit a new token.
pub pad_token: String,
/// The word delimiter token. It will be replaced by a `<space>`.
pub word_delimiter_token: String,
    /// Whether to clean up some tokenization artifacts.
    /// Mainly spaces before punctuation, and some abbreviated English forms.
pub cleanup: bool,
}
impl CTC {
pub fn new(pad_token: String, word_delimiter_token: String, cleanup: bool) -> Self {
Self {
pad_token,
word_delimiter_token,
cleanup,
}
}
}
impl Default for CTC {
fn default() -> Self {
Self {
pad_token: "<pad>".to_string(),
word_delimiter_token: "|".to_string(),
cleanup: true,
}
}
}
impl Decoder for CTC {
fn decode_chain(&self, tokens: Vec<String>) -> Result<Vec<String>> {
Ok(tokens
.into_iter()
.dedup()
.filter_map(|token| {
let mut replaced = token.replace(&self.pad_token, "");
if self.cleanup {
replaced =
wordpiece::cleanup(&replaced).replace(&self.word_delimiter_token, " ");
}
if replaced.is_empty() {
None
} else {
Some(replaced)
}
})
.collect())
}
}
#[cfg(test)]
mod tests {
use super::*;
#[test]
fn handmade_sample() {
let ctc_decoder = CTC::default();
let id_to_string_result = "<pad> <pad> h e e l l <pad> l o o o <pad>"
.split(' ')
.map(|s| s.to_string())
.collect();
assert_eq!(
ctc_decoder.decode_chain(id_to_string_result).unwrap(),
vec!["h", "e", "l", "l", "o"]
);
}
#[test]
fn handmade_with_delimiter_sample() {
let ctc_decoder = CTC::default();
let id_to_string_result = "<pad> <pad> h e e l l <pad> l o o o <pad> <pad> | <pad> w o o o r <pad> <pad> l l d <pad> <pad> <pad> <pad>"
.split(' ')
.map(|s| s.to_string())
.collect();
assert_eq!(
ctc_decoder.decode_chain(id_to_string_result).unwrap(),
vec!["h", "e", "l", "l", "o", " ", "w", "o", "r", "l", "d"]
);
}
#[test]
fn librispeech_sample() {
let ctc_decoder = CTC::default();
let id_to_string_result = "<pad> <pad> <pad> <pad> <pad> <pad> <pad> <pad> <pad> <pad> <pad> <pad> <pad> <pad> <pad> <pad> <pad> <pad> <pad> <pad> <pad> <pad> <pad> <pad> <pad> <pad> <pad> <pad> <pad> <pad> A | | <pad> M <pad> <pad> <pad> <pad> A <pad> <pad> N <pad> <pad> <pad> | | | <pad> <pad> <pad> <pad> S <pad> <pad> <pad> A I <pad> D D | | T T <pad> O <pad> | | T H E E | | | <pad> U U <pad> N N <pad> I <pad> <pad> V <pad> <pad> <pad> E R R <pad> <pad> <pad> S E E | | <pad> <pad> <pad> <pad> <pad> <pad> <pad> <pad> <pad> <pad> <pad> <pad> <pad> <pad> <pad> <pad> <pad> <pad> <pad> <pad> <pad> <pad> <pad> <pad> <pad> <pad> <pad> <pad> <pad> <pad> <pad> <pad> <pad> <pad> <pad> <pad> <pad> <pad> S S <pad> <pad> <pad> <pad> I <pad> R R <pad> <pad> | | | <pad> <pad> <pad> <pad> <pad> <pad> <pad> <pad> <pad> <pad> <pad> <pad> <pad> <pad> <pad> <pad> <pad> <pad> <pad> I <pad> <pad> <pad> | <pad> <pad> <pad> E X <pad> <pad> <pad> <pad> <pad> <pad> <pad> <pad> <pad> <pad> I <pad> S <pad> <pad> T <pad> <pad> | | <pad> <pad> <pad> <pad> <pad> <pad> <pad> <pad> <pad> <pad> <pad> <pad> <pad> <pad> <pad> <pad> <pad> <pad> <pad> <pad> <pad> <pad> <pad> <pad> <pad> <pad> <pad>".split(' ').map(|s| s.to_string()).collect();
assert_eq!(
ctc_decoder.decode_chain(id_to_string_result).unwrap(),
vec![
"A", " ", "M", "A", "N", " ", "S", "A", "I", "D", " ", "T", "O", " ", "T", "H",
"E", " ", "U", "N", "I", "V", "E", "R", "S", "E", " ", "S", "I", "R", " ", "I",
" ", "E", "X", "I", "S", "T", " "
]
);
}
#[test]
fn another_librispeech_sample() {
let ctc_decoder = CTC::default();
let id_to_string_result = "<pad> <pad> <pad> <pad> <pad> <pad> <pad> <pad> <pad> <pad> <pad> <pad> <pad> <pad> <pad> <pad> <pad> <pad> <pad> <pad> H <pad> I <pad> S S | | <pad> <pad> <pad> I N <pad> <pad> S <pad> T T <pad> <pad> A N C C T <pad> | | | | | <pad> <pad> <pad> <pad> P <pad> <pad> <pad> <pad> A <pad> <pad> N N N <pad> <pad> I <pad> C <pad> <pad> | | <pad> W <pad> <pad> A S <pad> | | <pad> <pad> <pad> F <pad> <pad> O L <pad> <pad> L L O O W E E D | | <pad> B <pad> <pad> <pad> Y <pad> | | | A | | <pad> S S S <pad> M M <pad> <pad> <pad> A L L <pad> <pad> <pad> <pad> L <pad> | | | <pad> <pad> <pad> <pad> S H H <pad> <pad> <pad> <pad> A R R <pad> <pad> P <pad> <pad> | <pad> <pad> <pad> <pad> <pad> <pad> <pad> <pad> <pad> B <pad> <pad> L L <pad> <pad> <pad> <pad> <pad> O W W <pad> <pad> | | | <pad> <pad> <pad> <pad> <pad> <pad> <pad> H <pad> <pad> <pad> <pad> <pad> <pad> <pad> I G H H | | <pad> <pad> O N <pad> | | H <pad> I S S | | <pad> <pad> C H H <pad> <pad> <pad> E <pad> S S <pad> T T <pad> <pad> | | | <pad> <pad> <pad> <pad> <pad> <pad> <pad> <pad> <pad> <pad> <pad> <pad> <pad> <pad> <pad> <pad> <pad> <pad> <pad> <pad> <pad> <pad> <pad> <pad> <pad> <pad> <pad> <pad>".split(' ').map(|s| s.to_string()).collect();
assert_eq!(
ctc_decoder.decode_chain(id_to_string_result).unwrap(),
vec![
"H", "I", "S", " ", "I", "N", "S", "T", "A", "N", "C", "T", " ", "P", "A", "N",
"I", "C", " ", "W", "A", "S", " ", "F", "O", "L", "L", "O", "W", "E", "D", " ",
"B", "Y", " ", "A", " ", "S", "M", "A", "L", "L", " ", "S", "H", "A", "R", "P",
" ", "B", "L", "O", "W", " ", "H", "I", "G", "H", " ", "O", "N", " ", "H", "I",
"S", " ", "C", "H", "E", "S", "T", " "
]
);
}
}
| 0 |
hf_public_repos/tokenizers/tokenizers/src | hf_public_repos/tokenizers/tokenizers/src/decoders/byte_fallback.rs | use crate::tokenizer::{Decoder, Result};
use monostate::MustBe;
use serde::{Deserialize, Serialize};
#[derive(Deserialize, Clone, Debug, Serialize, Default)]
/// ByteFallback is a simple trick which converts tokens looking like `<0x61>`
/// to pure bytes, and attempts to make them into a string. If the tokens
/// cannot be decoded you will get � instead for each inconvertible byte token
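///
/// A small usage sketch:
///
/// ```
/// use tokenizers::decoders::byte_fallback::ByteFallback;
/// use tokenizers::Decoder;
///
/// let decoder = ByteFallback::new();
/// let tokens = vec!["<0x61>".to_string(), "<0x62>".to_string(), "!".to_string()];
/// assert_eq!(decoder.decode_chain(tokens).unwrap(), vec!["ab", "!"]);
/// ```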
#[non_exhaustive]
pub struct ByteFallback {
#[serde(rename = "type")]
type_: MustBe!("ByteFallback"),
}
impl ByteFallback {
pub fn new() -> Self {
Self {
type_: MustBe!("ByteFallback"),
}
}
}
impl Decoder for ByteFallback {
fn decode_chain(&self, tokens: Vec<String>) -> Result<Vec<String>> {
let mut new_tokens: Vec<String> = vec![];
let mut previous_byte_tokens: Vec<u8> = vec![];
for token in tokens {
let bytes = if token.len() == 6 && token.starts_with("<0x") && token.ends_with('>') {
if let Ok(byte) = u8::from_str_radix(&token[3..5], 16) {
Some(byte)
} else {
None
}
} else {
None
};
if let Some(bytes) = bytes {
previous_byte_tokens.push(bytes);
} else {
if !previous_byte_tokens.is_empty() {
if let Ok(string) = String::from_utf8(previous_byte_tokens.clone()) {
new_tokens.push(string);
} else {
for _ in 0..previous_byte_tokens.len() {
new_tokens.push("�".into());
}
}
previous_byte_tokens.clear();
}
new_tokens.push(token);
}
}
if !previous_byte_tokens.is_empty() {
if let Ok(string) = String::from_utf8(previous_byte_tokens.clone()) {
new_tokens.push(string);
} else {
for _ in 0..previous_byte_tokens.len() {
new_tokens.push("�".into());
}
}
}
Ok(new_tokens)
}
}
#[cfg(test)]
mod tests {
use super::*;
#[test]
fn decode() {
let decoder = ByteFallback::new();
let res = decoder
.decode_chain(vec!["Hey".into(), "friend!".into()])
.unwrap();
assert_eq!(res, vec!["Hey", "friend!"]);
let res = decoder.decode_chain(vec!["<0x61>".into()]).unwrap();
assert_eq!(res, vec!["a"]);
let res = decoder.decode_chain(vec!["<0xE5>".into()]).unwrap();
assert_eq!(res, vec!["�"]);
let res = decoder
.decode_chain(vec!["<0xE5>".into(), "<0x8f>".into()])
.unwrap();
assert_eq!(res, vec!["�", "�"]);
// 叫
let res = decoder
.decode_chain(vec!["<0xE5>".into(), "<0x8f>".into(), "<0xab>".into()])
.unwrap();
assert_eq!(res, vec!["叫"]);
let res = decoder
.decode_chain(vec![
"<0xE5>".into(),
"<0x8f>".into(),
"<0xab>".into(),
"a".into(),
])
.unwrap();
assert_eq!(res, vec!["叫", "a"]);
let res = decoder
.decode_chain(vec!["<0xE5>".into(), "<0x8f>".into(), "a".into()])
.unwrap();
assert_eq!(res, vec!["�", "�", "a"]);
}
}
| 0 |
hf_public_repos/tokenizers/tokenizers/src | hf_public_repos/tokenizers/tokenizers/src/decoders/bpe.rs | use crate::tokenizer::{Decoder, Result};
use serde::{Deserialize, Serialize};
#[derive(Deserialize, Clone, Debug, Serialize)]
/// Allows decoding original BPE output by joining all the tokens and then replacing
/// the suffix used to identify end-of-word tokens with whitespace
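///
/// A small usage sketch:
///
/// ```
/// use tokenizers::decoders::bpe::BPEDecoder;
/// use tokenizers::Decoder;
///
/// let decoder = BPEDecoder::default();
/// let decoded = decoder.decode(vec!["hello</w>".into(), "world</w>".into()]).unwrap();
/// assert_eq!(decoded, "hello world");
/// ```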
#[serde(tag = "type")]
#[non_exhaustive]
pub struct BPEDecoder {
pub suffix: String,
}
impl BPEDecoder {
pub fn new(suffix: String) -> Self {
Self { suffix }
}
}
impl Default for BPEDecoder {
fn default() -> Self {
Self::new("</w>".into())
}
}
impl Decoder for BPEDecoder {
fn decode_chain(&self, tokens: Vec<String>) -> Result<Vec<String>> {
        // `saturating_sub` avoids an underflow when `tokens` is empty.
        let n = tokens.len().saturating_sub(1);
Ok(tokens
.into_iter()
.enumerate()
.map(|(i, token)| {
let replacement = if i == n { "" } else { " " };
token.replace(&self.suffix, replacement)
})
.collect())
}
}
| 0 |
hf_public_repos/tokenizers/tokenizers/src | hf_public_repos/tokenizers/tokenizers/src/decoders/wordpiece.rs | use crate::tokenizer::{Decoder, Result};
use serde::{Deserialize, Serialize};
#[derive(Deserialize, Clone, Debug, Serialize)]
/// The WordPiece decoder takes care of decoding a list of wordpiece tokens
/// back into a readable string.
#[serde(tag = "type")]
#[non_exhaustive]
pub struct WordPiece {
/// The prefix to be used for continuing subwords
pub prefix: String,
    /// Whether to clean up some tokenization artifacts (spaces before punctuation, ...)
pub cleanup: bool,
}
impl WordPiece {
pub fn new(prefix: String, cleanup: bool) -> Self {
Self { prefix, cleanup }
}
}
impl Default for WordPiece {
fn default() -> Self {
Self {
prefix: "##".to_owned(),
cleanup: true,
}
}
}
pub fn cleanup(dirty_input: &str) -> String {
dirty_input
.replace(" .", ".")
.replace(" ?", "?")
.replace(" !", "!")
.replace(" ,", ",")
.replace(" ' ", "'")
.replace(" n't", "n't")
.replace(" 'm", "'m")
.replace(" do not", " don't")
.replace(" 's", "'s")
.replace(" 've", "'ve")
.replace(" 're", "'re")
}
impl Decoder for WordPiece {
fn decode_chain(&self, mut tokens: Vec<String>) -> Result<Vec<String>> {
tokens
.iter_mut()
.enumerate()
.map(|(i, token)| {
if i != 0 {
if token.starts_with(&self.prefix) {
*token = token.replacen(&self.prefix, "", 1);
} else {
*token = format!(" {}", token);
}
}
if self.cleanup {
*token = cleanup(token);
}
Ok(token.to_string())
})
.collect::<Result<_>>()
}
}
#[cfg(test)]
mod tests {
use super::*;
#[test]
fn wordpiece_decoder() {
let decoder = WordPiece::new("##".to_string(), false);
assert_eq!(
decoder
.decode(vec![
"##uelo".to_string(),
"Ara".to_string(),
"##új".to_string(),
"##o".to_string(),
"No".to_string(),
"##guera".to_string()
])
.unwrap(),
"##uelo Araújo Noguera"
);
}
}
| 0 |
hf_public_repos/tokenizers/tokenizers/src | hf_public_repos/tokenizers/tokenizers/src/decoders/strip.rs | use crate::tokenizer::{Decoder, Result};
use serde::{Deserialize, Serialize};
#[derive(Deserialize, Clone, Debug, Serialize, Default)]
/// Strip removes up to `start` occurrences of `content` from the beginning of each token
/// and up to `stop` occurrences from its end, leaving the rest of the token untouched.
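///
/// A small usage sketch, stripping a leading `▁` from each token:
///
/// ```
/// use tokenizers::decoders::strip::Strip;
/// use tokenizers::Decoder;
///
/// let decoder = Strip::new('▁', 1, 0);
/// let res = decoder
///     .decode_chain(vec!["▁Hey".into(), "▁friend".into()])
///     .unwrap();
/// assert_eq!(res, vec!["Hey", "friend"]);
/// ```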
#[serde(tag = "type")]
#[non_exhaustive]
pub struct Strip {
pub content: char,
pub start: usize,
pub stop: usize,
}
impl Strip {
pub fn new(content: char, start: usize, stop: usize) -> Self {
Self {
content,
start,
stop,
}
}
}
impl Decoder for Strip {
fn decode_chain(&self, tokens: Vec<String>) -> Result<Vec<String>> {
Ok(tokens
.into_iter()
.map(|token| {
let chars: Vec<char> = token.chars().collect();
let mut start_cut = 0;
for (i, &c) in chars.iter().enumerate().take(self.start) {
if c == self.content {
start_cut = i + 1;
continue;
} else {
break;
}
}
let mut stop_cut = chars.len();
for i in 0..self.stop {
let index = chars.len() - i - 1;
if chars[index] == self.content {
stop_cut = index;
continue;
} else {
break;
}
}
let new_token: String = chars[start_cut..stop_cut].iter().collect();
new_token
})
.collect())
}
}
#[cfg(test)]
mod tests {
use super::*;
#[test]
fn decode() {
let decoder = Strip::new('H', 1, 0);
let res = decoder
.decode_chain(vec!["Hey".into(), " friend!".into(), "HHH".into()])
.unwrap();
assert_eq!(res, vec!["ey", " friend!", "HH"]);
let decoder = Strip::new('y', 0, 1);
let res = decoder
.decode_chain(vec!["Hey".into(), " friend!".into()])
.unwrap();
assert_eq!(res, vec!["He", " friend!"]);
}
}
| 0 |
hf_public_repos/tokenizers/tokenizers/src | hf_public_repos/tokenizers/tokenizers/src/utils/truncation.rs | use crate::tokenizer::{Encoding, Result};
use serde::{Deserialize, Serialize};
use std::cmp;
use std::mem;
#[derive(Debug, Clone, Copy, PartialEq, Serialize, Deserialize, Eq, Default)]
pub enum TruncationDirection {
Left,
#[default]
Right,
}
impl std::convert::AsRef<str> for TruncationDirection {
fn as_ref(&self) -> &str {
match self {
TruncationDirection::Left => "left",
TruncationDirection::Right => "right",
}
}
}
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct TruncationParams {
#[serde(default)]
pub direction: TruncationDirection,
pub max_length: usize,
pub strategy: TruncationStrategy,
pub stride: usize,
}
impl Default for TruncationParams {
fn default() -> Self {
Self {
max_length: 512,
strategy: TruncationStrategy::default(),
stride: 0,
direction: TruncationDirection::default(),
}
}
}
#[derive(thiserror::Error, Debug)]
pub enum TruncationError {
/// We are supposed to truncate the pair sequence, but it has not been provided.
#[error("Truncation error: Second sequence not provided")]
SecondSequenceNotProvided,
/// We cannot truncate the target sequence enough to respect the provided max length.
#[error("Truncation error: Sequence to truncate too short to respect the provided max_length")]
SequenceTooShort,
}
#[derive(Debug, Clone, Copy, PartialEq, Serialize, Deserialize, Eq)]
pub enum TruncationStrategy {
LongestFirst,
OnlyFirst,
OnlySecond,
}
impl Default for TruncationStrategy {
fn default() -> Self {
Self::LongestFirst
}
}
impl std::convert::AsRef<str> for TruncationStrategy {
fn as_ref(&self) -> &str {
match self {
Self::LongestFirst => "longest_first",
Self::OnlyFirst => "only_first",
Self::OnlySecond => "only_second",
}
}
}
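/// Truncate `encoding` (and `pair_encoding`, when provided) so that the combined number of
/// ids does not exceed `params.max_length`, following the configured strategy, stride and
/// direction. A `max_length` of 0 empties both encodings.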
pub fn truncate_encodings(
mut encoding: Encoding,
mut pair_encoding: Option<Encoding>,
params: &TruncationParams,
) -> Result<(Encoding, Option<Encoding>)> {
if params.max_length == 0 {
encoding.truncate(0, params.stride, params.direction);
if let Some(other_encoding) = pair_encoding.as_mut() {
other_encoding.truncate(0, params.stride, params.direction);
}
return Ok((encoding, pair_encoding));
}
let total_length = encoding.get_ids().len()
+ pair_encoding
.as_ref()
.map(|e| e.get_ids().len())
.unwrap_or(0);
let to_remove = if total_length > params.max_length {
total_length - params.max_length
} else {
return Ok((encoding, pair_encoding));
};
match params.strategy {
TruncationStrategy::LongestFirst => {
if let Some(other_encoding) = pair_encoding.as_mut() {
// Assuming n1 <= n2, there are 3 cases
// Case 1:
// No truncation needs to be performed.
// This scenario is handled before the match.
// Case 2:
// Only the longer input needs to be truncated.
// n1 = n1
// n2 = max_length - n1
// Case 3:
// Both inputs must be truncated.
// n1 = max_length / 2
// n2 = n1 + max_length % 2
let mut n1 = encoding.get_ids().len();
let mut n2 = other_encoding.get_ids().len();
let mut swap = false;
// Ensure n1 is the length of the shortest input
if n1 > n2 {
swap = true;
mem::swap(&mut n1, &mut n2);
}
if n1 > params.max_length {
// This needs to be a special case
// to avoid max_length - n1 < 0
// since n1 and n2 are unsigned
n2 = n1;
} else {
n2 = cmp::max(n1, params.max_length - n1);
}
if n1 + n2 > params.max_length {
n1 = params.max_length / 2;
n2 = n1 + params.max_length % 2;
}
                // Swap lengths back if we swapped them previously
if swap {
mem::swap(&mut n1, &mut n2);
}
encoding.truncate(n1, params.stride, params.direction);
other_encoding.truncate(n2, params.stride, params.direction);
} else {
encoding.truncate(total_length - to_remove, params.stride, params.direction);
}
}
TruncationStrategy::OnlyFirst | TruncationStrategy::OnlySecond => {
let target = if params.strategy == TruncationStrategy::OnlyFirst {
Ok(&mut encoding)
} else if let Some(encoding) = pair_encoding.as_mut() {
Ok(encoding)
} else {
Err(Box::new(TruncationError::SecondSequenceNotProvided))
}?;
let target_len = target.get_ids().len();
if target_len > to_remove {
target.truncate(target_len - to_remove, params.stride, params.direction);
} else {
return Err(Box::new(TruncationError::SequenceTooShort));
}
}
}
Ok((encoding, pair_encoding))
}
#[cfg(test)]
mod tests {
use super::*;
use crate::tokenizer::Encoding;
use std::collections::HashMap;
fn get_empty() -> Encoding {
Encoding::new(
vec![],
vec![],
vec![],
vec![],
vec![],
vec![],
vec![],
vec![],
HashMap::new(),
)
}
fn get_short() -> Encoding {
Encoding::new(
vec![1, 2],
vec![0, 0],
vec![String::from("a"), String::from("b")],
vec![Some(0), Some(1)],
vec![(0, 1), (1, 2)],
vec![0, 0],
vec![1, 1],
vec![],
HashMap::new(),
)
}
fn get_medium() -> Encoding {
Encoding::new(
vec![3, 4, 5, 6],
vec![0, 0, 0, 0],
vec![
String::from("d"),
String::from("e"),
String::from("f"),
String::from("g"),
],
vec![Some(0), Some(1), Some(2), Some(3)],
vec![(0, 1), (1, 2), (2, 3), (3, 4)],
vec![0, 0, 0, 0],
vec![1, 1, 1, 1],
vec![],
HashMap::new(),
)
}
fn get_long() -> Encoding {
Encoding::new(
vec![7, 8, 9, 10, 11, 12, 13, 14],
vec![0, 0, 0, 0, 0, 0, 0, 0],
vec![
String::from("h"),
String::from("i"),
String::from("j"),
String::from("k"),
String::from("l"),
String::from("m"),
String::from("n"),
String::from("o"),
],
vec![
Some(0),
Some(1),
Some(2),
Some(3),
Some(4),
Some(5),
Some(6),
Some(7),
],
vec![
(0, 1),
(1, 2),
(2, 3),
(3, 4),
(4, 5),
(5, 6),
(6, 7),
(6, 8),
],
vec![0, 0, 0, 0, 0, 0, 0, 0],
vec![1, 1, 1, 1, 1, 1, 1, 1],
vec![],
HashMap::new(),
)
}
fn truncate_and_assert(
encoding1: Encoding,
encoding2: Encoding,
params: &TruncationParams,
n1: usize,
n2: usize,
) {
match truncate_encodings(encoding1, Some(encoding2), params) {
Ok((e1, Some(e2))) => {
assert!(e1.get_ids().len() == n1);
assert!(e2.get_ids().len() == n2);
}
_ => panic!(),
};
}
#[test]
fn truncate_encodings_longest_first() {
let params = TruncationParams {
max_length: 7,
strategy: TruncationStrategy::LongestFirst,
stride: 0,
direction: TruncationDirection::Right,
};
truncate_and_assert(get_empty(), get_empty(), ¶ms, 0, 0);
truncate_and_assert(get_empty(), get_short(), ¶ms, 0, 2);
truncate_and_assert(get_empty(), get_medium(), ¶ms, 0, 4);
truncate_and_assert(get_empty(), get_long(), ¶ms, 0, 7);
truncate_and_assert(get_short(), get_empty(), ¶ms, 2, 0);
truncate_and_assert(get_short(), get_short(), ¶ms, 2, 2);
truncate_and_assert(get_short(), get_medium(), ¶ms, 2, 4);
truncate_and_assert(get_short(), get_long(), ¶ms, 2, 5);
truncate_and_assert(get_medium(), get_empty(), ¶ms, 4, 0);
truncate_and_assert(get_medium(), get_short(), ¶ms, 4, 2);
truncate_and_assert(get_medium(), get_medium(), ¶ms, 3, 4);
truncate_and_assert(get_medium(), get_long(), ¶ms, 3, 4);
truncate_and_assert(get_long(), get_empty(), ¶ms, 7, 0);
truncate_and_assert(get_long(), get_short(), ¶ms, 5, 2);
truncate_and_assert(get_long(), get_medium(), ¶ms, 4, 3);
truncate_and_assert(get_long(), get_long(), ¶ms, 3, 4);
}
#[test]
fn truncate_encodings_empty() {
let params = TruncationParams {
max_length: 0,
strategy: TruncationStrategy::LongestFirst,
stride: 0,
direction: TruncationDirection::Right,
};
truncate_and_assert(get_empty(), get_short(), ¶ms, 0, 0);
truncate_and_assert(get_medium(), get_medium(), ¶ms, 0, 0);
truncate_and_assert(get_long(), get_long(), ¶ms, 0, 0);
}
#[test]
fn test_deserialize_defaults() {
let old_truncation_params = r#"{"max_length":256,"strategy":"LongestFirst","stride":0}"#;
let params: TruncationParams = serde_json::from_str(old_truncation_params).unwrap();
assert_eq!(params.direction, TruncationDirection::Right);
}
}
| 0 |
hf_public_repos/tokenizers/tokenizers/src | hf_public_repos/tokenizers/tokenizers/src/utils/from_pretrained.rs | use crate::Result;
use hf_hub::{api::sync::ApiBuilder, Repo, RepoType};
use std::collections::HashMap;
use std::path::PathBuf;
/// Defines the additional parameters available for the `from_pretrained` function
#[derive(Debug, Clone)]
pub struct FromPretrainedParameters {
pub revision: String,
pub user_agent: HashMap<String, String>,
pub auth_token: Option<String>,
}
impl Default for FromPretrainedParameters {
fn default() -> Self {
Self {
revision: "main".into(),
user_agent: HashMap::new(),
auth_token: None,
}
}
}
/// Downloads and caches the identified tokenizer if it exists on
/// the Hugging Face Hub, and returns a local path to the file
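///
/// A usage sketch via the public `Tokenizer::from_pretrained` wrapper, which relies on this
/// helper (marked `ignore`: it needs the `http` feature and network access, and
/// `"bert-base-cased"` is just an example identifier):
///
/// ```ignore
/// use tokenizers::Tokenizer;
///
/// let tokenizer = Tokenizer::from_pretrained("bert-base-cased", None).unwrap();
/// ```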
pub fn from_pretrained<S: AsRef<str>>(
identifier: S,
params: Option<FromPretrainedParameters>,
) -> Result<PathBuf> {
let identifier: String = identifier.as_ref().to_string();
let valid_chars = ['-', '_', '.', '/'];
let is_valid_char = |x: char| x.is_alphanumeric() || valid_chars.contains(&x);
let valid = identifier.chars().all(is_valid_char);
let valid_chars_stringified = valid_chars
.iter()
.fold(vec![], |mut buf, x| {
buf.push(format!("'{}'", x));
buf
})
.join(", "); // "'/', '-', '_', '.'"
if !valid {
return Err(format!(
"Model \"{}\" contains invalid characters, expected only alphanumeric or {valid_chars_stringified}",
identifier
)
.into());
}
let params = params.unwrap_or_default();
let revision = ¶ms.revision;
let valid_revision = revision.chars().all(is_valid_char);
if !valid_revision {
return Err(format!(
"Revision \"{}\" contains invalid characters, expected only alphanumeric or {valid_chars_stringified}",
revision
)
.into());
}
let mut builder = ApiBuilder::new();
if let Some(token) = params.auth_token {
builder = builder.with_token(Some(token));
}
let api = builder.build()?;
let repo = Repo::with_revision(identifier, RepoType::Model, params.revision);
let api = api.repo(repo);
Ok(api.get("tokenizer.json")?)
}
| 0 |
hf_public_repos/tokenizers/tokenizers/src | hf_public_repos/tokenizers/tokenizers/src/utils/mod.rs | pub(crate) mod cache;
#[cfg(feature = "http")]
pub(crate) mod from_pretrained;
#[cfg(feature = "unstable_wasm")]
mod fancy;
#[cfg(feature = "unstable_wasm")]
pub use fancy::SysRegex;
#[cfg(not(feature = "unstable_wasm"))]
mod onig;
#[cfg(not(feature = "unstable_wasm"))]
pub use crate::utils::onig::SysRegex;
pub mod iter;
pub mod padding;
pub mod parallelism;
pub(crate) mod progress;
pub mod truncation;
use serde::{Serialize, Serializer};
use std::collections::{BTreeMap, HashMap};
pub(crate) fn ordered_map<S, K, V>(
value: &HashMap<K, V>,
serializer: S,
) -> std::result::Result<S::Ok, S::Error>
where
S: Serializer,
K: Serialize + std::cmp::Ord,
V: Serialize,
{
let ordered: BTreeMap<_, _> = value.iter().collect();
ordered.serialize(serializer)
}
macro_rules! impl_enum_from (
($from_ty:ty, $enum:ty, $variant:ident) => {
impl From<$from_ty> for $enum {
fn from(from: $from_ty) -> Self {
<$enum>::$variant(from)
}
}
}
);
/// Implement `serde::{Serialize, Deserialize}` with the `#[serde(tag = "type")]` attribute for a given struct.
/// Panics when a JSON string being deserialized is missing the `type` field.
///
/// # Examples
///
/// ```
/// # #[macro_use] extern crate tokenizers;
/// use serde::{Serialize, Deserialize};
///
/// fn main() {
/// impl_serde_type!{
/// #[derive(Debug)]
/// struct Point {
/// x: i32,
/// #[serde(default = "default_y")]
/// y: i32,
/// }
/// }
/// fn default_y() -> i32 {
/// 5
/// }
///
/// let point = Point { x: 1, y: 2 };
/// let serialized_s = r#"{"type":"Point","x":1,"y":2}"#;
/// assert_eq!(serde_json::to_string(&point).unwrap(), serialized_s);
/// }
/// ```
///
/// ```should_panic
/// # #[macro_use] extern crate tokenizers;
/// use serde::{Serialize, Deserialize};
///
/// fn main() {
/// impl_serde_type!{
/// #[derive(Debug)]
/// struct Point1D {
/// x: i32,
/// }
/// }
///
/// let serialized_s = r#"{"x":1}"#;
/// let deserialized: Point1D = serde_json::from_str(serialized_s).unwrap();
/// }
/// ```
///
/// # Examples (unit structs)
///
/// ```
/// # #[macro_use] extern crate tokenizers;
/// use serde::{Serialize, Deserialize};
///
/// fn main() {
/// impl_serde_type!{
/// struct Unit;
/// }
///
/// let unit = Unit;
/// let serialized_s = r#"{"type":"Unit"}"#;
/// assert_eq!(serde_json::to_string(&unit).unwrap(), serialized_s);
/// }
/// ```
///
/// ```should_panic
/// # #[macro_use] extern crate tokenizers;
/// use serde::{Serialize, Deserialize};
///
/// fn main() {
/// impl_serde_type!{
/// struct Unit;
/// }
///
/// let serialized_s = r#"{"some_field":1}"#;
/// let deserialized: Unit = serde_json::from_str(serialized_s).unwrap();
/// }
/// ```
#[macro_export]
macro_rules! impl_serde_type{
(
$(#[$meta:meta])*
$vis:vis struct $struct_name:ident {
$(
$(#[$field_meta:meta])*
$field_vis:vis $field_name:ident : $field_type:ty
),*$(,)+
}
) => {
paste::paste!{
$(#[$meta])*
#[derive(Serialize, Deserialize)]
#[serde(tag = "type", from = $struct_name "Deserializer")]
$vis struct $struct_name{
$(
$(#[$field_meta])*
$field_vis $field_name : $field_type,
)*
}
#[doc(hidden)]
$(#[$meta])*
#[derive(Deserialize)]
#[serde(tag = "type", remote = $struct_name "")]
struct [<$struct_name Def>]{
$(
$(#[$field_meta])*
$field_vis $field_name : $field_type,
)*
}
#[doc(hidden)]
#[derive(Deserialize)]
enum [<$struct_name Type>] {
$struct_name,
}
#[doc(hidden)]
#[derive(Deserialize)]
struct [<$struct_name Deserializer>] {
#[allow(dead_code)]
r#type: [<$struct_name Type>],
#[serde(flatten, with = $struct_name "Def")]
r#struct: $struct_name,
}
#[doc(hidden)]
impl std::convert::From<[<$struct_name Deserializer>]> for $struct_name {
fn from(v: [<$struct_name Deserializer>]) -> Self {
v.r#struct
}
}
}
};
(
$(#[$meta:meta])*
$vis:vis struct $struct_name:ident;
) => {
paste::paste!{
$(#[$meta])*
$vis struct $struct_name;
impl serde::Serialize for $struct_name {
fn serialize<S>(&self, serializer: S) -> std::result::Result<S::Ok, S::Error> where
S: serde::ser::Serializer {
let helper = [<$struct_name Helper>]{r#type: [<$struct_name Type>]::$struct_name};
helper.serialize(serializer)
}
}
impl<'de> serde::Deserialize<'de> for $struct_name {
fn deserialize<D>(deserializer: D) -> std::result::Result<Self, D::Error>
where
D: serde::Deserializer<'de>,
{
let _helper = [<$struct_name Helper>]::deserialize(deserializer)?;
Ok($struct_name)
}
}
#[derive(serde::Serialize, serde::Deserialize)]
enum [<$struct_name Type>] {
$struct_name,
}
#[derive(serde::Serialize, serde::Deserialize)]
struct [<$struct_name Helper>] {
#[allow(dead_code)]
r#type: [<$struct_name Type>],
}
}
}
}
// Re-export macro_rules_attribute
pub use macro_rules_attribute::macro_rules_attribute;
| 0 |
hf_public_repos/tokenizers/tokenizers/src | hf_public_repos/tokenizers/tokenizers/src/utils/cache.rs | use std::borrow::Borrow;
use std::collections::HashMap;
use std::hash::Hash;
use std::sync::RwLock;
/// The default capacity for a `BPE`'s internal cache.
pub static DEFAULT_CACHE_CAPACITY: usize = 10_000;
/// Provides a simple multithread cache to speed up BPE tokenization that will try to read values
/// concurrently but won't block if another thread is writing.
/// The goal is clearly not the accuracy of the content: both `get` and `set`
/// are best-effort and may silently do nothing when the lock is contended.
#[derive(Debug)]
pub(crate) struct Cache<K, V>
where
K: Eq + Hash + Clone,
V: Clone,
{
map: RwLock<HashMap<K, V>>,
pub capacity: usize,
}
// We don't really care about Cache comparison, so let's make them always equal
impl<K, V> PartialEq for Cache<K, V>
where
K: Eq + Hash + Clone,
V: Clone,
{
fn eq(&self, _other: &Cache<K, V>) -> bool {
true
}
}
impl<K, V> Default for Cache<K, V>
where
K: Eq + Hash + Clone,
V: Clone,
{
fn default() -> Self {
Self::new(DEFAULT_CACHE_CAPACITY)
}
}
impl<K, V> Cache<K, V>
where
K: Eq + Hash + Clone,
V: Clone,
{
/// Create new `Cache` with the given capacity.
pub(crate) fn new(capacity: usize) -> Self {
let map = RwLock::new(HashMap::with_capacity(capacity));
Cache { map, capacity }
}
/// Create a fresh `Cache` with the same configuration.
pub(crate) fn fresh(&self) -> Self {
Self::new(self.capacity)
}
/// Clear the cache.
pub(crate) fn clear(&self) {
self.map.write().unwrap().clear();
}
#[allow(dead_code)]
pub(crate) fn get_values<'a, I, Q>(&self, keys_iter: I) -> Option<Vec<Option<V>>>
where
I: Iterator<Item = &'a Q>,
K: Borrow<Q>,
Q: Hash + Eq + ?Sized + 'a,
{
if let Ok(ref mut cache) = self.map.try_read() {
Some(keys_iter.map(|k| cache.get(k).cloned()).collect())
} else {
None
}
}
pub(crate) fn get<Q>(&self, key: &Q) -> Option<V>
where
K: Borrow<Q>,
Q: Hash + Eq + ?Sized,
{
if let Ok(ref mut cache) = self.map.try_read() {
cache.get(key).cloned()
} else {
None
}
}
pub(crate) fn set_values<I>(&self, entries: I)
where
I: IntoIterator<Item = (K, V)>,
{
// Before trying to acquire a write lock, we check if we are already at
// capacity with a read handler.
if let Ok(cache) = self.map.try_read() {
if cache.len() >= self.capacity {
// At capacity, so do nothing.
return;
}
} else {
// If we couldn't acquire a read handle then we probably won't be able to acquire
// a write handle one quadrillionth of a second later.
return;
}
// Not at capacity, so try acquiring a write handle.
if let Ok(mut cache) = self.map.try_write() {
let free = self.capacity - cache.len();
cache.extend(entries.into_iter().take(free));
}
}
pub(crate) fn set(&self, key: K, value: V) {
self.set_values(std::iter::once((key, value)))
}
}
| 0 |
hf_public_repos/tokenizers/tokenizers/src | hf_public_repos/tokenizers/tokenizers/src/utils/onig.rs | use crate::tokenizer::pattern::Pattern;
use crate::{Offsets, Result};
use onig::Regex;
use std::error::Error;
#[derive(Debug)]
pub struct SysRegex {
regex: Regex,
}
impl SysRegex {
pub fn find_iter<'r, 't>(&'r self, inside: &'t str) -> onig::FindMatches<'r, 't> {
self.regex.find_iter(inside)
}
pub fn new(
regex_str: &str,
) -> std::result::Result<Self, Box<dyn Error + Send + Sync + 'static>> {
Ok(Self {
regex: Regex::new(regex_str)?,
})
}
}
impl Pattern for &Regex {
fn find_matches(&self, inside: &str) -> Result<Vec<(Offsets, bool)>> {
if inside.is_empty() {
return Ok(vec![((0, 0), false)]);
}
let mut prev = 0;
let mut splits = Vec::with_capacity(inside.len());
for (start, end) in self.find_iter(inside) {
if prev != start {
splits.push(((prev, start), false));
}
splits.push(((start, end), true));
prev = end;
}
if prev != inside.len() {
splits.push(((prev, inside.len()), false))
}
Ok(splits)
}
}
| 0 |
hf_public_repos/tokenizers/tokenizers/src | hf_public_repos/tokenizers/tokenizers/src/utils/iter.rs | //! This comes from the Rust libcore and is duplicated here because it is not exported
//! (cf <https://github.com/rust-lang/rust/blob/25091ed9b7739e12466fb2490baa1e8a2815121c/src/libcore/iter/adapters/mod.rs#L2664>)
//! We are now using the version from <https://stackoverflow.com/questions/44544323/how-to-unzip-a-sequence-of-resulta-b-e-to-a-veca-vecb-and-stop-on-f>
//! because the one from the libcore seems to cause overflowing stacks in some cases
//! It also contains a lines_with_ending that copies std::io::BufRead but keeps line endings.
use std::io::BufRead;
pub struct ResultShunt<I, E> {
iter: I,
error: Option<E>,
}
impl<I, T, E> ResultShunt<I, E>
where
I: Iterator<Item = Result<T, E>>,
{
/// Process the given iterator as if it yielded a `T` instead of a
/// `Result<T, _>`. Any errors will stop the inner iterator and
/// the overall result will be an error.
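    ///
    /// A small sketch (the `std::io::Error` error type is arbitrary):
    ///
    /// ```
    /// use tokenizers::utils::iter::ResultShunt;
    ///
    /// let values = vec![Ok::<u32, std::io::Error>(1), Ok(2), Ok(3)];
    /// let sum = ResultShunt::process(values.into_iter(), |iter| iter.sum::<u32>()).unwrap();
    /// assert_eq!(sum, 6);
    /// ```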
pub fn process<F, U>(iter: I, mut f: F) -> Result<U, E>
where
F: FnMut(&mut Self) -> U,
{
let mut shunt = ResultShunt::new(iter);
let value = f(shunt.by_ref());
shunt.reconstruct(value)
}
fn new(iter: I) -> Self {
ResultShunt { iter, error: None }
}
/// Consume the adapter and rebuild a `Result` value. This should
/// *always* be called, otherwise any potential error would be
/// lost.
fn reconstruct<U>(self, val: U) -> Result<U, E> {
match self.error {
None => Ok(val),
Some(e) => Err(e),
}
}
}
impl<I, T, E> Iterator for ResultShunt<I, E>
where
I: Iterator<Item = Result<T, E>>,
{
type Item = T;
fn next(&mut self) -> Option<Self::Item> {
match self.iter.next() {
Some(Ok(v)) => Some(v),
Some(Err(e)) => {
self.error = Some(e);
None
}
None => None,
}
}
}
/// Copied from std::io::BufRead but keeps newline characters.
#[derive(Debug)]
pub struct Lines<B> {
buf: B,
}
pub trait LinesWithEnding<B> {
fn lines_with_ending(self) -> Lines<B>;
}
impl<B> LinesWithEnding<B> for B
where
B: BufRead,
{
fn lines_with_ending(self) -> Lines<B> {
Lines::<B> { buf: self }
}
}
impl<B: BufRead> Iterator for Lines<B> {
type Item = std::io::Result<String>;
fn next(&mut self) -> Option<Self::Item> {
let mut buf = String::new();
match self.buf.read_line(&mut buf) {
Ok(0) => None,
Ok(_n) => {
// if buf.ends_with('\n') {
// buf.pop();
// if buf.ends_with('\r') {
// buf.pop();
// }
// }
Some(Ok(buf))
}
Err(e) => Some(Err(e)),
}
}
}
| 0 |
hf_public_repos/tokenizers/tokenizers/src | hf_public_repos/tokenizers/tokenizers/src/utils/progress.rs | #[cfg(feature = "progressbar")]
pub(crate) use indicatif::{ProgressBar, ProgressStyle};
#[cfg(not(feature = "progressbar"))]
mod progressbar {
use std::borrow::Cow;
pub struct ProgressBar;
impl ProgressBar {
pub fn new(_length: u64) -> Self {
Self {}
}
pub fn set_length(&self, _length: u64) {}
pub fn set_message(&self, _message: impl Into<Cow<'static, str>>) {}
pub fn finish(&self) {}
pub fn reset(&self) {}
pub fn inc(&self, _inc: u64) {}
pub fn set_style(&self, _style: ProgressStyle) {}
}
pub struct ProgressStyle {}
impl ProgressStyle {
pub fn default_bar() -> Self {
Self {}
}
pub fn template(self, _template: &str) -> Result<Self, String> {
Ok(self)
}
}
}
#[cfg(not(feature = "progressbar"))]
pub(crate) use progressbar::{ProgressBar, ProgressStyle};
| 0 |
hf_public_repos/tokenizers/tokenizers/src | hf_public_repos/tokenizers/tokenizers/src/utils/parallelism.rs | //!
//! This module defines helpers to allow optional Rayon usage.
//!
use rayon::iter::IterBridge;
use rayon::prelude::*;
use rayon_cond::CondIterator;
// Re-export rayon current_num_threads
pub use rayon::current_num_threads;
pub const ENV_VARIABLE: &str = "TOKENIZERS_PARALLELISM";
// Reading/Writing this variable should always happen on the main thread
static mut USED_PARALLELISM: bool = false;
/// Check if the TOKENIZERS_PARALLELISM env variable has been explicitly set
pub fn is_parallelism_configured() -> bool {
std::env::var(ENV_VARIABLE).is_ok()
}
/// Check if at some point we used a parallel iterator
pub fn has_parallelism_been_used() -> bool {
unsafe { USED_PARALLELISM }
}
/// Get the currently set value for `TOKENIZERS_PARALLELISM` env variable
pub fn get_parallelism() -> bool {
match std::env::var(ENV_VARIABLE) {
Ok(mut v) => {
v.make_ascii_lowercase();
!matches!(v.as_ref(), "" | "off" | "false" | "f" | "no" | "n" | "0")
}
Err(_) => true, // If we couldn't get the variable, we use the default
}
}
/// Set the value for `TOKENIZERS_PARALLELISM` for the current process
pub fn set_parallelism(val: bool) {
std::env::set_var(ENV_VARIABLE, if val { "true" } else { "false" })
}
/// Allows converting into an iterator that can be executed either in parallel or serially.
///
/// The choice is made according to the currently set `TOKENIZERS_PARALLELISM` environment variable.
/// This variable can have one of the following values
/// - False => "" (empty value), "false", "f", "off", "no", "n", "0"
/// - True => Any other value
///
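/// A small sketch (mirrors the unit tests at the bottom of this module):
///
/// ```
/// use tokenizers::utils::parallelism::*;
///
/// let v = vec![1u32, 2, 3];
/// assert_eq!(v.into_maybe_par_iter().sum::<u32>(), 6);
/// ```
///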
pub trait MaybeParallelIterator<P, S>
where
P: ParallelIterator,
S: Iterator<Item = P::Item>,
{
    /// Convert `self` into a `CondIterator` that will be executed either in parallel or serially,
    /// based solely on the `TOKENIZERS_PARALLELISM` environment variable
fn into_maybe_par_iter(self) -> CondIterator<P, S>;
    /// Convert `self` into a `CondIterator` that will be executed either in parallel or serially,
    /// based on both the `TOKENIZERS_PARALLELISM` environment variable and the provided bool.
    /// Both must be true to run with parallelism activated.
fn into_maybe_par_iter_cond(self, cond: bool) -> CondIterator<P, S>;
}
impl<P, S, I> MaybeParallelIterator<P, S> for I
where
I: IntoParallelIterator<Iter = P, Item = P::Item> + IntoIterator<IntoIter = S, Item = S::Item>,
P: ParallelIterator,
S: Iterator<Item = P::Item>,
{
fn into_maybe_par_iter(self) -> CondIterator<P, S> {
let parallelism = get_parallelism();
if parallelism {
unsafe { USED_PARALLELISM = true };
}
CondIterator::new(self, parallelism)
}
fn into_maybe_par_iter_cond(self, cond: bool) -> CondIterator<P, S> {
if cond {
self.into_maybe_par_iter()
} else {
CondIterator::from_serial(self)
}
}
}
/// Shared reference version of MaybeParallelIterator, works the same but returns an iterator
/// over references, does not consume self
pub trait MaybeParallelRefIterator<'data, P, S>
where
P: ParallelIterator,
S: Iterator<Item = P::Item>,
P::Item: 'data,
{
fn maybe_par_iter(&'data self) -> CondIterator<P, S>;
fn maybe_par_iter_cond(&'data self, cond: bool) -> CondIterator<P, S>;
}
impl<'data, P, S, I: 'data + ?Sized> MaybeParallelRefIterator<'data, P, S> for I
where
&'data I: MaybeParallelIterator<P, S>,
P: ParallelIterator,
S: Iterator<Item = P::Item>,
P::Item: 'data,
{
fn maybe_par_iter(&'data self) -> CondIterator<P, S> {
self.into_maybe_par_iter()
}
fn maybe_par_iter_cond(&'data self, cond: bool) -> CondIterator<P, S> {
self.into_maybe_par_iter_cond(cond)
}
}
/// Exclusive reference version of `MaybeParallelIterator`: works the same way but returns an iterator
/// over mutable references and does not consume `self`.
pub trait MaybeParallelRefMutIterator<'data, P, S>
where
P: ParallelIterator,
S: Iterator<Item = P::Item>,
P::Item: 'data,
{
fn maybe_par_iter_mut(&'data mut self) -> CondIterator<P, S>;
fn maybe_par_iter_mut_cond(&'data mut self, cond: bool) -> CondIterator<P, S>;
}
impl<'data, P, S, I: 'data + ?Sized> MaybeParallelRefMutIterator<'data, P, S> for I
where
&'data mut I: MaybeParallelIterator<P, S>,
P: ParallelIterator,
S: Iterator<Item = P::Item>,
P::Item: 'data,
{
fn maybe_par_iter_mut(&'data mut self) -> CondIterator<P, S> {
self.into_maybe_par_iter()
}
fn maybe_par_iter_mut_cond(&'data mut self, cond: bool) -> CondIterator<P, S> {
self.into_maybe_par_iter_cond(cond)
}
}
/// Converts any serial iterator into a CondIterator that can run either in parallel or serially.
pub trait MaybeParallelBridge<T, S>
where
S: Iterator<Item = T> + Send,
T: Send,
{
fn maybe_par_bridge(self) -> CondIterator<IterBridge<S>, S>;
fn maybe_par_bridge_cond(self, cond: bool) -> CondIterator<IterBridge<S>, S>;
}
impl<T, S> MaybeParallelBridge<T, S> for S
where
S: Iterator<Item = T> + Send,
T: Send,
{
fn maybe_par_bridge(self) -> CondIterator<IterBridge<S>, S> {
let iter = CondIterator::from_serial(self);
if get_parallelism() {
unsafe { USED_PARALLELISM = true };
CondIterator::from_parallel(iter.into_parallel().right().unwrap())
} else {
iter
}
}
fn maybe_par_bridge_cond(self, cond: bool) -> CondIterator<IterBridge<S>, S> {
if cond {
self.maybe_par_bridge()
} else {
CondIterator::from_serial(self)
}
}
}
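// Illustrative sketch, not part of the original source: `maybe_par_bridge`
// lets a plain serial iterator (here, a filtered range) go through the same
// conditional parallelism machinery as the other helpers.
#[cfg(test)]
mod maybe_parallel_bridge_example {
    use super::*;

    #[test]
    fn bridged_iterator_sums_correctly() {
        let sum = (1u32..=10)
            .filter(|n| n % 2 == 0)
            .maybe_par_bridge()
            .sum::<u32>();
        assert_eq!(sum, 30); // 2 + 4 + 6 + 8 + 10
    }
}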
/// Allows converting into `chunks` that can be processed either in parallel or serially.
pub trait MaybeParallelSlice<'data, T>
where
T: Sync,
{
    /// Create a CondIterator that will be executed either in parallel or serially,
/// based solely on the `TOKENIZERS_PARALLELISM` environment variable
fn maybe_par_chunks(
&'_ self,
chunk_size: usize,
) -> CondIterator<rayon::slice::Chunks<'_, T>, std::slice::Chunks<'_, T>>;
    /// Create a CondIterator that will be executed either in parallel or serially,
/// based on both the `TOKENIZERS_PARALLELISM` environment variable and the provided bool.
/// Both must be true to run with parallelism activated.
fn maybe_par_chunks_cond(
&'_ self,
cond: bool,
chunk_size: usize,
) -> CondIterator<rayon::slice::Chunks<'_, T>, std::slice::Chunks<'_, T>>;
}
impl<T> MaybeParallelSlice<'_, T> for [T]
where
T: Sync,
{
fn maybe_par_chunks(
&'_ self,
chunk_size: usize,
) -> CondIterator<rayon::slice::Chunks<'_, T>, std::slice::Chunks<'_, T>> {
let parallelism = get_parallelism();
if parallelism {
CondIterator::from_parallel(self.par_chunks(chunk_size))
} else {
CondIterator::from_serial(self.chunks(chunk_size))
}
}
fn maybe_par_chunks_cond(
&'_ self,
cond: bool,
chunk_size: usize,
) -> CondIterator<rayon::slice::Chunks<'_, T>, std::slice::Chunks<'_, T>> {
if cond {
self.maybe_par_chunks(chunk_size)
} else {
CondIterator::from_serial(self.chunks(chunk_size))
}
}
}
#[cfg(test)]
mod tests {
use super::*;
#[test]
fn test_maybe_parallel_iterator() {
let mut v = vec![1u32, 2, 3, 4, 5, 6];
assert_eq!(v.maybe_par_iter().sum::<u32>(), 21);
assert_eq!(
v.maybe_par_iter_mut()
.map(|v| {
*v *= 2;
*v
})
.sum::<u32>(),
42
);
assert_eq!(v.maybe_par_iter().sum::<u32>(), 42);
assert_eq!(v.into_maybe_par_iter().sum::<u32>(), 42);
}
#[test]
fn test_maybe_parallel_slice() {
let v = [1, 2, 3, 4, 5];
let chunks: Vec<_> = v.maybe_par_chunks(2).collect();
assert_eq!(chunks, vec![&[1, 2][..], &[3, 4], &[5]]);
}
}
| 0 |
hf_public_repos/tokenizers/tokenizers/src | hf_public_repos/tokenizers/tokenizers/src/utils/fancy.rs | use fancy_regex::Regex;
use std::error::Error;
#[derive(Debug)]
pub struct SysRegex {
regex: Regex,
}
impl SysRegex {
pub fn find_iter<'r, 't>(&'r self, inside: &'t str) -> Matches<'r, 't> {
Matches(self.regex.find_iter(inside))
}
pub fn new(regex_str: &str) -> Result<Self, Box<dyn Error + Send + Sync + 'static>> {
Ok(Self {
regex: Regex::new(regex_str)?,
})
}
}
pub struct Matches<'r, 't>(fancy_regex::Matches<'r, 't>);
impl<'r, 't> Iterator for Matches<'r, 't> {
type Item = (usize, usize);
fn next(&mut self) -> Option<Self::Item> {
match self.0.next() {
Some(Ok(mat)) => Some((mat.start(), mat.end())),
// stop if an error is encountered
None | Some(Err(_)) => None,
}
}
}
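// Illustrative sketch, not part of the original source: `find_iter` yields
// non-overlapping matches as `(start, end)` byte offsets.
#[cfg(test)]
mod tests {
    use super::*;

    #[test]
    fn find_iter_returns_byte_offsets() {
        let re = SysRegex::new(r"\d+").unwrap();
        let offsets: Vec<(usize, usize)> = re.find_iter("ab12cd345").collect();
        assert_eq!(offsets, vec![(2, 4), (6, 9)]);
    }
}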
| 0 |
hf_public_repos/tokenizers/tokenizers/src | hf_public_repos/tokenizers/tokenizers/src/utils/padding.rs | use crate::parallelism::*;
use crate::tokenizer::{Encoding, Result};
use serde::{Deserialize, Serialize};
/// The various possible padding directions.
#[derive(Debug, Clone, Copy, Serialize, Deserialize)]
pub enum PaddingDirection {
Left,
Right,
}
impl std::convert::AsRef<str> for PaddingDirection {
fn as_ref(&self) -> &str {
match self {
PaddingDirection::Left => "left",
PaddingDirection::Right => "right",
}
}
}
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct PaddingParams {
pub strategy: PaddingStrategy,
pub direction: PaddingDirection,
pub pad_to_multiple_of: Option<usize>,
pub pad_id: u32,
pub pad_type_id: u32,
pub pad_token: String,
}
impl Default for PaddingParams {
fn default() -> Self {
Self {
strategy: PaddingStrategy::BatchLongest,
direction: PaddingDirection::Right,
pad_to_multiple_of: None,
pad_id: 0,
pad_type_id: 0,
pad_token: String::from("[PAD]"),
}
}
}
#[derive(Debug, Clone, Serialize, Deserialize)]
pub enum PaddingStrategy {
BatchLongest,
Fixed(usize),
}
pub fn pad_encodings(encodings: &mut [Encoding], params: &PaddingParams) -> Result<()> {
if encodings.is_empty() {
return Ok(());
}
let mut pad_length = match params.strategy {
PaddingStrategy::Fixed(size) => size,
PaddingStrategy::BatchLongest => encodings
.maybe_par_iter()
.map(|e| e.get_ids().len())
.max()
.unwrap(),
};
if let Some(multiple) = params.pad_to_multiple_of {
if multiple > 0 && pad_length % multiple > 0 {
pad_length += multiple - pad_length % multiple;
}
}
encodings.maybe_par_iter_mut().for_each(|encoding| {
encoding.pad(
pad_length,
params.pad_id,
params.pad_type_id,
¶ms.pad_token,
params.direction,
)
});
Ok(())
}
#[cfg(test)]
mod tests {
use super::*;
use crate::tokenizer::Encoding;
use std::collections::HashMap;
#[test]
fn pad_to_multiple() {
fn get_encodings() -> [Encoding; 2] {
[
Encoding::new(
vec![0, 1, 2, 3, 4],
vec![],
vec![],
vec![],
vec![],
vec![],
vec![],
vec![],
HashMap::new(),
),
Encoding::new(
vec![0, 1, 2],
vec![],
vec![],
vec![],
vec![],
vec![],
vec![],
vec![],
HashMap::new(),
),
]
}
// Test fixed
let mut encodings = get_encodings();
let mut params = PaddingParams {
strategy: PaddingStrategy::Fixed(7),
direction: PaddingDirection::Right,
pad_to_multiple_of: Some(8),
pad_id: 0,
pad_type_id: 0,
pad_token: String::from("[PAD]"),
};
pad_encodings(&mut encodings, ¶ms).unwrap();
assert!(encodings.iter().all(|e| e.get_ids().len() == 8));
// Test batch
let mut encodings = get_encodings();
params.strategy = PaddingStrategy::BatchLongest;
params.pad_to_multiple_of = Some(6);
pad_encodings(&mut encodings, ¶ms).unwrap();
assert!(encodings.iter().all(|e| e.get_ids().len() == 6));
// Do not crash with 0
params.pad_to_multiple_of = Some(0);
pad_encodings(&mut encodings, ¶ms).unwrap();
}
}
| 0 |
hf_public_repos/tokenizers/tokenizers/src | hf_public_repos/tokenizers/tokenizers/src/pre_tokenizers/whitespace.rs | use regex::Regex;
use crate::tokenizer::{
pattern::Invert, PreTokenizedString, PreTokenizer, Result, SplitDelimiterBehavior,
};
use crate::utils::macro_rules_attribute;
#[derive(Clone, Debug, PartialEq, Eq)]
#[macro_rules_attribute(impl_serde_type!)]
pub struct Whitespace;
impl Default for Whitespace {
fn default() -> Self {
Self
}
}
impl PreTokenizer for Whitespace {
fn pre_tokenize(&self, pretokenized: &mut PreTokenizedString) -> Result<()> {
lazy_static! {
static ref RE: Regex = Regex::new(r"\w+|[^\w\s]+").unwrap();
}
let re_ref: &Regex = &RE;
pretokenized.split(|_, normalized| {
normalized.split(Invert(re_ref), SplitDelimiterBehavior::Removed)
})
}
}
#[derive(Copy, Clone, Debug, PartialEq, Eq)]
#[macro_rules_attribute(impl_serde_type!)]
pub struct WhitespaceSplit;
impl PreTokenizer for WhitespaceSplit {
fn pre_tokenize(&self, pretokenized: &mut PreTokenizedString) -> Result<()> {
pretokenized.split(|_, normalized| {
normalized.split(char::is_whitespace, SplitDelimiterBehavior::Removed)
})
}
}
#[cfg(test)]
mod tests {
use super::*;
use crate::{OffsetReferential, OffsetType, PreTokenizer};
#[test]
fn basic() {
let tests = vec![
(
"Hey man!",
vec![("Hey", (0, 3)), ("man", (4, 7)), ("!", (7, 8))],
),
(
"How are you doing?",
vec![
("How", (0, 3)),
("are", (4, 7)),
("you", (8, 11)),
("doing", (12, 17)),
("?", (17, 18)),
],
),
("\n", vec![]),
];
let pretok = Whitespace {};
for (s, res) in tests {
let mut pretokenized = PreTokenizedString::from(s);
pretok.pre_tokenize(&mut pretokenized).unwrap();
assert_eq!(
pretokenized
.get_splits(OffsetReferential::Original, OffsetType::Byte)
.into_iter()
.map(|(s, o, _)| (s, o))
.collect::<Vec<_>>(),
res
);
}
}
#[test]
fn whitespace_split() {
let tests = vec![
("Hey man!", vec![("Hey", (0, 3)), ("man!", (4, 8))]),
(
"Hey, man, Good?",
vec![("Hey,", (0, 4)), ("man,", (5, 9)), ("Good?", (10, 15))],
),
];
let pretok = WhitespaceSplit;
for (s, res) in tests {
let mut pretokenized = PreTokenizedString::from(s);
pretok.pre_tokenize(&mut pretokenized).unwrap();
assert_eq!(
pretokenized
.get_splits(OffsetReferential::Original, OffsetType::Byte)
.into_iter()
.map(|(s, o, _)| (s, o))
.collect::<Vec<_>>(),
res
);
}
}
}
| 0 |
hf_public_repos/tokenizers/tokenizers/src | hf_public_repos/tokenizers/tokenizers/src/pre_tokenizers/metaspace.rs | use crate::tokenizer::{Decoder, PreTokenizedString, PreTokenizer, Result, SplitDelimiterBehavior};
use serde::{Deserialize, Deserializer, Serialize};
/// Enum representing options for the metaspace prepending scheme.
#[derive(Debug, Clone, PartialEq, Serialize, Eq, Deserialize, Copy)]
#[serde(rename_all = "snake_case")]
pub enum PrependScheme {
    /// Specifies that the space should be prepended only once, on the first split.
First,
/// Specifies that the space should not be prepended.
Never,
    /// Specifies that the space should always be prepended.
Always,
}
#[derive(Debug, Clone, PartialEq, Serialize, Eq)]
/// Replaces all whitespace characters with the provided meta character and then
/// splits on this character.
#[serde(tag = "type")]
pub struct Metaspace {
replacement: char,
pub add_prefix_space: bool,
pub prepend_scheme: PrependScheme,
#[serde(skip)]
str_rep: String,
}
impl<'de> Deserialize<'de> for Metaspace {
fn deserialize<D>(deserializer: D) -> std::result::Result<Self, D::Error>
where
D: Deserializer<'de>,
{
#[derive(Deserialize)]
enum Type {
Metaspace,
}
fn default_prepend_scheme_value() -> PrependScheme {
PrependScheme::Always
}
#[derive(Deserialize)]
pub struct MetaspaceHelper {
#[serde(rename = "type")]
_type: Type,
replacement: char,
pub add_prefix_space: bool,
#[serde(default = "default_prepend_scheme_value")]
pub prepend_scheme: PrependScheme,
#[serde(skip, rename = "str_rep")]
_str_rep: String,
}
let helper = MetaspaceHelper::deserialize(deserializer)?;
let instance = Self::new_with_prepend_scheme(
helper.replacement,
helper.add_prefix_space,
helper.prepend_scheme,
);
Ok(instance)
}
}
impl Metaspace {
pub fn new(replacement: char, add_prefix_space: bool) -> Self {
Self::new_with_prepend_scheme(
replacement,
add_prefix_space,
PrependScheme::Always, // always prepend for legacy purpose
)
}
pub fn new_with_prepend_scheme(
replacement: char,
add_prefix_space: bool,
prepend_scheme: PrependScheme,
) -> Self {
Self {
replacement,
str_rep: replacement.to_string(),
add_prefix_space,
prepend_scheme,
}
}
pub fn get_replacement(&self) -> char {
self.replacement
}
pub fn set_replacement(&mut self, replacement: char) {
self.replacement = replacement;
self.str_rep = replacement.to_string();
}
pub fn get_prepend_scheme(&self) -> PrependScheme {
self.prepend_scheme
}
pub fn set_prepend_scheme(&mut self, scheme: PrependScheme) {
self.prepend_scheme = scheme;
}
}
impl Default for Metaspace {
fn default() -> Self {
Self::new('▁', true)
}
}
impl PreTokenizer for Metaspace {
fn pre_tokenize(&self, pretokenized: &mut PreTokenizedString) -> Result<()> {
let mut first_split = true;
pretokenized.split(|_, mut normalized| {
normalized.replace(' ', &self.str_rep)?;
if self.add_prefix_space && !normalized.get().starts_with(self.replacement) {
if self.prepend_scheme == PrependScheme::Always {
normalized.prepend(&self.str_rep);
} else if self.prepend_scheme == PrependScheme::First && first_split {
normalized.prepend(&self.str_rep);
first_split = false;
}
} else {
first_split = false;
}
normalized.split(self.replacement, SplitDelimiterBehavior::MergedWithNext)
})
}
}
impl Decoder for Metaspace {
fn decode_chain(&self, tokens: Vec<String>) -> Result<Vec<String>> {
Ok(tokens
.iter()
.enumerate()
.map(|(i, token)| {
token
.chars()
.flat_map(|c| {
if c == self.replacement {
if i == 0 && self.add_prefix_space {
None
} else {
Some(' ')
}
} else {
Some(c)
}
})
.collect::<String>()
})
.collect())
}
}
#[cfg(test)]
mod tests {
use regex::Regex;
use super::*;
use crate::{OffsetReferential, OffsetType};
#[test]
fn serialization() {
let metaspace = Metaspace::new('_', true);
let metaspace_s = r#"{"type":"Metaspace","replacement":"_","add_prefix_space":true,"prepend_scheme":"always"}"#;
assert_eq!(serde_json::to_string(&metaspace).unwrap(), metaspace_s);
assert_eq!(
serde_json::from_str::<Metaspace>(metaspace_s).unwrap(),
metaspace
);
// Also check it can deserialize previous versions
let metaspace = Metaspace::new('_', true);
let metaspace_s = r#"{"type":"Metaspace","str_rep":"_","replacement":"_","add_prefix_space":true,"prepend_scheme":"always"}"#;
assert_eq!(
serde_json::from_str::<Metaspace>(metaspace_s).unwrap(),
metaspace
);
let metaspace_parsed: Metaspace = serde_json::from_str(
r#"{"type":"Metaspace","replacement":"_","add_prefix_space":true}"#,
)
.unwrap();
assert_eq!(metaspace_parsed, metaspace);
}
#[test]
fn basic() {
let pretok = Metaspace::new('▁', true);
let mut pretokenized = PreTokenizedString::from("Hey friend!");
pretok.pre_tokenize(&mut pretokenized).unwrap();
assert_eq!(
pretokenized
.get_splits(OffsetReferential::Normalized, OffsetType::Byte)
.into_iter()
.map(|(s, o, _)| (s, o))
.collect::<Vec<_>>(),
vec![("▁Hey", (0, 6)), ("▁friend!", (6, 16))]
);
assert_eq!(
pretokenized
.get_splits(OffsetReferential::Original, OffsetType::Byte)
.into_iter()
.map(|(s, o, _)| (s, o))
.collect::<Vec<_>>(),
vec![("▁Hey", (0, 3)), ("▁friend!", (3, 11))]
);
}
#[test]
fn multiple_spaces() {
let pretok = Metaspace::new('▁', true);
let mut pretokenized = PreTokenizedString::from("Hey friend!");
pretok.pre_tokenize(&mut pretokenized).unwrap();
assert_eq!(
pretokenized
.get_splits(OffsetReferential::Normalized, OffsetType::Byte)
.into_iter()
.map(|(s, o, _)| (s, o))
.collect::<Vec<_>>(),
vec![
("▁Hey", (0, 6)),
("▁", (6, 9)),
("▁", (9, 12)),
("▁friend!", (12, 22)),
]
);
assert_eq!(
pretokenized
.get_splits(OffsetReferential::Original, OffsetType::Byte)
.into_iter()
.map(|(s, o, _)| (s, o))
.collect::<Vec<_>>(),
vec![
("▁Hey", (0, 3)),
("▁", (3, 4)),
("▁", (4, 5)),
("▁friend!", (5, 13)),
]
);
}
#[test]
fn non_legacy_meta_space() {
assert_eq!(
Metaspace::new('▁', true),
Metaspace::new_with_prepend_scheme('▁', true, PrependScheme::Always)
);
let mut pretok = Metaspace::new('▁', true);
pretok.set_prepend_scheme(PrependScheme::Always);
assert_eq!(
pretok,
Metaspace::new_with_prepend_scheme('▁', true, PrependScheme::Always)
);
pretok.set_prepend_scheme(PrependScheme::Never);
assert_eq!(
pretok,
Metaspace::new_with_prepend_scheme('▁', true, PrependScheme::Never)
);
pretok.set_prepend_scheme(PrependScheme::First);
assert_eq!(
pretok,
Metaspace::new_with_prepend_scheme('▁', true, PrependScheme::First)
);
let mut pretokenized = PreTokenizedString::from("Hey my friend <s>how▁are you");
let re_ref = Regex::new(r"(<s>)").unwrap();
pretokenized
.split(|_, sequence| sequence.split(&re_ref, SplitDelimiterBehavior::Isolated))
.expect("Bad split");
pretok.pre_tokenize(&mut pretokenized).unwrap();
assert_eq!(
pretokenized
.get_splits(OffsetReferential::Normalized, OffsetType::Byte)
.into_iter()
.map(|(s, o, _)| (s, o))
.collect::<Vec<_>>(),
vec![
("▁Hey", (0, 6)),
("▁my", (6, 11)),
("▁friend", (11, 20)),
("▁", (20, 23)),
("<s>", (23, 26)),
("how", (26, 29)),
("▁are", (29, 35)),
("▁you", (35, 41))
]
);
pretok.set_prepend_scheme(PrependScheme::Always);
pretok.pre_tokenize(&mut pretokenized).unwrap();
assert_eq!(
pretokenized
.get_splits(OffsetReferential::Normalized, OffsetType::Byte)
.into_iter()
.map(|(s, o, _)| (s, o))
.collect::<Vec<_>>(),
vec![
("▁Hey", (0, 6)),
("▁my", (6, 11)),
("▁friend", (11, 20)),
("▁", (20, 23)),
("▁<s>", (23, 29)),
("▁how", (29, 35)),
("▁are", (35, 41)),
("▁you", (41, 47))
]
);
pretok.set_prepend_scheme(PrependScheme::First);
let mut pretokenized = PreTokenizedString::from(" Hey <s>how"); // test with prefix
pretokenized
.split(|_, sequence| sequence.split(&re_ref, SplitDelimiterBehavior::Isolated))
.expect("Bad split");
pretok.pre_tokenize(&mut pretokenized).unwrap();
assert_eq!(
pretokenized
.get_splits(OffsetReferential::Normalized, OffsetType::Byte)
.into_iter()
.map(|(s, o, _)| (s, o))
.collect::<Vec<_>>(),
vec![
("▁Hey", (0, 6)),
("▁", (6, 9)),
("<s>", (9, 12)),
("how", (12, 15))
]
);
let mut pretokenized = PreTokenizedString::from(" Hey <s>how <s>are <s> you"); // test with many splits
pretokenized
.split(|_, sequence| sequence.split(&re_ref, SplitDelimiterBehavior::Isolated))
.expect("Bad split");
pretok.pre_tokenize(&mut pretokenized).unwrap();
assert_eq!(
pretokenized
.get_splits(OffsetReferential::Normalized, OffsetType::Byte)
.into_iter()
.map(|(s, o, _)| (s, o))
.collect::<Vec<_>>(),
vec![
("▁Hey", (0, 6)),
("▁", (6, 9)),
("<s>", (9, 12)),
("how", (12, 15)),
("▁", (15, 18)),
("<s>", (18, 21)),
("are", (21, 24)),
("▁", (24, 27)),
("<s>", (27, 30)),
("▁you", (30, 36))
]
);
}
#[test]
fn decode() {
let decoder = Metaspace::new('▁', true);
let res = decoder
.decode_chain(vec!["▁Hey".into(), "▁friend!".into()])
.unwrap();
assert_eq!(res, vec!["Hey", " friend!"])
}
}
| 0 |
hf_public_repos/tokenizers/tokenizers/src | hf_public_repos/tokenizers/tokenizers/src/pre_tokenizers/sequence.rs | use crate::pre_tokenizers::PreTokenizerWrapper;
use crate::tokenizer::{PreTokenizedString, PreTokenizer, Result};
use crate::utils::macro_rules_attribute;
use serde::{Deserialize, Serialize};
#[derive(Clone, Debug, PartialEq)]
#[macro_rules_attribute(impl_serde_type!)]
pub struct Sequence {
pretokenizers: Vec<PreTokenizerWrapper>,
}
impl Sequence {
pub fn new(pretokenizers: Vec<PreTokenizerWrapper>) -> Self {
Self { pretokenizers }
}
pub fn get_pre_tokenizers(&self) -> &[PreTokenizerWrapper] {
&self.pretokenizers
}
pub fn get_pre_tokenizers_mut(&mut self) -> &mut [PreTokenizerWrapper] {
&mut self.pretokenizers
}
}
impl PreTokenizer for Sequence {
fn pre_tokenize(&self, pretokenized: &mut PreTokenizedString) -> Result<()> {
for pretokenizer in &self.pretokenizers {
pretokenizer.pre_tokenize(pretokenized)?;
}
Ok(())
}
}
#[cfg(test)]
mod tests {
use super::*;
use crate::pre_tokenizers::{punctuation::Punctuation, whitespace::WhitespaceSplit};
use crate::{OffsetReferential, OffsetType};
#[test]
fn sequence_basic() {
let pretokenizers = vec![
PreTokenizerWrapper::WhitespaceSplit(WhitespaceSplit),
PreTokenizerWrapper::Punctuation(Punctuation::default()),
];
let pretok = Sequence::new(pretokenizers);
let mut pretokenized: PreTokenizedString = "Hey friend! How are you?!?".into();
pretok.pre_tokenize(&mut pretokenized).unwrap();
assert_eq!(
pretokenized
.get_splits(OffsetReferential::Original, OffsetType::Byte)
.into_iter()
.map(|(s, o, _)| (s, o))
.collect::<Vec<_>>(),
vec![
("Hey", (0, 3)),
("friend", (4, 10)),
("!", (10, 11)),
("How", (16, 19)),
("are", (20, 23)),
("you", (24, 27)),
("?", (27, 28)),
("!", (28, 29)),
("?", (29, 30)),
]
);
}
}
| 0 |
hf_public_repos/tokenizers/tokenizers/src | hf_public_repos/tokenizers/tokenizers/src/pre_tokenizers/mod.rs | pub mod bert;
pub mod byte_level;
pub mod delimiter;
pub mod digits;
pub mod metaspace;
pub mod punctuation;
pub mod sequence;
pub mod split;
pub mod unicode_scripts;
pub mod whitespace;
use serde::{Deserialize, Serialize};
use crate::pre_tokenizers::bert::BertPreTokenizer;
use crate::pre_tokenizers::byte_level::ByteLevel;
use crate::pre_tokenizers::delimiter::CharDelimiterSplit;
use crate::pre_tokenizers::digits::Digits;
use crate::pre_tokenizers::metaspace::Metaspace;
use crate::pre_tokenizers::punctuation::Punctuation;
use crate::pre_tokenizers::sequence::Sequence;
use crate::pre_tokenizers::split::Split;
use crate::pre_tokenizers::unicode_scripts::UnicodeScripts;
use crate::pre_tokenizers::whitespace::{Whitespace, WhitespaceSplit};
use crate::{PreTokenizedString, PreTokenizer};
#[derive(Deserialize, Serialize, Clone, Debug, PartialEq)]
#[serde(untagged)]
pub enum PreTokenizerWrapper {
BertPreTokenizer(BertPreTokenizer),
ByteLevel(ByteLevel),
Delimiter(CharDelimiterSplit),
Metaspace(Metaspace),
Whitespace(Whitespace),
Sequence(Sequence),
Split(Split),
Punctuation(Punctuation),
WhitespaceSplit(WhitespaceSplit),
Digits(Digits),
UnicodeScripts(UnicodeScripts),
}
impl PreTokenizer for PreTokenizerWrapper {
fn pre_tokenize(&self, normalized: &mut PreTokenizedString) -> crate::Result<()> {
match self {
Self::BertPreTokenizer(bpt) => bpt.pre_tokenize(normalized),
Self::ByteLevel(bpt) => bpt.pre_tokenize(normalized),
Self::Delimiter(dpt) => dpt.pre_tokenize(normalized),
Self::Metaspace(mspt) => mspt.pre_tokenize(normalized),
Self::Whitespace(wspt) => wspt.pre_tokenize(normalized),
Self::Punctuation(tok) => tok.pre_tokenize(normalized),
Self::Sequence(tok) => tok.pre_tokenize(normalized),
Self::Split(tok) => tok.pre_tokenize(normalized),
Self::WhitespaceSplit(wspt) => wspt.pre_tokenize(normalized),
Self::Digits(wspt) => wspt.pre_tokenize(normalized),
Self::UnicodeScripts(us) => us.pre_tokenize(normalized),
}
}
}
impl_enum_from!(BertPreTokenizer, PreTokenizerWrapper, BertPreTokenizer);
impl_enum_from!(ByteLevel, PreTokenizerWrapper, ByteLevel);
impl_enum_from!(CharDelimiterSplit, PreTokenizerWrapper, Delimiter);
impl_enum_from!(Whitespace, PreTokenizerWrapper, Whitespace);
impl_enum_from!(Punctuation, PreTokenizerWrapper, Punctuation);
impl_enum_from!(Sequence, PreTokenizerWrapper, Sequence);
impl_enum_from!(Split, PreTokenizerWrapper, Split);
impl_enum_from!(Metaspace, PreTokenizerWrapper, Metaspace);
impl_enum_from!(WhitespaceSplit, PreTokenizerWrapper, WhitespaceSplit);
impl_enum_from!(Digits, PreTokenizerWrapper, Digits);
impl_enum_from!(UnicodeScripts, PreTokenizerWrapper, UnicodeScripts);
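// Illustrative sketch, not part of the original source: the `impl_enum_from!`
// invocations above are what make plain `.into()` conversions from a concrete
// pre-tokenizer into the wrapper enum work.
#[cfg(test)]
mod wrapper_from_example {
    use super::*;

    #[test]
    fn pre_tokenizer_into_wrapper() {
        let wrapper: PreTokenizerWrapper = Whitespace {}.into();
        assert_eq!(wrapper, PreTokenizerWrapper::Whitespace(Whitespace {}));
    }
}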
#[cfg(test)]
mod tests {
use super::*;
#[test]
fn test_deserialize() {
let pre_tokenizer: PreTokenizerWrapper = serde_json::from_str(r#"{"type":"Sequence","pretokenizers":[{"type":"WhitespaceSplit"},{"type":"Metaspace","replacement":"▁","str_rep":"▁","add_prefix_space":true}]}"#).unwrap();
assert_eq!(
pre_tokenizer,
PreTokenizerWrapper::Sequence(Sequence::new(vec![
PreTokenizerWrapper::WhitespaceSplit(WhitespaceSplit {}),
PreTokenizerWrapper::Metaspace(Metaspace::new('▁', true))
]))
);
let pre_tokenizer: PreTokenizerWrapper = serde_json::from_str(
r#"{"type":"Metaspace","replacement":"▁","add_prefix_space":true}"#,
)
.unwrap();
assert_eq!(
pre_tokenizer,
PreTokenizerWrapper::Metaspace(Metaspace::new('▁', true))
);
let pre_tokenizer: PreTokenizerWrapper = serde_json::from_str(r#"{"type":"Sequence","pretokenizers":[{"type":"WhitespaceSplit"},{"type":"Metaspace","replacement":"▁","add_prefix_space":true}]}"#).unwrap();
assert_eq!(
pre_tokenizer,
PreTokenizerWrapper::Sequence(Sequence::new(vec![
PreTokenizerWrapper::WhitespaceSplit(WhitespaceSplit {}),
PreTokenizerWrapper::Metaspace(Metaspace::new('▁', true))
]))
);
let pre_tokenizer: PreTokenizerWrapper = serde_json::from_str(
r#"{"type":"Metaspace","replacement":"▁","add_prefix_space":true, "prepend_scheme":"first"}"#,
)
.unwrap();
assert_eq!(
pre_tokenizer,
PreTokenizerWrapper::Metaspace(Metaspace::new_with_prepend_scheme(
'▁',
true,
metaspace::PrependScheme::First
))
);
let pre_tokenizer: PreTokenizerWrapper = serde_json::from_str(
r#"{"type":"Metaspace","replacement":"▁","add_prefix_space":true, "prepend_scheme":"always"}"#,
)
.unwrap();
assert_eq!(
pre_tokenizer,
PreTokenizerWrapper::Metaspace(Metaspace::new_with_prepend_scheme(
'▁',
true,
metaspace::PrependScheme::Always
))
);
}
#[test]
fn test_deserialize_whitespace_split() {
let pre_tokenizer: PreTokenizerWrapper =
serde_json::from_str(r#"{"type":"WhitespaceSplit"}"#).unwrap();
assert_eq!(
pre_tokenizer,
PreTokenizerWrapper::WhitespaceSplit(WhitespaceSplit {})
);
}
}
| 0 |
hf_public_repos/tokenizers/tokenizers/src | hf_public_repos/tokenizers/tokenizers/src/pre_tokenizers/split.rs | use crate::utils::SysRegex;
use serde::{Deserialize, Deserializer, Serialize};
use crate::tokenizer::{
pattern::Invert, PreTokenizedString, PreTokenizer, Result, SplitDelimiterBehavior,
};
/// Represents the different patterns that `Split` can use
#[derive(Debug, Clone, PartialEq, Serialize, Deserialize, Eq)]
pub enum SplitPattern {
String(String),
Regex(String),
}
impl From<String> for SplitPattern {
fn from(v: String) -> Self {
Self::String(v)
}
}
impl From<&str> for SplitPattern {
fn from(v: &str) -> Self {
Self::String(v.to_owned())
}
}
#[derive(Debug, Serialize)]
#[serde(tag = "type")]
pub struct Split {
pattern: SplitPattern,
#[serde(skip)]
regex: SysRegex,
behavior: SplitDelimiterBehavior,
invert: bool,
}
impl<'de> Deserialize<'de> for Split {
fn deserialize<D>(deserializer: D) -> std::result::Result<Self, D::Error>
where
D: Deserializer<'de>,
{
#[derive(Deserialize)]
enum Type {
Split,
}
#[derive(Deserialize)]
pub struct SplitHelper {
#[serde(rename = "type")]
_type: Type,
pattern: SplitPattern,
behavior: SplitDelimiterBehavior,
invert: bool,
}
let helper = SplitHelper::deserialize(deserializer)?;
Self::new(helper.pattern, helper.behavior, helper.invert).map_err(serde::de::Error::custom)
}
}
impl Clone for Split {
fn clone(&self) -> Self {
Self::new(self.pattern.clone(), self.behavior, self.invert).unwrap()
}
}
impl PartialEq for Split {
fn eq(&self, other: &Self) -> bool {
self.pattern == other.pattern
&& self.behavior == other.behavior
&& self.invert == other.invert
}
}
impl Split {
pub fn new<I: Into<SplitPattern>>(
pattern: I,
behavior: SplitDelimiterBehavior,
invert: bool,
) -> Result<Self> {
let pattern: SplitPattern = pattern.into();
let regex = match &pattern {
SplitPattern::String(s) => SysRegex::new(®ex::escape(s))?,
SplitPattern::Regex(r) => SysRegex::new(r)?,
};
Ok(Self {
pattern,
regex,
behavior,
invert,
})
}
}
impl PreTokenizer for Split {
fn pre_tokenize(&self, pretokenized: &mut PreTokenizedString) -> Result<()> {
if self.invert {
pretokenized.split(|_, normalized| normalized.split(Invert(&self.regex), self.behavior))
} else {
pretokenized.split(|_, normalized| normalized.split(&self.regex, self.behavior))
}
}
}
#[cfg(test)]
mod tests {
use super::*;
use crate::{OffsetReferential, OffsetType, PreTokenizer};
use SplitDelimiterBehavior::*;
#[test]
fn basic() {
let tests = vec![
(
Removed,
"How are you doing?",
vec![
("How", (0, 3)),
("are", (4, 7)),
("you", (8, 11)),
("doing", (12, 17)),
("?", (17, 18)),
],
),
(
Isolated,
"How are you doing?",
vec![
("How", (0, 3)),
(" ", (3, 4)),
("are", (4, 7)),
(" ", (7, 8)),
("you", (8, 11)),
(" ", (11, 12)),
("doing", (12, 17)),
("?", (17, 18)),
],
),
(
MergedWithPrevious,
"How are you doing?",
vec![
("How ", (0, 4)),
("are ", (4, 8)),
("you ", (8, 12)),
("doing", (12, 17)),
("?", (17, 18)),
],
),
(
MergedWithNext,
"How are you doing?",
vec![
("How", (0, 3)),
(" are", (3, 7)),
(" you", (7, 11)),
(" doing", (11, 17)),
("?", (17, 18)),
],
),
(
Contiguous,
"How are you doing?",
vec![
("How", (0, 3)),
(" ", (3, 4)),
("are", (4, 7)),
(" ", (7, 8)),
("you", (8, 11)),
(" ", (11, 12)),
("doing?", (12, 18)),
],
),
];
// use whitespace regex
let regex = SplitPattern::Regex(r"\w+|[^\w\s]+".into());
for (behavior, s, res) in tests {
let mut pretokenized = PreTokenizedString::from(s);
let pretok = Split::new(regex.clone(), behavior, true).unwrap();
pretok.pre_tokenize(&mut pretokenized).unwrap();
assert_eq!(
pretokenized
.get_splits(OffsetReferential::Original, OffsetType::Byte)
.into_iter()
.map(|(s, o, _)| (s, o))
.collect::<Vec<_>>(),
res
);
}
}
#[test]
fn regex_string() {
let mut pretok_str_for_regex = PreTokenizedString::from("Hey, man!");
let mut pretok_str_for_string = pretok_str_for_regex.clone();
// pre-tokenizer splits on " " - one from Regex, one from string
let pretokenizer_regex = Split::new(
SplitPattern::Regex(r"\s+".into()),
SplitDelimiterBehavior::Removed,
false,
)
.unwrap();
let pretokenizer_string = Split::new(" ", SplitDelimiterBehavior::Removed, false).unwrap();
pretokenizer_regex
.pre_tokenize(&mut pretok_str_for_regex)
.unwrap();
pretokenizer_string
.pre_tokenize(&mut pretok_str_for_string)
.unwrap();
assert_eq!(pretok_str_for_regex, pretok_str_for_string);
}
#[test]
fn invert() {
let mut pretok_str = PreTokenizedString::from("Hello Hello Hello");
let mut pretok_str_for_invert = pretok_str.clone();
// one pre-tokenizer splits on " " - one splits inverted on "Hello"
let pretokenizer = Split::new(" ", SplitDelimiterBehavior::Removed, false).unwrap();
let pretokenizer_invert =
Split::new("Hello", SplitDelimiterBehavior::Removed, true).unwrap();
pretokenizer.pre_tokenize(&mut pretok_str).unwrap();
pretokenizer_invert
.pre_tokenize(&mut pretok_str_for_invert)
.unwrap();
assert_eq!(pretok_str, pretok_str_for_invert);
}
#[test]
fn serialization() {
use SplitDelimiterBehavior::*;
let split = Split::new("Hello", Removed, true).unwrap();
let split_s =
r#"{"type":"Split","pattern":{"String":"Hello"},"behavior":"Removed","invert":true}"#;
assert_eq!(serde_json::to_string(&split).unwrap(), split_s);
assert_eq!(serde_json::from_str::<Split>(split_s).unwrap(), split);
let split = Split::new(SplitPattern::Regex(r"\s+".into()), Isolated, false).unwrap();
let split_s =
r#"{"type":"Split","pattern":{"Regex":"\\s+"},"behavior":"Isolated","invert":false}"#;
assert_eq!(serde_json::to_string(&split).unwrap(), split_s);
assert_eq!(serde_json::from_str::<Split>(split_s).unwrap(), split);
}
}
| 0 |
hf_public_repos/tokenizers/tokenizers/src | hf_public_repos/tokenizers/tokenizers/src/pre_tokenizers/digits.rs | use serde::{Deserialize, Serialize};
use crate::tokenizer::{PreTokenizedString, PreTokenizer, Result, SplitDelimiterBehavior};
use crate::utils::macro_rules_attribute;
#[derive(Clone, Debug, PartialEq, Eq)]
/// Pre-tokenizes numbers into single tokens. If `individual_digits` is set
/// to true, then all digits are split into individual tokens.
#[non_exhaustive]
#[macro_rules_attribute(impl_serde_type!)]
pub struct Digits {
pub individual_digits: bool,
}
impl Digits {
pub fn new(individual_digits: bool) -> Self {
Self { individual_digits }
}
}
impl Default for Digits {
fn default() -> Self {
Self::new(false)
}
}
impl PreTokenizer for Digits {
fn pre_tokenize(&self, pretokenized: &mut PreTokenizedString) -> Result<()> {
if self.individual_digits {
pretokenized.split(|_, normalized| {
normalized.split(char::is_numeric, SplitDelimiterBehavior::Isolated)
})
} else {
pretokenized.split(|_, normalized| {
normalized.split(char::is_numeric, SplitDelimiterBehavior::Contiguous)
})
}
}
}
#[cfg(test)]
mod tests {
use super::*;
use crate::{OffsetReferential, OffsetType};
#[test]
fn numbers() {
let pretok = Digits::new(false);
let mut pretokenized = PreTokenizedString::from("Hey 123 friend!");
pretok.pre_tokenize(&mut pretokenized).unwrap();
assert_eq!(
pretokenized
.get_splits(OffsetReferential::Normalized, OffsetType::Byte)
.into_iter()
.map(|(s, o, _)| (s, o))
.collect::<Vec<_>>(),
vec![("Hey ", (0, 4)), ("123", (4, 7)), (" friend!", (7, 15))]
);
assert_eq!(
pretokenized
.get_splits(OffsetReferential::Original, OffsetType::Byte)
.into_iter()
.map(|(s, o, _)| (s, o))
.collect::<Vec<_>>(),
vec![("Hey ", (0, 4)), ("123", (4, 7)), (" friend!", (7, 15))]
);
}
#[test]
fn individual_digits() {
let pretok = Digits::new(true);
let mut pretokenized = PreTokenizedString::from("Hey 123 friend!");
pretok.pre_tokenize(&mut pretokenized).unwrap();
assert_eq!(
pretokenized
.get_splits(OffsetReferential::Normalized, OffsetType::Byte)
.into_iter()
.map(|(s, o, _)| (s, o))
.collect::<Vec<_>>(),
vec![
("Hey ", (0, 4)),
("1", (4, 5)),
("2", (5, 6)),
("3", (6, 7)),
(" friend!", (7, 15))
]
);
assert_eq!(
pretokenized
.get_splits(OffsetReferential::Original, OffsetType::Byte)
.into_iter()
.map(|(s, o, _)| (s, o))
.collect::<Vec<_>>(),
vec![
("Hey ", (0, 4)),
("1", (4, 5)),
("2", (5, 6)),
("3", (6, 7)),
(" friend!", (7, 15))
]
);
}
}
| 0 |
hf_public_repos/tokenizers/tokenizers/src | hf_public_repos/tokenizers/tokenizers/src/pre_tokenizers/bert.rs | use crate::tokenizer::{PreTokenizedString, PreTokenizer, Result, SplitDelimiterBehavior};
use crate::utils::macro_rules_attribute;
use unicode_categories::UnicodeCategories;
fn is_bert_punc(x: char) -> bool {
char::is_ascii_punctuation(&x) || x.is_punctuation()
}
#[derive(Copy, Clone, Debug, PartialEq, Eq)]
#[macro_rules_attribute(impl_serde_type!)]
pub struct BertPreTokenizer;
impl PreTokenizer for BertPreTokenizer {
fn pre_tokenize(&self, pretokenized: &mut PreTokenizedString) -> Result<()> {
pretokenized.split(|_, s| s.split(char::is_whitespace, SplitDelimiterBehavior::Removed))?;
pretokenized.split(|_, s| s.split(is_bert_punc, SplitDelimiterBehavior::Isolated))
}
}
#[cfg(test)]
mod tests {
use super::*;
use crate::{NormalizedString, OffsetReferential, OffsetType};
#[test]
fn basic() {
let pretok = BertPreTokenizer;
let mut pretokenized: PreTokenizedString = "Hey friend! How are you?!?".into();
pretok.pre_tokenize(&mut pretokenized).unwrap();
assert_eq!(
pretokenized
.get_splits(OffsetReferential::Original, OffsetType::Byte)
.into_iter()
.map(|(s, o, _)| (s, o))
.collect::<Vec<_>>(),
vec![
("Hey", (0, 3)),
("friend", (4, 10)),
("!", (10, 11)),
("How", (16, 19)),
("are", (20, 23)),
("you", (24, 27)),
("?", (27, 28)),
("!", (28, 29)),
("?", (29, 30)),
]
);
}
#[test]
fn chinese_chars() {
let mut n = NormalizedString::from("野口里佳 Noguchi Rika");
n.transform(
n.get().to_owned().chars().flat_map(|c| {
if (c as usize) > 0x4E00 {
vec![(' ', 0), (c, 1), (' ', 1)]
} else {
vec![(c, 0)]
}
}),
0,
);
let mut pretokenized = n.into();
let pretok = BertPreTokenizer;
pretok.pre_tokenize(&mut pretokenized).unwrap();
assert_eq!(
pretokenized
.get_splits(OffsetReferential::Original, OffsetType::Byte)
.into_iter()
.map(|(s, o, _)| (s, o))
.collect::<Vec<_>>(),
vec![
("野", (0, 3)),
("口", (3, 6)),
("里", (6, 9)),
("佳", (9, 12)),
("Noguchi", (13, 20)),
("Rika", (21, 25))
]
);
}
}
| 0 |
hf_public_repos/tokenizers/tokenizers/src | hf_public_repos/tokenizers/tokenizers/src/pre_tokenizers/punctuation.rs | use serde::{Deserialize, Serialize};
use crate::tokenizer::{PreTokenizedString, PreTokenizer, Result, SplitDelimiterBehavior};
use crate::utils::macro_rules_attribute;
use unicode_categories::UnicodeCategories;
fn is_punc(x: char) -> bool {
char::is_ascii_punctuation(&x) || x.is_punctuation()
}
#[derive(Copy, Clone, Debug, PartialEq, Eq)]
#[macro_rules_attribute(impl_serde_type!)]
pub struct Punctuation {
#[serde(default = "default_split")]
behavior: SplitDelimiterBehavior,
}
fn default_split() -> SplitDelimiterBehavior {
SplitDelimiterBehavior::Isolated
}
impl Punctuation {
pub fn new(behavior: SplitDelimiterBehavior) -> Self {
Self { behavior }
}
}
impl Default for Punctuation {
fn default() -> Self {
Self::new(SplitDelimiterBehavior::Isolated)
}
}
impl PreTokenizer for Punctuation {
fn pre_tokenize(&self, pretokenized: &mut PreTokenizedString) -> Result<()> {
pretokenized.split(|_, s| s.split(is_punc, self.behavior))
}
}
#[cfg(test)]
mod tests {
use super::*;
use crate::{OffsetReferential, OffsetType};
#[test]
fn punctuation_basic() {
let pretok = Punctuation::default();
let mut pretokenized: PreTokenizedString = "Hey friend! How are you?!?".into();
pretok.pre_tokenize(&mut pretokenized).unwrap();
assert_eq!(
pretokenized
.get_splits(OffsetReferential::Original, OffsetType::Byte)
.into_iter()
.map(|(s, o, _)| (s, o))
.collect::<Vec<_>>(),
vec![
("Hey friend", (0, 10)),
("!", (10, 11)),
(" How are you", (11, 27)),
("?", (27, 28)),
("!", (28, 29)),
("?", (29, 30)),
]
);
}
#[test]
fn deserialization() {
let punctuation: Punctuation = serde_json::from_str(r#"{"type": "Punctuation"}"#).unwrap();
assert_eq!(punctuation, Punctuation::default());
assert_eq!(
punctuation,
Punctuation::new(SplitDelimiterBehavior::Isolated)
);
}
#[test]
#[should_panic]
fn deserialization_erroneous() {
let _punctuation: Punctuation =
serde_json::from_str(r#"{"type": "WhitespaceSplit"}"#).unwrap();
}
}
| 0 |
hf_public_repos/tokenizers/tokenizers/src | hf_public_repos/tokenizers/tokenizers/src/pre_tokenizers/byte_level.rs | use std::collections::{HashMap, HashSet};
use crate::utils::SysRegex;
use serde::{Deserialize, Serialize};
use crate::tokenizer::{
Decoder, Encoding, PostProcessor, PreTokenizedString, PreTokenizer, Result,
SplitDelimiterBehavior,
};
use crate::utils::macro_rules_attribute;
/// Converts bytes to unicode characters.
/// See https://github.com/openai/gpt-2/blob/master/src/encoder.py#L9
fn bytes_char() -> HashMap<u8, char> {
let mut bs: Vec<u8> = vec![];
bs.extend(b'!'..=b'~');
bs.extend(b'\xA1'..=b'\xAC');
bs.extend(b'\xAE'..=b'\xFF');
let mut cs: Vec<u32> = bs.iter().map(|i| *i as u32).collect();
let mut n = 0;
for b in 0..=255u8 {
if !bs.contains(&b) {
bs.push(b);
cs.push(u32::pow(2, 8) + n);
n += 1;
}
}
bs.into_iter()
.zip(cs)
.map(|(f, t)| (f, unsafe { std::char::from_u32_unchecked(t) }))
.collect()
}
lazy_static! {
/// Regex that matches exactly one token.
/// See https://github.com/openai/gpt-2/blob/master/src/encoder.py#L98
static ref RE: SysRegex = SysRegex::new(
r"'s|'t|'re|'ve|'m|'ll|'d| ?\p{L}+| ?\p{N}+| ?[^\s\p{L}\p{N}]+|\s+(?!\S)|\s+"
)
.unwrap();
static ref BYTES_CHAR: HashMap<u8, char> = bytes_char();
static ref CHAR_BYTES: HashMap<char, u8> =
bytes_char().into_iter().map(|(c, b)| (b, c)).collect();
}
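// Illustrative sketch, not part of the original source: a few entries of the
// byte-to-unicode table built above. Printable ASCII maps to itself, while
// bytes such as space and newline are remapped above U+0100 (rendered as "Ġ"
// and "Ċ" in the tests further down).
#[cfg(test)]
mod bytes_char_example {
    use super::*;

    #[test]
    fn byte_to_char_mapping() {
        assert_eq!(BYTES_CHAR[&b'a'], 'a');
        assert_eq!(BYTES_CHAR[&b' '], '\u{120}'); // 'Ġ'
        assert_eq!(BYTES_CHAR[&b'\n'], '\u{10A}'); // 'Ċ'
    }
}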
#[derive(Copy, Clone, Debug, PartialEq, Eq)]
/// Provides all the necessary steps to handle the BPE tokenization at the byte-level. Takes care
/// of all the required processing steps to transform a UTF-8 string as needed before and after the
/// BPE model does its job.
#[macro_rules_attribute(impl_serde_type!)]
#[non_exhaustive]
pub struct ByteLevel {
    /// Whether to add a leading space to the first word. This allows treating the leading word
    /// just like any other word.
pub add_prefix_space: bool,
/// Whether the post processing step should trim offsets to avoid including whitespaces.
pub trim_offsets: bool,
    /// Whether to use the standard GPT2 regex for whitespace splitting.
    /// Set it to `false` if you want to use your own splitting.
#[serde(default = "default_true")]
pub use_regex: bool,
}
fn default_true() -> bool {
true
}
impl Default for ByteLevel {
fn default() -> Self {
Self {
add_prefix_space: true,
trim_offsets: true,
use_regex: true,
}
}
}
impl ByteLevel {
pub fn new(add_prefix_space: bool, trim_offsets: bool, use_regex: bool) -> Self {
Self {
add_prefix_space,
trim_offsets,
use_regex,
}
}
pub fn alphabet() -> HashSet<char> {
BYTES_CHAR.values().copied().collect()
}
#[must_use]
pub fn add_prefix_space(mut self, v: bool) -> Self {
self.add_prefix_space = v;
self
}
#[must_use]
pub fn trim_offsets(mut self, v: bool) -> Self {
self.trim_offsets = v;
self
}
#[must_use]
pub fn use_regex(mut self, v: bool) -> Self {
self.use_regex = v;
self
}
}
/// As a `PreTokenizer`, `ByteLevel` is in charge of transforming all the unicode characters into
/// their byte-level counterpart. It also splits the input according to the configured regex.
// TODO: Give the ability to modify this regex
impl PreTokenizer for ByteLevel {
fn pre_tokenize(&self, pretokenized: &mut PreTokenizedString) -> Result<()> {
let re_ref: &SysRegex = &RE;
pretokenized.split(|_, mut normalized| {
if self.add_prefix_space && !normalized.get().starts_with(' ') {
normalized.prepend(" ");
}
if self.use_regex {
normalized.split(re_ref, SplitDelimiterBehavior::Isolated)
} else {
Ok(vec![normalized])
}
})?;
pretokenized.normalize(|normalized| {
let s = normalized.get();
let mut transformations: Vec<(char, isize)> = Vec::with_capacity(s.len());
let mut i = 0;
for cur_char in s.chars() {
let size = cur_char.len_utf8();
let bytes = s[i..i + size].as_bytes();
i += size;
transformations.extend(
bytes
.iter()
.enumerate()
.map(|(i, b)| (BYTES_CHAR[b], isize::from(i > 0))),
);
}
normalized.transform(transformations, 0);
Ok(())
})
}
}
/// As a `Decoder`, `ByteLevel` is in charge of converting any byte-level characters to their
/// unicode counterpart, before merging everything back into a single String.
/// This decoder will consume the tokens and merge them in one step to alleviate
/// the fact that a single decoded token might be a byte that is not representable
/// as a String.
impl Decoder for ByteLevel {
fn decode_chain(&self, tokens: Vec<String>) -> Result<Vec<String>> {
let toks = tokens
.into_iter()
.flat_map(|t| {
t.chars()
.try_fold(vec![], |mut acc, c| {
CHAR_BYTES.get(&c).map(|b| {
acc.push(*b);
acc
})
})
.unwrap_or_else(|| t.as_bytes().to_vec())
})
.collect::<Vec<u8>>();
Ok(vec![String::from_utf8_lossy(&toks).to_string()])
}
}
/// As a `PostProcessor`, `ByteLevel` is in charge of trimming the offsets if necessary.
impl PostProcessor for ByteLevel {
fn added_tokens(&self, _is_pair: bool) -> usize {
0
}
fn process_encodings(
&self,
mut encodings: Vec<Encoding>,
_add_special_tokens: bool,
) -> Result<Vec<Encoding>> {
if self.trim_offsets {
for encoding in encodings.iter_mut() {
process_offsets(encoding, self.add_prefix_space);
encoding
.get_overflowing_mut()
.iter_mut()
.for_each(|encoding| process_offsets(encoding, self.add_prefix_space));
}
}
for (i, encoding) in encodings.iter_mut().enumerate() {
encoding.set_sequence_id(i);
}
Ok(encodings)
//<dyn PostProcessor>::default_process(encodings, add_special_tokens)
}
}
pub fn process_offsets(encoding: &mut Encoding, add_prefix_space: bool) {
encoding.process_tokens_with_offsets_mut(|(i, (token, offsets))| {
let mut leading_spaces = token
.chars()
.take_while(|c| *c == BYTES_CHAR[&b' '] || c.is_whitespace())
.count();
let trailing_spaces = token
.chars()
.rev()
.take_while(|c| *c == BYTES_CHAR[&b' '] || c.is_whitespace())
.count();
if leading_spaces > 0 || trailing_spaces > 0 {
if leading_spaces > 0 {
// If user uses `is_pretokenized=True` we might have
// offsets that might begin at the start of the string but are
// NOT the first token.
let is_first = i == 0 || offsets.0 == 0;
if is_first && add_prefix_space && leading_spaces == 1 {
// If we are processing the first pair of offsets, with `add_prefix_space`,
// then we shouldn't remove anything we added. If there are more than one
// leading spaces though, it means we didn't add them, and they should be
// removed.
leading_spaces = 0;
}
offsets.0 = std::cmp::min(offsets.0 + leading_spaces, offsets.1);
}
if trailing_spaces > 0 && offsets.1 >= trailing_spaces {
offsets.1 = std::cmp::max(offsets.1 - trailing_spaces, offsets.0);
}
}
});
}
#[cfg(test)]
mod tests {
use super::*;
use crate::tokenizer::{
Decoder, Encoding, OffsetReferential, OffsetType, PostProcessor, PreTokenizedString,
PreTokenizer,
};
use std::iter::FromIterator;
#[test]
fn pre_tokenization() {
let bytelevel = ByteLevel::default().add_prefix_space(false);
let mut pretokenized: PreTokenizedString = "Hello my friend, how is your day going?".into();
bytelevel.pre_tokenize(&mut pretokenized).unwrap();
assert_eq!(
pretokenized
.get_splits(OffsetReferential::Original, OffsetType::Byte)
.into_iter()
.map(|(s, o, _)| (s, o))
.collect::<Vec<_>>(),
vec![
("Hello", (0, 5)),
("Ġmy", (5, 8)),
("Ġfriend", (8, 15)),
(",", (15, 16)),
("Ġhow", (16, 20)),
("Ġis", (20, 23)),
("Ġyour", (23, 28)),
("Ġday", (28, 32)),
("Ġgoing", (32, 38)),
("?", (38, 39))
]
);
}
#[test]
fn pre_tokenization_no_regex() {
let bytelevel = ByteLevel::default().use_regex(false);
let mut pretokenized: PreTokenizedString = "Hello my friend, how is your day going?".into();
bytelevel.pre_tokenize(&mut pretokenized).unwrap();
assert_eq!(
pretokenized
.get_splits(OffsetReferential::Original, OffsetType::Byte)
.into_iter()
.map(|(s, o, _)| (s, o))
.collect::<Vec<_>>(),
vec![("ĠHelloĠmyĠfriend,ĠhowĠisĠyourĠdayĠgoing?", (0, 39))]
);
}
#[test]
fn decoding() {
let bytelevel = ByteLevel::default().add_prefix_space(false);
assert_eq!(
bytelevel
.decode_chain(
vec![
"Hello", "Ġmy", "Ġfriend", ",", "Ġhow", "Ġis", "Ġyour", "Ġday", "Ġgoing",
"?"
]
.into_iter()
.map(|s| s.into())
.collect::<Vec<String>>()
)
.unwrap(),
vec!["Hello my friend, how is your day going?"]
);
}
#[test]
fn add_prefix_space() {
let bytelevel = ByteLevel::default().add_prefix_space(true);
for s in &[
" Hello my friend, how is your day going?",
"Hello my friend, how is your day going?",
] {
let mut pretokenized = PreTokenizedString::from(*s);
bytelevel.pre_tokenize(&mut pretokenized).unwrap();
assert_eq!(
pretokenized
.get_splits(OffsetReferential::Normalized, OffsetType::Byte)
.into_iter()
.map(|(s, o, _)| (s, o))
.collect::<Vec<_>>(),
vec![
("ĠHello", (0, 7)),
("Ġmy", (7, 11)),
("Ġfriend", (11, 19)),
(",", (19, 20)),
("Ġhow", (20, 25)),
("Ġis", (25, 29)),
("Ġyour", (29, 35)),
("Ġday", (35, 40)),
("Ġgoing", (40, 47)),
("?", (47, 48))
]
);
}
}
#[test]
fn decode_works_on_separated_tokens() {
let samples = vec![
"A Nuskhuri abbreviation of იესუ ქრისტე ( iesu kriste ) \" Jesus Christ \"",
"An equal number have descenders , like p or q in English \
: გ , დ , ე , ვ , კ , ლ , ჟ , ტ , უ , ფ , ღ , ყ , ც",
];
let bytelevel = ByteLevel::default().add_prefix_space(false);
for sample in samples {
let mut pretokenized = PreTokenizedString::from(sample);
bytelevel.pre_tokenize(&mut pretokenized).unwrap();
let separated_tokens = pretokenized
.get_splits(OffsetReferential::Original, OffsetType::Byte)
.iter()
.flat_map(|(s, _, _)| s.split("").map(|t| t.into()))
.collect::<Vec<_>>();
assert_eq!(
sample,
bytelevel.decode_chain(separated_tokens).unwrap().join("")
);
}
}
#[test]
fn handling_of_newlines() {
let mut pretokenized = PreTokenizedString::from("Hello there\nHello there");
let bytelevel = ByteLevel::default().add_prefix_space(false);
bytelevel.pre_tokenize(&mut pretokenized).unwrap();
assert_eq!(
pretokenized
.get_splits(OffsetReferential::Original, OffsetType::Byte)
.into_iter()
.map(|(s, o, _)| (s, o))
.collect::<Vec<_>>(),
vec![
("Hello", (0, 5)),
("Ġthere", (5, 11)),
("Ċ", (11, 12)),
("Hello", (12, 17)),
("Ġthere", (17, 23))
]
);
}
#[test]
fn handling_of_multiple_whitespaces() {
let mut pretokenized = PreTokenizedString::from("Hello there dear");
let bytelevel = ByteLevel::default().add_prefix_space(false);
bytelevel.pre_tokenize(&mut pretokenized).unwrap();
assert_eq!(
pretokenized
.get_splits(OffsetReferential::Original, OffsetType::Byte)
.into_iter()
.map(|(s, o, _)| (s, o))
.collect::<Vec<_>>(),
vec![
("Hello", (0, 5)),
("Ġthere", (5, 11)),
("ĠĠĠĠĠĠ", (11, 17)),
("Ġdear", (17, 22))
]
);
}
#[test]
fn offsets_when_char_split_up() {
let input = "i⭢j";
let mut pretokenized = PreTokenizedString::from(input);
let bytelevel = ByteLevel::default().add_prefix_space(false);
bytelevel.pre_tokenize(&mut pretokenized).unwrap();
assert_eq!(
pretokenized
.get_splits(OffsetReferential::Original, OffsetType::Byte)
.into_iter()
.map(|(s, o, _)| (s, o))
.collect::<Vec<_>>(),
vec![("i", (0, 1)), ("âŃ¢", (1, 4)), ("j", (4, 5))]
);
assert_eq!(
pretokenized
.get_splits(OffsetReferential::Normalized, OffsetType::Byte)
.into_iter()
.map(|(s, o, _)| (s, o))
.collect::<Vec<_>>(),
vec![("i", (0, 1)), ("âŃ¢", (1, 7)), ("j", (7, 8))]
);
assert_eq!(
pretokenized
.get_splits(OffsetReferential::Original, OffsetType::Byte)
.into_iter()
.map(|(_, o, _)| &input[o.0..o.1])
.collect::<Vec<_>>(),
vec!["i", "⭢", "j"]
);
}
#[test]
fn processor_trims_offsets_pre_tokenized() {
// If user uses `is_pretokenized=True` we might have
// offsets that might begin at the start of the string but are
// NOT the first token.
let mut encoding = Encoding::new(
vec![0; 5],
vec![],
vec!["Ġl".into(), "ove".into(), "Ġl".into(), "ove".into()],
vec![],
vec![(0, 1), (1, 4), (0, 1), (1, 4)],
vec![],
vec![],
vec![],
HashMap::new(),
);
process_offsets(&mut encoding, true);
assert_eq!(
encoding,
Encoding::new(
vec![0; 5],
vec![],
vec!["Ġl".into(), "ove".into(), "Ġl".into(), "ove".into()],
vec![],
vec![(0, 1), (1, 4), (0, 1), (1, 4)],
vec![],
vec![],
vec![],
HashMap::new(),
)
);
}
#[test]
fn processor_trims_offsets() {
let start = Encoding::new(
vec![0; 5],
vec![],
vec![
"Ġ".into(),
"ĠĠĠĠHelloĠĠ".into(),
"ĠĠHello".into(),
"HelloĠĠ".into(),
"ĠĠĠĠ".into(),
],
vec![],
vec![(0, 1), (0, 11), (11, 18), (18, 25), (25, 29)],
vec![],
vec![],
vec![],
HashMap::new(),
);
let expected = Encoding::new(
vec![0; 5],
vec![0; 5],
vec![
"Ġ".into(),
"ĠĠĠĠHelloĠĠ".into(),
"ĠĠHello".into(),
"HelloĠĠ".into(),
"ĠĠĠĠ".into(),
],
vec![],
vec![(0, 0), (4, 9), (13, 18), (18, 23), (29, 29)],
vec![],
vec![],
vec![],
HashMap::from_iter(vec![(0, 0..5)]),
);
let bytelevel = ByteLevel::default().trim_offsets(true);
assert_eq!(
expected,
bytelevel.process(start.clone(), None, false).unwrap()
);
let pair_expected = Encoding::new(
vec![0; 10],
vec![0, 0, 0, 0, 0, 1, 1, 1, 1, 1],
vec![
"Ġ".into(),
"ĠĠĠĠHelloĠĠ".into(),
"ĠĠHello".into(),
"HelloĠĠ".into(),
"ĠĠĠĠ".into(),
"Ġ".into(),
"ĠĠĠĠHelloĠĠ".into(),
"ĠĠHello".into(),
"HelloĠĠ".into(),
"ĠĠĠĠ".into(),
],
vec![],
vec![
(0, 0),
(4, 9),
(13, 18),
(18, 23),
(29, 29),
(0, 0),
(4, 9),
(13, 18),
(18, 23),
(29, 29),
],
vec![],
vec![],
vec![],
HashMap::from_iter(vec![(0, 0..5), (1, 5..10)]),
);
assert_eq!(
pair_expected,
bytelevel
.process(start.clone(), Some(start), false)
.unwrap()
);
}
#[test]
fn decode_unknown_characters() {
let byte_level = ByteLevel::default();
assert_eq!(
byte_level
.decode_chain(vec![
"Hello".into(),
"Ġthere".into(),
"Ġdear".into(),
"Ġfriend!".into(),
"Ġ".into(),
"[PA D]".into()
])
.unwrap(),
vec!["Hello there dear friend! [PA D]"]
);
}
#[test]
fn deserialization() {
// Before use_regex
let byte_level: ByteLevel = serde_json::from_str(
r#"{"type": "ByteLevel", "add_prefix_space": true, "trim_offsets": false}"#,
)
.unwrap();
assert!(byte_level.use_regex);
// Loading works, new future BC test.
let byte_level: ByteLevel = serde_json::from_str(
r#"{"type": "ByteLevel", "add_prefix_space": true, "trim_offsets": false, "use_regex": true}"#,
)
.unwrap();
assert!(byte_level.use_regex);
let byte_level: ByteLevel = serde_json::from_str(
r#"{"type": "ByteLevel", "add_prefix_space": true, "trim_offsets": false, "use_regex": false}"#,
)
.unwrap();
assert!(!byte_level.use_regex);
}
}
| 0 |
hf_public_repos/tokenizers/tokenizers/src | hf_public_repos/tokenizers/tokenizers/src/pre_tokenizers/delimiter.rs | use serde::{Deserialize, Serialize};
use crate::tokenizer::{PreTokenizedString, PreTokenizer, Result, SplitDelimiterBehavior};
use crate::utils::macro_rules_attribute;
#[derive(Copy, Clone, Debug, PartialEq, Eq)]
#[non_exhaustive]
#[macro_rules_attribute(impl_serde_type!)]
pub struct CharDelimiterSplit {
pub delimiter: char,
}
impl CharDelimiterSplit {
pub fn new(delimiter: char) -> Self {
Self { delimiter }
}
}
impl PreTokenizer for CharDelimiterSplit {
fn pre_tokenize(&self, pretokenized: &mut PreTokenizedString) -> Result<()> {
// TODO: Maybe add the option to specify the behavior
pretokenized.split(|_, normalized| {
normalized.split(self.delimiter, SplitDelimiterBehavior::Removed)
})
}
}
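// Illustrative sketch, not part of the original source: splitting on a custom
// delimiter removes the delimiter and keeps the surrounding pieces, with
// offsets referring back to the original string.
#[cfg(test)]
mod tests {
    use super::*;
    use crate::{OffsetReferential, OffsetType};

    #[test]
    fn char_delimiter_split_basic() {
        let pretok = CharDelimiterSplit::new('-');
        let mut pretokenized = PreTokenizedString::from("a-b-c");
        pretok.pre_tokenize(&mut pretokenized).unwrap();
        assert_eq!(
            pretokenized
                .get_splits(OffsetReferential::Original, OffsetType::Byte)
                .into_iter()
                .map(|(s, o, _)| (s, o))
                .collect::<Vec<_>>(),
            vec![("a", (0, 1)), ("b", (2, 3)), ("c", (4, 5))]
        );
    }
}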
| 0 |
hf_public_repos/tokenizers/tokenizers/src/pre_tokenizers | hf_public_repos/tokenizers/tokenizers/src/pre_tokenizers/unicode_scripts/mod.rs | mod pre_tokenizer;
mod scripts;
// Re-export the PreTokenizer
pub use pre_tokenizer::UnicodeScripts;
| 0 |
hf_public_repos/tokenizers/tokenizers/src/pre_tokenizers | hf_public_repos/tokenizers/tokenizers/src/pre_tokenizers/unicode_scripts/pre_tokenizer.rs | use crate::pre_tokenizers::unicode_scripts::scripts::{get_script, Script};
use crate::tokenizer::{normalizer::Range, PreTokenizedString, PreTokenizer, Result};
use crate::utils::macro_rules_attribute;
#[derive(Clone, Debug, PartialEq, Eq)]
#[macro_rules_attribute(impl_serde_type!)]
pub struct UnicodeScripts;
impl UnicodeScripts {
pub fn new() -> Self {
Self {}
}
}
impl Default for UnicodeScripts {
fn default() -> Self {
Self::new()
}
}
// This code exists in the Unigram default IsValidSentencePiece.
// It could be integrated directly within `get_script`, but I
// think it's kind of tricky to see those modifications later.
// I am guessing release mode will optimize this away anyway.
fn fixed_script(c: char) -> Script {
let raw_script = get_script(c);
if c as u32 == 0x30FC {
Script::Han
} else if c == ' ' {
Script::Any
} else {
match raw_script {
Script::Hiragana => Script::Han,
Script::Katakana => Script::Han,
script => script,
}
}
}
impl PreTokenizer for UnicodeScripts {
fn pre_tokenize(&self, pretokenized: &mut PreTokenizedString) -> Result<()> {
pretokenized.split(|_, normalized| {
let mut last_script = None;
let mut offset = 0;
let mut ranges: Vec<_> = normalized
.get()
.chars()
.filter_map(|c| {
let script = Some(fixed_script(c));
let result = if script != Some(Script::Any)
&& last_script != Some(Script::Any)
&& last_script != script
{
Some(offset)
} else {
None
};
offset += c.len_utf8();
if script != Some(Script::Any) {
last_script = script;
}
result
})
.collect();
ranges.push(normalized.get().len());
Ok(ranges
.windows(2)
.map(|item| {
normalized
.slice(Range::Normalized(item[0]..item[1]))
.expect("NormalizedString bad split")
})
.collect::<Vec<_>>())
})
}
}
#[cfg(test)]
mod tests {
use super::*;
use crate::OffsetReferential;
use crate::OffsetType;
#[test]
fn basic() {
let pretok = UnicodeScripts {};
let mut pretokenized = PreTokenizedString::from("どこで生れ。Yes");
pretok.pre_tokenize(&mut pretokenized).unwrap();
assert_eq!(
pretokenized
.get_splits(OffsetReferential::Normalized, OffsetType::Byte)
.into_iter()
.map(|(s, o, _)| (s, o))
.collect::<Vec<_>>(),
vec![("どこで生れ", (0, 15)), ("。", (15, 18)), ("Yes", (18, 21))]
);
assert_eq!(
pretokenized
.get_splits(OffsetReferential::Original, OffsetType::Byte)
.into_iter()
.map(|(s, o, _)| (s, o))
.collect::<Vec<_>>(),
vec![("どこで生れ", (0, 15)), ("。", (15, 18)), ("Yes", (18, 21))]
);
}
#[test]
fn spaces_are_included_in_every_script() {
let pretok = UnicodeScripts {};
let mut pretokenized = PreTokenizedString::from("Apples are りんご 林檎");
pretok.pre_tokenize(&mut pretokenized).unwrap();
assert_eq!(
pretokenized
.get_splits(OffsetReferential::Normalized, OffsetType::Byte)
.into_iter()
.map(|(s, o, _)| (s, o))
.collect::<Vec<_>>(),
vec![("Apples are ", (0, 11)), ("りんご 林檎", (11, 27))]
);
assert_eq!(
pretokenized
.get_splits(OffsetReferential::Original, OffsetType::Byte)
.into_iter()
.map(|(s, o, _)| (s, o))
.collect::<Vec<_>>(),
vec![("Apples are ", (0, 11)), ("りんご 林檎", (11, 27))]
);
}
#[test]
fn test_unicode_script() {
assert_eq!(Script::Han, fixed_script('京'));
assert_eq!(Script::Han, fixed_script('太'));
assert_eq!(Script::Han, fixed_script('い'));
assert_eq!(Script::Han, fixed_script('グ'));
assert_eq!(Script::Han, fixed_script('ー'));
assert_eq!(Script::Latin, fixed_script('a'));
assert_eq!(Script::Latin, fixed_script('A'));
assert_eq!(Script::Common, fixed_script('0'));
assert_eq!(Script::Common, fixed_script('$'));
assert_eq!(Script::Common, fixed_script('@'));
assert_eq!(Script::Common, fixed_script('-'));
assert_eq!(Script::Any, fixed_script(' '));
}
}
| 0 |
hf_public_repos/tokenizers/tokenizers/src/pre_tokenizers | hf_public_repos/tokenizers/tokenizers/src/pre_tokenizers/unicode_scripts/scripts.rs | // Generated by modified Perl script at https://github.com/google/sentencepiece/blob/master/data/gen_unicode_scripts_code.pl
// Unicode scripts : https://gist.github.com/Narsil/07556f26dc84a6baeff4d499e68d3cd2
// Rust adaptation : https://gist.github.com/Narsil/1df9fbbf5296a8d4d62de55dcb2fe700
#[derive(PartialEq, Debug, Clone, Copy, Eq)]
pub enum Script {
Any,
Adlam,
Ahom,
AnatolianHieroglyphs,
Arabic,
Armenian,
Avestan,
Balinese,
Bamum,
BassaVah,
Batak,
Bengali,
Bhaiksuki,
Bopomofo,
Brahmi,
Braille,
Buginese,
Buhid,
CanadianAboriginal,
Carian,
CaucasianAlbanian,
Chakma,
Cham,
Cherokee,
Common,
Coptic,
Cuneiform,
Cypriot,
Cyrillic,
Deseret,
Devanagari,
Duployan,
EgyptianHieroglyphs,
Elbasan,
Ethiopic,
Georgian,
Glagolitic,
Gothic,
Grantha,
Greek,
Gujarati,
Gurmukhi,
Han,
Hangul,
Hanunoo,
Hatran,
Hebrew,
Hiragana,
ImperialAramaic,
Inherited,
InscriptionalPahlavi,
InscriptionalParthian,
Javanese,
Kaithi,
Kannada,
Katakana,
KayahLi,
Kharoshthi,
Khmer,
Khojki,
Khudawadi,
Lao,
Latin,
Lepcha,
Limbu,
LinearA,
LinearB,
Lisu,
Lycian,
Lydian,
Mahajani,
Malayalam,
Mandaic,
Manichaean,
Marchen,
MeeteiMayek,
MendeKikakui,
MeroiticCursive,
MeroiticHieroglyphs,
Miao,
Modi,
Mongolian,
Mro,
Multani,
Myanmar,
Nabataean,
NewTaiLue,
Newa,
Nko,
Ogham,
OlChiki,
OldHungarian,
OldItalic,
OldNorthArabian,
OldPermic,
OldPersian,
OldSouthArabian,
OldTurkic,
Oriya,
Osage,
Osmanya,
PahawhHmong,
Palmyrene,
PauCinHau,
PhagsPa,
Phoenician,
PsalterPahlavi,
Rejang,
Runic,
Samaritan,
Saurashtra,
Sharada,
Shavian,
Siddham,
SignWriting,
Sinhala,
SoraSompeng,
Sundanese,
SylotiNagri,
Syriac,
Tagalog,
Tagbanwa,
TaiLe,
TaiTham,
TaiViet,
Takri,
Tamil,
Tangut,
Telugu,
Thaana,
Thai,
Tibetan,
Tifinagh,
Tirhuta,
Ugaritic,
Vai,
WarangCiti,
Yi,
}
pub fn get_script(c: char) -> Script {
match c as u32 {
0x0000..=0x001F => Script::Common,
0x0020 => Script::Common,
0x0021..=0x0023 => Script::Common,
0x0024 => Script::Common,
0x0025..=0x0027 => Script::Common,
0x0028 => Script::Common,
0x0029 => Script::Common,
0x002A => Script::Common,
0x002B => Script::Common,
0x002C => Script::Common,
0x002D => Script::Common,
0x002E..=0x002F => Script::Common,
0x0030..=0x0039 => Script::Common,
0x003A..=0x003B => Script::Common,
0x003C..=0x003E => Script::Common,
0x003F..=0x0040 => Script::Common,
0x005B => Script::Common,
0x005C => Script::Common,
0x005D => Script::Common,
0x005E => Script::Common,
0x005F => Script::Common,
0x0060 => Script::Common,
0x007B => Script::Common,
0x007C => Script::Common,
0x007D => Script::Common,
0x007E => Script::Common,
0x007F..=0x009F => Script::Common,
0x00A0 => Script::Common,
0x00A1 => Script::Common,
0x00A2..=0x00A5 => Script::Common,
0x00A6 => Script::Common,
0x00A7 => Script::Common,
0x00A8 => Script::Common,
0x00A9 => Script::Common,
0x00AB => Script::Common,
0x00AC => Script::Common,
0x00AD => Script::Common,
0x00AE => Script::Common,
0x00AF => Script::Common,
0x00B0 => Script::Common,
0x00B1 => Script::Common,
0x00B2..=0x00B3 => Script::Common,
0x00B4 => Script::Common,
0x00B5 => Script::Common,
0x00B6..=0x00B7 => Script::Common,
0x00B8 => Script::Common,
0x00B9 => Script::Common,
0x00BB => Script::Common,
0x00BC..=0x00BE => Script::Common,
0x00BF => Script::Common,
0x00D7 => Script::Common,
0x00F7 => Script::Common,
0x02B9..=0x02C1 => Script::Common,
0x02C2..=0x02C5 => Script::Common,
0x02C6..=0x02D1 => Script::Common,
0x02D2..=0x02DF => Script::Common,
0x02E5..=0x02E9 => Script::Common,
0x02EC => Script::Common,
0x02ED => Script::Common,
0x02EE => Script::Common,
0x02EF..=0x02FF => Script::Common,
0x0374 => Script::Common,
0x037E => Script::Common,
0x0385 => Script::Common,
0x0387 => Script::Common,
0x0589 => Script::Common,
0x0605 => Script::Common,
0x060C => Script::Common,
0x061B => Script::Common,
0x061C => Script::Common,
0x061F => Script::Common,
0x0640 => Script::Common,
0x06DD => Script::Common,
0x08E2 => Script::Common,
0x0964..=0x0965 => Script::Common,
0x0E3F => Script::Common,
0x0FD5..=0x0FD8 => Script::Common,
0x10FB => Script::Common,
0x16EB..=0x16ED => Script::Common,
0x1735..=0x1736 => Script::Common,
0x1802..=0x1803 => Script::Common,
0x1805 => Script::Common,
0x1CD3 => Script::Common,
0x1CE1 => Script::Common,
0x1CE9..=0x1CEC => Script::Common,
0x1CEE..=0x1CF1 => Script::Common,
0x1CF2..=0x1CF3 => Script::Common,
0x1CF5..=0x1CF6 => Script::Common,
0x2000..=0x200A => Script::Common,
0x200B => Script::Common,
0x200E..=0x200F => Script::Common,
0x2010..=0x2015 => Script::Common,
0x2016..=0x2017 => Script::Common,
0x2018 => Script::Common,
0x2019 => Script::Common,
0x201A => Script::Common,
0x201B..=0x201C => Script::Common,
0x201D => Script::Common,
0x201E => Script::Common,
0x201F => Script::Common,
0x2020..=0x2027 => Script::Common,
0x2028 => Script::Common,
0x2029 => Script::Common,
0x202A..=0x202E => Script::Common,
0x202F => Script::Common,
0x2030..=0x2038 => Script::Common,
0x2039 => Script::Common,
0x203A => Script::Common,
0x203B..=0x203E => Script::Common,
0x203F..=0x2040 => Script::Common,
0x2041..=0x2043 => Script::Common,
0x2044 => Script::Common,
0x2045 => Script::Common,
0x2046 => Script::Common,
0x2047..=0x2051 => Script::Common,
0x2052 => Script::Common,
0x2053 => Script::Common,
0x2054 => Script::Common,
0x2055..=0x205E => Script::Common,
0x205F => Script::Common,
0x2060..=0x2064 => Script::Common,
0x2066..=0x206F => Script::Common,
0x2070 => Script::Common,
0x2074..=0x2079 => Script::Common,
0x207A..=0x207C => Script::Common,
0x207D => Script::Common,
0x207E => Script::Common,
0x2080..=0x2089 => Script::Common,
0x208A..=0x208C => Script::Common,
0x208D => Script::Common,
0x208E => Script::Common,
0x20A0..=0x20BE => Script::Common,
0x2100..=0x2101 => Script::Common,
0x2102 => Script::Common,
0x2103..=0x2106 => Script::Common,
0x2107 => Script::Common,
0x2108..=0x2109 => Script::Common,
0x210A..=0x2113 => Script::Common,
0x2114 => Script::Common,
0x2115 => Script::Common,
0x2116..=0x2117 => Script::Common,
0x2118 => Script::Common,
0x2119..=0x211D => Script::Common,
0x211E..=0x2123 => Script::Common,
0x2124 => Script::Common,
0x2125 => Script::Common,
0x2127 => Script::Common,
0x2128 => Script::Common,
0x2129 => Script::Common,
0x212C..=0x212D => Script::Common,
0x212E => Script::Common,
0x212F..=0x2131 => Script::Common,
0x2133..=0x2134 => Script::Common,
0x2135..=0x2138 => Script::Common,
0x2139 => Script::Common,
0x213A..=0x213B => Script::Common,
0x213C..=0x213F => Script::Common,
0x2140..=0x2144 => Script::Common,
0x2145..=0x2149 => Script::Common,
0x214A => Script::Common,
0x214B => Script::Common,
0x214C..=0x214D => Script::Common,
0x214F => Script::Common,
0x2150..=0x215F => Script::Common,
0x2189 => Script::Common,
0x218A..=0x218B => Script::Common,
0x2190..=0x2194 => Script::Common,
0x2195..=0x2199 => Script::Common,
0x219A..=0x219B => Script::Common,
0x219C..=0x219F => Script::Common,
0x21A0 => Script::Common,
0x21A1..=0x21A2 => Script::Common,
0x21A3 => Script::Common,
0x21A4..=0x21A5 => Script::Common,
0x21A6 => Script::Common,
0x21A7..=0x21AD => Script::Common,
0x21AE => Script::Common,
0x21AF..=0x21CD => Script::Common,
0x21CE..=0x21CF => Script::Common,
0x21D0..=0x21D1 => Script::Common,
0x21D2 => Script::Common,
0x21D3 => Script::Common,
0x21D4 => Script::Common,
0x21D5..=0x21F3 => Script::Common,
0x21F4..=0x22FF => Script::Common,
0x2300..=0x2307 => Script::Common,
0x2308 => Script::Common,
0x2309 => Script::Common,
0x230A => Script::Common,
0x230B => Script::Common,
0x230C..=0x231F => Script::Common,
0x2320..=0x2321 => Script::Common,
0x2322..=0x2328 => Script::Common,
0x2329 => Script::Common,
0x232A => Script::Common,
0x232B..=0x237B => Script::Common,
0x237C => Script::Common,
0x237D..=0x239A => Script::Common,
0x239B..=0x23B3 => Script::Common,
0x23B4..=0x23DB => Script::Common,
0x23DC..=0x23E1 => Script::Common,
0x23E2..=0x23FE => Script::Common,
0x2400..=0x2426 => Script::Common,
0x2440..=0x244A => Script::Common,
0x2460..=0x249B => Script::Common,
0x249C..=0x24E9 => Script::Common,
0x24EA..=0x24FF => Script::Common,
0x2500..=0x25B6 => Script::Common,
0x25B7 => Script::Common,
0x25B8..=0x25C0 => Script::Common,
0x25C1 => Script::Common,
0x25C2..=0x25F7 => Script::Common,
0x25F8..=0x25FF => Script::Common,
0x2600..=0x266E => Script::Common,
0x266F => Script::Common,
0x2670..=0x2767 => Script::Common,
0x2768 => Script::Common,
0x2769 => Script::Common,
0x276A => Script::Common,
0x276B => Script::Common,
0x276C => Script::Common,
0x276D => Script::Common,
0x276E => Script::Common,
0x276F => Script::Common,
0x2770 => Script::Common,
0x2771 => Script::Common,
0x2772 => Script::Common,
0x2773 => Script::Common,
0x2774 => Script::Common,
0x2775 => Script::Common,
0x2776..=0x2793 => Script::Common,
0x2794..=0x27BF => Script::Common,
0x27C0..=0x27C4 => Script::Common,
0x27C5 => Script::Common,
0x27C6 => Script::Common,
0x27C7..=0x27E5 => Script::Common,
0x27E6 => Script::Common,
0x27E7 => Script::Common,
0x27E8 => Script::Common,
0x27E9 => Script::Common,
0x27EA => Script::Common,
0x27EB => Script::Common,
0x27EC => Script::Common,
0x27ED => Script::Common,
0x27EE => Script::Common,
0x27EF => Script::Common,
0x27F0..=0x27FF => Script::Common,
0x2900..=0x2982 => Script::Common,
0x2983 => Script::Common,
0x2984 => Script::Common,
0x2985 => Script::Common,
0x2986 => Script::Common,
0x2987 => Script::Common,
0x2988 => Script::Common,
0x2989 => Script::Common,
0x298A => Script::Common,
0x298B => Script::Common,
0x298C => Script::Common,
0x298D => Script::Common,
0x298E => Script::Common,
0x298F => Script::Common,
0x2990 => Script::Common,
0x2991 => Script::Common,
0x2992 => Script::Common,
0x2993 => Script::Common,
0x2994 => Script::Common,
0x2995 => Script::Common,
0x2996 => Script::Common,
0x2997 => Script::Common,
0x2998 => Script::Common,
0x2999..=0x29D7 => Script::Common,
0x29D8 => Script::Common,
0x29D9 => Script::Common,
0x29DA => Script::Common,
0x29DB => Script::Common,
0x29DC..=0x29FB => Script::Common,
0x29FC => Script::Common,
0x29FD => Script::Common,
0x29FE..=0x2AFF => Script::Common,
0x2B00..=0x2B2F => Script::Common,
0x2B30..=0x2B44 => Script::Common,
0x2B45..=0x2B46 => Script::Common,
0x2B47..=0x2B4C => Script::Common,
0x2B4D..=0x2B73 => Script::Common,
0x2B76..=0x2B95 => Script::Common,
0x2B98..=0x2BB9 => Script::Common,
0x2BBD..=0x2BC8 => Script::Common,
0x2BCA..=0x2BD1 => Script::Common,
0x2BEC..=0x2BEF => Script::Common,
0x2E00..=0x2E01 => Script::Common,
0x2E02 => Script::Common,
0x2E03 => Script::Common,
0x2E04 => Script::Common,
0x2E05 => Script::Common,
0x2E06..=0x2E08 => Script::Common,
0x2E09 => Script::Common,
0x2E0A => Script::Common,
0x2E0B => Script::Common,
0x2E0C => Script::Common,
0x2E0D => Script::Common,
0x2E0E..=0x2E16 => Script::Common,
0x2E17 => Script::Common,
0x2E18..=0x2E19 => Script::Common,
0x2E1A => Script::Common,
0x2E1B => Script::Common,
0x2E1C => Script::Common,
0x2E1D => Script::Common,
0x2E1E..=0x2E1F => Script::Common,
0x2E20 => Script::Common,
0x2E21 => Script::Common,
0x2E22 => Script::Common,
0x2E23 => Script::Common,
0x2E24 => Script::Common,
0x2E25 => Script::Common,
0x2E26 => Script::Common,
0x2E27 => Script::Common,
0x2E28 => Script::Common,
0x2E29 => Script::Common,
0x2E2A..=0x2E2E => Script::Common,
0x2E2F => Script::Common,
0x2E30..=0x2E39 => Script::Common,
0x2E3A..=0x2E3B => Script::Common,
0x2E3C..=0x2E3F => Script::Common,
0x2E40 => Script::Common,
0x2E41 => Script::Common,
0x2E42 => Script::Common,
0x2E43..=0x2E44 => Script::Common,
0x2FF0..=0x2FFB => Script::Common,
0x3000 => Script::Common,
0x3001..=0x3003 => Script::Common,
0x3004 => Script::Common,
0x3006 => Script::Common,
0x3008 => Script::Common,
0x3009 => Script::Common,
0x300A => Script::Common,
0x300B => Script::Common,
0x300C => Script::Common,
0x300D => Script::Common,
0x300E => Script::Common,
0x300F => Script::Common,
0x3010 => Script::Common,
0x3011 => Script::Common,
0x3012..=0x3013 => Script::Common,
0x3014 => Script::Common,
0x3015 => Script::Common,
0x3016 => Script::Common,
0x3017 => Script::Common,
0x3018 => Script::Common,
0x3019 => Script::Common,
0x301A => Script::Common,
0x301B => Script::Common,
0x301C => Script::Common,
0x301D => Script::Common,
0x301E..=0x301F => Script::Common,
0x3020 => Script::Common,
0x3030 => Script::Common,
0x3031..=0x3035 => Script::Common,
0x3036..=0x3037 => Script::Common,
0x303C => Script::Common,
0x303D => Script::Common,
0x303E..=0x303F => Script::Common,
0x309B..=0x309C => Script::Common,
0x30A0 => Script::Common,
0x30FB => Script::Common,
0x30FC => Script::Common,
0x3190..=0x3191 => Script::Common,
0x3192..=0x3195 => Script::Common,
0x3196..=0x319F => Script::Common,
0x31C0..=0x31E3 => Script::Common,
0x3220..=0x3229 => Script::Common,
0x322A..=0x3247 => Script::Common,
0x3248..=0x324F => Script::Common,
0x3250 => Script::Common,
0x3251..=0x325F => Script::Common,
0x327F => Script::Common,
0x3280..=0x3289 => Script::Common,
0x328A..=0x32B0 => Script::Common,
0x32B1..=0x32BF => Script::Common,
0x32C0..=0x32CF => Script::Common,
0x3358..=0x33FF => Script::Common,
0x4DC0..=0x4DFF => Script::Common,
0xA700..=0xA716 => Script::Common,
0xA717..=0xA71F => Script::Common,
0xA720..=0xA721 => Script::Common,
0xA788 => Script::Common,
0xA789..=0xA78A => Script::Common,
0xA830..=0xA835 => Script::Common,
0xA836..=0xA837 => Script::Common,
0xA838 => Script::Common,
0xA839 => Script::Common,
0xA92E => Script::Common,
0xA9CF => Script::Common,
0xAB5B => Script::Common,
0xFD3E => Script::Common,
0xFD3F => Script::Common,
0xFE10..=0xFE16 => Script::Common,
0xFE17 => Script::Common,
0xFE18 => Script::Common,
0xFE19 => Script::Common,
0xFE30 => Script::Common,
0xFE31..=0xFE32 => Script::Common,
0xFE33..=0xFE34 => Script::Common,
0xFE35 => Script::Common,
0xFE36 => Script::Common,
0xFE37 => Script::Common,
0xFE38 => Script::Common,
0xFE39 => Script::Common,
0xFE3A => Script::Common,
0xFE3B => Script::Common,
0xFE3C => Script::Common,
0xFE3D => Script::Common,
0xFE3E => Script::Common,
0xFE3F => Script::Common,
0xFE40 => Script::Common,
0xFE41 => Script::Common,
0xFE42 => Script::Common,
0xFE43 => Script::Common,
0xFE44 => Script::Common,
0xFE45..=0xFE46 => Script::Common,
0xFE47 => Script::Common,
0xFE48 => Script::Common,
0xFE49..=0xFE4C => Script::Common,
0xFE4D..=0xFE4F => Script::Common,
0xFE50..=0xFE52 => Script::Common,
0xFE54..=0xFE57 => Script::Common,
0xFE58 => Script::Common,
0xFE59 => Script::Common,
0xFE5A => Script::Common,
0xFE5B => Script::Common,
0xFE5C => Script::Common,
0xFE5D => Script::Common,
0xFE5E => Script::Common,
0xFE5F..=0xFE61 => Script::Common,
0xFE62 => Script::Common,
0xFE63 => Script::Common,
0xFE64..=0xFE66 => Script::Common,
0xFE68 => Script::Common,
0xFE69 => Script::Common,
0xFE6A..=0xFE6B => Script::Common,
0xFEFF => Script::Common,
0xFF01..=0xFF03 => Script::Common,
0xFF04 => Script::Common,
0xFF05..=0xFF07 => Script::Common,
0xFF08 => Script::Common,
0xFF09 => Script::Common,
0xFF0A => Script::Common,
0xFF0B => Script::Common,
0xFF0C => Script::Common,
0xFF0D => Script::Common,
0xFF0E..=0xFF0F => Script::Common,
0xFF10..=0xFF19 => Script::Common,
0xFF1A..=0xFF1B => Script::Common,
0xFF1C..=0xFF1E => Script::Common,
0xFF1F..=0xFF20 => Script::Common,
0xFF3B => Script::Common,
0xFF3C => Script::Common,
0xFF3D => Script::Common,
0xFF3E => Script::Common,
0xFF3F => Script::Common,
0xFF40 => Script::Common,
0xFF5B => Script::Common,
0xFF5C => Script::Common,
0xFF5D => Script::Common,
0xFF5E => Script::Common,
0xFF5F => Script::Common,
0xFF60 => Script::Common,
0xFF61 => Script::Common,
0xFF62 => Script::Common,
0xFF63 => Script::Common,
0xFF64..=0xFF65 => Script::Common,
0xFF70 => Script::Common,
0xFF9E..=0xFF9F => Script::Common,
0xFFE0..=0xFFE1 => Script::Common,
0xFFE2 => Script::Common,
0xFFE3 => Script::Common,
0xFFE4 => Script::Common,
0xFFE5..=0xFFE6 => Script::Common,
0xFFE8 => Script::Common,
0xFFE9..=0xFFEC => Script::Common,
0xFFED..=0xFFEE => Script::Common,
0xFFF9..=0xFFFB => Script::Common,
0xFFFC..=0xFFFD => Script::Common,
0x10100..=0x10102 => Script::Common,
0x10107..=0x10133 => Script::Common,
0x10137..=0x1013F => Script::Common,
0x10190..=0x1019B => Script::Common,
0x101D0..=0x101FC => Script::Common,
0x102E1..=0x102FB => Script::Common,
0x1BCA0..=0x1BCA3 => Script::Common,
0x1D000..=0x1D0F5 => Script::Common,
0x1D100..=0x1D126 => Script::Common,
0x1D129..=0x1D164 => Script::Common,
0x1D165..=0x1D166 => Script::Common,
0x1D16A..=0x1D16C => Script::Common,
0x1D16D..=0x1D172 => Script::Common,
0x1D173..=0x1D17A => Script::Common,
0x1D183..=0x1D184 => Script::Common,
0x1D18C..=0x1D1A9 => Script::Common,
0x1D1AE..=0x1D1E8 => Script::Common,
0x1D300..=0x1D356 => Script::Common,
0x1D360..=0x1D371 => Script::Common,
0x1D400..=0x1D454 => Script::Common,
0x1D456..=0x1D49C => Script::Common,
0x1D49E..=0x1D49F => Script::Common,
0x1D4A2 => Script::Common,
0x1D4A5..=0x1D4A6 => Script::Common,
0x1D4A9..=0x1D4AC => Script::Common,
0x1D4AE..=0x1D4B9 => Script::Common,
0x1D4BB => Script::Common,
0x1D4BD..=0x1D4C3 => Script::Common,
0x1D4C5..=0x1D505 => Script::Common,
0x1D507..=0x1D50A => Script::Common,
0x1D50D..=0x1D514 => Script::Common,
0x1D516..=0x1D51C => Script::Common,
0x1D51E..=0x1D539 => Script::Common,
0x1D53B..=0x1D53E => Script::Common,
0x1D540..=0x1D544 => Script::Common,
0x1D546 => Script::Common,
0x1D54A..=0x1D550 => Script::Common,
0x1D552..=0x1D6A5 => Script::Common,
0x1D6A8..=0x1D6C0 => Script::Common,
0x1D6C1 => Script::Common,
0x1D6C2..=0x1D6DA => Script::Common,
0x1D6DB => Script::Common,
0x1D6DC..=0x1D6FA => Script::Common,
0x1D6FB => Script::Common,
0x1D6FC..=0x1D714 => Script::Common,
0x1D715 => Script::Common,
0x1D716..=0x1D734 => Script::Common,
0x1D735 => Script::Common,
0x1D736..=0x1D74E => Script::Common,
0x1D74F => Script::Common,
0x1D750..=0x1D76E => Script::Common,
0x1D76F => Script::Common,
0x1D770..=0x1D788 => Script::Common,
0x1D789 => Script::Common,
0x1D78A..=0x1D7A8 => Script::Common,
0x1D7A9 => Script::Common,
0x1D7AA..=0x1D7C2 => Script::Common,
0x1D7C3 => Script::Common,
0x1D7C4..=0x1D7CB => Script::Common,
0x1D7CE..=0x1D7FF => Script::Common,
0x1F000..=0x1F02B => Script::Common,
0x1F030..=0x1F093 => Script::Common,
0x1F0A0..=0x1F0AE => Script::Common,
0x1F0B1..=0x1F0BF => Script::Common,
0x1F0C1..=0x1F0CF => Script::Common,
0x1F0D1..=0x1F0F5 => Script::Common,
0x1F100..=0x1F10C => Script::Common,
0x1F110..=0x1F12E => Script::Common,
0x1F130..=0x1F16B => Script::Common,
0x1F170..=0x1F1AC => Script::Common,
0x1F1E6..=0x1F1FF => Script::Common,
0x1F201..=0x1F202 => Script::Common,
0x1F210..=0x1F23B => Script::Common,
0x1F240..=0x1F248 => Script::Common,
0x1F250..=0x1F251 => Script::Common,
0x1F300..=0x1F3FA => Script::Common,
0x1F3FB..=0x1F3FF => Script::Common,
0x1F400..=0x1F6D2 => Script::Common,
0x1F6E0..=0x1F6EC => Script::Common,
0x1F6F0..=0x1F6F6 => Script::Common,
0x1F700..=0x1F773 => Script::Common,
0x1F780..=0x1F7D4 => Script::Common,
0x1F800..=0x1F80B => Script::Common,
0x1F810..=0x1F847 => Script::Common,
0x1F850..=0x1F859 => Script::Common,
0x1F860..=0x1F887 => Script::Common,
0x1F890..=0x1F8AD => Script::Common,
0x1F910..=0x1F91E => Script::Common,
0x1F920..=0x1F927 => Script::Common,
0x1F930 => Script::Common,
0x1F933..=0x1F93E => Script::Common,
0x1F940..=0x1F94B => Script::Common,
0x1F950..=0x1F95E => Script::Common,
0x1F980..=0x1F991 => Script::Common,
0x1F9C0 => Script::Common,
0xE0001 => Script::Common,
0xE0020..=0xE007F => Script::Common,
0x0041..=0x005A => Script::Latin,
0x0061..=0x007A => Script::Latin,
0x00AA => Script::Latin,
0x00BA => Script::Latin,
0x00C0..=0x00D6 => Script::Latin,
0x00D8..=0x00F6 => Script::Latin,
0x00F8..=0x01BA => Script::Latin,
0x01BB => Script::Latin,
0x01BC..=0x01BF => Script::Latin,
0x01C0..=0x01C3 => Script::Latin,
0x01C4..=0x0293 => Script::Latin,
0x0294 => Script::Latin,
0x0295..=0x02AF => Script::Latin,
0x02B0..=0x02B8 => Script::Latin,
0x02E0..=0x02E4 => Script::Latin,
0x1D00..=0x1D25 => Script::Latin,
0x1D2C..=0x1D5C => Script::Latin,
0x1D62..=0x1D65 => Script::Latin,
0x1D6B..=0x1D77 => Script::Latin,
0x1D79..=0x1D9A => Script::Latin,
0x1D9B..=0x1DBE => Script::Latin,
0x1E00..=0x1EFF => Script::Latin,
0x2071 => Script::Latin,
0x207F => Script::Latin,
0x2090..=0x209C => Script::Latin,
0x212A..=0x212B => Script::Latin,
0x2132 => Script::Latin,
0x214E => Script::Latin,
0x2160..=0x2182 => Script::Latin,
0x2183..=0x2184 => Script::Latin,
0x2185..=0x2188 => Script::Latin,
0x2C60..=0x2C7B => Script::Latin,
0x2C7C..=0x2C7D => Script::Latin,
0x2C7E..=0x2C7F => Script::Latin,
0xA722..=0xA76F => Script::Latin,
0xA770 => Script::Latin,
0xA771..=0xA787 => Script::Latin,
0xA78B..=0xA78E => Script::Latin,
0xA78F => Script::Latin,
0xA790..=0xA7AE => Script::Latin,
0xA7B0..=0xA7B7 => Script::Latin,
0xA7F7 => Script::Latin,
0xA7F8..=0xA7F9 => Script::Latin,
0xA7FA => Script::Latin,
0xA7FB..=0xA7FF => Script::Latin,
0xAB30..=0xAB5A => Script::Latin,
0xAB5C..=0xAB5F => Script::Latin,
0xAB60..=0xAB64 => Script::Latin,
0xFB00..=0xFB06 => Script::Latin,
0xFF21..=0xFF3A => Script::Latin,
0xFF41..=0xFF5A => Script::Latin,
0x0370..=0x0373 => Script::Greek,
0x0375 => Script::Greek,
0x0376..=0x0377 => Script::Greek,
0x037A => Script::Greek,
0x037B..=0x037D => Script::Greek,
0x037F => Script::Greek,
0x0384 => Script::Greek,
0x0386 => Script::Greek,
0x0388..=0x038A => Script::Greek,
0x038C => Script::Greek,
0x038E..=0x03A1 => Script::Greek,
0x03A3..=0x03E1 => Script::Greek,
0x03F0..=0x03F5 => Script::Greek,
0x03F6 => Script::Greek,
0x03F7..=0x03FF => Script::Greek,
0x1D26..=0x1D2A => Script::Greek,
0x1D5D..=0x1D61 => Script::Greek,
0x1D66..=0x1D6A => Script::Greek,
0x1DBF => Script::Greek,
0x1F00..=0x1F15 => Script::Greek,
0x1F18..=0x1F1D => Script::Greek,
0x1F20..=0x1F45 => Script::Greek,
0x1F48..=0x1F4D => Script::Greek,
0x1F50..=0x1F57 => Script::Greek,
0x1F59 => Script::Greek,
0x1F5B => Script::Greek,
0x1F5D => Script::Greek,
0x1F5F..=0x1F7D => Script::Greek,
0x1F80..=0x1FB4 => Script::Greek,
0x1FB6..=0x1FBC => Script::Greek,
0x1FBD => Script::Greek,
0x1FBE => Script::Greek,
0x1FBF..=0x1FC1 => Script::Greek,
0x1FC2..=0x1FC4 => Script::Greek,
0x1FC6..=0x1FCC => Script::Greek,
0x1FCD..=0x1FCF => Script::Greek,
0x1FD0..=0x1FD3 => Script::Greek,
0x1FD6..=0x1FDB => Script::Greek,
0x1FDD..=0x1FDF => Script::Greek,
0x1FE0..=0x1FEC => Script::Greek,
0x1FED..=0x1FEF => Script::Greek,
0x1FF2..=0x1FF4 => Script::Greek,
0x1FF6..=0x1FFC => Script::Greek,
0x1FFD..=0x1FFE => Script::Greek,
0x2126 => Script::Greek,
0xAB65 => Script::Greek,
0x10140..=0x10174 => Script::Greek,
0x10175..=0x10178 => Script::Greek,
0x10179..=0x10189 => Script::Greek,
0x1018A..=0x1018B => Script::Greek,
0x1018C..=0x1018E => Script::Greek,
0x101A0 => Script::Greek,
0x1D200..=0x1D241 => Script::Greek,
0x1D242..=0x1D244 => Script::Greek,
0x1D245 => Script::Greek,
0x0400..=0x0481 => Script::Cyrillic,
0x0482 => Script::Cyrillic,
0x0483..=0x0484 => Script::Cyrillic,
0x0487 => Script::Cyrillic,
0x0488..=0x0489 => Script::Cyrillic,
0x048A..=0x052F => Script::Cyrillic,
0x1C80..=0x1C88 => Script::Cyrillic,
0x1D2B => Script::Cyrillic,
0x1D78 => Script::Cyrillic,
0x2DE0..=0x2DFF => Script::Cyrillic,
0xA640..=0xA66D => Script::Cyrillic,
0xA66E => Script::Cyrillic,
0xA66F => Script::Cyrillic,
0xA670..=0xA672 => Script::Cyrillic,
0xA673 => Script::Cyrillic,
0xA674..=0xA67D => Script::Cyrillic,
0xA67E => Script::Cyrillic,
0xA67F => Script::Cyrillic,
0xA680..=0xA69B => Script::Cyrillic,
0xA69C..=0xA69D => Script::Cyrillic,
0xA69E..=0xA69F => Script::Cyrillic,
0xFE2E..=0xFE2F => Script::Cyrillic,
0x0531..=0x0556 => Script::Armenian,
0x0559 => Script::Armenian,
0x055A..=0x055F => Script::Armenian,
0x0561..=0x0587 => Script::Armenian,
0x058A => Script::Armenian,
0x058D..=0x058E => Script::Armenian,
0x058F => Script::Armenian,
0xFB13..=0xFB17 => Script::Armenian,
0x0591..=0x05BD => Script::Hebrew,
0x05BE => Script::Hebrew,
0x05BF => Script::Hebrew,
0x05C0 => Script::Hebrew,
0x05C1..=0x05C2 => Script::Hebrew,
0x05C3 => Script::Hebrew,
0x05C4..=0x05C5 => Script::Hebrew,
0x05C6 => Script::Hebrew,
0x05C7 => Script::Hebrew,
0x05D0..=0x05EA => Script::Hebrew,
0x05F0..=0x05F2 => Script::Hebrew,
0x05F3..=0x05F4 => Script::Hebrew,
0xFB1D => Script::Hebrew,
0xFB1E => Script::Hebrew,
0xFB1F..=0xFB28 => Script::Hebrew,
0xFB29 => Script::Hebrew,
0xFB2A..=0xFB36 => Script::Hebrew,
0xFB38..=0xFB3C => Script::Hebrew,
0xFB3E => Script::Hebrew,
0xFB40..=0xFB41 => Script::Hebrew,
0xFB43..=0xFB44 => Script::Hebrew,
0xFB46..=0xFB4F => Script::Hebrew,
0x0600..=0x0604 => Script::Arabic,
0x0606..=0x0608 => Script::Arabic,
0x0609..=0x060A => Script::Arabic,
0x060B => Script::Arabic,
0x060D => Script::Arabic,
0x060E..=0x060F => Script::Arabic,
0x0610..=0x061A => Script::Arabic,
0x061E => Script::Arabic,
0x0620..=0x063F => Script::Arabic,
0x0641..=0x064A => Script::Arabic,
0x0656..=0x065F => Script::Arabic,
0x0660..=0x0669 => Script::Arabic,
0x066A..=0x066D => Script::Arabic,
0x066E..=0x066F => Script::Arabic,
0x0671..=0x06D3 => Script::Arabic,
0x06D4 => Script::Arabic,
0x06D5 => Script::Arabic,
0x06D6..=0x06DC => Script::Arabic,
0x06DE => Script::Arabic,
0x06DF..=0x06E4 => Script::Arabic,
0x06E5..=0x06E6 => Script::Arabic,
0x06E7..=0x06E8 => Script::Arabic,
0x06E9 => Script::Arabic,
0x06EA..=0x06ED => Script::Arabic,
0x06EE..=0x06EF => Script::Arabic,
0x06F0..=0x06F9 => Script::Arabic,
0x06FA..=0x06FC => Script::Arabic,
0x06FD..=0x06FE => Script::Arabic,
0x06FF => Script::Arabic,
0x0750..=0x077F => Script::Arabic,
0x08A0..=0x08B4 => Script::Arabic,
0x08B6..=0x08BD => Script::Arabic,
0x08D4..=0x08E1 => Script::Arabic,
0x08E3..=0x08FF => Script::Arabic,
0xFB50..=0xFBB1 => Script::Arabic,
0xFBB2..=0xFBC1 => Script::Arabic,
0xFBD3..=0xFD3D => Script::Arabic,
0xFD50..=0xFD8F => Script::Arabic,
0xFD92..=0xFDC7 => Script::Arabic,
0xFDF0..=0xFDFB => Script::Arabic,
0xFDFC => Script::Arabic,
0xFDFD => Script::Arabic,
0xFE70..=0xFE74 => Script::Arabic,
0xFE76..=0xFEFC => Script::Arabic,
0x10E60..=0x10E7E => Script::Arabic,
0x1EE00..=0x1EE03 => Script::Arabic,
0x1EE05..=0x1EE1F => Script::Arabic,
0x1EE21..=0x1EE22 => Script::Arabic,
0x1EE24 => Script::Arabic,
0x1EE27 => Script::Arabic,
0x1EE29..=0x1EE32 => Script::Arabic,
0x1EE34..=0x1EE37 => Script::Arabic,
0x1EE39 => Script::Arabic,
0x1EE3B => Script::Arabic,
0x1EE42 => Script::Arabic,
0x1EE47 => Script::Arabic,
0x1EE49 => Script::Arabic,
0x1EE4B => Script::Arabic,
0x1EE4D..=0x1EE4F => Script::Arabic,
0x1EE51..=0x1EE52 => Script::Arabic,
0x1EE54 => Script::Arabic,
0x1EE57 => Script::Arabic,
0x1EE59 => Script::Arabic,
0x1EE5B => Script::Arabic,
0x1EE5D => Script::Arabic,
0x1EE5F => Script::Arabic,
0x1EE61..=0x1EE62 => Script::Arabic,
0x1EE64 => Script::Arabic,
0x1EE67..=0x1EE6A => Script::Arabic,
0x1EE6C..=0x1EE72 => Script::Arabic,
0x1EE74..=0x1EE77 => Script::Arabic,
0x1EE79..=0x1EE7C => Script::Arabic,
0x1EE7E => Script::Arabic,
0x1EE80..=0x1EE89 => Script::Arabic,
0x1EE8B..=0x1EE9B => Script::Arabic,
0x1EEA1..=0x1EEA3 => Script::Arabic,
0x1EEA5..=0x1EEA9 => Script::Arabic,
0x1EEAB..=0x1EEBB => Script::Arabic,
0x1EEF0..=0x1EEF1 => Script::Arabic,
0x0700..=0x070D => Script::Syriac,
0x070F => Script::Syriac,
0x0710 => Script::Syriac,
0x0711 => Script::Syriac,
0x0712..=0x072F => Script::Syriac,
0x0730..=0x074A => Script::Syriac,
0x074D..=0x074F => Script::Syriac,
0x0780..=0x07A5 => Script::Thaana,
0x07A6..=0x07B0 => Script::Thaana,
0x07B1 => Script::Thaana,
0x0900..=0x0902 => Script::Devanagari,
0x0903 => Script::Devanagari,
0x0904..=0x0939 => Script::Devanagari,
0x093A => Script::Devanagari,
0x093B => Script::Devanagari,
0x093C => Script::Devanagari,
0x093D => Script::Devanagari,
0x093E..=0x0940 => Script::Devanagari,
0x0941..=0x0948 => Script::Devanagari,
0x0949..=0x094C => Script::Devanagari,
0x094D => Script::Devanagari,
0x094E..=0x094F => Script::Devanagari,
0x0950 => Script::Devanagari,
0x0953..=0x0957 => Script::Devanagari,
0x0958..=0x0961 => Script::Devanagari,
0x0962..=0x0963 => Script::Devanagari,
0x0966..=0x096F => Script::Devanagari,
0x0970 => Script::Devanagari,
0x0971 => Script::Devanagari,
0x0972..=0x097F => Script::Devanagari,
0xA8E0..=0xA8F1 => Script::Devanagari,
0xA8F2..=0xA8F7 => Script::Devanagari,
0xA8F8..=0xA8FA => Script::Devanagari,
0xA8FB => Script::Devanagari,
0xA8FC => Script::Devanagari,
0xA8FD => Script::Devanagari,
0x0980 => Script::Bengali,
0x0981 => Script::Bengali,
0x0982..=0x0983 => Script::Bengali,
0x0985..=0x098C => Script::Bengali,
0x098F..=0x0990 => Script::Bengali,
0x0993..=0x09A8 => Script::Bengali,
0x09AA..=0x09B0 => Script::Bengali,
0x09B2 => Script::Bengali,
0x09B6..=0x09B9 => Script::Bengali,
0x09BC => Script::Bengali,
0x09BD => Script::Bengali,
0x09BE..=0x09C0 => Script::Bengali,
0x09C1..=0x09C4 => Script::Bengali,
0x09C7..=0x09C8 => Script::Bengali,
0x09CB..=0x09CC => Script::Bengali,
0x09CD => Script::Bengali,
0x09CE => Script::Bengali,
0x09D7 => Script::Bengali,
0x09DC..=0x09DD => Script::Bengali,
0x09DF..=0x09E1 => Script::Bengali,
0x09E2..=0x09E3 => Script::Bengali,
0x09E6..=0x09EF => Script::Bengali,
0x09F0..=0x09F1 => Script::Bengali,
0x09F2..=0x09F3 => Script::Bengali,
0x09F4..=0x09F9 => Script::Bengali,
0x09FA => Script::Bengali,
0x09FB => Script::Bengali,
0x0A01..=0x0A02 => Script::Gurmukhi,
0x0A03 => Script::Gurmukhi,
0x0A05..=0x0A0A => Script::Gurmukhi,
0x0A0F..=0x0A10 => Script::Gurmukhi,
0x0A13..=0x0A28 => Script::Gurmukhi,
0x0A2A..=0x0A30 => Script::Gurmukhi,
0x0A32..=0x0A33 => Script::Gurmukhi,
0x0A35..=0x0A36 => Script::Gurmukhi,
0x0A38..=0x0A39 => Script::Gurmukhi,
0x0A3C => Script::Gurmukhi,
0x0A3E..=0x0A40 => Script::Gurmukhi,
0x0A41..=0x0A42 => Script::Gurmukhi,
0x0A47..=0x0A48 => Script::Gurmukhi,
0x0A4B..=0x0A4D => Script::Gurmukhi,
0x0A51 => Script::Gurmukhi,
0x0A59..=0x0A5C => Script::Gurmukhi,
0x0A5E => Script::Gurmukhi,
0x0A66..=0x0A6F => Script::Gurmukhi,
0x0A70..=0x0A71 => Script::Gurmukhi,
0x0A72..=0x0A74 => Script::Gurmukhi,
0x0A75 => Script::Gurmukhi,
0x0A81..=0x0A82 => Script::Gujarati,
0x0A83 => Script::Gujarati,
0x0A85..=0x0A8D => Script::Gujarati,
0x0A8F..=0x0A91 => Script::Gujarati,
0x0A93..=0x0AA8 => Script::Gujarati,
0x0AAA..=0x0AB0 => Script::Gujarati,
0x0AB2..=0x0AB3 => Script::Gujarati,
0x0AB5..=0x0AB9 => Script::Gujarati,
0x0ABC => Script::Gujarati,
0x0ABD => Script::Gujarati,
0x0ABE..=0x0AC0 => Script::Gujarati,
0x0AC1..=0x0AC5 => Script::Gujarati,
0x0AC7..=0x0AC8 => Script::Gujarati,
0x0AC9 => Script::Gujarati,
0x0ACB..=0x0ACC => Script::Gujarati,
0x0ACD => Script::Gujarati,
0x0AD0 => Script::Gujarati,
0x0AE0..=0x0AE1 => Script::Gujarati,
0x0AE2..=0x0AE3 => Script::Gujarati,
0x0AE6..=0x0AEF => Script::Gujarati,
0x0AF0 => Script::Gujarati,
0x0AF1 => Script::Gujarati,
0x0AF9 => Script::Gujarati,
0x0B01 => Script::Oriya,
0x0B02..=0x0B03 => Script::Oriya,
0x0B05..=0x0B0C => Script::Oriya,
0x0B0F..=0x0B10 => Script::Oriya,
0x0B13..=0x0B28 => Script::Oriya,
0x0B2A..=0x0B30 => Script::Oriya,
0x0B32..=0x0B33 => Script::Oriya,
0x0B35..=0x0B39 => Script::Oriya,
0x0B3C => Script::Oriya,
0x0B3D => Script::Oriya,
0x0B3E => Script::Oriya,
0x0B3F => Script::Oriya,
0x0B40 => Script::Oriya,
0x0B41..=0x0B44 => Script::Oriya,
0x0B47..=0x0B48 => Script::Oriya,
0x0B4B..=0x0B4C => Script::Oriya,
0x0B4D => Script::Oriya,
0x0B56 => Script::Oriya,
0x0B57 => Script::Oriya,
0x0B5C..=0x0B5D => Script::Oriya,
0x0B5F..=0x0B61 => Script::Oriya,
0x0B62..=0x0B63 => Script::Oriya,
0x0B66..=0x0B6F => Script::Oriya,
0x0B70 => Script::Oriya,
0x0B71 => Script::Oriya,
0x0B72..=0x0B77 => Script::Oriya,
0x0B82 => Script::Tamil,
0x0B83 => Script::Tamil,
0x0B85..=0x0B8A => Script::Tamil,
0x0B8E..=0x0B90 => Script::Tamil,
0x0B92..=0x0B95 => Script::Tamil,
0x0B99..=0x0B9A => Script::Tamil,
0x0B9C => Script::Tamil,
0x0B9E..=0x0B9F => Script::Tamil,
0x0BA3..=0x0BA4 => Script::Tamil,
0x0BA8..=0x0BAA => Script::Tamil,
0x0BAE..=0x0BB9 => Script::Tamil,
0x0BBE..=0x0BBF => Script::Tamil,
0x0BC0 => Script::Tamil,
0x0BC1..=0x0BC2 => Script::Tamil,
0x0BC6..=0x0BC8 => Script::Tamil,
0x0BCA..=0x0BCC => Script::Tamil,
0x0BCD => Script::Tamil,
0x0BD0 => Script::Tamil,
0x0BD7 => Script::Tamil,
0x0BE6..=0x0BEF => Script::Tamil,
0x0BF0..=0x0BF2 => Script::Tamil,
0x0BF3..=0x0BF8 => Script::Tamil,
0x0BF9 => Script::Tamil,
0x0BFA => Script::Tamil,
0x0C00 => Script::Telugu,
0x0C01..=0x0C03 => Script::Telugu,
0x0C05..=0x0C0C => Script::Telugu,
0x0C0E..=0x0C10 => Script::Telugu,
0x0C12..=0x0C28 => Script::Telugu,
0x0C2A..=0x0C39 => Script::Telugu,
0x0C3D => Script::Telugu,
0x0C3E..=0x0C40 => Script::Telugu,
0x0C41..=0x0C44 => Script::Telugu,
0x0C46..=0x0C48 => Script::Telugu,
0x0C4A..=0x0C4D => Script::Telugu,
0x0C55..=0x0C56 => Script::Telugu,
0x0C58..=0x0C5A => Script::Telugu,
0x0C60..=0x0C61 => Script::Telugu,
0x0C62..=0x0C63 => Script::Telugu,
0x0C66..=0x0C6F => Script::Telugu,
0x0C78..=0x0C7E => Script::Telugu,
0x0C7F => Script::Telugu,
0x0C80 => Script::Kannada,
0x0C81 => Script::Kannada,
0x0C82..=0x0C83 => Script::Kannada,
0x0C85..=0x0C8C => Script::Kannada,
0x0C8E..=0x0C90 => Script::Kannada,
0x0C92..=0x0CA8 => Script::Kannada,
0x0CAA..=0x0CB3 => Script::Kannada,
0x0CB5..=0x0CB9 => Script::Kannada,
0x0CBC => Script::Kannada,
0x0CBD => Script::Kannada,
0x0CBE => Script::Kannada,
0x0CBF => Script::Kannada,
0x0CC0..=0x0CC4 => Script::Kannada,
0x0CC6 => Script::Kannada,
0x0CC7..=0x0CC8 => Script::Kannada,
0x0CCA..=0x0CCB => Script::Kannada,
0x0CCC..=0x0CCD => Script::Kannada,
0x0CD5..=0x0CD6 => Script::Kannada,
0x0CDE => Script::Kannada,
0x0CE0..=0x0CE1 => Script::Kannada,
0x0CE2..=0x0CE3 => Script::Kannada,
0x0CE6..=0x0CEF => Script::Kannada,
0x0CF1..=0x0CF2 => Script::Kannada,
0x0D01 => Script::Malayalam,
0x0D02..=0x0D03 => Script::Malayalam,
0x0D05..=0x0D0C => Script::Malayalam,
0x0D0E..=0x0D10 => Script::Malayalam,
0x0D12..=0x0D3A => Script::Malayalam,
0x0D3D => Script::Malayalam,
0x0D3E..=0x0D40 => Script::Malayalam,
0x0D41..=0x0D44 => Script::Malayalam,
0x0D46..=0x0D48 => Script::Malayalam,
0x0D4A..=0x0D4C => Script::Malayalam,
0x0D4D => Script::Malayalam,
0x0D4E => Script::Malayalam,
0x0D4F => Script::Malayalam,
0x0D54..=0x0D56 => Script::Malayalam,
0x0D57 => Script::Malayalam,
0x0D58..=0x0D5E => Script::Malayalam,
0x0D5F..=0x0D61 => Script::Malayalam,
0x0D62..=0x0D63 => Script::Malayalam,
0x0D66..=0x0D6F => Script::Malayalam,
0x0D70..=0x0D78 => Script::Malayalam,
0x0D79 => Script::Malayalam,
0x0D7A..=0x0D7F => Script::Malayalam,
0x0D82..=0x0D83 => Script::Sinhala,
0x0D85..=0x0D96 => Script::Sinhala,
0x0D9A..=0x0DB1 => Script::Sinhala,
0x0DB3..=0x0DBB => Script::Sinhala,
0x0DBD => Script::Sinhala,
0x0DC0..=0x0DC6 => Script::Sinhala,
0x0DCA => Script::Sinhala,
0x0DCF..=0x0DD1 => Script::Sinhala,
0x0DD2..=0x0DD4 => Script::Sinhala,
0x0DD6 => Script::Sinhala,
0x0DD8..=0x0DDF => Script::Sinhala,
0x0DE6..=0x0DEF => Script::Sinhala,
0x0DF2..=0x0DF3 => Script::Sinhala,
0x0DF4 => Script::Sinhala,
0x111E1..=0x111F4 => Script::Sinhala,
0x0E01..=0x0E30 => Script::Thai,
0x0E31 => Script::Thai,
0x0E32..=0x0E33 => Script::Thai,
0x0E34..=0x0E3A => Script::Thai,
0x0E40..=0x0E45 => Script::Thai,
0x0E46 => Script::Thai,
0x0E47..=0x0E4E => Script::Thai,
0x0E4F => Script::Thai,
0x0E50..=0x0E59 => Script::Thai,
0x0E5A..=0x0E5B => Script::Thai,
0x0E81..=0x0E82 => Script::Lao,
0x0E84 => Script::Lao,
0x0E87..=0x0E88 => Script::Lao,
0x0E8A => Script::Lao,
0x0E8D => Script::Lao,
0x0E94..=0x0E97 => Script::Lao,
0x0E99..=0x0E9F => Script::Lao,
0x0EA1..=0x0EA3 => Script::Lao,
0x0EA5 => Script::Lao,
0x0EA7 => Script::Lao,
0x0EAA..=0x0EAB => Script::Lao,
0x0EAD..=0x0EB0 => Script::Lao,
0x0EB1 => Script::Lao,
0x0EB2..=0x0EB3 => Script::Lao,
0x0EB4..=0x0EB9 => Script::Lao,
0x0EBB..=0x0EBC => Script::Lao,
0x0EBD => Script::Lao,
0x0EC0..=0x0EC4 => Script::Lao,
0x0EC6 => Script::Lao,
0x0EC8..=0x0ECD => Script::Lao,
0x0ED0..=0x0ED9 => Script::Lao,
0x0EDC..=0x0EDF => Script::Lao,
0x0F00 => Script::Tibetan,
0x0F01..=0x0F03 => Script::Tibetan,
0x0F04..=0x0F12 => Script::Tibetan,
0x0F13 => Script::Tibetan,
0x0F14 => Script::Tibetan,
0x0F15..=0x0F17 => Script::Tibetan,
0x0F18..=0x0F19 => Script::Tibetan,
0x0F1A..=0x0F1F => Script::Tibetan,
0x0F20..=0x0F29 => Script::Tibetan,
0x0F2A..=0x0F33 => Script::Tibetan,
0x0F34 => Script::Tibetan,
0x0F35 => Script::Tibetan,
0x0F36 => Script::Tibetan,
0x0F37 => Script::Tibetan,
0x0F38 => Script::Tibetan,
0x0F39 => Script::Tibetan,
0x0F3A => Script::Tibetan,
0x0F3B => Script::Tibetan,
0x0F3C => Script::Tibetan,
0x0F3D => Script::Tibetan,
0x0F3E..=0x0F3F => Script::Tibetan,
0x0F40..=0x0F47 => Script::Tibetan,
0x0F49..=0x0F6C => Script::Tibetan,
0x0F71..=0x0F7E => Script::Tibetan,
0x0F7F => Script::Tibetan,
0x0F80..=0x0F84 => Script::Tibetan,
0x0F85 => Script::Tibetan,
0x0F86..=0x0F87 => Script::Tibetan,
0x0F88..=0x0F8C => Script::Tibetan,
0x0F8D..=0x0F97 => Script::Tibetan,
0x0F99..=0x0FBC => Script::Tibetan,
0x0FBE..=0x0FC5 => Script::Tibetan,
0x0FC6 => Script::Tibetan,
0x0FC7..=0x0FCC => Script::Tibetan,
0x0FCE..=0x0FCF => Script::Tibetan,
0x0FD0..=0x0FD4 => Script::Tibetan,
0x0FD9..=0x0FDA => Script::Tibetan,
0x1000..=0x102A => Script::Myanmar,
0x102B..=0x102C => Script::Myanmar,
0x102D..=0x1030 => Script::Myanmar,
0x1031 => Script::Myanmar,
0x1032..=0x1037 => Script::Myanmar,
0x1038 => Script::Myanmar,
0x1039..=0x103A => Script::Myanmar,
0x103B..=0x103C => Script::Myanmar,
0x103D..=0x103E => Script::Myanmar,
0x103F => Script::Myanmar,
0x1040..=0x1049 => Script::Myanmar,
0x104A..=0x104F => Script::Myanmar,
0x1050..=0x1055 => Script::Myanmar,
0x1056..=0x1057 => Script::Myanmar,
0x1058..=0x1059 => Script::Myanmar,
0x105A..=0x105D => Script::Myanmar,
0x105E..=0x1060 => Script::Myanmar,
0x1061 => Script::Myanmar,
0x1062..=0x1064 => Script::Myanmar,
0x1065..=0x1066 => Script::Myanmar,
0x1067..=0x106D => Script::Myanmar,
0x106E..=0x1070 => Script::Myanmar,
0x1071..=0x1074 => Script::Myanmar,
0x1075..=0x1081 => Script::Myanmar,
0x1082 => Script::Myanmar,
0x1083..=0x1084 => Script::Myanmar,
0x1085..=0x1086 => Script::Myanmar,
0x1087..=0x108C => Script::Myanmar,
0x108D => Script::Myanmar,
0x108E => Script::Myanmar,
0x108F => Script::Myanmar,
0x1090..=0x1099 => Script::Myanmar,
0x109A..=0x109C => Script::Myanmar,
0x109D => Script::Myanmar,
0x109E..=0x109F => Script::Myanmar,
0xA9E0..=0xA9E4 => Script::Myanmar,
0xA9E5 => Script::Myanmar,
0xA9E6 => Script::Myanmar,
0xA9E7..=0xA9EF => Script::Myanmar,
0xA9F0..=0xA9F9 => Script::Myanmar,
0xA9FA..=0xA9FE => Script::Myanmar,
0xAA60..=0xAA6F => Script::Myanmar,
0xAA70 => Script::Myanmar,
0xAA71..=0xAA76 => Script::Myanmar,
0xAA77..=0xAA79 => Script::Myanmar,
0xAA7A => Script::Myanmar,
0xAA7B => Script::Myanmar,
0xAA7C => Script::Myanmar,
0xAA7D => Script::Myanmar,
0xAA7E..=0xAA7F => Script::Myanmar,
0x10A0..=0x10C5 => Script::Georgian,
0x10C7 => Script::Georgian,
0x10CD => Script::Georgian,
0x10D0..=0x10FA => Script::Georgian,
0x10FC => Script::Georgian,
0x10FD..=0x10FF => Script::Georgian,
0x2D00..=0x2D25 => Script::Georgian,
0x2D27 => Script::Georgian,
0x2D2D => Script::Georgian,
0x1100..=0x11FF => Script::Hangul,
0x302E..=0x302F => Script::Hangul,
0x3131..=0x318E => Script::Hangul,
0x3200..=0x321E => Script::Hangul,
0x3260..=0x327E => Script::Hangul,
0xA960..=0xA97C => Script::Hangul,
0xAC00..=0xD7A3 => Script::Hangul,
0xD7B0..=0xD7C6 => Script::Hangul,
0xD7CB..=0xD7FB => Script::Hangul,
0xFFA0..=0xFFBE => Script::Hangul,
0xFFC2..=0xFFC7 => Script::Hangul,
0xFFCA..=0xFFCF => Script::Hangul,
0xFFD2..=0xFFD7 => Script::Hangul,
0xFFDA..=0xFFDC => Script::Hangul,
0x1200..=0x1248 => Script::Ethiopic,
0x124A..=0x124D => Script::Ethiopic,
0x1250..=0x1256 => Script::Ethiopic,
0x1258 => Script::Ethiopic,
0x125A..=0x125D => Script::Ethiopic,
0x1260..=0x1288 => Script::Ethiopic,
0x128A..=0x128D => Script::Ethiopic,
0x1290..=0x12B0 => Script::Ethiopic,
0x12B2..=0x12B5 => Script::Ethiopic,
0x12B8..=0x12BE => Script::Ethiopic,
0x12C0 => Script::Ethiopic,
0x12C2..=0x12C5 => Script::Ethiopic,
0x12C8..=0x12D6 => Script::Ethiopic,
0x12D8..=0x1310 => Script::Ethiopic,
0x1312..=0x1315 => Script::Ethiopic,
0x1318..=0x135A => Script::Ethiopic,
0x135D..=0x135F => Script::Ethiopic,
0x1360..=0x1368 => Script::Ethiopic,
0x1369..=0x137C => Script::Ethiopic,
0x1380..=0x138F => Script::Ethiopic,
0x1390..=0x1399 => Script::Ethiopic,
0x2D80..=0x2D96 => Script::Ethiopic,
0x2DA0..=0x2DA6 => Script::Ethiopic,
0x2DA8..=0x2DAE => Script::Ethiopic,
0x2DB0..=0x2DB6 => Script::Ethiopic,
0x2DB8..=0x2DBE => Script::Ethiopic,
0x2DC0..=0x2DC6 => Script::Ethiopic,
0x2DC8..=0x2DCE => Script::Ethiopic,
0x2DD0..=0x2DD6 => Script::Ethiopic,
0x2DD8..=0x2DDE => Script::Ethiopic,
0xAB01..=0xAB06 => Script::Ethiopic,
0xAB09..=0xAB0E => Script::Ethiopic,
0xAB11..=0xAB16 => Script::Ethiopic,
0xAB20..=0xAB26 => Script::Ethiopic,
0xAB28..=0xAB2E => Script::Ethiopic,
0x13A0..=0x13F5 => Script::Cherokee,
0x13F8..=0x13FD => Script::Cherokee,
0xAB70..=0xABBF => Script::Cherokee,
0x1400 => Script::CanadianAboriginal,
0x1401..=0x166C => Script::CanadianAboriginal,
0x166D..=0x166E => Script::CanadianAboriginal,
0x166F..=0x167F => Script::CanadianAboriginal,
0x18B0..=0x18F5 => Script::CanadianAboriginal,
0x1680 => Script::Ogham,
0x1681..=0x169A => Script::Ogham,
0x169B => Script::Ogham,
0x169C => Script::Ogham,
0x16A0..=0x16EA => Script::Runic,
0x16EE..=0x16F0 => Script::Runic,
0x16F1..=0x16F8 => Script::Runic,
0x1780..=0x17B3 => Script::Khmer,
0x17B4..=0x17B5 => Script::Khmer,
0x17B6 => Script::Khmer,
0x17B7..=0x17BD => Script::Khmer,
0x17BE..=0x17C5 => Script::Khmer,
0x17C6 => Script::Khmer,
0x17C7..=0x17C8 => Script::Khmer,
0x17C9..=0x17D3 => Script::Khmer,
0x17D4..=0x17D6 => Script::Khmer,
0x17D7 => Script::Khmer,
0x17D8..=0x17DA => Script::Khmer,
0x17DB => Script::Khmer,
0x17DC => Script::Khmer,
0x17DD => Script::Khmer,
0x17E0..=0x17E9 => Script::Khmer,
0x17F0..=0x17F9 => Script::Khmer,
0x19E0..=0x19FF => Script::Khmer,
0x1800..=0x1801 => Script::Mongolian,
0x1804 => Script::Mongolian,
0x1806 => Script::Mongolian,
0x1807..=0x180A => Script::Mongolian,
0x180B..=0x180D => Script::Mongolian,
0x180E => Script::Mongolian,
0x1810..=0x1819 => Script::Mongolian,
0x1820..=0x1842 => Script::Mongolian,
0x1843 => Script::Mongolian,
0x1844..=0x1877 => Script::Mongolian,
0x1880..=0x1884 => Script::Mongolian,
0x1885..=0x1886 => Script::Mongolian,
0x1887..=0x18A8 => Script::Mongolian,
0x18A9 => Script::Mongolian,
0x18AA => Script::Mongolian,
0x11660..=0x1166C => Script::Mongolian,
0x3041..=0x3096 => Script::Hiragana,
0x309D..=0x309E => Script::Hiragana,
0x309F => Script::Hiragana,
0x1B001 => Script::Hiragana,
0x1F200 => Script::Hiragana,
0x30A1..=0x30FA => Script::Katakana,
0x30FD..=0x30FE => Script::Katakana,
0x30FF => Script::Katakana,
0x31F0..=0x31FF => Script::Katakana,
0x32D0..=0x32FE => Script::Katakana,
0x3300..=0x3357 => Script::Katakana,
0xFF66..=0xFF6F => Script::Katakana,
0xFF71..=0xFF9D => Script::Katakana,
0x1B000 => Script::Katakana,
0x02EA..=0x02EB => Script::Bopomofo,
0x3105..=0x312D => Script::Bopomofo,
0x31A0..=0x31BA => Script::Bopomofo,
0x2E80..=0x2E99 => Script::Han,
0x2E9B..=0x2EF3 => Script::Han,
0x2F00..=0x2FD5 => Script::Han,
0x3005 => Script::Han,
0x3007 => Script::Han,
0x3021..=0x3029 => Script::Han,
0x3038..=0x303A => Script::Han,
0x303B => Script::Han,
0x3400..=0x4DB5 => Script::Han,
0x4E00..=0x9FD5 => Script::Han,
0xF900..=0xFA6D => Script::Han,
0xFA70..=0xFAD9 => Script::Han,
0x20000..=0x2A6D6 => Script::Han,
0x2A700..=0x2B734 => Script::Han,
0x2B740..=0x2B81D => Script::Han,
0x2B820..=0x2CEA1 => Script::Han,
0x2F800..=0x2FA1D => Script::Han,
0xA000..=0xA014 => Script::Yi,
0xA015 => Script::Yi,
0xA016..=0xA48C => Script::Yi,
0xA490..=0xA4C6 => Script::Yi,
0x10300..=0x1031F => Script::OldItalic,
0x10320..=0x10323 => Script::OldItalic,
0x10330..=0x10340 => Script::Gothic,
0x10341 => Script::Gothic,
0x10342..=0x10349 => Script::Gothic,
0x1034A => Script::Gothic,
0x10400..=0x1044F => Script::Deseret,
0x0300..=0x036F => Script::Inherited,
0x0485..=0x0486 => Script::Inherited,
0x064B..=0x0655 => Script::Inherited,
0x0670 => Script::Inherited,
0x0951..=0x0952 => Script::Inherited,
0x1AB0..=0x1ABD => Script::Inherited,
0x1ABE => Script::Inherited,
0x1CD0..=0x1CD2 => Script::Inherited,
0x1CD4..=0x1CE0 => Script::Inherited,
0x1CE2..=0x1CE8 => Script::Inherited,
0x1CED => Script::Inherited,
0x1CF4 => Script::Inherited,
0x1CF8..=0x1CF9 => Script::Inherited,
0x1DC0..=0x1DF5 => Script::Inherited,
0x1DFB..=0x1DFF => Script::Inherited,
0x200C..=0x200D => Script::Inherited,
0x20D0..=0x20DC => Script::Inherited,
0x20DD..=0x20E0 => Script::Inherited,
0x20E1 => Script::Inherited,
0x20E2..=0x20E4 => Script::Inherited,
0x20E5..=0x20F0 => Script::Inherited,
0x302A..=0x302D => Script::Inherited,
0x3099..=0x309A => Script::Inherited,
0xFE00..=0xFE0F => Script::Inherited,
0xFE20..=0xFE2D => Script::Inherited,
0x101FD => Script::Inherited,
0x102E0 => Script::Inherited,
0x1D167..=0x1D169 => Script::Inherited,
0x1D17B..=0x1D182 => Script::Inherited,
0x1D185..=0x1D18B => Script::Inherited,
0x1D1AA..=0x1D1AD => Script::Inherited,
0xE0100..=0xE01EF => Script::Inherited,
0x1700..=0x170C => Script::Tagalog,
0x170E..=0x1711 => Script::Tagalog,
0x1712..=0x1714 => Script::Tagalog,
0x1720..=0x1731 => Script::Hanunoo,
0x1732..=0x1734 => Script::Hanunoo,
0x1740..=0x1751 => Script::Buhid,
0x1752..=0x1753 => Script::Buhid,
0x1760..=0x176C => Script::Tagbanwa,
0x176E..=0x1770 => Script::Tagbanwa,
0x1772..=0x1773 => Script::Tagbanwa,
0x1900..=0x191E => Script::Limbu,
0x1920..=0x1922 => Script::Limbu,
0x1923..=0x1926 => Script::Limbu,
0x1927..=0x1928 => Script::Limbu,
0x1929..=0x192B => Script::Limbu,
0x1930..=0x1931 => Script::Limbu,
0x1932 => Script::Limbu,
0x1933..=0x1938 => Script::Limbu,
0x1939..=0x193B => Script::Limbu,
0x1940 => Script::Limbu,
0x1944..=0x1945 => Script::Limbu,
0x1946..=0x194F => Script::Limbu,
0x1950..=0x196D => Script::TaiLe,
0x1970..=0x1974 => Script::TaiLe,
0x10000..=0x1000B => Script::LinearB,
0x1000D..=0x10026 => Script::LinearB,
0x10028..=0x1003A => Script::LinearB,
0x1003C..=0x1003D => Script::LinearB,
0x1003F..=0x1004D => Script::LinearB,
0x10050..=0x1005D => Script::LinearB,
0x10080..=0x100FA => Script::LinearB,
0x10380..=0x1039D => Script::Ugaritic,
0x1039F => Script::Ugaritic,
0x10450..=0x1047F => Script::Shavian,
0x10480..=0x1049D => Script::Osmanya,
0x104A0..=0x104A9 => Script::Osmanya,
0x10800..=0x10805 => Script::Cypriot,
0x10808 => Script::Cypriot,
0x1080A..=0x10835 => Script::Cypriot,
0x10837..=0x10838 => Script::Cypriot,
0x1083C => Script::Cypriot,
0x1083F => Script::Cypriot,
0x2800..=0x28FF => Script::Braille,
0x1A00..=0x1A16 => Script::Buginese,
0x1A17..=0x1A18 => Script::Buginese,
0x1A19..=0x1A1A => Script::Buginese,
0x1A1B => Script::Buginese,
0x1A1E..=0x1A1F => Script::Buginese,
0x03E2..=0x03EF => Script::Coptic,
0x2C80..=0x2CE4 => Script::Coptic,
0x2CE5..=0x2CEA => Script::Coptic,
0x2CEB..=0x2CEE => Script::Coptic,
0x2CEF..=0x2CF1 => Script::Coptic,
0x2CF2..=0x2CF3 => Script::Coptic,
0x2CF9..=0x2CFC => Script::Coptic,
0x2CFD => Script::Coptic,
0x2CFE..=0x2CFF => Script::Coptic,
0x1980..=0x19AB => Script::NewTaiLue,
0x19B0..=0x19C9 => Script::NewTaiLue,
0x19D0..=0x19D9 => Script::NewTaiLue,
0x19DA => Script::NewTaiLue,
0x19DE..=0x19DF => Script::NewTaiLue,
0x2C00..=0x2C2E => Script::Glagolitic,
0x2C30..=0x2C5E => Script::Glagolitic,
0x1E000..=0x1E006 => Script::Glagolitic,
0x1E008..=0x1E018 => Script::Glagolitic,
0x1E01B..=0x1E021 => Script::Glagolitic,
0x1E023..=0x1E024 => Script::Glagolitic,
0x1E026..=0x1E02A => Script::Glagolitic,
0x2D30..=0x2D67 => Script::Tifinagh,
0x2D6F => Script::Tifinagh,
0x2D70 => Script::Tifinagh,
0x2D7F => Script::Tifinagh,
0xA800..=0xA801 => Script::SylotiNagri,
0xA802 => Script::SylotiNagri,
0xA803..=0xA805 => Script::SylotiNagri,
0xA806 => Script::SylotiNagri,
0xA807..=0xA80A => Script::SylotiNagri,
0xA80B => Script::SylotiNagri,
0xA80C..=0xA822 => Script::SylotiNagri,
0xA823..=0xA824 => Script::SylotiNagri,
0xA825..=0xA826 => Script::SylotiNagri,
0xA827 => Script::SylotiNagri,
0xA828..=0xA82B => Script::SylotiNagri,
0x103A0..=0x103C3 => Script::OldPersian,
0x103C8..=0x103CF => Script::OldPersian,
0x103D0 => Script::OldPersian,
0x103D1..=0x103D5 => Script::OldPersian,
0x10A00 => Script::Kharoshthi,
0x10A01..=0x10A03 => Script::Kharoshthi,
0x10A05..=0x10A06 => Script::Kharoshthi,
0x10A0C..=0x10A0F => Script::Kharoshthi,
0x10A10..=0x10A13 => Script::Kharoshthi,
0x10A15..=0x10A17 => Script::Kharoshthi,
0x10A19..=0x10A33 => Script::Kharoshthi,
0x10A38..=0x10A3A => Script::Kharoshthi,
0x10A3F => Script::Kharoshthi,
0x10A40..=0x10A47 => Script::Kharoshthi,
0x10A50..=0x10A58 => Script::Kharoshthi,
0x1B00..=0x1B03 => Script::Balinese,
0x1B04 => Script::Balinese,
0x1B05..=0x1B33 => Script::Balinese,
0x1B34 => Script::Balinese,
0x1B35 => Script::Balinese,
0x1B36..=0x1B3A => Script::Balinese,
0x1B3B => Script::Balinese,
0x1B3C => Script::Balinese,
0x1B3D..=0x1B41 => Script::Balinese,
0x1B42 => Script::Balinese,
0x1B43..=0x1B44 => Script::Balinese,
0x1B45..=0x1B4B => Script::Balinese,
0x1B50..=0x1B59 => Script::Balinese,
0x1B5A..=0x1B60 => Script::Balinese,
0x1B61..=0x1B6A => Script::Balinese,
0x1B6B..=0x1B73 => Script::Balinese,
0x1B74..=0x1B7C => Script::Balinese,
0x12000..=0x12399 => Script::Cuneiform,
0x12400..=0x1246E => Script::Cuneiform,
0x12470..=0x12474 => Script::Cuneiform,
0x12480..=0x12543 => Script::Cuneiform,
0x10900..=0x10915 => Script::Phoenician,
0x10916..=0x1091B => Script::Phoenician,
0x1091F => Script::Phoenician,
0xA840..=0xA873 => Script::PhagsPa,
0xA874..=0xA877 => Script::PhagsPa,
0x07C0..=0x07C9 => Script::Nko,
0x07CA..=0x07EA => Script::Nko,
0x07EB..=0x07F3 => Script::Nko,
0x07F4..=0x07F5 => Script::Nko,
0x07F6 => Script::Nko,
0x07F7..=0x07F9 => Script::Nko,
0x07FA => Script::Nko,
0x1B80..=0x1B81 => Script::Sundanese,
0x1B82 => Script::Sundanese,
0x1B83..=0x1BA0 => Script::Sundanese,
0x1BA1 => Script::Sundanese,
0x1BA2..=0x1BA5 => Script::Sundanese,
0x1BA6..=0x1BA7 => Script::Sundanese,
0x1BA8..=0x1BA9 => Script::Sundanese,
0x1BAA => Script::Sundanese,
0x1BAB..=0x1BAD => Script::Sundanese,
0x1BAE..=0x1BAF => Script::Sundanese,
0x1BB0..=0x1BB9 => Script::Sundanese,
0x1BBA..=0x1BBF => Script::Sundanese,
0x1CC0..=0x1CC7 => Script::Sundanese,
0x1C00..=0x1C23 => Script::Lepcha,
0x1C24..=0x1C2B => Script::Lepcha,
0x1C2C..=0x1C33 => Script::Lepcha,
0x1C34..=0x1C35 => Script::Lepcha,
0x1C36..=0x1C37 => Script::Lepcha,
0x1C3B..=0x1C3F => Script::Lepcha,
0x1C40..=0x1C49 => Script::Lepcha,
0x1C4D..=0x1C4F => Script::Lepcha,
0x1C50..=0x1C59 => Script::OlChiki,
0x1C5A..=0x1C77 => Script::OlChiki,
0x1C78..=0x1C7D => Script::OlChiki,
0x1C7E..=0x1C7F => Script::OlChiki,
0xA500..=0xA60B => Script::Vai,
0xA60C => Script::Vai,
0xA60D..=0xA60F => Script::Vai,
0xA610..=0xA61F => Script::Vai,
0xA620..=0xA629 => Script::Vai,
0xA62A..=0xA62B => Script::Vai,
0xA880..=0xA881 => Script::Saurashtra,
0xA882..=0xA8B3 => Script::Saurashtra,
0xA8B4..=0xA8C3 => Script::Saurashtra,
0xA8C4..=0xA8C5 => Script::Saurashtra,
0xA8CE..=0xA8CF => Script::Saurashtra,
0xA8D0..=0xA8D9 => Script::Saurashtra,
0xA900..=0xA909 => Script::KayahLi,
0xA90A..=0xA925 => Script::KayahLi,
0xA926..=0xA92D => Script::KayahLi,
0xA92F => Script::KayahLi,
0xA930..=0xA946 => Script::Rejang,
0xA947..=0xA951 => Script::Rejang,
0xA952..=0xA953 => Script::Rejang,
0xA95F => Script::Rejang,
0x10280..=0x1029C => Script::Lycian,
0x102A0..=0x102D0 => Script::Carian,
0x10920..=0x10939 => Script::Lydian,
0x1093F => Script::Lydian,
0xAA00..=0xAA28 => Script::Cham,
0xAA29..=0xAA2E => Script::Cham,
0xAA2F..=0xAA30 => Script::Cham,
0xAA31..=0xAA32 => Script::Cham,
0xAA33..=0xAA34 => Script::Cham,
0xAA35..=0xAA36 => Script::Cham,
0xAA40..=0xAA42 => Script::Cham,
0xAA43 => Script::Cham,
0xAA44..=0xAA4B => Script::Cham,
0xAA4C => Script::Cham,
0xAA4D => Script::Cham,
0xAA50..=0xAA59 => Script::Cham,
0xAA5C..=0xAA5F => Script::Cham,
0x1A20..=0x1A54 => Script::TaiTham,
0x1A55 => Script::TaiTham,
0x1A56 => Script::TaiTham,
0x1A57 => Script::TaiTham,
0x1A58..=0x1A5E => Script::TaiTham,
0x1A60 => Script::TaiTham,
0x1A61 => Script::TaiTham,
0x1A62 => Script::TaiTham,
0x1A63..=0x1A64 => Script::TaiTham,
0x1A65..=0x1A6C => Script::TaiTham,
0x1A6D..=0x1A72 => Script::TaiTham,
0x1A73..=0x1A7C => Script::TaiTham,
0x1A7F => Script::TaiTham,
0x1A80..=0x1A89 => Script::TaiTham,
0x1A90..=0x1A99 => Script::TaiTham,
0x1AA0..=0x1AA6 => Script::TaiTham,
0x1AA7 => Script::TaiTham,
0x1AA8..=0x1AAD => Script::TaiTham,
0xAA80..=0xAAAF => Script::TaiViet,
0xAAB0 => Script::TaiViet,
0xAAB1 => Script::TaiViet,
0xAAB2..=0xAAB4 => Script::TaiViet,
0xAAB5..=0xAAB6 => Script::TaiViet,
0xAAB7..=0xAAB8 => Script::TaiViet,
0xAAB9..=0xAABD => Script::TaiViet,
0xAABE..=0xAABF => Script::TaiViet,
0xAAC0 => Script::TaiViet,
0xAAC1 => Script::TaiViet,
0xAAC2 => Script::TaiViet,
0xAADB..=0xAADC => Script::TaiViet,
0xAADD => Script::TaiViet,
0xAADE..=0xAADF => Script::TaiViet,
0x10B00..=0x10B35 => Script::Avestan,
0x10B39..=0x10B3F => Script::Avestan,
0x13000..=0x1342E => Script::EgyptianHieroglyphs,
0x0800..=0x0815 => Script::Samaritan,
0x0816..=0x0819 => Script::Samaritan,
0x081A => Script::Samaritan,
0x081B..=0x0823 => Script::Samaritan,
0x0824 => Script::Samaritan,
0x0825..=0x0827 => Script::Samaritan,
0x0828 => Script::Samaritan,
0x0829..=0x082D => Script::Samaritan,
0x0830..=0x083E => Script::Samaritan,
0xA4D0..=0xA4F7 => Script::Lisu,
0xA4F8..=0xA4FD => Script::Lisu,
0xA4FE..=0xA4FF => Script::Lisu,
0xA6A0..=0xA6E5 => Script::Bamum,
0xA6E6..=0xA6EF => Script::Bamum,
0xA6F0..=0xA6F1 => Script::Bamum,
0xA6F2..=0xA6F7 => Script::Bamum,
0x16800..=0x16A38 => Script::Bamum,
0xA980..=0xA982 => Script::Javanese,
0xA983 => Script::Javanese,
0xA984..=0xA9B2 => Script::Javanese,
0xA9B3 => Script::Javanese,
0xA9B4..=0xA9B5 => Script::Javanese,
0xA9B6..=0xA9B9 => Script::Javanese,
0xA9BA..=0xA9BB => Script::Javanese,
0xA9BC => Script::Javanese,
0xA9BD..=0xA9C0 => Script::Javanese,
0xA9C1..=0xA9CD => Script::Javanese,
0xA9D0..=0xA9D9 => Script::Javanese,
0xA9DE..=0xA9DF => Script::Javanese,
0xAAE0..=0xAAEA => Script::MeeteiMayek,
0xAAEB => Script::MeeteiMayek,
0xAAEC..=0xAAED => Script::MeeteiMayek,
0xAAEE..=0xAAEF => Script::MeeteiMayek,
0xAAF0..=0xAAF1 => Script::MeeteiMayek,
0xAAF2 => Script::MeeteiMayek,
0xAAF3..=0xAAF4 => Script::MeeteiMayek,
0xAAF5 => Script::MeeteiMayek,
0xAAF6 => Script::MeeteiMayek,
0xABC0..=0xABE2 => Script::MeeteiMayek,
0xABE3..=0xABE4 => Script::MeeteiMayek,
0xABE5 => Script::MeeteiMayek,
0xABE6..=0xABE7 => Script::MeeteiMayek,
0xABE8 => Script::MeeteiMayek,
0xABE9..=0xABEA => Script::MeeteiMayek,
0xABEB => Script::MeeteiMayek,
0xABEC => Script::MeeteiMayek,
0xABED => Script::MeeteiMayek,
0xABF0..=0xABF9 => Script::MeeteiMayek,
0x10840..=0x10855 => Script::ImperialAramaic,
0x10857 => Script::ImperialAramaic,
0x10858..=0x1085F => Script::ImperialAramaic,
0x10A60..=0x10A7C => Script::OldSouthArabian,
0x10A7D..=0x10A7E => Script::OldSouthArabian,
0x10A7F => Script::OldSouthArabian,
0x10B40..=0x10B55 => Script::InscriptionalParthian,
0x10B58..=0x10B5F => Script::InscriptionalParthian,
0x10B60..=0x10B72 => Script::InscriptionalPahlavi,
0x10B78..=0x10B7F => Script::InscriptionalPahlavi,
0x10C00..=0x10C48 => Script::OldTurkic,
0x11080..=0x11081 => Script::Kaithi,
0x11082 => Script::Kaithi,
0x11083..=0x110AF => Script::Kaithi,
0x110B0..=0x110B2 => Script::Kaithi,
0x110B3..=0x110B6 => Script::Kaithi,
0x110B7..=0x110B8 => Script::Kaithi,
0x110B9..=0x110BA => Script::Kaithi,
0x110BB..=0x110BC => Script::Kaithi,
0x110BD => Script::Kaithi,
0x110BE..=0x110C1 => Script::Kaithi,
0x1BC0..=0x1BE5 => Script::Batak,
0x1BE6 => Script::Batak,
0x1BE7 => Script::Batak,
0x1BE8..=0x1BE9 => Script::Batak,
0x1BEA..=0x1BEC => Script::Batak,
0x1BED => Script::Batak,
0x1BEE => Script::Batak,
0x1BEF..=0x1BF1 => Script::Batak,
0x1BF2..=0x1BF3 => Script::Batak,
0x1BFC..=0x1BFF => Script::Batak,
0x11000 => Script::Brahmi,
0x11001 => Script::Brahmi,
0x11002 => Script::Brahmi,
0x11003..=0x11037 => Script::Brahmi,
0x11038..=0x11046 => Script::Brahmi,
0x11047..=0x1104D => Script::Brahmi,
0x11052..=0x11065 => Script::Brahmi,
0x11066..=0x1106F => Script::Brahmi,
0x1107F => Script::Brahmi,
0x0840..=0x0858 => Script::Mandaic,
0x0859..=0x085B => Script::Mandaic,
0x085E => Script::Mandaic,
0x11100..=0x11102 => Script::Chakma,
0x11103..=0x11126 => Script::Chakma,
0x11127..=0x1112B => Script::Chakma,
0x1112C => Script::Chakma,
0x1112D..=0x11134 => Script::Chakma,
0x11136..=0x1113F => Script::Chakma,
0x11140..=0x11143 => Script::Chakma,
0x109A0..=0x109B7 => Script::MeroiticCursive,
0x109BC..=0x109BD => Script::MeroiticCursive,
0x109BE..=0x109BF => Script::MeroiticCursive,
0x109C0..=0x109CF => Script::MeroiticCursive,
0x109D2..=0x109FF => Script::MeroiticCursive,
0x10980..=0x1099F => Script::MeroiticHieroglyphs,
0x16F00..=0x16F44 => Script::Miao,
0x16F50 => Script::Miao,
0x16F51..=0x16F7E => Script::Miao,
0x16F8F..=0x16F92 => Script::Miao,
0x16F93..=0x16F9F => Script::Miao,
0x11180..=0x11181 => Script::Sharada,
0x11182 => Script::Sharada,
0x11183..=0x111B2 => Script::Sharada,
0x111B3..=0x111B5 => Script::Sharada,
0x111B6..=0x111BE => Script::Sharada,
0x111BF..=0x111C0 => Script::Sharada,
0x111C1..=0x111C4 => Script::Sharada,
0x111C5..=0x111C9 => Script::Sharada,
0x111CA..=0x111CC => Script::Sharada,
0x111CD => Script::Sharada,
0x111D0..=0x111D9 => Script::Sharada,
0x111DA => Script::Sharada,
0x111DB => Script::Sharada,
0x111DC => Script::Sharada,
0x111DD..=0x111DF => Script::Sharada,
0x110D0..=0x110E8 => Script::SoraSompeng,
0x110F0..=0x110F9 => Script::SoraSompeng,
0x11680..=0x116AA => Script::Takri,
0x116AB => Script::Takri,
0x116AC => Script::Takri,
0x116AD => Script::Takri,
0x116AE..=0x116AF => Script::Takri,
0x116B0..=0x116B5 => Script::Takri,
0x116B6 => Script::Takri,
0x116B7 => Script::Takri,
0x116C0..=0x116C9 => Script::Takri,
0x10530..=0x10563 => Script::CaucasianAlbanian,
0x1056F => Script::CaucasianAlbanian,
0x16AD0..=0x16AED => Script::BassaVah,
0x16AF0..=0x16AF4 => Script::BassaVah,
0x16AF5 => Script::BassaVah,
0x1BC00..=0x1BC6A => Script::Duployan,
0x1BC70..=0x1BC7C => Script::Duployan,
0x1BC80..=0x1BC88 => Script::Duployan,
0x1BC90..=0x1BC99 => Script::Duployan,
0x1BC9C => Script::Duployan,
0x1BC9D..=0x1BC9E => Script::Duployan,
0x1BC9F => Script::Duployan,
0x10500..=0x10527 => Script::Elbasan,
0x11300..=0x11301 => Script::Grantha,
0x11302..=0x11303 => Script::Grantha,
0x11305..=0x1130C => Script::Grantha,
0x1130F..=0x11310 => Script::Grantha,
0x11313..=0x11328 => Script::Grantha,
0x1132A..=0x11330 => Script::Grantha,
0x11332..=0x11333 => Script::Grantha,
0x11335..=0x11339 => Script::Grantha,
0x1133C => Script::Grantha,
0x1133D => Script::Grantha,
0x1133E..=0x1133F => Script::Grantha,
0x11340 => Script::Grantha,
0x11341..=0x11344 => Script::Grantha,
0x11347..=0x11348 => Script::Grantha,
0x1134B..=0x1134D => Script::Grantha,
0x11350 => Script::Grantha,
0x11357 => Script::Grantha,
0x1135D..=0x11361 => Script::Grantha,
0x11362..=0x11363 => Script::Grantha,
0x11366..=0x1136C => Script::Grantha,
0x11370..=0x11374 => Script::Grantha,
0x16B00..=0x16B2F => Script::PahawhHmong,
0x16B30..=0x16B36 => Script::PahawhHmong,
0x16B37..=0x16B3B => Script::PahawhHmong,
0x16B3C..=0x16B3F => Script::PahawhHmong,
0x16B40..=0x16B43 => Script::PahawhHmong,
0x16B44 => Script::PahawhHmong,
0x16B45 => Script::PahawhHmong,
0x16B50..=0x16B59 => Script::PahawhHmong,
0x16B5B..=0x16B61 => Script::PahawhHmong,
0x16B63..=0x16B77 => Script::PahawhHmong,
0x16B7D..=0x16B8F => Script::PahawhHmong,
0x11200..=0x11211 => Script::Khojki,
0x11213..=0x1122B => Script::Khojki,
0x1122C..=0x1122E => Script::Khojki,
0x1122F..=0x11231 => Script::Khojki,
0x11232..=0x11233 => Script::Khojki,
0x11234 => Script::Khojki,
0x11235 => Script::Khojki,
0x11236..=0x11237 => Script::Khojki,
0x11238..=0x1123D => Script::Khojki,
0x1123E => Script::Khojki,
0x10600..=0x10736 => Script::LinearA,
0x10740..=0x10755 => Script::LinearA,
0x10760..=0x10767 => Script::LinearA,
0x11150..=0x11172 => Script::Mahajani,
0x11173 => Script::Mahajani,
0x11174..=0x11175 => Script::Mahajani,
0x11176 => Script::Mahajani,
0x10AC0..=0x10AC7 => Script::Manichaean,
0x10AC8 => Script::Manichaean,
0x10AC9..=0x10AE4 => Script::Manichaean,
0x10AE5..=0x10AE6 => Script::Manichaean,
0x10AEB..=0x10AEF => Script::Manichaean,
0x10AF0..=0x10AF6 => Script::Manichaean,
0x1E800..=0x1E8C4 => Script::MendeKikakui,
0x1E8C7..=0x1E8CF => Script::MendeKikakui,
0x1E8D0..=0x1E8D6 => Script::MendeKikakui,
0x11600..=0x1162F => Script::Modi,
0x11630..=0x11632 => Script::Modi,
0x11633..=0x1163A => Script::Modi,
0x1163B..=0x1163C => Script::Modi,
0x1163D => Script::Modi,
0x1163E => Script::Modi,
0x1163F..=0x11640 => Script::Modi,
0x11641..=0x11643 => Script::Modi,
0x11644 => Script::Modi,
0x11650..=0x11659 => Script::Modi,
0x16A40..=0x16A5E => Script::Mro,
0x16A60..=0x16A69 => Script::Mro,
0x16A6E..=0x16A6F => Script::Mro,
0x10A80..=0x10A9C => Script::OldNorthArabian,
0x10A9D..=0x10A9F => Script::OldNorthArabian,
0x10880..=0x1089E => Script::Nabataean,
0x108A7..=0x108AF => Script::Nabataean,
0x10860..=0x10876 => Script::Palmyrene,
0x10877..=0x10878 => Script::Palmyrene,
0x10879..=0x1087F => Script::Palmyrene,
0x11AC0..=0x11AF8 => Script::PauCinHau,
0x10350..=0x10375 => Script::OldPermic,
0x10376..=0x1037A => Script::OldPermic,
0x10B80..=0x10B91 => Script::PsalterPahlavi,
0x10B99..=0x10B9C => Script::PsalterPahlavi,
0x10BA9..=0x10BAF => Script::PsalterPahlavi,
0x11580..=0x115AE => Script::Siddham,
0x115AF..=0x115B1 => Script::Siddham,
0x115B2..=0x115B5 => Script::Siddham,
0x115B8..=0x115BB => Script::Siddham,
0x115BC..=0x115BD => Script::Siddham,
0x115BE => Script::Siddham,
0x115BF..=0x115C0 => Script::Siddham,
0x115C1..=0x115D7 => Script::Siddham,
0x115D8..=0x115DB => Script::Siddham,
0x115DC..=0x115DD => Script::Siddham,
0x112B0..=0x112DE => Script::Khudawadi,
0x112DF => Script::Khudawadi,
0x112E0..=0x112E2 => Script::Khudawadi,
0x112E3..=0x112EA => Script::Khudawadi,
0x112F0..=0x112F9 => Script::Khudawadi,
0x11480..=0x114AF => Script::Tirhuta,
0x114B0..=0x114B2 => Script::Tirhuta,
0x114B3..=0x114B8 => Script::Tirhuta,
0x114B9 => Script::Tirhuta,
0x114BA => Script::Tirhuta,
0x114BB..=0x114BE => Script::Tirhuta,
0x114BF..=0x114C0 => Script::Tirhuta,
0x114C1 => Script::Tirhuta,
0x114C2..=0x114C3 => Script::Tirhuta,
0x114C4..=0x114C5 => Script::Tirhuta,
0x114C6 => Script::Tirhuta,
0x114C7 => Script::Tirhuta,
0x114D0..=0x114D9 => Script::Tirhuta,
0x118A0..=0x118DF => Script::WarangCiti,
0x118E0..=0x118E9 => Script::WarangCiti,
0x118EA..=0x118F2 => Script::WarangCiti,
0x118FF => Script::WarangCiti,
0x11700..=0x11719 => Script::Ahom,
0x1171D..=0x1171F => Script::Ahom,
0x11720..=0x11721 => Script::Ahom,
0x11722..=0x11725 => Script::Ahom,
0x11726 => Script::Ahom,
0x11727..=0x1172B => Script::Ahom,
0x11730..=0x11739 => Script::Ahom,
0x1173A..=0x1173B => Script::Ahom,
0x1173C..=0x1173E => Script::Ahom,
0x1173F => Script::Ahom,
0x14400..=0x14646 => Script::AnatolianHieroglyphs,
0x108E0..=0x108F2 => Script::Hatran,
0x108F4..=0x108F5 => Script::Hatran,
0x108FB..=0x108FF => Script::Hatran,
0x11280..=0x11286 => Script::Multani,
0x11288 => Script::Multani,
0x1128A..=0x1128D => Script::Multani,
0x1128F..=0x1129D => Script::Multani,
0x1129F..=0x112A8 => Script::Multani,
0x112A9 => Script::Multani,
0x10C80..=0x10CB2 => Script::OldHungarian,
0x10CC0..=0x10CF2 => Script::OldHungarian,
0x10CFA..=0x10CFF => Script::OldHungarian,
0x1D800..=0x1D9FF => Script::SignWriting,
0x1DA00..=0x1DA36 => Script::SignWriting,
0x1DA37..=0x1DA3A => Script::SignWriting,
0x1DA3B..=0x1DA6C => Script::SignWriting,
0x1DA6D..=0x1DA74 => Script::SignWriting,
0x1DA75 => Script::SignWriting,
0x1DA76..=0x1DA83 => Script::SignWriting,
0x1DA84 => Script::SignWriting,
0x1DA85..=0x1DA86 => Script::SignWriting,
0x1DA87..=0x1DA8B => Script::SignWriting,
0x1DA9B..=0x1DA9F => Script::SignWriting,
0x1DAA1..=0x1DAAF => Script::SignWriting,
0x1E900..=0x1E943 => Script::Adlam,
0x1E944..=0x1E94A => Script::Adlam,
0x1E950..=0x1E959 => Script::Adlam,
0x1E95E..=0x1E95F => Script::Adlam,
0x11C00..=0x11C08 => Script::Bhaiksuki,
0x11C0A..=0x11C2E => Script::Bhaiksuki,
0x11C2F => Script::Bhaiksuki,
0x11C30..=0x11C36 => Script::Bhaiksuki,
0x11C38..=0x11C3D => Script::Bhaiksuki,
0x11C3E => Script::Bhaiksuki,
0x11C3F => Script::Bhaiksuki,
0x11C40 => Script::Bhaiksuki,
0x11C41..=0x11C45 => Script::Bhaiksuki,
0x11C50..=0x11C59 => Script::Bhaiksuki,
0x11C5A..=0x11C6C => Script::Bhaiksuki,
0x11C70..=0x11C71 => Script::Marchen,
0x11C72..=0x11C8F => Script::Marchen,
0x11C92..=0x11CA7 => Script::Marchen,
0x11CA9 => Script::Marchen,
0x11CAA..=0x11CB0 => Script::Marchen,
0x11CB1 => Script::Marchen,
0x11CB2..=0x11CB3 => Script::Marchen,
0x11CB4 => Script::Marchen,
0x11CB5..=0x11CB6 => Script::Marchen,
0x11400..=0x11434 => Script::Newa,
0x11435..=0x11437 => Script::Newa,
0x11438..=0x1143F => Script::Newa,
0x11440..=0x11441 => Script::Newa,
0x11442..=0x11444 => Script::Newa,
0x11445 => Script::Newa,
0x11446 => Script::Newa,
0x11447..=0x1144A => Script::Newa,
0x1144B..=0x1144F => Script::Newa,
0x11450..=0x11459 => Script::Newa,
0x1145B => Script::Newa,
0x1145D => Script::Newa,
0x104B0..=0x104D3 => Script::Osage,
0x104D8..=0x104FB => Script::Osage,
0x16FE0 => Script::Tangut,
0x17000..=0x187EC => Script::Tangut,
0x18800..=0x18AF2 => Script::Tangut,
_ => Script::Any,
}
}
#[cfg(test)]
mod tests {
use super::*;
#[test]
fn test_unicode_script() {
assert_eq!(Script::Han, get_script('京'));
assert_eq!(Script::Han, get_script('太'));
assert_eq!(Script::Hiragana, get_script('い'));
assert_eq!(Script::Katakana, get_script('グ'));
assert_eq!(Script::Common, get_script('ー'));
assert_eq!(Script::Latin, get_script('a'));
assert_eq!(Script::Latin, get_script('A'));
assert_eq!(Script::Common, get_script('0'));
assert_eq!(Script::Common, get_script('$'));
assert_eq!(Script::Common, get_script('@'));
assert_eq!(Script::Common, get_script('-'));
assert_eq!(Script::Common, get_script(' '));
assert_eq!(Script::Common, get_script('�'));
}
}
| 0 |
hf_public_repos/tokenizers | hf_public_repos/tokenizers/docs/README.md | ## Requirements
To generate the documentation, you need a Python environment with the following packages installed:
```bash
pip install sphinx sphinx_rtd_theme setuptools_rust
```
The `tokenizers` library must also be installed in this same environment, so that Sphinx can
generate the API reference and all links properly. If you want to visualize the documentation with
modifications made to the Python bindings, make sure you build them from source.
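For instance, building the Python bindings from source could look like the following. This is only
a minimal sketch: the `bindings/python` path comes from this repository's layout, and the exact
build steps may differ depending on the bindings' build setup.

```bash
# From the root of the repository, build and install the Python bindings in editable mode
cd bindings/python
pip install -e .
```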
## Building the documentation
Once everything is set up, you can build the documentation automatically for all the languages
using the following command in the `/docs` folder:
```bash
make html_all
```
If you want to build only for a specific language, you can use:
```bash
make html O="-t python"
```
(Replace `python` with the target language: `rust`, `node`, or `python`)
**NOTE**
If you are making any structural change to the documentation, it is recommended to clean the build
directory before rebuilding:
```bash
make clean && make html_all
```
| 0 |
hf_public_repos/tokenizers | hf_public_repos/tokenizers/docs/Makefile | # Minimal makefile for Sphinx documentation
#
# You can set these variables from the command line, and also
# from the environment for those with `?=`
SPHINXOPTS ?=
SPHINXBUILD ?= sphinx-build
BUILDDIR ?= build
SOURCEDIR = source
# Put it first so that "make" without argument is like "make html_all".
html_all:
@echo "Generating doc for Rust"
@$(SPHINXBUILD) -M html "$(SOURCEDIR)" "$(BUILDDIR)/rust" $(SPHINXOPTS) $(O) -t rust
@echo "Generating doc for Python"
@$(SPHINXBUILD) -M html "$(SOURCEDIR)" "$(BUILDDIR)/python" $(SPHINXOPTS) $(O) -t python
@echo "Generating doc for Node.js"
@$(SPHINXBUILD) -M html "$(SOURCEDIR)" "$(BUILDDIR)/node" $(SPHINXOPTS) $(O) -t node
.PHONY: html_all Makefile
# Catch-all target: route all unknown targets to Sphinx using the new
# "make mode" option. $(O) is meant as a shortcut for $(SPHINXOPTS).
%: Makefile
@$(SPHINXBUILD) -M $@ "$(SOURCEDIR)" "$(BUILDDIR)" $(SPHINXOPTS) $(O)
| 0 |
hf_public_repos/tokenizers/docs | hf_public_repos/tokenizers/docs/source/entities.inc | .. entities:: python
:global:
class
class
classmethod
class method
Tokenizer
:class:`~tokenizers.Tokenizer`
Tokenizer.train
:meth:`~tokenizers.Tokenizer.train`
Tokenizer.save
:meth:`~tokenizers.Tokenizer.save`
Tokenizer.from_file
:meth:`~tokenizers.Tokenizer.from_file`
Tokenizer.encode
:meth:`~tokenizers.Tokenizer.encode`
Tokenizer.encode_batch
:meth:`~tokenizers.Tokenizer.encode_batch`
Tokenizer.decode
:meth:`~tokenizers.Tokenizer.decode`
Tokenizer.decode_batch
:meth:`~tokenizers.Tokenizer.decode_batch`
Tokenizer.token_to_id
:meth:`~tokenizers.Tokenizer.token_to_id`
Tokenizer.enable_padding
:meth:`~tokenizers.Tokenizer.enable_padding`
Encoding
:class:`~tokenizers.Encoding`
TemplateProcessing
:class:`~tokenizers.processors.TemplateProcessing`
Normalizer
:class:`~tokenizers.normalizers.Normalizer`
normalizers.Sequence
:class:`~tokenizers.normalizers.Sequence`
pre_tokenizers.Whitespace
:class:`~tokenizers.pre_tokenizers.Whitespace`
PreTokenizer
:class:`~tokenizers.pre_tokenizers.PreTokenizer`
models.BPE
:class:`~tokenizers.models.BPE`
models.Unigram
:class:`~tokenizers.models.Unigram`
models.WordLevel
:class:`~tokenizers.models.WordLevel`
models.WordPiece
:class:`~tokenizers.models.WordPiece`
Decoder
:class:`~tokenizers.decoders.Decoder`
.. entities:: rust
:global:
class
struct
classmethod
static method
Tokenizer
:rust_struct:`~tokenizers::tokenizer::Tokenizer`
Tokenizer.train
:rust_meth:`~tokenizers::tokenizer::Tokenizer::train`
Tokenizer.save
:rust_meth:`~tokenizers::tokenizer::Tokenizer::save`
Tokenizer.from_file
:rust_meth:`~tokenizers::tokenizer::Tokenizer::from_file`
Tokenizer.encode
:rust_meth:`~tokenizers::tokenizer::Tokenizer::encode`
Tokenizer.encode_batch
:rust_meth:`~tokenizers::tokenizer::Tokenizer::encode_batch`
Tokenizer.decode
:rust_meth:`~tokenizers::tokenizer::Tokenizer::decode`
Tokenizer.decode_batch
:rust_meth:`~tokenizers::tokenizer::Tokenizer::decode_batch`
Tokenizer.token_to_id
:rust_meth:`~tokenizers::tokenizer::Tokenizer::token_to_id`
Tokenizer.enable_padding
:rust_meth:`~tokenizers::tokenizer::Tokenizer::enable_padding`
Encoding
:rust_struct:`~tokenizers::tokenizer::Encoding`
TemplateProcessing
:rust_struct:`~tokenizers::processors::template::TemplateProcessing`
Normalizer
:rust_trait:`~tokenizers::tokenizer::Normalizer`
normalizers.Sequence
:rust_struct:`~tokenizers::normalizers::utils::Sequence`
pre_tokenizers.Whitespace
:rust_struct:`~tokenizers::normalizers::whitespace::Whitespace`
PreTokenizer
:rust_trait:`~tokenizers::tokenizer::PreTokenizer`
models.BPE
:rust_struct:`~tokenizers::models::bpe::BPE`
models.Unigram
:rust_struct:`~tokenizers::models::unigram::Unigram`
models.WordLevel
:rust_struct:`~tokenizers::models::wordlevel::WordLevel`
models.WordPiece
:rust_struct:`~tokenizers::models::wordpiece::WordPiece`
Decoder
:rust_trait:`~tokenizers::tokenizer::Decoder`
.. entities:: node
:global:
class
class
classmethod
static method
Tokenizer
:obj:`Tokenizer`
Tokenizer.train
:obj:`Tokenizer.train()`
Tokenizer.save
:obj:`Tokenizer.save()`
Tokenizer.from_file
:obj:`Tokenizer.fromFile()`
Tokenizer.encode
:obj:`Tokenizer.encode()`
Tokenizer.encode_batch
:obj:`Tokenizer.encodeBatch()`
Tokenizer.decode
:obj:`Tokenizer.decode()`
Tokenizer.decode_batch
:obj:`Tokenizer.decodeBatch()`
Tokenizer.token_to_id
:obj:`Tokenizer.tokenToId()`
Tokenizer.enable_padding
:obj:`Tokenizer.setPadding()`
Encoding
:obj:`Encoding`
TemplateProcessing
:obj:`TemplateProcessing`
Normalizer
:obj:`Normalizer`
normalizers.Sequence
:obj:`Sequence`
pre_tokenizers.Whitespace
:obj:`Whitespace`
PreTokenizer
:obj:`PreTokenizer`
models.BPE
:obj:`BPE`
models.Unigram
:obj:`Unigram`
models.WordLevel
:obj:`WordLevel`
models.WordPiece
:obj:`WordPiece`
Decoder
:obj:`Decoder`
| 0 |
hf_public_repos/tokenizers/docs | hf_public_repos/tokenizers/docs/source/quicktour.rst | Quicktour
====================================================================================================
Let's have a quick look at the 🤗 Tokenizers library features. The library provides an
implementation of today's most used tokenizers that is both easy to use and blazing fast.
.. only:: python
It can be used to instantiate a :ref:`pretrained tokenizer <pretrained>` but we will start our
quicktour by building one from scratch and see how we can train it.
Build a tokenizer from scratch
----------------------------------------------------------------------------------------------------
To illustrate how fast the 🤗 Tokenizers library is, let's train a new tokenizer on `wikitext-103
<https://blog.einstein.ai/the-wikitext-long-term-dependency-language-modeling-dataset/>`__ (516M of
text) in just a few seconds. First things first, you will need to download this dataset and unzip it
with:
.. code-block:: bash
wget https://s3.amazonaws.com/research.metamind.io/wikitext/wikitext-103-raw-v1.zip
unzip wikitext-103-raw-v1.zip
Training the tokenizer
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
.. entities:: python
BpeTrainer
:class:`~tokenizers.trainers.BpeTrainer`
vocab_size
:obj:`vocab_size`
min_frequency
:obj:`min_frequency`
special_tokens
:obj:`special_tokens`
unk_token
:obj:`unk_token`
pad_token
:obj:`pad_token`
.. entities:: rust
BpeTrainer
:rust_struct:`~tokenizers::models::bpe::BpeTrainer`
vocab_size
:obj:`vocab_size`
min_frequency
:obj:`min_frequency`
special_tokens
:obj:`special_tokens`
unk_token
:obj:`unk_token`
pad_token
:obj:`pad_token`
.. entities:: node
BpeTrainer
BpeTrainer
vocab_size
:obj:`vocabSize`
min_frequency
:obj:`minFrequency`
special_tokens
:obj:`specialTokens`
unk_token
:obj:`unkToken`
pad_token
:obj:`padToken`
In this tour, we will build and train a Byte-Pair Encoding (BPE) tokenizer. For more information
about the different types of tokenizers, check out this `guide
<https://huggingface.co/docs/transformers/main/en/tokenizer_summary#summary-of-the-tokenizers>`__ in the 🤗 Transformers
documentation. Here, training the tokenizer means it will learn merge rules by the following procedure (see the toy sketch after this list):
- Start with all the characters present in the training corpus as tokens.
- Identify the most common pair of tokens and merge it into one token.
- Repeat until the vocabulary (i.e., the number of tokens) has reached the size we want.
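To make this merge loop concrete, here is a tiny toy illustration written in Python. It is only a
sketch of the idea above, not the library's actual (Rust) implementation, and the mini corpus and
number of merge steps are made up for the example:

.. code-block:: python

    from collections import Counter

    # Each "word" starts as a list of single characters (the initial alphabet)
    corpus = [list("hug"), list("pug"), list("hugs")]

    def most_frequent_pair(words):
        """Count adjacent token pairs and return the most frequent one."""
        pairs = Counter()
        for word in words:
            for a, b in zip(word, word[1:]):
                pairs[(a, b)] += 1
        return pairs.most_common(1)[0][0]

    def merge(words, pair):
        """Merge every occurrence of `pair` into a single new token."""
        merged = []
        for word in words:
            new_word, i = [], 0
            while i < len(word):
                if i + 1 < len(word) and (word[i], word[i + 1]) == pair:
                    new_word.append(word[i] + word[i + 1])
                    i += 2
                else:
                    new_word.append(word[i])
                    i += 1
            merged.append(new_word)
        return merged

    for _ in range(2):  # two merge steps are enough for this toy corpus
        corpus = merge(corpus, most_frequent_pair(corpus))
    print(corpus)  # [['hug'], ['p', 'ug'], ['hug', 's']]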
The main API of the library is the :entity:`class` :entity:`Tokenizer`, here is how we instantiate
one with a BPE model:
.. only:: python
.. literalinclude:: ../../bindings/python/tests/documentation/test_quicktour.py
:language: python
:start-after: START init_tokenizer
:end-before: END init_tokenizer
:dedent: 8
.. only:: rust
.. literalinclude:: ../../tokenizers/tests/documentation.rs
:language: rust
:start-after: START quicktour_init_tokenizer
:end-before: END quicktour_init_tokenizer
:dedent: 4
.. only:: node
.. literalinclude:: ../../bindings/node/examples/documentation/quicktour.test.ts
:language: javascript
:start-after: START init_tokenizer
:end-before: END init_tokenizer
:dedent: 4
To train our tokenizer on the wikitext files, we will need to instantiate a `trainer`, in this case
a :entity:`BpeTrainer`
.. only:: python
.. literalinclude:: ../../bindings/python/tests/documentation/test_quicktour.py
:language: python
:start-after: START init_trainer
:end-before: END init_trainer
:dedent: 8
.. only:: rust
.. literalinclude:: ../../tokenizers/tests/documentation.rs
:language: rust
:start-after: START quicktour_init_trainer
:end-before: END quicktour_init_trainer
:dedent: 4
.. only:: node
.. literalinclude:: ../../bindings/node/examples/documentation/quicktour.test.ts
:language: javascript
:start-after: START init_trainer
:end-before: END init_trainer
:dedent: 4
We can set the training arguments like :entity:`vocab_size` or :entity:`min_frequency` (here left at
their default values of 30,000 and 0) but the most important part is to give the
:entity:`special_tokens` we plan to use later on (they are not used at all during training) so that
they get inserted in the vocabulary.
.. note::
The order in which you write the special tokens list matters: here :obj:`"[UNK]"` will get the
ID 0, :obj:`"[CLS]"` will get the ID 1 and so forth.
We could train our tokenizer right now, but it wouldn't be optimal. Without a pre-tokenizer that
will split our inputs into words, we might get tokens that overlap several words: for instance we
could get an :obj:`"it is"` token since those two words often appear next to each other. Using a
pre-tokenizer will ensure no token is bigger than a word returned by the pre-tokenizer. Here we want
to train a subword BPE tokenizer, and we will use the easiest pre-tokenizer possible by splitting
on whitespace.
.. only:: python
.. literalinclude:: ../../bindings/python/tests/documentation/test_quicktour.py
:language: python
:start-after: START init_pretok
:end-before: END init_pretok
:dedent: 8
.. only:: rust
.. literalinclude:: ../../tokenizers/tests/documentation.rs
:language: rust
:start-after: START quicktour_init_pretok
:end-before: END quicktour_init_pretok
:dedent: 4
.. only:: node
.. literalinclude:: ../../bindings/node/examples/documentation/quicktour.test.ts
:language: javascript
:start-after: START init_pretok
:end-before: END init_pretok
:dedent: 4
Now, we can just call the :entity:`Tokenizer.train` method with any list of files we want
to use:
.. only:: python
.. literalinclude:: ../../bindings/python/tests/documentation/test_quicktour.py
:language: python
:start-after: START train
:end-before: END train
:dedent: 8
.. only:: rust
.. literalinclude:: ../../tokenizers/tests/documentation.rs
:language: rust
:start-after: START quicktour_train
:end-before: END quicktour_train
:dedent: 4
.. only:: node
.. literalinclude:: ../../bindings/node/examples/documentation/quicktour.test.ts
:language: javascript
:start-after: START train
:end-before: END train
:dedent: 4
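If you cannot open the included snippets, the Python version of the steps above looks roughly like
the following sketch (the exact code pulled in at build time may differ slightly, and the file
paths assume the wikitext archive was unzipped in the current directory):

.. code-block:: python

    from tokenizers import Tokenizer
    from tokenizers.models import BPE
    from tokenizers.pre_tokenizers import Whitespace
    from tokenizers.trainers import BpeTrainer

    # A Tokenizer wrapping a BPE model, with an unknown token
    tokenizer = Tokenizer(BPE(unk_token="[UNK]"))

    # The trainer receives the special tokens we want in the vocabulary
    trainer = BpeTrainer(special_tokens=["[UNK]", "[CLS]", "[SEP]", "[PAD]", "[MASK]"])

    # Pre-tokenize on whitespace so that no token spans more than one word
    tokenizer.pre_tokenizer = Whitespace()

    # Train on the wikitext files downloaded earlier (adjust the paths if needed)
    files = [f"wikitext-103-raw/wiki.{split}.raw" for split in ["test", "train", "valid"]]
    tokenizer.train(files, trainer)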
This should only take a few seconds to train our tokenizer on the full wikitext dataset!
To save the tokenizer in one file that contains all its configuration and vocabulary, just use the
:entity:`Tokenizer.save` method:
.. only:: python
.. literalinclude:: ../../bindings/python/tests/documentation/test_quicktour.py
:language: python
:start-after: START save
:end-before: END save
:dedent: 8
.. only:: rust
.. literalinclude:: ../../tokenizers/tests/documentation.rs
:language: rust
:start-after: START quicktour_save
:end-before: END quicktour_save
:dedent: 4
.. only:: node
.. literalinclude:: ../../bindings/node/examples/documentation/quicktour.test.ts
:language: javascript
:start-after: START save
:end-before: END save
:dedent: 4
and you can reload your tokenizer from that file with the :entity:`Tokenizer.from_file`
:entity:`classmethod`:
.. only:: python
.. literalinclude:: ../../bindings/python/tests/documentation/test_quicktour.py
:language: python
:start-after: START reload_tokenizer
:end-before: END reload_tokenizer
:dedent: 12
.. only:: rust
.. literalinclude:: ../../tokenizers/tests/documentation.rs
:language: rust
:start-after: START quicktour_reload_tokenizer
:end-before: END quicktour_reload_tokenizer
:dedent: 4
.. only:: node
.. literalinclude:: ../../bindings/node/examples/documentation/quicktour.test.ts
:language: javascript
:start-after: START reload_tokenizer
:end-before: END reload_tokenizer
:dedent: 4
Using the tokenizer
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
Now that we have trained a tokenizer, we can use it on any text we want with the
:entity:`Tokenizer.encode` method:
.. only:: python
.. literalinclude:: ../../bindings/python/tests/documentation/test_quicktour.py
:language: python
:start-after: START encode
:end-before: END encode
:dedent: 8
.. only:: rust
.. literalinclude:: ../../tokenizers/tests/documentation.rs
:language: rust
:start-after: START quicktour_encode
:end-before: END quicktour_encode
:dedent: 4
.. only:: node
.. literalinclude:: ../../bindings/node/examples/documentation/quicktour.test.ts
:language: javascript
:start-after: START encode
:end-before: END encode
:dedent: 4
This applies the full pipeline of the tokenizer to the text, returning an
:entity:`Encoding` object. To learn more about this pipeline, and how to apply (or
customize) parts of it, check out :doc:`this page <pipeline>`.
This :entity:`Encoding` object then has all the attributes you need for your deep
learning model (or other). The :obj:`tokens` attribute contains the segmentation of your text into
tokens:
.. only:: python
.. literalinclude:: ../../bindings/python/tests/documentation/test_quicktour.py
:language: python
:start-after: START print_tokens
:end-before: END print_tokens
:dedent: 8
.. only:: rust
.. literalinclude:: ../../tokenizers/tests/documentation.rs
:language: rust
:start-after: START quicktour_print_tokens
:end-before: END quicktour_print_tokens
:dedent: 4
.. only:: node
.. literalinclude:: ../../bindings/node/examples/documentation/quicktour.test.ts
:language: javascript
:start-after: START print_tokens
:end-before: END print_tokens
:dedent: 4
Similarly, the :obj:`ids` attribute will contain the index of each of those tokens in the
tokenizer's vocabulary:
.. only:: python
.. literalinclude:: ../../bindings/python/tests/documentation/test_quicktour.py
:language: python
:start-after: START print_ids
:end-before: END print_ids
:dedent: 8
.. only:: rust
.. literalinclude:: ../../tokenizers/tests/documentation.rs
:language: rust
:start-after: START quicktour_print_ids
:end-before: END quicktour_print_ids
:dedent: 4
.. only:: node
.. literalinclude:: ../../bindings/node/examples/documentation/quicktour.test.ts
:language: javascript
:start-after: START print_ids
:end-before: END print_ids
:dedent: 4
An important feature of the 🤗 Tokenizers library is that it comes with full alignment tracking,
meaning you can always get the part of your original sentence that corresponds to a given token.
Those are stored in the :obj:`offsets` attribute of our :entity:`Encoding` object. For
instance, let's say we want to find out what caused the :obj:`"[UNK]"` token to appear,
which is the token at index 9 in the list; we can just ask for the offset at that index:
.. only:: python
.. literalinclude:: ../../bindings/python/tests/documentation/test_quicktour.py
:language: python
:start-after: START print_offsets
:end-before: END print_offsets
:dedent: 8
.. only:: rust
.. literalinclude:: ../../tokenizers/tests/documentation.rs
:language: rust
:start-after: START quicktour_print_offsets
:end-before: END quicktour_print_offsets
:dedent: 4
.. only:: node
.. literalinclude:: ../../bindings/node/examples/documentation/quicktour.test.ts
:language: javascript
:start-after: START print_offsets
:end-before: END print_offsets
:dedent: 4
and those are the indices that correspond to the emoji in the original sentence:
.. only:: python
.. literalinclude:: ../../bindings/python/tests/documentation/test_quicktour.py
:language: python
:start-after: START use_offsets
:end-before: END use_offsets
:dedent: 8
.. only:: rust
.. literalinclude:: ../../tokenizers/tests/documentation.rs
:language: rust
:start-after: START quicktour_use_offsets
:end-before: END quicktour_use_offsets
:dedent: 4
.. only:: node
.. literalinclude:: ../../bindings/node/examples/documentation/quicktour.test.ts
:language: javascript
:start-after: START use_offsets
:end-before: END use_offsets
:dedent: 4
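Concretely, in Python this looks roughly like the following sketch (assuming, for the sake of the
example, that the sentence encoded above was ``"Hello, y'all! How are you 😁 ?"``; the exact
offsets depend on your own input):

.. code-block:: python

    sentence = "Hello, y'all! How are you 😁 ?"
    output = tokenizer.encode(sentence)

    # "[UNK]" is the token at index 9; its offsets give the span of text it covers
    start, end = output.offsets[9]
    print(sentence[start:end])  # the original characters (here, the emoji) behind "[UNK]"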
Post-processing
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
We might want our tokenizer to automatically add special tokens, like :obj:`"[CLS]"` or
:obj:`"[SEP]"`. To do this, we use a post-processor. :entity:`TemplateProcessing` is the
most commonly used, you just have to specify a template for the processing of single sentences and
pairs of sentences, along with the special tokens and their IDs.
When we built our tokenizer, we set :obj:`"[CLS]"` and :obj:`"[SEP]"` in positions 1 and 2 of our
list of special tokens, so this should be their IDs. To double-check, we can use the
:entity:`Tokenizer.token_to_id` method:
.. only:: python
.. literalinclude:: ../../bindings/python/tests/documentation/test_quicktour.py
:language: python
:start-after: START check_sep
:end-before: END check_sep
:dedent: 8
.. only:: rust
.. literalinclude:: ../../tokenizers/tests/documentation.rs
:language: rust
:start-after: START quicktour_check_sep
:end-before: END quicktour_check_sep
:dedent: 4
.. only:: node
.. literalinclude:: ../../bindings/node/examples/documentation/quicktour.test.ts
:language: javascript
:start-after: START check_sep
:end-before: END check_sep
:dedent: 4
Here is how we can set the post-processing to give us the traditional BERT inputs:
.. only:: python
.. literalinclude:: ../../bindings/python/tests/documentation/test_quicktour.py
:language: python
:start-after: START init_template_processing
:end-before: END init_template_processing
:dedent: 8
.. only:: rust
.. literalinclude:: ../../tokenizers/tests/documentation.rs
:language: rust
:start-after: START quicktour_init_template_processing
:end-before: END quicktour_init_template_processing
:dedent: 4
.. only:: node
.. literalinclude:: ../../bindings/node/examples/documentation/quicktour.test.ts
:language: javascript
:start-after: START init_template_processing
:end-before: END init_template_processing
:dedent: 4
Let's go over this snippet of code in more details. First we specify the template for single
sentences: those should have the form :obj:`"[CLS] $A [SEP]"` where :obj:`$A` represents our
sentence.
Then, we specify the template for sentence pairs, which should have the form
:obj:`"[CLS] $A [SEP] $B [SEP]"` where :obj:`$A` represents the first sentence and :obj:`$B` the
second one. The :obj:`:1` added in the template represent the `type IDs` we want for each part of
our input: it defaults to 0 for everything (which is why we don't have :obj:`$A:0`) and here we set
it to 1 for the tokens of the second sentence and the last :obj:`"[SEP]"` token.
Lastly, we specify the special tokens we used and their IDs in our tokenizer's vocabulary.
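Put together, a rough Python sketch of this post-processor (not necessarily the exact included
snippet) is:

.. code-block:: python

    from tokenizers.processors import TemplateProcessing

    tokenizer.post_processor = TemplateProcessing(
        single="[CLS] $A [SEP]",
        pair="[CLS] $A [SEP] $B:1 [SEP]:1",
        special_tokens=[
            ("[CLS]", tokenizer.token_to_id("[CLS]")),
            ("[SEP]", tokenizer.token_to_id("[SEP]")),
        ],
    )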
To check that this worked properly, let's try to encode the same sentence as before:
.. only:: python
.. literalinclude:: ../../bindings/python/tests/documentation/test_quicktour.py
:language: python
:start-after: START print_special_tokens
:end-before: END print_special_tokens
:dedent: 8
.. only:: rust
.. literalinclude:: ../../tokenizers/tests/documentation.rs
:language: rust
:start-after: START quicktour_print_special_tokens
:end-before: END quicktour_print_special_tokens
:dedent: 4
.. only:: node
.. literalinclude:: ../../bindings/node/examples/documentation/quicktour.test.ts
:language: javascript
:start-after: START print_special_tokens
:end-before: END print_special_tokens
:dedent: 4
To check the results on a pair of sentences, we just pass the two sentences to
:entity:`Tokenizer.encode`:
.. only:: python
.. literalinclude:: ../../bindings/python/tests/documentation/test_quicktour.py
:language: python
:start-after: START print_special_tokens_pair
:end-before: END print_special_tokens_pair
:dedent: 8
.. only:: rust
.. literalinclude:: ../../tokenizers/tests/documentation.rs
:language: rust
:start-after: START quicktour_print_special_tokens_pair
:end-before: END quicktour_print_special_tokens_pair
:dedent: 4
.. only:: node
.. literalinclude:: ../../bindings/node/examples/documentation/quicktour.test.ts
:language: javascript
:start-after: START print_special_tokens_pair
:end-before: END print_special_tokens_pair
:dedent: 4
You can then check that the type IDs attributed to each token are correct with
.. only:: python
.. literalinclude:: ../../bindings/python/tests/documentation/test_quicktour.py
:language: python
:start-after: START print_type_ids
:end-before: END print_type_ids
:dedent: 8
.. only:: rust
.. literalinclude:: ../../tokenizers/tests/documentation.rs
:language: rust
:start-after: START quicktour_print_type_ids
:end-before: END quicktour_print_type_ids
:dedent: 4
.. only:: node
.. literalinclude:: ../../bindings/node/examples/documentation/quicktour.test.ts
:language: javascript
:start-after: START print_type_ids
:end-before: END print_type_ids
:dedent: 4
If you save your tokenizer with :entity:`Tokenizer.save`, the post-processor will be saved along with it.
Encoding multiple sentences in a batch
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
To get the full speed of the 🤗 Tokenizers library, it's best to process your texts in batches by
using the :entity:`Tokenizer.encode_batch` method:
.. only:: python
.. literalinclude:: ../../bindings/python/tests/documentation/test_quicktour.py
:language: python
:start-after: START encode_batch
:end-before: END encode_batch
:dedent: 8
.. only:: rust
.. literalinclude:: ../../tokenizers/tests/documentation.rs
:language: rust
:start-after: START quicktour_encode_batch
:end-before: END quicktour_encode_batch
:dedent: 4
.. only:: node
.. literalinclude:: ../../bindings/node/examples/documentation/quicktour.test.ts
:language: javascript
:start-after: START encode_batch
:end-before: END encode_batch
:dedent: 4
The output is then a list of :entity:`Encoding` objects like the ones we saw before. You
can process together as many texts as you like, as long as it fits in memory.
To process a batch of sentence pairs, pass two lists to the
:entity:`Tokenizer.encode_batch` method: the list of sentences A and the list of sentences
B:
.. only:: python
.. literalinclude:: ../../bindings/python/tests/documentation/test_quicktour.py
:language: python
:start-after: START encode_batch_pair
:end-before: END encode_batch_pair
:dedent: 8
.. only:: rust
.. literalinclude:: ../../tokenizers/tests/documentation.rs
:language: rust
:start-after: START quicktour_encode_batch_pair
:end-before: END quicktour_encode_batch_pair
:dedent: 4
.. only:: node
.. literalinclude:: ../../bindings/node/examples/documentation/quicktour.test.ts
:language: javascript
:start-after: START encode_batch_pair
:end-before: END encode_batch_pair
:dedent: 4
When encoding multiple sentences, you can automatically pad the outputs to the longest sentence
present by using :entity:`Tokenizer.enable_padding`, with the :entity:`pad_token` and its ID
(which we can double-check with :entity:`Tokenizer.token_to_id` like before):
.. only:: python
.. literalinclude:: ../../bindings/python/tests/documentation/test_quicktour.py
:language: python
:start-after: START enable_padding
:end-before: END enable_padding
:dedent: 8
.. only:: rust
.. literalinclude:: ../../tokenizers/tests/documentation.rs
:language: rust
:start-after: START quicktour_enable_padding
:end-before: END quicktour_enable_padding
:dedent: 4
.. only:: node
.. literalinclude:: ../../bindings/node/examples/documentation/quicktour.test.ts
:language: javascript
:start-after: START enable_padding
:end-before: END enable_padding
:dedent: 4
We can set the :obj:`direction` of the padding (defaults to the right) or a given :obj:`length` if
we want to pad every sample to that specific number (here we leave it unset to pad to the size of
the longest text).
.. only:: python
.. literalinclude:: ../../bindings/python/tests/documentation/test_quicktour.py
:language: python
:start-after: START print_batch_tokens
:end-before: END print_batch_tokens
:dedent: 8
.. only:: rust
.. literalinclude:: ../../tokenizers/tests/documentation.rs
:language: rust
:start-after: START quicktour_print_batch_tokens
:end-before: END quicktour_print_batch_tokens
:dedent: 4
.. only:: node
.. literalinclude:: ../../bindings/node/examples/documentation/quicktour.test.ts
:language: javascript
:start-after: START print_batch_tokens
:end-before: END print_batch_tokens
:dedent: 4
In this case, the `attention mask` generated by the tokenizer takes the padding into account:
.. only:: python
.. literalinclude:: ../../bindings/python/tests/documentation/test_quicktour.py
:language: python
:start-after: START print_attention_mask
:end-before: END print_attention_mask
:dedent: 8
.. only:: rust
.. literalinclude:: ../../tokenizers/tests/documentation.rs
:language: rust
:start-after: START quicktour_print_attention_mask
:end-before: END quicktour_print_attention_mask
:dedent: 4
.. only:: node
.. literalinclude:: ../../bindings/node/examples/documentation/quicktour.test.ts
:language: javascript
:start-after: START print_attention_mask
:end-before: END print_attention_mask
:dedent: 4
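A rough Python sketch of batched encoding with padding (the two sentences are just examples):

.. code-block:: python

    # Pad with the "[PAD]" token and its ID, double-checked with token_to_id
    tokenizer.enable_padding(pad_id=tokenizer.token_to_id("[PAD]"), pad_token="[PAD]")

    output = tokenizer.encode_batch(["Hello, y'all!", "How are you 😁 ?"])
    print(output[1].tokens)          # the shorter sentence is padded to the longest one
    print(output[1].attention_mask)  # padding positions get a 0 in the attention mask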
.. _pretrained:
.. only:: python
Using a pretrained tokenizer
------------------------------------------------------------------------------------------------
You can load any tokenizer from the Hugging Face Hub as long as a `tokenizer.json` file is
available in the repository.
.. code-block:: python
from tokenizers import Tokenizer
tokenizer = Tokenizer.from_pretrained("bert-base-uncased")
Importing a pretrained tokenizer from legacy vocabulary files
------------------------------------------------------------------------------------------------
You can also import a pretrained tokenizer directly, as long as you have its vocabulary file.
For instance, here is how to import the classic pretrained BERT tokenizer:
.. code-block:: python
from tokenizers import BertWordPieceTokenizer
tokenizer = BertWordPieceTokenizer("bert-base-uncased-vocab.txt", lowercase=True)
as long as you have downloaded the file `bert-base-uncased-vocab.txt` with
.. code-block:: bash
wget https://s3.amazonaws.com/models.huggingface.co/bert/bert-base-uncased-vocab.txt
| 0 |
hf_public_repos/tokenizers/docs | hf_public_repos/tokenizers/docs/source/pipeline.rst | The tokenization pipeline
====================================================================================================
When calling :entity:`Tokenizer.encode` or :entity:`Tokenizer.encode_batch`, the input text(s) go
through the following pipeline:
- :ref:`normalization`
- :ref:`pre-tokenization`
- :ref:`model`
- :ref:`post-processing`
We'll see in detail what happens during each of those steps, as well as what happens when you want to
:ref:`decode <decoding>` some token ids, and how the 🤗 Tokenizers library allows you to customize
each of those steps to your needs. If you're already familiar with those steps and want to learn by
seeing some code, jump to :ref:`our BERT from scratch example <example>`.
For the examples that require a :entity:`Tokenizer`, we will use the tokenizer we trained
in the :doc:`quicktour`, which you can load with:
.. only:: python
.. literalinclude:: ../../bindings/python/tests/documentation/test_pipeline.py
:language: python
:start-after: START reload_tokenizer
:end-before: END reload_tokenizer
:dedent: 8
.. only:: rust
.. literalinclude:: ../../tokenizers/tests/documentation.rs
:language: rust
:start-after: START pipeline_reload_tokenizer
:end-before: END pipeline_reload_tokenizer
:dedent: 4
.. only:: node
.. literalinclude:: ../../bindings/node/examples/documentation/pipeline.test.ts
:language: javascript
:start-after: START reload_tokenizer
:end-before: END reload_tokenizer
:dedent: 8
.. _normalization:
Normalization
----------------------------------------------------------------------------------------------------
Normalization is, in a nutshell, a set of operations you apply to a raw string to make it less
random or "cleaner". Common operations include stripping whitespace, removing accented characters
or lowercasing all text. If you're familiar with `Unicode normalization
<https://unicode.org/reports/tr15>`__, it is also a very common normalization operation applied
in most tokenizers.
Each normalization operation is represented in the 🤗 Tokenizers library by a
:entity:`Normalizer`, and you can combine several of those by using a
:entity:`normalizers.Sequence`. Here is a normalizer applying NFD Unicode normalization
and removing accents as an example:
.. only:: python
.. literalinclude:: ../../bindings/python/tests/documentation/test_pipeline.py
:language: python
:start-after: START setup_normalizer
:end-before: END setup_normalizer
:dedent: 8
.. only:: rust
.. literalinclude:: ../../tokenizers/tests/documentation.rs
:language: rust
:start-after: START pipeline_setup_normalizer
:end-before: END pipeline_setup_normalizer
:dedent: 4
.. only:: node
.. literalinclude:: ../../bindings/node/examples/documentation/pipeline.test.ts
:language: javascript
:start-after: START setup_normalizer
:end-before: END setup_normalizer
:dedent: 8
You can manually test that normalizer by applying it to any string:
.. only:: python
.. literalinclude:: ../../bindings/python/tests/documentation/test_pipeline.py
:language: python
:start-after: START test_normalizer
:end-before: END test_normalizer
:dedent: 8
.. only:: rust
.. literalinclude:: ../../tokenizers/tests/documentation.rs
:language: rust
:start-after: START pipeline_test_normalizer
:end-before: END pipeline_test_normalizer
:dedent: 4
.. only:: node
.. literalinclude:: ../../bindings/node/examples/documentation/pipeline.test.ts
:language: javascript
:start-after: START test_normalizer
:end-before: END test_normalizer
:dedent: 8
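As a rough Python sketch of the two steps above (creating the normalizer and testing it on a
string):

.. code-block:: python

    from tokenizers import normalizers
    from tokenizers.normalizers import NFD, StripAccents

    normalizer = normalizers.Sequence([NFD(), StripAccents()])
    print(normalizer.normalize_str("Héllò hôw are ü?"))  # -> "Hello how are u?"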
When building a :entity:`Tokenizer`, you can customize its normalizer by just changing
the corresponding attribute:
.. only:: python
.. literalinclude:: ../../bindings/python/tests/documentation/test_pipeline.py
:language: python
:start-after: START replace_normalizer
:end-before: END replace_normalizer
:dedent: 8
.. only:: rust
.. literalinclude:: ../../tokenizers/tests/documentation.rs
:language: rust
:start-after: START pipeline_replace_normalizer
:end-before: END pipeline_replace_normalizer
:dedent: 4
.. only:: node
.. literalinclude:: ../../bindings/node/examples/documentation/pipeline.test.ts
:language: javascript
:start-after: START replace_normalizer
:end-before: END replace_normalizer
:dedent: 8
Of course, if you change the way a tokenizer applies normalization, you should probably retrain it
from scratch afterward.
.. _pre-tokenization:
Pre-Tokenization
----------------------------------------------------------------------------------------------------
Pre-tokenization is the act of splitting a text into smaller objects that give an upper bound to
what your tokens will be at the end of training. A good way to think of this is that the
pre-tokenizer will split your text into "words" and then, your final tokens will be parts of those
words.
An easy way to pre-tokenize inputs is to split on spaces and punctuation, which is done by the
:entity:`pre_tokenizers.Whitespace` pre-tokenizer:
.. only:: python
.. literalinclude:: ../../bindings/python/tests/documentation/test_pipeline.py
:language: python
:start-after: START setup_pre_tokenizer
:end-before: END setup_pre_tokenizer
:dedent: 8
.. only:: rust
.. literalinclude:: ../../tokenizers/tests/documentation.rs
:language: rust
:start-after: START pipeline_setup_pre_tokenizer
:end-before: END pipeline_setup_pre_tokenizer
:dedent: 4
.. only:: node
.. literalinclude:: ../../bindings/node/examples/documentation/pipeline.test.ts
:language: javascript
:start-after: START setup_pre_tokenizer
:end-before: END setup_pre_tokenizer
:dedent: 8
The output is a list of tuples, with each tuple containing one word and its span in the original
sentence (which is used to determine the final :obj:`offsets` of our :entity:`Encoding`).
Note that splitting on punctuation will split contractions like :obj:`"I'm"` in this example.
You can combine any :entity:`PreTokenizer` together. For
instance, here is a pre-tokenizer that will split on space, punctuation and digits, separating
numbers in their individual digits:
.. only:: python
.. literalinclude:: ../../bindings/python/tests/documentation/test_pipeline.py
:language: python
:start-after: START combine_pre_tokenizer
:end-before: END combine_pre_tokenizer
:dedent: 8
.. only:: rust
.. literalinclude:: ../../tokenizers/tests/documentation.rs
:language: rust
:start-after: START pipeline_combine_pre_tokenizer
:end-before: END pipeline_combine_pre_tokenizer
:dedent: 4
.. only:: node
.. literalinclude:: ../../bindings/node/examples/documentation/pipeline.test.ts
:language: javascript
:start-after: START combine_pre_tokenizer
:end-before: END combine_pre_tokenizer
:dedent: 8
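A rough Python sketch of both pre-tokenizers above (the printed offsets are indicative):

.. code-block:: python

    from tokenizers import pre_tokenizers
    from tokenizers.pre_tokenizers import Digits, Whitespace

    # Split on whitespace and punctuation; note how a contraction like "I'm" gets split
    print(Whitespace().pre_tokenize_str("Hello! I'm fine, thank you."))

    # Combine pre-tokenizers: also isolate every digit
    pre_tokenizer = pre_tokenizers.Sequence([Whitespace(), Digits(individual_digits=True)])
    print(pre_tokenizer.pre_tokenize_str("Call 911!"))
    # e.g. [('Call', (0, 4)), ('9', (5, 6)), ('1', (6, 7)), ('1', (7, 8)), ('!', (8, 9))]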
As we saw in the :doc:`quicktour`, you can customize the pre-tokenizer of a
:entity:`Tokenizer` by just changing the corresponding attribute:
.. only:: python
.. literalinclude:: ../../bindings/python/tests/documentation/test_pipeline.py
:language: python
:start-after: START replace_pre_tokenizer
:end-before: END replace_pre_tokenizer
:dedent: 8
.. only:: rust
.. literalinclude:: ../../tokenizers/tests/documentation.rs
:language: rust
:start-after: START pipeline_replace_pre_tokenizer
:end-before: END pipeline_replace_pre_tokenizer
:dedent: 4
.. only:: node
.. literalinclude:: ../../bindings/node/examples/documentation/pipeline.test.ts
:language: javascript
:start-after: START replace_pre_tokenizer
:end-before: END replace_pre_tokenizer
:dedent: 8
Of course, if you change the pre-tokenizer, you should probably retrain your tokenizer from
scratch afterward.
.. _model:
The Model
----------------------------------------------------------------------------------------------------
Once the input texts are normalized and pre-tokenized, the :entity:`Tokenizer` applies the model on
the pre-tokens. This is the part of the pipeline that needs training on your corpus (or that has
been trained if you are using a pretrained tokenizer).
The role of the model is to split your "words" into tokens, using the rules it has learned. It's
also responsible for mapping those tokens to their corresponding IDs in the vocabulary of the model.
This model is passed along when initializing the :entity:`Tokenizer` so you already know
how to customize this part. Currently, the 🤗 Tokenizers library supports:
- :entity:`models.BPE`
- :entity:`models.Unigram`
- :entity:`models.WordLevel`
- :entity:`models.WordPiece`
For more details about each model and its behavior, you can check `here <components#models>`__
.. _post-processing:
Post-Processing
----------------------------------------------------------------------------------------------------
Post-processing is the last step of the tokenization pipeline, to perform any additional
transformation to the :entity:`Encoding` before it's returned, like adding potential
special tokens.
As we saw in the quick tour, we can customize the post processor of a :entity:`Tokenizer`
by setting the corresponding attribute. For instance, here is how we can post-process to make the
inputs suitable for the BERT model:
.. only:: python
.. literalinclude:: ../../bindings/python/tests/documentation/test_pipeline.py
:language: python
:start-after: START setup_processor
:end-before: END setup_processor
:dedent: 8
.. only:: rust
.. literalinclude:: ../../tokenizers/tests/documentation.rs
:language: rust
:start-after: START pipeline_setup_processor
:end-before: END pipeline_setup_processor
:dedent: 4
.. only:: node
.. literalinclude:: ../../bindings/node/examples/documentation/pipeline.test.ts
:language: javascript
:start-after: START setup_processor
:end-before: END setup_processor
:dedent: 8
Note that contrary to the pre-tokenizer or the normalizer, you don't need to retrain a tokenizer
after changing its post-processor.
.. _example:
All together: a BERT tokenizer from scratch
----------------------------------------------------------------------------------------------------
Let's put all those pieces together to build a BERT tokenizer. First, BERT relies on WordPiece, so
we instantiate a new :entity:`Tokenizer` with this model:
.. only:: python
.. literalinclude:: ../../bindings/python/tests/documentation/test_pipeline.py
:language: python
:start-after: START bert_setup_tokenizer
:end-before: END bert_setup_tokenizer
:dedent: 8
.. only:: rust
.. literalinclude:: ../../tokenizers/tests/documentation.rs
:language: rust
:start-after: START bert_setup_tokenizer
:end-before: END bert_setup_tokenizer
:dedent: 4
.. only:: node
.. literalinclude:: ../../bindings/node/examples/documentation/pipeline.test.ts
:language: javascript
:start-after: START bert_setup_tokenizer
:end-before: END bert_setup_tokenizer
:dedent: 8
Then we know that BERT preprocesses texts by removing accents and lowercasing. We also use a unicode
normalizer:
.. only:: python
.. literalinclude:: ../../bindings/python/tests/documentation/test_pipeline.py
:language: python
:start-after: START bert_setup_normalizer
:end-before: END bert_setup_normalizer
:dedent: 8
.. only:: rust
.. literalinclude:: ../../tokenizers/tests/documentation.rs
:language: rust
:start-after: START bert_setup_normalizer
:end-before: END bert_setup_normalizer
:dedent: 4
.. only:: node
.. literalinclude:: ../../bindings/node/examples/documentation/pipeline.test.ts
:language: javascript
:start-after: START bert_setup_normalizer
:end-before: END bert_setup_normalizer
:dedent: 8
The pre-tokenizer is just splitting on whitespace and punctuation:
.. only:: python
.. literalinclude:: ../../bindings/python/tests/documentation/test_pipeline.py
:language: python
:start-after: START bert_setup_pre_tokenizer
:end-before: END bert_setup_pre_tokenizer
:dedent: 8
.. only:: rust
.. literalinclude:: ../../tokenizers/tests/documentation.rs
:language: rust
:start-after: START bert_setup_pre_tokenizer
:end-before: END bert_setup_pre_tokenizer
:dedent: 4
.. only:: node
.. literalinclude:: ../../bindings/node/examples/documentation/pipeline.test.ts
:language: javascript
:start-after: START bert_setup_pre_tokenizer
:end-before: END bert_setup_pre_tokenizer
:dedent: 8
And the post-processing uses the template we saw in the previous section:
.. only:: python
.. literalinclude:: ../../bindings/python/tests/documentation/test_pipeline.py
:language: python
:start-after: START bert_setup_processor
:end-before: END bert_setup_processor
:dedent: 8
.. only:: rust
.. literalinclude:: ../../tokenizers/tests/documentation.rs
:language: rust
:start-after: START bert_setup_processor
:end-before: END bert_setup_processor
:dedent: 4
.. only:: node
.. literalinclude:: ../../bindings/node/examples/documentation/pipeline.test.ts
:language: javascript
:start-after: START bert_setup_processor
:end-before: END bert_setup_processor
:dedent: 8
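Gathered in one place, a rough Python sketch of this BERT tokenizer setup (assuming, as in the
quicktour, that ``"[CLS]"`` and ``"[SEP]"`` end up with IDs 1 and 2 after training):

.. code-block:: python

    from tokenizers import Tokenizer, normalizers
    from tokenizers.models import WordPiece
    from tokenizers.normalizers import NFD, Lowercase, StripAccents
    from tokenizers.pre_tokenizers import Whitespace
    from tokenizers.processors import TemplateProcessing

    bert_tokenizer = Tokenizer(WordPiece(unk_token="[UNK]"))
    bert_tokenizer.normalizer = normalizers.Sequence([NFD(), Lowercase(), StripAccents()])
    bert_tokenizer.pre_tokenizer = Whitespace()
    bert_tokenizer.post_processor = TemplateProcessing(
        single="[CLS] $A [SEP]",
        pair="[CLS] $A [SEP] $B:1 [SEP]:1",
        special_tokens=[("[CLS]", 1), ("[SEP]", 2)],
    )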
We can now use this tokenizer and train it on wikitext like in the :doc:`quicktour`:
.. only:: python
.. literalinclude:: ../../bindings/python/tests/documentation/test_pipeline.py
:language: python
:start-after: START bert_train_tokenizer
:end-before: END bert_train_tokenizer
:dedent: 8
.. only:: rust
.. literalinclude:: ../../tokenizers/tests/documentation.rs
:language: rust
:start-after: START bert_train_tokenizer
:end-before: END bert_train_tokenizer
:dedent: 4
.. only:: node
.. literalinclude:: ../../bindings/node/examples/documentation/pipeline.test.ts
:language: javascript
:start-after: START bert_train_tokenizer
:end-before: END bert_train_tokenizer
:dedent: 8
.. _decoding:
Decoding
----------------------------------------------------------------------------------------------------
.. entities:: python
bert_tokenizer
:obj:`bert_tokenizer`
.. entities:: rust
bert_tokenizer
:obj:`bert_tokenizer`
.. entities:: node
bert_tokenizer
:obj:`bertTokenizer`
On top of encoding the input texts, a :entity:`Tokenizer` also has an API for decoding,
that is, converting IDs generated by your model back to text. This is done by the methods
:entity:`Tokenizer.decode` (for one predicted text) and :entity:`Tokenizer.decode_batch` (for a
batch of predictions).
The `decoder` will first convert the IDs back to tokens (using the tokenizer's vocabulary) and
remove all special tokens, then join those tokens with spaces:
.. only:: python
.. literalinclude:: ../../bindings/python/tests/documentation/test_pipeline.py
:language: python
:start-after: START test_decoding
:end-before: END test_decoding
:dedent: 8
.. only:: rust
.. literalinclude:: ../../tokenizers/tests/documentation.rs
:language: rust
:start-after: START pipeline_test_decoding
:end-before: END pipeline_test_decoding
:dedent: 4
.. only:: node
.. literalinclude:: ../../bindings/node/examples/documentation/pipeline.test.ts
:language: javascript
:start-after: START test_decoding
:end-before: END test_decoding
:dedent: 8
If you used a model that added special characters to represent subtokens of a given "word" (like
the :obj:`"##"` in WordPiece) you will need to customize the `decoder` to treat them properly. If we
take our previous :entity:`bert_tokenizer` for instance the default decoding will give:
.. only:: python
.. literalinclude:: ../../bindings/python/tests/documentation/test_pipeline.py
:language: python
:start-after: START bert_test_decoding
:end-before: END bert_test_decoding
:dedent: 8
.. only:: rust
.. literalinclude:: ../../tokenizers/tests/documentation.rs
:language: rust
:start-after: START bert_test_decoding
:end-before: END bert_test_decoding
:dedent: 4
.. only:: node
.. literalinclude:: ../../bindings/node/examples/documentation/pipeline.test.ts
:language: javascript
:start-after: START bert_test_decoding
:end-before: END bert_test_decoding
:dedent: 8
But by changing it to a proper decoder, we get:
.. only:: python
.. literalinclude:: ../../bindings/python/tests/documentation/test_pipeline.py
:language: python
:start-after: START bert_proper_decoding
:end-before: END bert_proper_decoding
:dedent: 8
.. only:: rust
.. literalinclude:: ../../tokenizers/tests/documentation.rs
:language: rust
:start-after: START bert_proper_decoding
:end-before: END bert_proper_decoding
:dedent: 4
.. only:: node
.. literalinclude:: ../../bindings/node/examples/documentation/pipeline.test.ts
:language: javascript
:start-after: START bert_proper_decoding
:end-before: END bert_proper_decoding
:dedent: 8
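In Python, switching to a decoder suited to WordPiece is roughly:

.. code-block:: python

    from tokenizers import decoders

    bert_tokenizer.decoder = decoders.WordPiece()
    # "##" continuation tokens are now merged back into full words when decoding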
| 0 |
hf_public_repos/tokenizers/docs | hf_public_repos/tokenizers/docs/source/components.rst | Components
====================================================================================================
When building a Tokenizer, you can attach various types of components to this Tokenizer in order
to customize its behavior. This page lists most provided components.
.. _normalizers:
.. entities:: python
BertNormalizer.clean_text
clean_text
BertNormalizer.handle_chinese_chars
handle_chinese_chars
BertNormalizer.strip_accents
strip_accents
BertNormalizer.lowercase
lowercase
Normalizer.Sequence
``Sequence([NFKC(), Lowercase()])``
PreTokenizer.Sequence
``Sequence([Punctuation(), WhitespaceSplit()])``
SplitDelimiterBehavior.removed
:obj:`removed`
SplitDelimiterBehavior.isolated
:obj:`isolated`
SplitDelimiterBehavior.merged_with_previous
:obj:`merged_with_previous`
SplitDelimiterBehavior.merged_with_next
:obj:`merged_with_next`
SplitDelimiterBehavior.contiguous
:obj:`contiguous`
.. entities:: rust
BertNormalizer.clean_text
clean_text
BertNormalizer.handle_chinese_chars
handle_chinese_chars
BertNormalizer.strip_accents
strip_accents
BertNormalizer.lowercase
lowercase
Normalizer.Sequence
``Sequence::new(vec![NFKC, Lowercase])``
PreTokenizer.Sequence
``Sequence::new(vec![Punctuation, WhitespaceSplit])``
SplitDelimiterBehavior.removed
:obj:`Removed`
SplitDelimiterBehavior.isolated
:obj:`Isolated`
SplitDelimiterBehavior.merged_with_previous
:obj:`MergedWithPrevious`
SplitDelimiterBehavior.merged_with_next
:obj:`MergedWithNext`
SplitDelimiterBehavior.contiguous
:obj:`Contiguous`
.. entities:: node
BertNormalizer.clean_text
cleanText
BertNormalizer.handle_chinese_chars
handleChineseChars
BertNormalizer.strip_accents
stripAccents
BertNormalizer.lowercase
lowercase
Normalizer.Sequence
..
PreTokenizer.Sequence
..
SplitDelimiterBehavior.removed
:obj:`removed`
SplitDelimiterBehavior.isolated
:obj:`isolated`
SplitDelimiterBehavior.merged_with_previous
:obj:`mergedWithPrevious`
SplitDelimiterBehavior.merged_with_next
:obj:`mergedWithNext`
SplitDelimiterBehavior.contiguous
:obj:`contiguous`
Normalizers
----------------------------------------------------------------------------------------------------
A ``Normalizer`` is in charge of pre-processing the input string in order to normalize it as
relevant for a given use case. Some common examples of normalization are the Unicode normalization
algorithms (NFD, NFKD, NFC & NFKC), lowercasing, etc.
The specificity of ``tokenizers`` is that we keep track of the alignment while normalizing. This
is essential to allow mapping from the generated tokens back to the input text.
The ``Normalizer`` is optional.
.. list-table::
:header-rows: 1
* - Name
- Description
- Example
* - NFD
- NFD unicode normalization
-
* - NFKD
- NFKD unicode normalization
-
* - NFC
- NFC unicode normalization
-
* - NFKC
- NFKC unicode normalization
-
* - Lowercase
- Replaces all uppercase to lowercase
- Input: ``HELLO ὈΔΥΣΣΕΎΣ``
Output: ``hello ὀδυσσεύς``
* - Strip
- Removes all whitespace characters on the specified sides (left, right or both) of the input
- Input: ``" hi "``
Output: ``"hi"``
* - StripAccents
- Removes all accent symbols in Unicode (to be used with NFD for consistency)
- Input: ``é``
Output: ``e``
* - Replace
- Replaces a custom string or regexp and changes it with given content
- ``Replace("a", "e")`` will behave like this:
Input: ``"banana"``
Ouput: ``"benene"``
* - BertNormalizer
- Provides an implementation of the Normalizer used in the original BERT. Options
that can be set are:
- :entity:`BertNormalizer.clean_text`
- :entity:`BertNormalizer.handle_chinese_chars`
- :entity:`BertNormalizer.strip_accents`
- :entity:`BertNormalizer.lowercase`
-
* - Sequence
- Composes multiple normalizers that will run in the provided order
- :entity:`Normalizer.Sequence`
.. _pre-tokenizers:
Pre tokenizers
----------------------------------------------------------------------------------------------------
The ``PreTokenizer`` takes care of splitting the input according to a set of rules. This
pre-processing lets you ensure that the underlying ``Model`` does not build tokens across multiple
"splits".
For example, if you don't want to have whitespace inside a token, then you can use a
``PreTokenizer`` that splits on whitespace.
You can easily combine multiple ``PreTokenizer`` together using a ``Sequence`` (see below).
The ``PreTokenizer`` is also allowed to modify the string, just like a ``Normalizer`` does. This
is necessary to allow some complicated algorithms that require splitting before normalizing (e.g.
the ByteLevel).
.. list-table::
:header-rows: 1
* - Name
- Description
- Example
* - ByteLevel
- Splits on whitespaces while remapping all the bytes to a set of visible characters. This
technique has been introduced by OpenAI with GPT-2 and has some more or less nice properties:
- Since it maps on bytes, a tokenizer using this only requires **256** characters as initial
alphabet (the number of values a byte can have), as opposed to the 130,000+ Unicode
characters.
- A consequence of the previous point is that it is absolutely unnecessary to have an
unknown token using this since we can represent anything with 256 tokens (Youhou!! 🎉🎉)
- For non-ASCII characters, it gets completely unreadable, but it works nonetheless!
- Input: ``"Hello my friend, how are you?"``
Output: ``"Hello", "Ġmy", "Ġfriend", ",", "Ġhow", "Ġare", "Ġyou", "?"``
* - Whitespace
- Splits on word boundaries (using the following regular expression: ``\w+|[^\w\s]+``)
- Input: ``"Hello there!"``
Output: ``"Hello", "there", "!"``
* - WhitespaceSplit
- Splits on any whitespace character
- Input: ``"Hello there!"``
Output: ``"Hello", "there!"``
* - Punctuation
- Will isolate all punctuation characters
- Input: ``"Hello?"``
Ouput: ``"Hello", "?"``
* - Metaspace
- Splits on whitespaces and replaces them with a special char "▁" (U+2581)
- Input: ``"Hello there"``
Ouput: ``"Hello", "▁there"``
* - CharDelimiterSplit
- Splits on a given character
- Example with ``x``:
Input: ``"Helloxthere"``
Ouput: ``"Hello", "there"``
* - Digits
- Splits the numbers from any other characters.
- Input: ``"Hello123there"``
Output: ```"Hello", "123", "there"```
* - Split
- Versatile pre-tokenizer that splits on provided pattern and according to provided behavior.
The pattern can be inverted if necessary.
- pattern should be either a custom string or regexp.
- behavior should be one of:
* :entity:`SplitDelimiterBehavior.removed`
* :entity:`SplitDelimiterBehavior.isolated`
* :entity:`SplitDelimiterBehavior.merged_with_previous`
* :entity:`SplitDelimiterBehavior.merged_with_next`
* :entity:`SplitDelimiterBehavior.contiguous`
- invert should be a boolean flag.
     - Example with ``pattern`` = :obj:`" "`, ``behavior`` = :obj:`"isolated"`, ``invert`` = :obj:`False`:
Input: ``"Hello, how are you?"``
Output: ```"Hello,", " ", "how", " ", "are", " ", "you?"```
* - Sequence
- Lets you compose multiple ``PreTokenizer`` that will be run in the given order
- :entity:`PreTokenizer.Sequence`
.. _models:
Models
----------------------------------------------------------------------------------------------------
Models are the core algorithms used to actually tokenize, and therefore, they are the only mandatory
component of a Tokenizer.
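A ``Model`` is given to the ``Tokenizer`` at construction time. A minimal sketch using the Python
binding (the ``unk_token`` argument is optional and only shown here for illustration):
.. code-block:: python
    from tokenizers import Tokenizer
    from tokenizers.models import BPE
    # The model is the only mandatory component of the pipeline
    tokenizer = Tokenizer(BPE(unk_token="[UNK]"))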
.. list-table::
:header-rows: 1
* - Name
- Description
* - WordLevel
- This is the "classic" tokenization algorithm. It let's you simply map words to IDs
without anything fancy. This has the advantage of being really simple to use and
understand, but it requires extremely large vocabularies for a good coverage.
*Using this* ``Model`` *requires the use of a* ``PreTokenizer``. *No choice will be made by
this model directly, it simply maps input tokens to IDs*
* - BPE
     - One of the most popular subword tokenization algorithms. Byte-Pair-Encoding works by
       starting with characters and merging those that are most frequently seen together,
       thus creating new tokens. It then works iteratively to build new tokens out of the most
       frequent pairs it sees in a corpus.
       BPE is able to build words it has never seen by using multiple subword tokens, and thus
       requires smaller vocabularies, with fewer chances of having "unk" (unknown) tokens.
* - WordPiece
     - This is a subword tokenization algorithm quite similar to BPE, used mainly by Google in
       models like BERT. It uses a greedy algorithm that tries to build long words first, splitting
       into multiple tokens when entire words don't exist in the vocabulary. This is different from
       BPE, which starts from characters and builds bigger tokens whenever possible.
       It uses the famous ``##`` prefix to identify tokens that are part of a word (i.e. not
       starting a word).
* - Unigram
     - Unigram is also a subword tokenization algorithm, and works by trying to identify the best
       set of subword tokens to maximize the probability for a given sentence. This is different
       from BPE in that it is not deterministic, based on a set of rules applied sequentially.
       Instead, Unigram is able to compute multiple ways of tokenizing and chooses the most
       probable one.
.. _post-processors:
PostProcessor
----------------------------------------------------------------------------------------------------
After the whole pipeline, we sometimes want to insert some special tokens before feeding
a tokenized string into a model, like "[CLS] My horse is amazing [SEP]". The ``PostProcessor``
is the component doing just that.
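A minimal sketch of configuring the ``TemplateProcessing`` post-processor described below, using the
Python binding (``tokenizer`` is an existing ``Tokenizer``, and the special token IDs are assumed to
match its vocabulary):
.. code-block:: python
    from tokenizers.processors import TemplateProcessing
    tokenizer.post_processor = TemplateProcessing(
        single="[CLS] $A [SEP]",
        pair="[CLS] $A [SEP] $B [SEP]",
        # (token, id) pairs must match the IDs in the trained vocabulary
        special_tokens=[("[CLS]", 1), ("[SEP]", 2)],
    )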
.. list-table::
:header-rows: 1
* - Name
- Description
- Example
* - TemplateProcessing
     - Lets you easily template the post processing, adding special tokens, and specifying
the ``type_id`` for each sequence/special token. The template is given two strings
representing the single sequence and the pair of sequences, as well as a set of
special tokens to use.
- Example, when specifying a template with these values:
- single: ``"[CLS] $A [SEP]"``
- pair: ``"[CLS] $A [SEP] $B [SEP]"``
- special tokens:
- ``"[CLS]"``
- ``"[SEP]"``
Input: ``("I like this", "but not this")``
Output: ``"[CLS] I like this [SEP] but not this [SEP]"``
.. _decoders:
Decoders
----------------------------------------------------------------------------------------------------
The Decoder knows how to go from the IDs used by the Tokenizer back to a readable piece of text.
For example, some ``Normalizer`` and ``PreTokenizer`` use special characters or identifiers that
need to be reverted.
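Setting a decoder is a one-liner. A minimal sketch using the Python binding (``tokenizer`` is
assumed to be an existing ``Tokenizer`` built with the ByteLevel pre-tokenizer):
.. code-block:: python
    from tokenizers import decoders
    # Pair the ByteLevel pre-tokenizer with its matching decoder
    tokenizer.decoder = decoders.ByteLevel()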
.. list-table::
:header-rows: 1
* - Name
- Description
* - ByteLevel
- Reverts the ByteLevel PreTokenizer. This PreTokenizer encodes at the byte-level, using
a set of visible Unicode characters to represent each byte, so we need a Decoder to
revert this process and get something readable again.
* - Metaspace
     - Reverts the Metaspace PreTokenizer. This PreTokenizer uses a special identifier ``▁`` to
identify whitespaces, and so this Decoder helps with decoding these.
* - WordPiece
- Reverts the WordPiece Model. This model uses a special identifier ``##`` for continuing
subwords, and so this Decoder helps with decoding these.
| 0 |
hf_public_repos/tokenizers/docs | hf_public_repos/tokenizers/docs/source/conf.py | # Configuration file for the Sphinx documentation builder.
#
# This file only contains a selection of the most common options. For a full
# list see the documentation:
# https://www.sphinx-doc.org/en/master/usage/configuration.html
# -- Path setup --------------------------------------------------------------
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#
import os
import sys
sys.path.insert(0, os.path.abspath("./_ext"))
sys.path.insert(0, os.path.abspath("."))
# -- Project information -----------------------------------------------------
project = "tokenizers"
copyright = "2020, huggingface"
author = "huggingface"
# The full version, including alpha/beta/rc tags
release = ""
# -- Custom information ------------------------------------------------------
# The possible values for languages (used by `_ext/entities`)
languages = ["node", "rust", "python"]
# This defines the version used to generate links to docs.rs
rust_version = "latest"
# -- General configuration ---------------------------------------------------
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = ["sphinx.ext.autodoc", "sphinx.ext.napoleon", "entities", "rust_doc", "toctree_tags"]
# Add any paths that contain templates here, relative to this directory.
templates_path = ["_templates"]
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
# This pattern also affects html_static_path and html_extra_path.
exclude_patterns = []
# -- Options for HTML output -------------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
#
html_theme = "sphinx_rtd_theme"
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#
html_theme_options = {"analytics_id": "UA-83738774-2"}
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ["_static"]
def setup(app):
    # Exclude the tutorials of any language that was not selected with a "-t <language>" tag
    for language in languages:
if not tags.has(language):
exclude_patterns.append(f"tutorials/{language}/*")
app.add_css_file("css/huggingface.css")
app.add_css_file("css/code-snippets.css")
app.add_js_file("js/custom.js")
| 0 |
hf_public_repos/tokenizers/docs | hf_public_repos/tokenizers/docs/source/index.rst | Tokenizers
====================================================================================================
Fast State-of-the-art tokenizers, optimized for both research and production
`🤗 Tokenizers`_ provides an implementation of today's most used tokenizers, with
a focus on performance and versatility. These tokenizers are also used in
`🤗 Transformers`_.
.. _🤗 Tokenizers: https://github.com/huggingface/tokenizers
.. _🤗 Transformers: https://github.com/huggingface/transformers
Main features:
----------------------------------------------------------------------------------------------------
- Train new vocabularies and tokenize, using today's most used tokenizers.
- Extremely fast (both training and tokenization), thanks to the Rust implementation. Takes
less than 20 seconds to tokenize a GB of text on a server's CPU.
- Easy to use, but also extremely versatile.
- Designed for both research and production.
- Full alignment tracking. Even with destructive normalization, it's always possible to get
the part of the original sentence that corresponds to any token.
- Does all the pre-processing: Truncation, Padding, adding the special tokens your model needs.
.. toctree::
:maxdepth: 2
:caption: Getting Started
quicktour
installation/main
pipeline
components
.. toctree-tags::
:maxdepth: 3
:caption: Using 🤗 Tokenizers
:glob:
:python:tutorials/python/*
.. toctree::
:maxdepth: 3
:caption: API Reference
api/reference
.. include:: entities.inc
| 0 |
hf_public_repos/tokenizers/docs/source/_static | hf_public_repos/tokenizers/docs/source/_static/js/custom.js | // These three variables below need to be updated at each release for the selectors.
const languages = [ "rust", "python", "node" ];
// Last stable version for each language
const stableVersion = {
"rust": "master",
"python": "v0.10.0",
"node": "master"
}
// Dictionary doc folder to Label for each language
const versionMapping = {
"rust": {
"master": "master",
},
"python": {
"master": "master",
"v0.9.4": "v0.9.4",
"v0.10.0": "v0.10.0",
},
"node": {
"master": "master",
}
};
// Dictionary mapping language name to Label
const languageName = {
"rust": "Rust",
"python": "Python",
"node": "Node.js"
};
const defaultLanguage = "python";
function addIcon() {
const huggingFaceLogo =
"https://huggingface.co/landing/assets/transformers-docs/huggingface_logo.svg";
const image = document.createElement("img");
image.setAttribute("src", huggingFaceLogo);
const div = document.createElement("div");
div.appendChild(image);
div.style.textAlign = 'center';
div.style.paddingTop = '30px';
div.style.backgroundColor = '#6670FF';
const scrollDiv = document.querySelector(".wy-side-scroll");
scrollDiv.prepend(div);
}
function addCustomFooter() {
const customFooter = document.createElement("div");
const questionOrIssue = document.createElement("div");
questionOrIssue.innerHTML =
"Stuck? Read our <a href='https://medium.com/huggingface'>Blog posts</a>" +
" or <a href='https://github.com/huggingface/tokenizers'>Create an issue</a>";
customFooter.appendChild(questionOrIssue);
customFooter.classList.add("footer");
const social = document.createElement("div");
social.classList.add("footer__Social");
const imageDetails = [ {
link: "https://huggingface.co",
imageLink: "https://huggingface.co/landing/assets/transformers-docs/website.svg"
}, {
link: "https://twitter.com/huggingface",
imageLink: "https://huggingface.co/landing/assets/transformers-docs/twitter.svg"
}, {
link: "https://github.com/huggingface",
imageLink: "https://huggingface.co/landing/assets/transformers-docs/github.svg"
}, {
link: "https://www.linkedin.com/company/huggingface/",
imageLink: "https://huggingface.co/landing/assets/transformers-docs/linkedin.svg"
} ];
imageDetails.forEach(imageLinks => {
const link = document.createElement("a");
const image = document.createElement("img");
image.src = imageLinks.imageLink;
link.href = imageLinks.link;
image.style.width = "30px";
image.classList.add("footer__CustomImage");
link.appendChild(image);
social.appendChild(link);
});
customFooter.appendChild(social);
document.querySelector("footer").appendChild(customFooter);
}
function addGithubButton() {
const div = `
<div class="github-repo">
<a class="github-button"
href="https://github.com/huggingface/tokenizers"
data-size="large"
data-show-count="true"
aria-label="Star huggingface/tokenizers on GitHub">
Star
</a>
</div>
`;
document.querySelector(".wy-side-nav-search .icon-home").insertAdjacentHTML('afterend', div);
}
function addVersionControl() {
// Default language and version
let language = defaultLanguage;
// To grab the version currently in view, we parse the url
let parts = location.pathname.split('/');
const languageIndex = parts.findIndex((part) => languages.includes(part));
language = parts[languageIndex];
let version = stableVersion[language];
const versionIndex = languageIndex + 1;
// If a version is specified, update it
if (parts[versionIndex] != "" && !parts[versionIndex].endsWith(".html")) {
// If `latest`, let's keep the default (should be the explicit latest version)
if (parts[versionIndex] != "latest") {
version = parts[versionIndex];
}
// Otherwise redirect to the latest (if not opening locally)
} else if (!parts[parts.length - 1].endsWith(".html")) {
return window.location.pathname = [language, version, parts.splice(versionIndex)].join("/");
// Opening locally, just don't show the version/language selector
} else {
return
}
// Language Menu
const languageMenu = document.createElement("div");
languageMenu.classList.add("menu-dropdown");
languageMenu.innerHTML = languages.map((lang) => {
let isVersion = false;
let updatedParts = parts.map((l, i) => {
if (isVersion) {
isVersion = false;
return 'latest';
}
if (i == languageIndex) {
isVersion = true;
return lang;
} else {
return l;
}
});
return `
<a class="dropdown-link ${lang == language? 'active' : ''}"
href=${updatedParts.join("/")}>
${languageName[lang]}
</a>
`;
}).join("\n");
// Version Menu
const versionMenu = document.createElement("div");
versionMenu.classList.add("menu-dropdown");
versionMenu.innerHTML = Object.entries(versionMapping[language]).map(([key, value]) => {
let updatedParts = parts.map((v, i) => {
if (i == versionIndex) {
return key;
} else {
return v
}
});
return `
<a class="dropdown-link ${key == version ? 'active' : ''}"
href="${updatedParts.join('/')}">
${value}
</a>
`;
}).join("\n");
// Language button
const languageButton = document.createElement("div");
languageButton.classList.add("dropdown-button");
languageButton.innerText = languageName[language].concat(" ▼");
languageButton.addEventListener("click", () => {
versionMenu.classList.remove("show");
languageMenu.classList.toggle("show");
languageButton.classList.toggle("active");
});
// Button for version selection
const versionButton = document.createElement("div");
versionButton.classList.add("dropdown-button");
versionButton.innerText = version.concat(" ▼");
// Toggle the menu when we click on the button
versionButton.addEventListener("click", () => {
languageMenu.classList.remove("show");
versionMenu.classList.toggle("show");
versionButton.classList.toggle("active");
});
// Hide the menu when we click elsewhere
window.addEventListener("click", (event) => {
if (event.target != languageButton){
languageButton.classList.remove('active');
languageMenu.classList.remove('show');
}
if (event.target != versionButton){
versionButton.classList.remove('active');
versionMenu.classList.remove('show');
}
});
const buttonContainer = document.createElement("div");
buttonContainer.classList.add("button-container");
buttonContainer.appendChild(languageButton);
buttonContainer.appendChild(versionButton);
// Container
const div = document.createElement("div");
div.classList.add("selectors");
div.appendChild(buttonContainer);
div.appendChild(languageMenu);
div.appendChild(versionMenu);
div.style.paddingTop = '25px';
div.style.backgroundColor = '#6670FF';
div.style.display = 'block';
div.style.textAlign = 'center';
const scrollDiv = document.querySelector(".wy-side-scroll");
scrollDiv.insertBefore(div, scrollDiv.children[1]);
}
function addHfMenu() {
const div = `
<div class="hf-menu">
<a href="/welcome">🔥 Sign in</a>
<a href="/models">🚀 Models</a>
<a href="http://discuss.huggingface.co">💬 Forum</a>
</div>
`;
document.body.insertAdjacentHTML('afterbegin', div);
}
/*!
* github-buttons v2.2.10
* (c) 2019 なつき
* @license BSD-2-Clause
*/
/**
* modified to run programmatically
*/
function parseGithubButtons (){"use strict";var e=window.document,t=e.location,o=window.encodeURIComponent,r=window.decodeURIComponent,n=window.Math,a=window.HTMLElement,i=window.XMLHttpRequest,l="https://unpkg.com/github-buttons@2.2.10/dist/buttons.html",c=i&&i.prototype&&"withCredentials"in i.prototype,d=c&&a&&a.prototype.attachShadow&&!a.prototype.attachShadow.prototype,s=function(e,t,o){e.addEventListener?e.addEventListener(t,o):e.attachEvent("on"+t,o)},u=function(e,t,o){e.removeEventListener?e.removeEventListener(t,o):e.detachEvent("on"+t,o)},h=function(e,t,o){var r=function(n){return u(e,t,r),o(n)};s(e,t,r)},f=function(e,t,o){var r=function(n){if(t.test(e.readyState))return u(e,"readystatechange",r),o(n)};s(e,"readystatechange",r)},p=function(e){return function(t,o,r){var n=e.createElement(t);if(o)for(var a in o){var i=o[a];null!=i&&(null!=n[a]?n[a]=i:n.setAttribute(a,i))}if(r)for(var l=0,c=r.length;l<c;l++){var d=r[l];n.appendChild("string"==typeof d?e.createTextNode(d):d)}return n}},g=p(e),b=function(e){var t;return function(){t||(t=1,e.apply(this,arguments))}},m="body{margin:0}a{color:#24292e;text-decoration:none;outline:0}.octicon{display:inline-block;vertical-align:text-top;fill:currentColor}.widget{ display:inline-block;overflow:hidden;font-family:-apple-system, BlinkMacSystemFont, \"Segoe UI\", Helvetica, Arial, sans-serif;font-size:0;white-space:nowrap;-webkit-user-select:none;-moz-user-select:none;-ms-user-select:none;user-select:none}.btn,.social-count{display:inline-block;height:14px;padding:2px 5px;font-size:11px;font-weight:600;line-height:14px;vertical-align:bottom;cursor:pointer;border:1px solid #c5c9cc;border-radius:0.25em}.btn{background-color:#eff3f6;background-image:-webkit-linear-gradient(top, #fafbfc, #eff3f6 90%);background-image:-moz-linear-gradient(top, #fafbfc, #eff3f6 90%);background-image:linear-gradient(180deg, #fafbfc, #eff3f6 90%);background-position:-1px -1px;background-repeat:repeat-x;background-size:110% 110%;border-color:rgba(27,31,35,0.2);-ms-filter:\"progid:DXImageTransform.Microsoft.Gradient(startColorstr='#FFFAFBFC', endColorstr='#FFEEF2F5')\";*filter:progid:DXImageTransform.Microsoft.Gradient(startColorstr='#FFFAFBFC', endColorstr='#FFEEF2F5')}.btn:active{background-color:#e9ecef;background-image:none;border-color:#a5a9ac;border-color:rgba(27,31,35,0.35);box-shadow:inset 0 0.15em 0.3em rgba(27,31,35,0.15)}.btn:focus,.btn:hover{background-color:#e6ebf1;background-image:-webkit-linear-gradient(top, #f0f3f6, #e6ebf1 90%);background-image:-moz-linear-gradient(top, #f0f3f6, #e6ebf1 90%);background-image:linear-gradient(180deg, #f0f3f6, #e6ebf1 90%);border-color:#a5a9ac;border-color:rgba(27,31,35,0.35);-ms-filter:\"progid:DXImageTransform.Microsoft.Gradient(startColorstr='#FFF0F3F6', endColorstr='#FFE5EAF0')\";*filter:progid:DXImageTransform.Microsoft.Gradient(startColorstr='#FFF0F3F6', endColorstr='#FFE5EAF0')}.social-count{position:relative;margin-left:5px;background-color:#fff}.social-count:focus,.social-count:hover{color:#0366d6}.social-count b,.social-count i{position:absolute;top:50%;left:0;display:block;width:0;height:0;margin:-4px 0 0 -4px;border:solid transparent;border-width:4px 4px 4px 0;_line-height:0;_border-top-color:red !important;_border-bottom-color:red !important;_border-left-color:red !important;_filter:chroma(color=red)}.social-count b{border-right-color:#c5c9cc}.social-count i{margin-left:-3px;border-right-color:#fff}.lg .btn,.lg .social-count{height:16px;padding:5px 10px;font-size:12px;line-height:16px}.lg 
.social-count{margin-left:6px}.lg .social-count b,.lg .social-count i{margin:-5px 0 0 -5px;border-width:5px 5px 5px 0}.lg .social-count i{margin-left:-4px}\n",v={"mark-github":{width:16,height:16,path:'<path fill-rule="evenodd" d="M8 0C3.58 0 0 3.58 0 8c0 3.54 2.29 6.53 5.47 7.59.4.07.55-.17.55-.38 0-.19-.01-.82-.01-1.49-2.01.37-2.53-.49-2.69-.94-.09-.23-.48-.94-.82-1.13-.28-.15-.68-.52-.01-.53.63-.01 1.08.58 1.23.82.72 1.21 1.87.87 2.33.66.07-.52.28-.87.51-1.07-1.78-.2-3.64-.89-3.64-3.95 0-.87.31-1.59.82-2.15-.08-.2-.36-1.02.08-2.12 0 0 .67-.21 2.2.82.64-.18 1.32-.27 2-.27.68 0 1.36.09 2 .27 1.53-1.04 2.2-.82 2.2-.82.44 1.1.16 1.92.08 2.12.51.56.82 1.27.82 2.15 0 3.07-1.87 3.75-3.65 3.95.29.25.54.73.54 1.48 0 1.07-.01 1.93-.01 2.2 0 .21.15.46.55.38A8.013 8.013 0 0 0 16 8c0-4.42-3.58-8-8-8z"/>'},eye:{width:16,height:16,path:'<path fill-rule="evenodd" d="M8.06 2C3 2 0 8 0 8s3 6 8.06 6C13 14 16 8 16 8s-3-6-7.94-6zM8 12c-2.2 0-4-1.78-4-4 0-2.2 1.8-4 4-4 2.22 0 4 1.8 4 4 0 2.22-1.78 4-4 4zm2-4c0 1.11-.89 2-2 2-1.11 0-2-.89-2-2 0-1.11.89-2 2-2 1.11 0 2 .89 2 2z"/>'},star:{width:14,height:16,path:'<path fill-rule="evenodd" d="M14 6l-4.9-.64L7 1 4.9 5.36 0 6l3.6 3.26L2.67 14 7 11.67 11.33 14l-.93-4.74L14 6z"/>'},"repo-forked":{width:10,height:16,path:'<path fill-rule="evenodd" d="M8 1a1.993 1.993 0 0 0-1 3.72V6L5 8 3 6V4.72A1.993 1.993 0 0 0 2 1a1.993 1.993 0 0 0-1 3.72V6.5l3 3v1.78A1.993 1.993 0 0 0 5 15a1.993 1.993 0 0 0 1-3.72V9.5l3-3V4.72A1.993 1.993 0 0 0 8 1zM2 4.2C1.34 4.2.8 3.65.8 3c0-.65.55-1.2 1.2-1.2.65 0 1.2.55 1.2 1.2 0 .65-.55 1.2-1.2 1.2zm3 10c-.66 0-1.2-.55-1.2-1.2 0-.65.55-1.2 1.2-1.2.65 0 1.2.55 1.2 1.2 0 .65-.55 1.2-1.2 1.2zm3-10c-.66 0-1.2-.55-1.2-1.2 0-.65.55-1.2 1.2-1.2.65 0 1.2.55 1.2 1.2 0 .65-.55 1.2-1.2 1.2z"/>'},"issue-opened":{width:14,height:16,path:'<path fill-rule="evenodd" d="M7 2.3c3.14 0 5.7 2.56 5.7 5.7s-2.56 5.7-5.7 5.7A5.71 5.71 0 0 1 1.3 8c0-3.14 2.56-5.7 5.7-5.7zM7 1C3.14 1 0 4.14 0 8s3.14 7 7 7 7-3.14 7-7-3.14-7-7-7zm1 3H6v5h2V4zm0 6H6v2h2v-2z"/>'},"cloud-download":{width:16,height:16,path:'<path fill-rule="evenodd" d="M9 12h2l-3 3-3-3h2V7h2v5zm3-8c0-.44-.91-3-4.5-3C5.08 1 3 2.92 3 5 1.02 5 0 6.52 0 8c0 1.53 1 3 3 3h3V9.7H3C1.38 9.7 1.3 8.28 1.3 8c0-.17.05-1.7 1.7-1.7h1.3V5c0-1.39 1.56-2.7 3.2-2.7 2.55 0 3.13 1.55 3.2 1.8v1.2H12c.81 0 2.7.22 2.7 2.2 0 2.09-2.25 2.2-2.7 2.2h-2V11h2c2.08 0 4-1.16 4-3.5C16 5.06 14.08 4 12 4z"/>'}},w={},x=function(e,t,o){var r=p(e.ownerDocument),n=e.appendChild(r("style",{type:"text/css"}));n.styleSheet?n.styleSheet.cssText=m:n.appendChild(e.ownerDocument.createTextNode(m));var a,l,d=r("a",{className:"btn",href:t.href,target:"_blank",innerHTML:(a=t["data-icon"],l=/^large$/i.test(t["data-size"])?16:14,a=(""+a).toLowerCase().replace(/^octicon-/,""),{}.hasOwnProperty.call(v,a)||(a="mark-github"),'<svg version="1.1" width="'+l*v[a].width/v[a].height+'" height="'+l+'" viewBox="0 0 '+v[a].width+" "+v[a].height+'" class="octicon octicon-'+a+'" aria-hidden="true">'+v[a].path+"</svg>"),"aria-label":t["aria-label"]||void 0},[" ",r("span",{},[t["data-text"]||""])]);/\.github\.com$/.test("."+d.hostname)?/^https?:\/\/((gist\.)?github\.com\/[^\/?#]+\/[^\/?#]+\/archive\/|github\.com\/[^\/?#]+\/[^\/?#]+\/releases\/download\/|codeload\.github\.com\/)/.test(d.href)&&(d.target="_top"):(d.href="#",d.target="_self");var u,h,g,x,y=e.appendChild(r("div",{className:"widget"+(/^large$/i.test(t["data-size"])?" 
lg":"")},[d]));/^(true|1)$/i.test(t["data-show-count"])&&"github.com"===d.hostname&&(u=d.pathname.replace(/^(?!\/)/,"/").match(/^\/([^\/?#]+)(?:\/([^\/?#]+)(?:\/(?:(subscription)|(fork)|(issues)|([^\/?#]+)))?)?(?:[\/?#]|$)/))&&!u[6]?(u[2]?(h="/repos/"+u[1]+"/"+u[2],u[3]?(x="subscribers_count",g="watchers"):u[4]?(x="forks_count",g="network"):u[5]?(x="open_issues_count",g="issues"):(x="stargazers_count",g="stargazers")):(h="/users/"+u[1],g=x="followers"),function(e,t){var o=w[e]||(w[e]=[]);if(!(o.push(t)>1)){var r=b(function(){for(delete w[e];t=o.shift();)t.apply(null,arguments)});if(c){var n=new i;s(n,"abort",r),s(n,"error",r),s(n,"load",function(){var e;try{e=JSON.parse(n.responseText)}catch(e){return void r(e)}r(200!==n.status,e)}),n.open("GET",e),n.send()}else{var a=this||window;a._=function(e){a._=null,r(200!==e.meta.status,e.data)};var l=p(a.document)("script",{async:!0,src:e+(/\?/.test(e)?"&":"?")+"callback=_"}),d=function(){a._&&a._({meta:{}})};s(l,"load",d),s(l,"error",d),l.readyState&&f(l,/de|m/,d),a.document.getElementsByTagName("head")[0].appendChild(l)}}}.call(this,"https://api.github.com"+h,function(e,t){if(!e){var n=t[x];y.appendChild(r("a",{className:"social-count",href:t.html_url+"/"+g,target:"_blank","aria-label":n+" "+x.replace(/_count$/,"").replace("_"," ").slice(0,n<2?-1:void 0)+" on GitHub"},[r("b"),r("i"),r("span",{},[(""+n).replace(/\B(?=(\d{3})+(?!\d))/g,",")])]))}o&&o(y)})):o&&o(y)},y=window.devicePixelRatio||1,C=function(e){return(y>1?n.ceil(n.round(e*y)/y*2)/2:n.ceil(e))||0},F=function(e,t){e.style.width=t[0]+"px",e.style.height=t[1]+"px"},k=function(t,r){if(null!=t&&null!=r)if(t.getAttribute&&(t=function(e){for(var t={href:e.href,title:e.title,"aria-label":e.getAttribute("aria-label")},o=["icon","text","size","show-count"],r=0,n=o.length;r<n;r++){var a="data-"+o[r];t[a]=e.getAttribute(a)}return null==t["data-text"]&&(t["data-text"]=e.textContent||e.innerText),t}(t)),d){var a=g("span",{title:t.title||void 0});x(a.attachShadow({mode:"closed"}),t,function(){r(a)})}else{var i=g("iframe",{src:"javascript:0",title:t.title||void 0,allowtransparency:!0,scrolling:"no",frameBorder:0});F(i,[0,0]),i.style.border="none";var c=function(){var a,d=i.contentWindow;try{a=d.document.body}catch(t){return void e.body.appendChild(i.parentNode.removeChild(i))}u(i,"load",c),x.call(d,a,t,function(e){var a=function(e){var t=e.offsetWidth,o=e.offsetHeight;if(e.getBoundingClientRect){var r=e.getBoundingClientRect();t=n.max(t,C(r.width)),o=n.max(o,C(r.height))}return[t,o]}(e);i.parentNode.removeChild(i),h(i,"load",function(){F(i,a)}),i.src=l+"#"+(i.name=function(e){var t=[];for(var r in e){var n=e[r];null!=n&&t.push(o(r)+"="+o(n))}return t.join("&")}(t)),r(i)})};s(i,"load",c),e.body.appendChild(i)}};t.protocol+"//"+t.host+t.pathname===l?x(e.body,function(e){for(var t={},o=e.split("&"),n=0,a=o.length;n<a;n++){var i=o[n];if(""!==i){var l=i.split("=");t[r(l[0])]=null!=l[1]?r(l.slice(1).join("=")):void 0}}return t}(window.name||t.hash.replace(/^#/,""))):function(t){if(/m/.test(e.readyState)||!/g/.test(e.readyState)&&!e.documentElement.doScroll)setTimeout(t);else if(e.addEventListener){var o=b(t);h(e,"DOMContentLoaded",o),h(window,"load",o)}else f(e,/m/,t)}(function(){for(var t=e.querySelectorAll?e.querySelectorAll("a.github-button"):function(){for(var t=[],o=e.getElementsByTagName("a"),r=0,n=o.length;r<n;r++)~(" "+o[r].className+" ").replace(/[ \t\n\f\r]+/g," ").indexOf(" github-button ")&&t.push(o[r]);return 
t}(),o=0,r=t.length;o<r;o++)!function(e){k(e,function(t){e.parentNode.replaceChild(t,e)})}(t[o])})};
function onLoad() {
addIcon();
addVersionControl();
addCustomFooter();
addGithubButton();
parseGithubButtons();
addHfMenu();
}
window.addEventListener("load", onLoad);
| 0 |
hf_public_repos/tokenizers/docs/source/_static | hf_public_repos/tokenizers/docs/source/_static/css/code-snippets.css |
.highlight .c1, .highlight .sd{
color: #999
}
.highlight .nn, .highlight .k, .highlight .s1, .highlight .nb, .highlight .bp, .highlight .kc, .highlight .kt {
color: #FB8D68;
}
.highlight .kn, .highlight .nv, .highlight .s2, .highlight .ow, .highlight .kd, .highlight .kr, .highlight .s {
color: #6670FF;
}
.highlight .gp {
color: #FB8D68;
}
| 0 |
hf_public_repos/tokenizers/docs/source/_static | hf_public_repos/tokenizers/docs/source/_static/css/huggingface.css | /* Our DOM objects */
/* Version control */
.selectors {
margin-bottom: 10px;
}
.dropdown-button {
display: inline-block;
width: 50%;
background-color: #6670FF;
color: white;
border: none;
padding: 5px;
font-size: 15px;
cursor: pointer;
}
.dropdown-button:hover, .dropdown-button:focus, .dropdown-button.active {
background-color: #A6B0FF;
}
.dropdown-button.active {
background-color: #7988FF;
}
.menu-dropdown {
display: none;
background-color: #7988FF;
min-width: 160px;
overflow: auto;
font-size: 15px;
padding: 10px 0;
}
.menu-dropdown a {
color: white;
padding: 3px 4px;
text-decoration: none;
display: block;
}
.menu-dropdown a:hover {
background-color: #A6B0FF;
}
.dropdown-link.active {
background-color: #A6B0FF;
}
.show {
display: block;
}
/* The literal code blocks */
.rst-content tt.literal, .rst-content tt.literal, .rst-content code.literal {
color: #6670FF;
}
/* To keep the logo centered */
.wy-side-scroll {
width: auto;
font-size: 20px;
}
/* The div that holds the Hugging Face logo */
.HuggingFaceDiv {
width: 100%
}
/* The research field on top of the toc tree */
.wy-side-nav-search{
padding-top: 0;
background-color: #6670FF;
}
/* The toc tree */
.wy-nav-side{
background-color: #6670FF;
padding-bottom: 0;
}
/* The section headers in the toc tree */
.wy-menu-vertical p.caption{
background-color: #4d59ff;
line-height: 40px;
}
/* The selected items in the toc tree */
.wy-menu-vertical li.current{
background-color: #A6B0FF;
}
/* When a list item that does belong to the selected block from the toc tree is hovered */
.wy-menu-vertical li.current a:hover{
background-color: #B6C0FF;
}
/* When a list item that does NOT belong to the selected block from the toc tree is hovered. */
.wy-menu-vertical li a:hover{
background-color: #A7AFFB;
}
/* The text items on the toc tree */
.wy-menu-vertical a {
color: #FFFFDD;
font-family: Calibre-Light, sans-serif;
}
.wy-menu-vertical header, .wy-menu-vertical p.caption{
color: white;
font-family: Calibre-Light, sans-serif;
}
/* The color inside the selected toc tree block */
.wy-menu-vertical li.toctree-l2 a, .wy-menu-vertical li.toctree-l3 a, .wy-menu-vertical li.toctree-l4 a {
color: black;
}
/* Inside the depth-2 selected toc tree block */
.wy-menu-vertical li.toctree-l2.current>a {
background-color: #B6C0FF
}
.wy-menu-vertical li.toctree-l2.current li.toctree-l3>a {
background-color: #C6D0FF
}
/* Inside the depth-3 selected toc tree block */
.wy-menu-vertical li.toctree-l3.current li.toctree-l4>a{
background-color: #D6E0FF
}
/* Inside code snippets */
.rst-content dl:not(.docutils) dt{
font-size: 15px;
}
/* Links */
a {
color: #6670FF;
}
/* Content bars */
.rst-content dl:not(.docutils) dt {
background-color: rgba(251, 141, 104, 0.1);
border-right: solid 2px #FB8D68;
border-left: solid 2px #FB8D68;
color: #FB8D68;
font-family: Calibre-Light, sans-serif;
border-top: none;
font-style: normal !important;
}
/* Expand button */
.wy-menu-vertical li.toctree-l2 span.toctree-expand,
.wy-menu-vertical li.on a span.toctree-expand, .wy-menu-vertical li.current>a span.toctree-expand,
.wy-menu-vertical li.toctree-l3 span.toctree-expand{
color: black;
}
/* Max window size */
.wy-nav-content{
max-width: 1200px;
}
/* Mobile header */
.wy-nav-top{
background-color: #6670FF;
}
/* Source spans */
.rst-content .viewcode-link, .rst-content .viewcode-back{
color: #6670FF;
font-size: 110%;
letter-spacing: 2px;
text-transform: uppercase;
}
/* It would be better for table to be visible without horizontal scrolling */
.wy-table-responsive table td, .wy-table-responsive table th{
white-space: normal;
}
.footer {
margin-top: 20px;
}
.footer__Social {
display: flex;
flex-direction: row;
}
.footer__CustomImage {
margin: 2px 5px 0 0;
}
/* class and method names in doc */
.rst-content dl:not(.docutils) tt.descname, .rst-content dl:not(.docutils) tt.descclassname, .rst-content dl:not(.docutils) tt.descname, .rst-content dl:not(.docutils) code.descname, .rst-content dl:not(.docutils) tt.descclassname, .rst-content dl:not(.docutils) code.descclassname{
font-family: Calibre, sans-serif;
font-size: 20px !important;
}
/* class name in doc*/
.rst-content dl:not(.docutils) tt.descname, .rst-content dl:not(.docutils) tt.descname, .rst-content dl:not(.docutils) code.descname{
margin-right: 10px;
font-family: Calibre-Medium, sans-serif;
}
/* Method and class parameters */
.sig-param{
line-height: 23px;
}
/* Class introduction "class" string at beginning */
.rst-content dl:not(.docutils) .property{
font-size: 18px;
color: black;
}
/* FONTS */
body{
font-family: Calibre, sans-serif;
font-size: 16px;
}
h1 {
font-family: Calibre-Thin, sans-serif;
font-size: 70px;
}
h2, .rst-content .toctree-wrapper p.caption, h3, h4, h5, h6, legend{
font-family: Calibre-Medium, sans-serif;
}
@font-face {
font-family: Calibre-Medium;
src: url(./Calibre-Medium.otf);
font-weight:400;
}
@font-face {
font-family: Calibre;
src: url(./Calibre-Regular.otf);
font-weight:400;
}
@font-face {
font-family: Calibre-Light;
src: url(./Calibre-Light.ttf);
font-weight:400;
}
@font-face {
font-family: Calibre-Thin;
src: url(./Calibre-Thin.otf);
font-weight:400;
}
/**
* Nav Links to other parts of huggingface.co
*/
div.hf-menu {
position: absolute;
top: 0;
right: 0;
padding-top: 20px;
padding-right: 20px;
z-index: 1000;
}
div.hf-menu a {
font-size: 14px;
letter-spacing: 0.3px;
text-transform: uppercase;
color: white;
-webkit-font-smoothing: antialiased;
background: linear-gradient(0deg, #6671ffb8, #9a66ffb8 50%);
padding: 10px 16px 6px 16px;
border-radius: 3px;
margin-left: 12px;
position: relative;
}
div.hf-menu a:active {
top: 1px;
}
@media (min-width: 768px) and (max-width: 1860px) {
.wy-breadcrumbs {
margin-top: 32px;
}
}
@media (max-width: 768px) {
div.hf-menu {
display: none;
}
}
| 0 |
hf_public_repos/tokenizers/docs/source | hf_public_repos/tokenizers/docs/source/api/python.inc | Input sequences
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
These types represent all the different kinds of sequence that can be used as input of a Tokenizer.
Globally, any sequence can be either a string or a list of strings, according to the operating
mode of the tokenizer: ``raw text`` vs ``pre-tokenized``.
.. autodata:: tokenizers.TextInputSequence
.. autodata:: tokenizers.PreTokenizedInputSequence
.. autodata:: tokenizers.InputSequence
Encode inputs
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
These types represent all the different kinds of input that a :class:`~tokenizers.Tokenizer` accepts
when using :meth:`~tokenizers.Tokenizer.encode_batch`.
.. autodata:: tokenizers.TextEncodeInput
.. autodata:: tokenizers.PreTokenizedEncodeInput
.. autodata:: tokenizers.EncodeInput
Tokenizer
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
.. autoclass:: tokenizers.Tokenizer
:members:
Encoding
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
.. autoclass:: tokenizers.Encoding
:members:
Added Tokens
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
.. autoclass:: tokenizers.AddedToken
:members:
Models
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
.. automodule:: tokenizers.models
:members:
Normalizers
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
.. automodule:: tokenizers.normalizers
:members:
Pre-tokenizers
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
.. automodule:: tokenizers.pre_tokenizers
:members:
Post-processor
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
.. automodule:: tokenizers.processors
:members:
Trainers
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
.. automodule:: tokenizers.trainers
:members:
Decoders
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
.. automodule:: tokenizers.decoders
:members:
Visualizer
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
.. autoclass:: tokenizers.tools.Annotation
:members:
.. autoclass:: tokenizers.tools.EncodingVisualizer
:members: __call__
| 0 |
hf_public_repos/tokenizers/docs/source | hf_public_repos/tokenizers/docs/source/api/node.inc | Documentation
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
The node API has not been documented yet.
| 0 |
hf_public_repos/tokenizers/docs/source | hf_public_repos/tokenizers/docs/source/api/reference.rst | .. only:: python
.. include:: python.inc
.. only:: rust
.. include:: rust.inc
.. only:: node
.. include:: node.inc
| 0 |
hf_public_repos/tokenizers/docs/source | hf_public_repos/tokenizers/docs/source/api/rust.inc | Documentation
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
The Rust API Reference is available directly on the `Docs.rs <https://docs.rs/tokenizers>`__
website.
| 0 |
hf_public_repos/tokenizers/docs/source | hf_public_repos/tokenizers/docs/source/_ext/toctree_tags.py | import re
from sphinx.directives.other import TocTree
class TocTreeTags(TocTree):
    hasPat = re.compile(r"^\s*:(.+):(.+)$")
def filter_entries(self, entries):
filtered = []
for e in entries:
m = self.hasPat.match(e)
            if m is not None:
if self.env.app.tags.has(m.groups()[0]):
filtered.append(m.groups()[1])
else:
filtered.append(e)
return filtered
def run(self):
self.content = self.filter_entries(self.content)
return super().run()
def setup(app):
app.add_directive("toctree-tags", TocTreeTags)
return {
"version": "0.1",
}
| 0 |
hf_public_repos/tokenizers/docs/source | hf_public_repos/tokenizers/docs/source/_ext/rust_doc.py | from docutils import nodes
import sphinx
from sphinx.locale import _
from conf import rust_version
logger = sphinx.util.logging.getLogger(__name__)
class RustRef:
def __call__(self, name, rawtext, text, lineno, inliner, options={}, content=[]):
doctype = name.split("_")[1]
parts = text.split("::")
if text.startswith("~"):
title = parts[-1]
parts[0] = parts[0][1:]
        else:
            # Without the "~" shorthand, the full path is used as the link title
            title = text
link = self.base_link()
if doctype == "struct":
l, title = self.make_struct_link(parts, title)
if doctype == "func":
l, title = self.make_func_link(parts, title)
if doctype == "meth":
l, title = self.make_meth_link(parts, title)
if doctype == "trait":
l, title = self.make_trait_link(parts, title)
link += l
node = nodes.reference(internal=False, refuri=link, text=title)
wrapper = nodes.literal(classes=["xref"])
wrapper += node
return [wrapper], []
def base_link(self):
return f"https://docs.rs/tokenizers/{rust_version}"
def make_struct_link(self, parts, title):
link = ""
struct_name = parts[-1]
path = parts[:-1]
for p in path:
link += f"/{p}"
link += f"/struct.{struct_name}.html"
return link, title
def make_func_link(self, parts, title):
link = ""
fn_name = parts[-1]
path = parts[:-1]
for p in path:
link += f"/{p}"
link += f"/fn.{fn_name}.html"
return link, title
def make_meth_link(self, parts, title):
meth_name = parts[-1]
if meth_name.endswith("()"):
meth_name = meth_name[:-2]
link, title = self.make_struct_link(parts[:-1], title)
link += f"#method.{meth_name}"
if not title.endswith(")"):
title += "()"
return link, title
def make_trait_link(self, parts, title):
link = ""
trait_name = parts[-1]
path = parts[:-1]
for p in path:
link += f"/{p}"
link += f"/trait.{trait_name}.html"
return link, title
def setup(app):
app.add_role("rust_struct", RustRef())
app.add_role("rust_func", RustRef())
app.add_role("rust_meth", RustRef())
app.add_role("rust_trait", RustRef())
return {
"version": "0.1",
"parallel_read_safe": True,
"parallel_write_safe": True,
}
| 0 |
hf_public_repos/tokenizers/docs/source | hf_public_repos/tokenizers/docs/source/_ext/entities.py | from collections import defaultdict, abc
from typing import cast
from docutils import nodes
from docutils.parsers.rst import Directive
import sphinx
from sphinx.locale import _
from sphinx.util.docutils import SphinxDirective
from sphinx.errors import ExtensionError
from conf import languages as LANGUAGES
logger = sphinx.util.logging.getLogger(__name__)
GLOBALNAME = "$GLOBAL$"
def update(d, u):
for k, v in u.items():
if isinstance(v, abc.Mapping):
d[k] = update(d.get(k, {}), v)
else:
d[k] = v
return d
class EntityNode(nodes.General, nodes.Element):
pass
class EntitiesNode(nodes.General, nodes.Element):
pass
class AllEntities:
def __init__(self):
self.entities = defaultdict(dict)
@classmethod
def install(cls, env):
if not hasattr(env, "entity_all_entities"):
entities = cls()
env.entity_all_entities = entities
return env.entity_all_entities
def merge(self, other):
self.entities.update(other.entities)
def purge(self, docname):
for env_docname in [GLOBALNAME, docname]:
self.entities[env_docname] = dict(
[
(name, entity)
for name, entity in self.entities[env_docname].items()
if entity["docname"] != docname
]
)
def _extract_entities(self, nodes):
pass
def _extract_options(self, nodes):
pass
def _add_entities(self, entities, language, is_global, docname):
scope = GLOBALNAME if is_global else docname
for entity in entities:
name = f'{language}-{entity["name"]}'
content = entity["content"]
if name in self.entities[scope]:
logger.warning(
f'Entity "{name}" has already been defined{" globally" if is_global else ""}',
location=docname,
)
self.entities[scope][name] = {"docname": docname, "content": content}
def _extract_global(self, nodes):
for node in nodes:
if node.tagname != "field":
raise Exception(f"Expected a field, found {node.tagname}")
name, _ = node.children
if name.tagname != "field_name":
raise Exception(f"Expected a field name here, found {name_node.tagname}")
if str(name.children[0]) == "global":
return True
def _extract_entities(self, nodes):
entities = []
for node in nodes:
if node.tagname != "definition_list_item":
raise Exception(f"Expected a list item here, found {node.tagname}")
name_node, content_node = node.children
if name_node.tagname != "term":
raise Exception(f"Expected a term here, found {name_node.tagname}")
if content_node.tagname != "definition":
raise Exception(f"Expected a definition here, found {content_node.tagname}")
name = str(name_node.children[0])
if len(content_node.children) == 1 and content_node.children[0].tagname == "paragraph":
content = content_node.children[0].children[0]
else:
content = content_node
entities.append({"name": name, "content": content})
return entities
def extract(self, node, docname):
is_global = False
entities = []
language = None
for node in node.children:
if language is None and node.tagname != "paragraph":
raise Exception(f"Expected language name:\n.. entities:: <LANGUAGE>")
elif language is None and node.tagname == "paragraph":
language = str(node.children[0])
if language not in LANGUAGES:
raise Exception(
                        f'Unknown language "{language}". Might be missing a newline after the language name'
)
elif node.tagname == "field_list":
is_global = self._extract_global(node.children)
elif node.tagname == "definition_list":
entities.extend(self._extract_entities(node.children))
else:
raise Exception(f"Expected a list of terms/options, found {node.tagname}")
self._add_entities(entities, language, is_global, docname)
def resolve_pendings(self, app):
env = app.builder.env
updates = defaultdict(dict)
for env_docname in self.entities.keys():
for name, entity in self.entities[env_docname].items():
docname = entity["docname"]
node = entity["content"]
for node in node.traverse(sphinx.addnodes.pending_xref):
contnode = cast(nodes.TextElement, node[0].deepcopy())
newnode = None
typ = node["reftype"]
target = node["reftarget"]
refdoc = node.get("refdoc", docname)
domain = None
try:
if "refdomain" in node and node["refdomain"]:
# let the domain try to resolve the reference
try:
domain = env.domains[node["refdomain"]]
except KeyError as exc:
raise NoUri(target, typ) from exc
newnode = domain.resolve_xref(
env, refdoc, app.builder, typ, target, node, contnode
)
except NoUri:
newnode = contnode
updates[env_docname][name] = {
"docname": docname,
"content": newnode or contnode,
}
update(self.entities, updates)
def get(self, language, name, docname):
name = f"{language}-{name}"
if name in self.entities[docname]:
return self.entities[docname][name]
elif name in self.entities[GLOBALNAME]:
return self.entities[GLOBALNAME][name]
else:
return None
class EntitiesDirective(SphinxDirective):
has_content = True
def run(self):
content = nodes.definition_list()
self.state.nested_parse(self.content, self.content_offset, content)
try:
entities = AllEntities.install(self.env)
entities.extract(content, self.env.docname)
except Exception as err:
raise self.error(f'Malformed directive "entities": {err}')
return []
def entity_role(name, rawtext, text, lineno, inliner, options={}, content=[]):
node = EntityNode()
node.entity = text
return [node], []
def process_entity_nodes(app, doctree, docname):
""" Replace all the entities by their content """
env = app.builder.env
entities = AllEntities.install(env)
entities.resolve_pendings(app)
language = None
try:
language = next(l for l in LANGUAGES if l in app.tags)
except Exception:
logger.warning(f"No language tag specified, not resolving entities in {docname}")
for node in doctree.traverse(EntityNode):
if language is None:
node.replace_self(nodes.Text(_(node.entity), _(node.entity)))
else:
entity = entities.get(language, node.entity, docname)
if entity is None:
node.replace_self(nodes.Text(_(node.entity), _(node.entity)))
logger.warning(f'Entity "{node.entity}" has not been defined', location=node)
else:
node.replace_self(entity["content"])
def purge_entities(app, env, docname):
""" Purge any entity that comes from the given docname """
entities = AllEntities.install(env)
entities.purge(docname)
def merge_entities(app, env, docnames, other):
""" Merge multiple environment entities """
entities = AllEntities.install(env)
other_entities = AllEntities.install(other)
entities.merge(other_entities)
def setup(app):
app.add_node(EntityNode)
app.add_node(EntitiesNode)
app.add_directive("entities", EntitiesDirective)
app.add_role("entity", entity_role)
app.connect("doctree-resolved", process_entity_nodes)
app.connect("env-merge-info", merge_entities)
app.connect("env-purge-doc", purge_entities)
return {
"version": "0.1",
"parallel_read_safe": True,
"parallel_write_safe": True,
}
| 0 |
hf_public_repos/tokenizers/docs/source/tutorials | hf_public_repos/tokenizers/docs/source/tutorials/python/training_from_memory.rst | Training from memory
----------------------------------------------------------------------------------------------------
In the `Quicktour <quicktour>`__, we saw how to build and train a tokenizer using text files,
but we can actually use any Python Iterator. In this section we'll see a few different ways of
training our tokenizer.
For all the examples listed below, we'll use the same :class:`~tokenizers.Tokenizer` and
:class:`~tokenizers.trainers.Trainer`, built as follows:
.. literalinclude:: ../../../../bindings/python/tests/documentation/test_tutorial_train_from_iterators.py
:language: python
:start-after: START init_tokenizer_trainer
:end-before: END init_tokenizer_trainer
:dedent: 8
This tokenizer is based on the :class:`~tokenizers.models.Unigram` model. It takes care of
normalizing the input using the NFKC Unicode normalization method, and uses a
:class:`~tokenizers.pre_tokenizers.ByteLevel` pre-tokenizer with the corresponding decoder.
For more information on the components used here, you can check `here <components>`__.
The most basic way
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
As you probably guessed already, the easiest way to train our tokenizer is by using a :obj:`List`:
.. literalinclude:: ../../../../bindings/python/tests/documentation/test_tutorial_train_from_iterators.py
:language: python
:start-after: START train_basic
:end-before: END train_basic
:dedent: 8
Easy, right? You can use anything that works as an iterator here, be it a :obj:`List`, :obj:`Tuple`,
or a :obj:`np.Array`. Anything works as long as it provides strings.
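Since the snippet above is pulled from the test suite, here is a rough inline equivalent (the data
is made up, and ``tokenizer`` and ``trainer`` are the objects built at the top of this section):
.. code-block:: python
    data = [
        "Beautiful is better than ugly.",
        "Explicit is better than implicit.",
        "Simple is better than complex.",
    ]
    tokenizer.train_from_iterator(data, trainer=trainer)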
Using the 🤗 Datasets library
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
An awesome way to access one of the many datasets that exist out there is by using the 🤗 Datasets
library. For more information about it, you should check
`the official documentation here <https://huggingface.co/docs/datasets/>`__.
Let's start by loading our dataset:
.. literalinclude:: ../../../../bindings/python/tests/documentation/test_tutorial_train_from_iterators.py
:language: python
:start-after: START load_dataset
:end-before: END load_dataset
:dedent: 8
The next step is to build an iterator over this dataset. The easiest way to do this is probably by
using a generator:
.. literalinclude:: ../../../../bindings/python/tests/documentation/test_tutorial_train_from_iterators.py
:language: python
:start-after: START def_batch_iterator
:end-before: END def_batch_iterator
:dedent: 8
As you can see here, for improved efficiency we can actually provide a batch of examples used
to train, instead of iterating over them one by one. By doing so, we can expect performance very
similar to what we got while training directly from files.
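For reference, such a batched generator typically looks like the following sketch (this assumes the
dataset loaded above exposes a ``"text"`` column, as wikitext does):
.. code-block:: python
    def batch_iterator(batch_size=1000):
        # Yield lists of raw strings instead of single examples
        for i in range(0, len(dataset), batch_size):
            yield dataset[i : i + batch_size]["text"]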
With our iterator ready, we just need to launch the training. In order to improve the look of our
progress bars, we can specify the total length of the dataset:
.. literalinclude:: ../../../../bindings/python/tests/documentation/test_tutorial_train_from_iterators.py
:language: python
:start-after: START train_datasets
:end-before: END train_datasets
:dedent: 8
And that's it!
Using gzip files
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
Since gzip files in Python can be used as iterators, it is extremely simple to train on such files:
.. literalinclude:: ../../../../bindings/python/tests/documentation/test_tutorial_train_from_iterators.py
:language: python
:start-after: START single_gzip
:end-before: END single_gzip
:dedent: 8
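Inlined, that looks roughly like this (a sketch: ``data/my-file.0.gz`` is a made-up path, and
``tokenizer`` and ``trainer`` are the objects defined above):
.. code-block:: python
    import gzip
    # Opened in text mode, a gzip file iterates over its decompressed lines
    with gzip.open("data/my-file.0.gz", "rt") as f:
        tokenizer.train_from_iterator(f, trainer=trainer)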
Now if we wanted to train from multiple gzip files, it wouldn't be much harder:
.. literalinclude:: ../../../../bindings/python/tests/documentation/test_tutorial_train_from_iterators.py
:language: python
:start-after: START multi_gzip
:end-before: END multi_gzip
:dedent: 8
And voilà!
| 0 |
hf_public_repos/tokenizers/docs/source | hf_public_repos/tokenizers/docs/source/installation/python.inc | 🤗 Tokenizers is tested on Python 3.5+.
You should install 🤗 Tokenizers in a
`virtual environment <https://docs.python.org/3/library/venv.html>`_. If you're unfamiliar with
Python virtual environments, check out the
`user guide <https://packaging.python.org/guides/installing-using-pip-and-virtual-environments/>`__.
Create a virtual environment with the version of Python you're going to use and activate it.
Installation with pip
----------------------------------------------------------------------------------------------------
🤗 Tokenizers can be installed using pip as follows::
pip install tokenizers
Installation from sources
----------------------------------------------------------------------------------------------------
To use this method, you need to have the Rust language installed. You can follow
`the official guide <https://www.rust-lang.org/learn/get-started>`__ for more information.
If you are using a unix-based OS, the installation should be as simple as running::
curl --proto '=https' --tlsv1.2 -sSf https://sh.rustup.rs | sh
Or you can easily update it with the following command::
rustup update
Once rust is installed, we can start retrieving the sources for 🤗 Tokenizers::
git clone https://github.com/huggingface/tokenizers
Then we go into the python bindings folder::
cd tokenizers/bindings/python
At this point you should have your `virtual environment`_ already activated. In order to
compile 🤗 Tokenizers, you need to::
pip install -e .
| 0 |
hf_public_repos/tokenizers/docs/source | hf_public_repos/tokenizers/docs/source/installation/node.inc | Installation with npm
----------------------------------------------------------------------------------------------------
You can simply install 🤗 Tokenizers with npm using::
npm install tokenizers
| 0 |
hf_public_repos/tokenizers/docs/source | hf_public_repos/tokenizers/docs/source/installation/rust.inc | Crates.io
----------------------------------------------------------------------------------------------------
🤗 Tokenizers is available on `crates.io <https://crates.io/crates/tokenizers>`__.
You just need to add it to your :obj:`Cargo.toml`::
tokenizers = "0.10"
| 0 |
hf_public_repos/tokenizers/docs/source | hf_public_repos/tokenizers/docs/source/installation/main.rst | Installation
====================================================================================================
.. only:: python
.. include:: python.inc
.. only:: rust
.. include:: rust.inc
.. only:: node
.. include:: node.inc
| 0 |
hf_public_repos/tokenizers/docs | hf_public_repos/tokenizers/docs/source-doc-builder/quicktour.mdx | # Quicktour
Let's have a quick look at the 🤗 Tokenizers library features. The
library provides an implementation of today's most used tokenizers that
is both easy to use and blazing fast.
## Build a tokenizer from scratch
To illustrate how fast the 🤗 Tokenizers library is, let's train a new
tokenizer on [wikitext-103](https://blog.einstein.ai/the-wikitext-long-term-dependency-language-modeling-dataset/)
(516M of text) in just a few seconds. First things first, you will need
to download this dataset and unzip it with:
``` bash
wget https://s3.amazonaws.com/research.metamind.io/wikitext/wikitext-103-raw-v1.zip
unzip wikitext-103-raw-v1.zip
```
### Training the tokenizer
In this tour, we will build and train a Byte-Pair Encoding (BPE)
tokenizer. For more information about the different types of tokenizers,
check out this [guide](https://huggingface.co/transformers/tokenizer_summary.html) in
the 🤗 Transformers documentation. Here, training the tokenizer means it
will learn merge rules by:
- Start with all the characters present in the training corpus as
tokens.
- Identify the most common pair of tokens and merge it into one token.
- Repeat until the vocabulary (i.e., the number of tokens) has reached
the size we want.
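To make the "most common pair" step concrete, here is a toy illustration of the counting involved
(plain Python, not the library's actual implementation, and the corpus is made up):
``` python
from collections import Counter

def most_frequent_pair(word_freqs):
    """Count adjacent symbol pairs over the corpus and return the most frequent one."""
    pairs = Counter()
    for symbols, freq in word_freqs.items():
        for a, b in zip(symbols, symbols[1:]):
            pairs[(a, b)] += freq
    return pairs.most_common(1)[0]

# Each word is a tuple of its current symbols (single characters at first), with its frequency
corpus = {("h", "u", "g"): 10, ("p", "u", "g"): 5, ("h", "u", "g", "s"): 4}
print(most_frequent_pair(corpus))  # (('u', 'g'), 19) -> "u" and "g" get merged into a new token "ug"
```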
The main API of the library is the `Tokenizer` class. Here is how
we instantiate one with a BPE model:
<tokenizerslangcontent>
<python>
<literalinclude>
{"path": "../../bindings/python/tests/documentation/test_quicktour.py",
"language": "python",
"start-after": "START init_tokenizer",
"end-before": "END init_tokenizer",
"dedent": 8}
</literalinclude>
</python>
<rust>
<literalinclude>
{"path": "../../tokenizers/tests/documentation.rs",
"language": "rust",
"start-after": "START quicktour_init_tokenizer",
"end-before": "END quicktour_init_tokenizer",
"dedent": 4}
</literalinclude>
</rust>
<node>
<literalinclude>
{"path": "../../bindings/node/examples/documentation/quicktour.test.ts",
"language": "js",
"start-after": "START init_tokenizer",
"end-before": "END init_tokenizer",
"dedent": 8}
</literalinclude>
</node>
</tokenizerslangcontent>
To train our tokenizer on the wikitext files, we will need to
instantiate a *trainer*, in this case a
`BpeTrainer`.
<tokenizerslangcontent>
<python>
<literalinclude>
{"path": "../../bindings/python/tests/documentation/test_quicktour.py",
"language": "python",
"start-after": "START init_trainer",
"end-before": "END init_trainer",
"dedent": 8}
</literalinclude>
</python>
<rust>
<literalinclude>
{"path": "../../tokenizers/tests/documentation.rs",
"language": "rust",
"start-after": "START quicktour_init_trainer",
"end-before": "END quicktour_init_trainer",
"dedent": 4}
</literalinclude>
</rust>
<node>
<literalinclude>
{"path": "../../bindings/node/examples/documentation/quicktour.test.ts",
"language": "js",
"start-after": "START init_trainer",
"end-before": "END init_trainer",
"dedent": 8}
</literalinclude>
</node>
</tokenizerslangcontent>
We can set the training arguments like `vocab_size` or `min_frequency` (here
left at their default values of 30,000 and 0), but the most important
part is to give the `special_tokens` we
plan to use later on (they are not used at all during training) so that
they get inserted in the vocabulary.
<Tip>
The order in which you write the special tokens list matters: here `"[UNK]"` will get the ID 0,
`"[CLS]"` will get the ID 1 and so forth.
</Tip>
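If you are following along in Python, the trainer set up above typically looks something like this
sketch (the exact snippet is pulled from the test files referenced in the tabs above):
``` python
from tokenizers.trainers import BpeTrainer

# The order of the special tokens determines their IDs: "[UNK]" gets 0, "[CLS]" gets 1, and so on
trainer = BpeTrainer(special_tokens=["[UNK]", "[CLS]", "[SEP]", "[PAD]", "[MASK]"])
```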
We could train our tokenizer right now, but it wouldn't be optimal.
Without a pre-tokenizer that will split our inputs into words, we might
get tokens that overlap several words: for instance we could get an
`"it is"` token since those two words
often appear next to each other. Using a pre-tokenizer will ensure no
token is bigger than a word returned by the pre-tokenizer. Here we want
to train a subword BPE tokenizer, and we will use the easiest
pre-tokenizer possible by splitting on whitespace.
<tokenizerslangcontent>
<python>
<literalinclude>
{"path": "../../bindings/python/tests/documentation/test_quicktour.py",
"language": "python",
"start-after": "START init_pretok",
"end-before": "END init_pretok",
"dedent": 8}
</literalinclude>
</python>
<rust>
<literalinclude>
{"path": "../../tokenizers/tests/documentation.rs",
"language": "rust",
"start-after": "START quicktour_init_pretok",
"end-before": "END quicktour_init_pretok",
"dedent": 4}
</literalinclude>
</rust>
<node>
<literalinclude>
{"path": "../../bindings/node/examples/documentation/quicktour.test.ts",
"language": "js",
"start-after": "START init_pretok",
"end-before": "END init_pretok",
"dedent": 8}
</literalinclude>
</node>
</tokenizerslangcontent>
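In Python, this step roughly amounts to the following sketch:

```python
from tokenizers.pre_tokenizers import Whitespace

# Split the raw text on whitespace and punctuation before the model sees it
tokenizer.pre_tokenizer = Whitespace()
```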
Now, we can just call the `Tokenizer.train` method with any list of files we want to use:
<tokenizerslangcontent>
<python>
<literalinclude>
{"path": "../../bindings/python/tests/documentation/test_quicktour.py",
"language": "python",
"start-after": "START train",
"end-before": "END train",
"dedent": 8}
</literalinclude>
</python>
<rust>
<literalinclude>
{"path": "../../tokenizers/tests/documentation.rs",
"language": "rust",
"start-after": "START quicktour_train",
"end-before": "END quicktour_train",
"dedent": 4}
</literalinclude>
</rust>
<node>
<literalinclude>
{"path": "../../bindings/node/examples/documentation/quicktour.test.ts",
"language": "js",
"start-after": "START train",
"end-before": "END train",
"dedent": 8}
</literalinclude>
</node>
</tokenizerslangcontent>
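A Python sketch of the training call (the file paths assume the wikitext archive was unzipped in the current directory; adjust them to your setup):

```python
# wikitext-103-raw-v1.zip unpacks into wiki.{test,train,valid}.raw files
files = [f"wikitext-103-raw/wiki.{split}.raw" for split in ["test", "train", "valid"]]
tokenizer.train(files, trainer)
```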
This should only take a few seconds to train our tokenizer on the full
wikitext dataset! To save the tokenizer in one file that contains all
its configuration and vocabulary, just use the
`Tokenizer.save` method:
<tokenizerslangcontent>
<python>
<literalinclude>
{"path": "../../bindings/python/tests/documentation/test_quicktour.py",
"language": "python",
"start-after": "START save",
"end-before": "END save",
"dedent": 8}
</literalinclude>
</python>
<rust>
<literalinclude>
{"path": "../../tokenizers/tests/documentation.rs",
"language": "rust",
"start-after": "START quicktour_save",
"end-before": "END quicktour_save",
"dedent": 4}
</literalinclude>
</rust>
<node>
<literalinclude>
{"path": "../../bindings/node/examples/documentation/quicktour.test.ts",
"language": "js",
"start-after": "START save",
"end-before": "END save",
"dedent": 8}
</literalinclude>
</node>
</tokenizerslangcontent>
and you can reload your tokenizer from that file with the
`Tokenizer.from_file`
class method:
<tokenizerslangcontent>
<python>
<literalinclude>
{"path": "../../bindings/python/tests/documentation/test_quicktour.py",
"language": "python",
"start-after": "START reload_tokenizer",
"end-before": "END reload_tokenizer",
"dedent": 12}
</literalinclude>
</python>
<rust>
<literalinclude>
{"path": "../../tokenizers/tests/documentation.rs",
"language": "rust",
"start-after": "START quicktour_reload_tokenizer",
"end-before": "END quicktour_reload_tokenizer",
"dedent": 4}
</literalinclude>
</rust>
<node>
<literalinclude>
{"path": "../../bindings/node/examples/documentation/quicktour.test.ts",
"language": "js",
"start-after": "START reload_tokenizer",
"end-before": "END reload_tokenizer",
"dedent": 8}
</literalinclude>
</node>
</tokenizerslangcontent>
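In Python, saving and reloading roughly look like this (the file name is just an example):

```python
# Serialize the model, pre-tokenizer, special tokens, ... into a single JSON file
tokenizer.save("tokenizer-wiki.json")

# Reload it later in one line
tokenizer = Tokenizer.from_file("tokenizer-wiki.json")
```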
### Using the tokenizer
Now that we have trained a tokenizer, we can use it on any text we want
with the `Tokenizer.encode` method:
<tokenizerslangcontent>
<python>
<literalinclude>
{"path": "../../bindings/python/tests/documentation/test_quicktour.py",
"language": "python",
"start-after": "START encode",
"end-before": "END encode",
"dedent": 8}
</literalinclude>
</python>
<rust>
<literalinclude>
{"path": "../../tokenizers/tests/documentation.rs",
"language": "rust",
"start-after": "START quicktour_encode",
"end-before": "END quicktour_encode",
"dedent": 4}
</literalinclude>
</rust>
<node>
<literalinclude>
{"path": "../../bindings/node/examples/documentation/quicktour.test.ts",
"language": "js",
"start-after": "START encode",
"end-before": "END encode",
"dedent": 8}
</literalinclude>
</node>
</tokenizerslangcontent>
This applied the full pipeline of the tokenizer on the text, returning
an `Encoding` object. To learn more
about this pipeline, and how to apply (or customize) parts of it, check out [this page](pipeline).
This `Encoding` object then has all the
attributes you need for your deep learning model (or other). The
`tokens` attribute contains the
segmentation of your text in tokens:
<tokenizerslangcontent>
<python>
<literalinclude>
{"path": "../../bindings/python/tests/documentation/test_quicktour.py",
"language": "python",
"start-after": "START print_tokens",
"end-before": "END print_tokens",
"dedent": 8}
</literalinclude>
</python>
<rust>
<literalinclude>
{"path": "../../tokenizers/tests/documentation.rs",
"language": "rust",
"start-after": "START quicktour_print_tokens",
"end-before": "END quicktour_print_tokens",
"dedent": 4}
</literalinclude>
</rust>
<node>
<literalinclude>
{"path": "../../bindings/node/examples/documentation/quicktour.test.ts",
"language": "js",
"start-after": "START print_tokens",
"end-before": "END print_tokens",
"dedent": 8}
</literalinclude>
</node>
</tokenizerslangcontent>
Similarly, the `ids` attribute will
contain the index of each of those tokens in the tokenizer's
vocabulary:
<tokenizerslangcontent>
<python>
<literalinclude>
{"path": "../../bindings/python/tests/documentation/test_quicktour.py",
"language": "python",
"start-after": "START print_ids",
"end-before": "END print_ids",
"dedent": 8}
</literalinclude>
</python>
<rust>
<literalinclude>
{"path": "../../tokenizers/tests/documentation.rs",
"language": "rust",
"start-after": "START quicktour_print_ids",
"end-before": "END quicktour_print_ids",
"dedent": 4}
</literalinclude>
</rust>
<node>
<literalinclude>
{"path": "../../bindings/node/examples/documentation/quicktour.test.ts",
"language": "js",
"start-after": "START print_ids",
"end-before": "END print_ids",
"dedent": 8}
</literalinclude>
</node>
</tokenizerslangcontent>
An important feature of the 🤗 Tokenizers library is that it comes with
full alignment tracking, meaning you can always get the part of your
original sentence that corresponds to a given token. Those are stored in
the `offsets` attribute of our
`Encoding` object. For instance, let's say we want to find out what
caused the `"[UNK]"` token to appear. Since it is the token at index 9
in the list, we can simply ask for the offset at that index:
<tokenizerslangcontent>
<python>
<literalinclude>
{"path": "../../bindings/python/tests/documentation/test_quicktour.py",
"language": "python",
"start-after": "START print_offsets",
"end-before": "END print_offsets",
"dedent": 8}
</literalinclude>
</python>
<rust>
<literalinclude>
{"path": "../../tokenizers/tests/documentation.rs",
"language": "rust",
"start-after": "START quicktour_print_offsets",
"end-before": "END quicktour_print_offsets",
"dedent": 4}
</literalinclude>
</rust>
<node>
<literalinclude>
{"path": "../../bindings/node/examples/documentation/quicktour.test.ts",
"language": "js",
"start-after": "START print_offsets",
"end-before": "END print_offsets",
"dedent": 8}
</literalinclude>
</node>
</tokenizerslangcontent>
and those are the indices that correspond to the emoji in the original
sentence:
<tokenizerslangcontent>
<python>
<literalinclude>
{"path": "../../bindings/python/tests/documentation/test_quicktour.py",
"language": "python",
"start-after": "START use_offsets",
"end-before": "END use_offsets",
"dedent": 8}
</literalinclude>
</python>
<rust>
<literalinclude>
{"path": "../../tokenizers/tests/documentation.rs",
"language": "rust",
"start-after": "START quicktour_use_offsets",
"end-before": "END quicktour_use_offsets",
"dedent": 4}
</literalinclude>
</rust>
<node>
<literalinclude>
{"path": "../../bindings/node/examples/documentation/quicktour.test.ts",
"language": "js",
"start-after": "START use_offsets",
"end-before": "END use_offsets",
"dedent": 8}
</literalinclude>
</node>
</tokenizerslangcontent>
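Putting these attributes together, here is a rough Python sketch; the sentence is the example used throughout this tour, and index 9 is assumed to be the emoji token, which depends on the trained vocabulary:

```python
sentence = "Hello, y'all! How are you 😁 ?"
output = tokenizer.encode(sentence)

print(output.tokens)      # the text split into tokens
print(output.ids)         # the ID of each token in the vocabulary
print(output.offsets[9])  # (start, end) span of the token at index 9

# The offsets let us recover the original substring behind the "[UNK]" token
start, end = output.offsets[9]
print(sentence[start:end])
```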
### Post-processing
We might want our tokenizer to automatically add special tokens, like
`"[CLS]"` or `"[SEP]"`. To do this, we use a post-processor.
`TemplateProcessing` is the most
commonly used one; you just have to specify a template for the processing of
single sentences and pairs of sentences, along with the special tokens
and their IDs.
When we built our tokenizer, we set `"[CLS]"` and `"[SEP]"` in positions 1
and 2 of our list of special tokens, so this should be their IDs. To
double-check, we can use the `Tokenizer.token_to_id` method:
<tokenizerslangcontent>
<python>
<literalinclude>
{"path": "../../bindings/python/tests/documentation/test_quicktour.py",
"language": "python",
"start-after": "START check_sep",
"end-before": "END check_sep",
"dedent": 8}
</literalinclude>
</python>
<rust>
<literalinclude>
{"path": "../../tokenizers/tests/documentation.rs",
"language": "rust",
"start-after": "START quicktour_check_sep",
"end-before": "END quicktour_check_sep",
"dedent": 4}
</literalinclude>
</rust>
<node>
<literalinclude>
{"path": "../../bindings/node/examples/documentation/quicktour.test.ts",
"language": "js",
"start-after": "START check_sep",
"end-before": "END check_sep",
"dedent": 8}
</literalinclude>
</node>
</tokenizerslangcontent>
Here is how we can set the post-processing to give us the traditional
BERT inputs:
<tokenizerslangcontent>
<python>
<literalinclude>
{"path": "../../bindings/python/tests/documentation/test_quicktour.py",
"language": "python",
"start-after": "START init_template_processing",
"end-before": "END init_template_processing",
"dedent": 8}
</literalinclude>
</python>
<rust>
<literalinclude>
{"path": "../../tokenizers/tests/documentation.rs",
"language": "rust",
"start-after": "START quicktour_init_template_processing",
"end-before": "END quicktour_init_template_processing",
"dedent": 4}
</literalinclude>
</rust>
<node>
<literalinclude>
{"path": "../../bindings/node/examples/documentation/quicktour.test.ts",
"language": "js",
"start-after": "START init_template_processing",
"end-before": "END init_template_processing",
"dedent": 8}
</literalinclude>
</node>
</tokenizerslangcontent>
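For reference, the Python version of this post-processor is roughly the following sketch (the IDs 1 and 2 assume the special-token order used when training above):

```python
from tokenizers.processors import TemplateProcessing

tokenizer.post_processor = TemplateProcessing(
    single="[CLS] $A [SEP]",
    pair="[CLS] $A [SEP] $B:1 [SEP]:1",
    special_tokens=[("[CLS]", 1), ("[SEP]", 2)],
)
```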
Let's go over this snippet of code in more detail. First, we specify
the template for single sentences: those should have the form
`"[CLS] $A [SEP]"` where
`$A` represents our sentence.
Then, we specify the template for sentence pairs, which should have the
form `"[CLS] $A [SEP] $B [SEP]"` where
`$A` represents the first sentence and
`$B` the second one. The
`:1` added in the template represents the type IDs we want for each part of our input: they default
to 0 for everything (which is why we don't have
`$A:0`) and here we set it to 1 for the
tokens of the second sentence and the last `"[SEP]"` token.
Lastly, we specify the special tokens we used and their IDs in our
tokenizer's vocabulary.
To check that this worked properly, let's try to encode the same
sentence as before:
<tokenizerslangcontent>
<python>
<literalinclude>
{"path": "../../bindings/python/tests/documentation/test_quicktour.py",
"language": "python",
"start-after": "START print_special_tokens",
"end-before": "END print_special_tokens",
"dedent": 8}
</literalinclude>
</python>
<rust>
<literalinclude>
{"path": "../../tokenizers/tests/documentation.rs",
"language": "rust",
"start-after": "START quicktour_print_special_tokens",
"end-before": "END quicktour_print_special_tokens",
"dedent": 4}
</literalinclude>
</rust>
<node>
<literalinclude>
{"path": "../../bindings/node/examples/documentation/quicktour.test.ts",
"language": "js",
"start-after": "START print_special_tokens",
"end-before": "END print_special_tokens",
"dedent": 8}
</literalinclude>
</node>
</tokenizerslangcontent>
To check the results on a pair of sentences, we just pass the two
sentences to `Tokenizer.encode`:
<tokenizerslangcontent>
<python>
<literalinclude>
{"path": "../../bindings/python/tests/documentation/test_quicktour.py",
"language": "python",
"start-after": "START print_special_tokens_pair",
"end-before": "END print_special_tokens_pair",
"dedent": 8}
</literalinclude>
</python>
<rust>
<literalinclude>
{"path": "../../tokenizers/tests/documentation.rs",
"language": "rust",
"start-after": "START quicktour_print_special_tokens_pair",
"end-before": "END quicktour_print_special_tokens_pair",
"dedent": 4}
</literalinclude>
</rust>
<node>
<literalinclude>
{"path": "../../bindings/node/examples/documentation/quicktour.test.ts",
"language": "js",
"start-after": "START print_special_tokens_pair",
"end-before": "END print_special_tokens_pair",
"dedent": 8}
</literalinclude>
</node>
</tokenizerslangcontent>
You can then check that the type IDs attributed to each token are correct with:
<tokenizerslangcontent>
<python>
<literalinclude>
{"path": "../../bindings/python/tests/documentation/test_quicktour.py",
"language": "python",
"start-after": "START print_type_ids",
"end-before": "END print_type_ids",
"dedent": 8}
</literalinclude>
</python>
<rust>
<literalinclude>
{"path": "../../tokenizers/tests/documentation.rs",
"language": "rust",
"start-after": "START quicktour_print_type_ids",
"end-before": "END quicktour_print_type_ids",
"dedent": 4}
</literalinclude>
</rust>
<node>
<literalinclude>
{"path": "../../bindings/node/examples/documentation/quicktour.test.ts",
"language": "js",
"start-after": "START print_type_ids",
"end-before": "END print_type_ids",
"dedent": 8}
</literalinclude>
</node>
</tokenizerslangcontent>
If you save your tokenizer with `Tokenizer.save`, the post-processor will be saved along with it.
### Encoding multiple sentences in a batch
To get the full speed of the 🤗 Tokenizers library, it's best to
process your texts in batches, using the
`Tokenizer.encode_batch` method:
<tokenizerslangcontent>
<python>
<literalinclude>
{"path": "../../bindings/python/tests/documentation/test_quicktour.py",
"language": "python",
"start-after": "START encode_batch",
"end-before": "END encode_batch",
"dedent": 8}
</literalinclude>
</python>
<rust>
<literalinclude>
{"path": "../../tokenizers/tests/documentation.rs",
"language": "rust",
"start-after": "START quicktour_encode_batch",
"end-before": "END quicktour_encode_batch",
"dedent": 4}
</literalinclude>
</rust>
<node>
<literalinclude>
{"path": "../../bindings/node/examples/documentation/quicktour.test.ts",
"language": "js",
"start-after": "START encode_batch",
"end-before": "END encode_batch",
"dedent": 8}
</literalinclude>
</node>
</tokenizerslangcontent>
The output is then a list of `Encoding`
objects like the ones we saw before. You can process together as many
texts as you like, as long as they fit in memory.
To process a batch of sentence pairs, pass two lists to the
`Tokenizer.encode_batch` method: the
list of sentences A and the list of sentences B:
<tokenizerslangcontent>
<python>
<literalinclude>
{"path": "../../bindings/python/tests/documentation/test_quicktour.py",
"language": "python",
"start-after": "START encode_batch_pair",
"end-before": "END encode_batch_pair",
"dedent": 8}
</literalinclude>
</python>
<rust>
<literalinclude>
{"path": "../../tokenizers/tests/documentation.rs",
"language": "rust",
"start-after": "START quicktour_encode_batch_pair",
"end-before": "END quicktour_encode_batch_pair",
"dedent": 4}
</literalinclude>
</rust>
<node>
<literalinclude>
{"path": "../../bindings/node/examples/documentation/quicktour.test.ts",
"language": "js",
"start-after": "START encode_batch_pair",
"end-before": "END encode_batch_pair",
"dedent": 8}
</literalinclude>
</node>
</tokenizerslangcontent>
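In the Python binding, a batch of pairs is commonly written as a single list of `[sentence_A, sentence_B]` entries; a rough sketch with placeholder sentences:

```python
output = tokenizer.encode_batch(
    [["Hello, y'all!", "How are you 😁 ?"], ["Hello to you too!", "I'm fine, thank you!"]]
)
```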
When encoding multiple sentences, you can automatically pad the outputs
to the longest sentence present by using
`Tokenizer.enable_padding`, with the
`pad_token` and its ID (we can double-check the ID of the padding token
with `Tokenizer.token_to_id` as before):
<tokenizerslangcontent>
<python>
<literalinclude>
{"path": "../../bindings/python/tests/documentation/test_quicktour.py",
"language": "python",
"start-after": "START enable_padding",
"end-before": "END enable_padding",
"dedent": 8}
</literalinclude>
</python>
<rust>
<literalinclude>
{"path": "../../tokenizers/tests/documentation.rs",
"language": "rust",
"start-after": "START quicktour_enable_padding",
"end-before": "END quicktour_enable_padding",
"dedent": 4}
</literalinclude>
</rust>
<node>
<literalinclude>
{"path": "../../bindings/node/examples/documentation/quicktour.test.ts",
"language": "js",
"start-after": "START enable_padding",
"end-before": "END enable_padding",
"dedent": 8}
</literalinclude>
</node>
</tokenizerslangcontent>
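A Python sketch of enabling padding; here `pad_id=3` assumes `"[PAD]"` received ID 3, which is what `token_to_id` should confirm given the special-token order used above:

```python
tokenizer.enable_padding(pad_id=3, pad_token="[PAD]")
```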
We can set the `direction` of the padding
(defaults to the right) or a given `length` if we want to pad every sample to that specific number (here
we leave it unset to pad to the size of the longest text).
<tokenizerslangcontent>
<python>
<literalinclude>
{"path": "../../bindings/python/tests/documentation/test_quicktour.py",
"language": "python",
"start-after": "START print_batch_tokens",
"end-before": "END print_batch_tokens",
"dedent": 8}
</literalinclude>
</python>
<rust>
<literalinclude>
{"path": "../../tokenizers/tests/documentation.rs",
"language": "rust",
"start-after": "START quicktour_print_batch_tokens",
"end-before": "END quicktour_print_batch_tokens",
"dedent": 4}
</literalinclude>
</rust>
<node>
<literalinclude>
{"path": "../../bindings/node/examples/documentation/quicktour.test.ts",
"language": "js",
"start-after": "START print_batch_tokens",
"end-before": "END print_batch_tokens",
"dedent": 8}
</literalinclude>
</node>
</tokenizerslangcontent>
In this case, the `attention mask` generated by the
tokenizer takes the padding into account:
<tokenizerslangcontent>
<python>
<literalinclude>
{"path": "../../bindings/python/tests/documentation/test_quicktour.py",
"language": "python",
"start-after": "START print_attention_mask",
"end-before": "END print_attention_mask",
"dedent": 8}
</literalinclude>
</python>
<rust>
<literalinclude>
{"path": "../../tokenizers/tests/documentation.rs",
"language": "rust",
"start-after": "START quicktour_print_attention_mask",
"end-before": "END quicktour_print_attention_mask",
"dedent": 4}
</literalinclude>
</rust>
<node>
<literalinclude>
{"path": "../../bindings/node/examples/documentation/quicktour.test.ts",
"language": "js",
"start-after": "START print_attention_mask",
"end-before": "END print_attention_mask",
"dedent": 8}
</literalinclude>
</node>
</tokenizerslangcontent>
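Roughly, in Python (the exact padded tokens depend on your trained vocabulary):

```python
output = tokenizer.encode_batch(["Hello, y'all!", "How are you 😁 ?"])

# Whichever sentence is shorter gets padded with "[PAD]" up to the length of the longer one
print(output[1].tokens)
# ... and its attention mask is 0 at the padded positions, 1 everywhere else
print(output[1].attention_mask)
```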
## Pretrained
<tokenizerslangcontent>
<python>
### Using a pretrained tokenizer
You can load any tokenizer from the Hugging Face Hub as long as a
`tokenizer.json` file is available in the repository.
```python
from tokenizers import Tokenizer
tokenizer = Tokenizer.from_pretrained("bert-base-uncased")
```
### Importing a pretrained tokenizer from legacy vocabulary files
You can also import a pretrained tokenizer directly, as long as you
have its vocabulary file. For instance, here is how to import the
classic pretrained BERT tokenizer:
```python
from tokenizers import BertWordPieceTokenizer
tokenizer = BertWordPieceTokenizer("bert-base-uncased-vocab.txt", lowercase=True)
```
as long as you have downloaded the file `bert-base-uncased-vocab.txt` with
```bash
wget https://s3.amazonaws.com/models.huggingface.co/bert/bert-base-uncased-vocab.txt
```
</python>
</tokenizerslangcontent> | 0 |
hf_public_repos/tokenizers/docs | hf_public_repos/tokenizers/docs/source-doc-builder/training_from_memory.mdx | # Training from memory
In the [Quicktour](quicktour), we saw how to build and train a
tokenizer using text files, but we can actually use any Python Iterator.
In this section we'll see a few different ways of training our
tokenizer.
For all the examples listed below, we'll use the same [`~tokenizers.Tokenizer`] and
[`~tokenizers.trainers.Trainer`], built as
follows:
<literalinclude>
{"path": "../../bindings/python/tests/documentation/test_tutorial_train_from_iterators.py",
"language": "python",
"start-after": "START init_tokenizer_trainer",
"end-before": "END init_tokenizer_trainer",
"dedent": 8}
</literalinclude>
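If the included snippet is not visible, here is a rough Python sketch of such a setup; the trainer arguments (vocabulary size, special tokens) are illustrative choices, not requirements:

```python
from tokenizers import Tokenizer, decoders, models, normalizers, pre_tokenizers, trainers

tokenizer = Tokenizer(models.Unigram())
tokenizer.normalizer = normalizers.NFKC()
tokenizer.pre_tokenizer = pre_tokenizers.ByteLevel()
tokenizer.decoder = decoders.ByteLevel()

trainer = trainers.UnigramTrainer(
    vocab_size=20000,
    initial_alphabet=pre_tokenizers.ByteLevel.alphabet(),
    special_tokens=["<PAD>", "<BOS>", "<EOS>"],
)
```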
This tokenizer is based on the [`~tokenizers.models.Unigram`] model. It
takes care of normalizing the input using the NFKC Unicode normalization
method, and uses a [`~tokenizers.pre_tokenizers.ByteLevel`] pre-tokenizer with the corresponding decoder.
For more information on the components used here, you can check
[here](components).
## The most basic way
As you probably guessed already, the easiest way to train our tokenizer
is by using a `List`:
<literalinclude>
{"path": "../../bindings/python/tests/documentation/test_tutorial_train_from_iterators.py",
"language": "python",
"start-after": "START train_basic",
"end-before": "END train_basic",
"dedent": 8}
</literalinclude>
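A rough sketch of what that looks like (the sentences are placeholders):

```python
data = [
    "Beautiful is better than ugly.",
    "Explicit is better than implicit.",
    "Simple is better than complex.",
]
tokenizer.train_from_iterator(data, trainer=trainer)
```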
Easy, right? You can use anything working as an iterator here, be it a
`List`, a `Tuple`, or a `np.Array`. Anything
works as long as it provides strings.
## Using the 🤗 Datasets library
An awesome way to access one of the many datasets that exist out there
is by using the 🤗 Datasets library. For more information about it, you
should check [the official documentation
here](https://huggingface.co/docs/datasets/).
Let's start by loading our dataset:
<literalinclude>
{"path": "../../bindings/python/tests/documentation/test_tutorial_train_from_iterators.py",
"language": "python",
"start-after": "START load_dataset",
"end-before": "END load_dataset",
"dedent": 8}
</literalinclude>
The next step is to build an iterator over this dataset. The easiest way
to do this is probably by using a generator:
<literalinclude>
{"path": "../../bindings/python/tests/documentation/test_tutorial_train_from_iterators.py",
"language": "python",
"start-after": "START def_batch_iterator",
"end-before": "END def_batch_iterator",
"dedent": 8}
</literalinclude>
As you can see here, for improved efficiency we can actually provide a
batch of examples to train on, instead of iterating over them one by
one. By doing so, we can expect performance very similar to what we
got while training directly from files.
With our iterator ready, we just need to launch the training. In order
to improve the look of our progress bars, we can specify the total
length of the dataset:
<literalinclude>
{"path": "../../bindings/python/tests/documentation/test_tutorial_train_from_iterators.py",
"language": "python",
"start-after": "START train_datasets",
"end-before": "END train_datasets",
"dedent": 8}
</literalinclude>
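Putting the three previous snippets together, a rough Python sketch (the dataset name and column follow the wikitext example; adapt them to your own data):

```python
import datasets

dataset = datasets.load_dataset("wikitext", "wikitext-103-raw-v1", split="train+test+validation")

def batch_iterator(batch_size=1000):
    # Yield slices of the "text" column instead of single rows
    for i in range(0, len(dataset), batch_size):
        yield dataset[i : i + batch_size]["text"]

tokenizer.train_from_iterator(batch_iterator(), trainer=trainer, length=len(dataset))
```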
And that's it!
## Using gzip files
Since gzip files in Python can be used as iterators, it is extremely
simple to train on such files:
<literalinclude>
{"path": "../../bindings/python/tests/documentation/test_tutorial_train_from_iterators.py",
"language": "python",
"start-after": "START single_gzip",
"end-before": "END single_gzip",
"dedent": 8}
</literalinclude>
Now if we wanted to train from multiple gzip files, it wouldn't be much
harder:
<literalinclude>
{"path": "../../bindings/python/tests/documentation/test_tutorial_train_from_iterators.py",
"language": "python",
"start-after": "START multi_gzip",
"end-before": "END multi_gzip",
"dedent": 8}
</literalinclude>
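A rough sketch covering both cases (the file names are placeholders):

```python
import gzip

# A single gzip file opened in text mode can be iterated line by line
with gzip.open("data/my-file.0.gz", "rt") as f:
    tokenizer.train_from_iterator(f, trainer=trainer)

# For several files, chain the per-file line iterators into one generator
files = ["data/my-file.0.gz", "data/my-file.1.gz", "data/my-file.2.gz"]

def gzip_lines(paths):
    for path in paths:
        with gzip.open(path, "rt") as f:
            yield from f

tokenizer.train_from_iterator(gzip_lines(files), trainer=trainer)
```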
And voilà!
| 0 |
hf_public_repos/tokenizers/docs | hf_public_repos/tokenizers/docs/source-doc-builder/installation.mdx | # Installation
<tokenizerslangcontent>
<python>
🤗 Tokenizers is tested on Python 3.5+.
You should install 🤗 Tokenizers in a [virtual environment](https://docs.python.org/3/library/venv.html). If you're
unfamiliar with Python virtual environments, check out the [user
guide](https://packaging.python.org/guides/installing-using-pip-and-virtual-environments/).
Create a virtual environment with the version of Python you're going to
use and activate it.
## Installation with pip
🤗 Tokenizers can be installed using pip as follows:
```bash
pip install tokenizers
```
## Installation from sources
To use this method, you need to have the Rust language installed. You
can follow [the official
guide](https://www.rust-lang.org/learn/get-started) for more
information.
If you are using a Unix-based OS, the installation should be as simple
as running:
```bash
curl --proto '=https' --tlsv1.2 -sSf https://sh.rustup.rs | sh
```
Or you can easily update it with the following command:
```bash
rustup update
```
Once Rust is installed, we can start retrieving the sources for 🤗
Tokenizers:
```bash
git clone https://github.com/huggingface/tokenizers
```
Then we go into the python bindings folder:
```bash
cd tokenizers/bindings/python
```
At this point you should have your virtual environment already
activated. In order to compile 🤗 Tokenizers, you need to run:
```bash
pip install -e .
```
</python>
<rust>
## Crates.io
🤗 Tokenizers is available on [crates.io](https://crates.io/crates/tokenizers).
You just need to add it to your `Cargo.toml`:
```bash
cargo add tokenizers
```
</rust>
<node>
## Installation with npm
You can simply install 🤗 Tokenizers with npm using:
```bash
npm install tokenizers
```
</node>
</tokenizerslangcontent>
| 0 |
hf_public_repos/tokenizers/docs | hf_public_repos/tokenizers/docs/source-doc-builder/index.mdx | <!-- DISABLE-FRONTMATTER-SECTIONS -->
# Tokenizers
Fast, state-of-the-art tokenizers, optimized for both research and
production
[🤗 Tokenizers](https://github.com/huggingface/tokenizers) provides an
implementation of today's most used tokenizers, with a focus on
performance and versatility. These tokenizers are also used in [🤗 Transformers](https://github.com/huggingface/transformers).
# Main features:
- Train new vocabularies and tokenize, using today's most used tokenizers.
- Extremely fast (both training and tokenization), thanks to the Rust implementation. Takes less than 20 seconds to tokenize a GB of text on a server's CPU.
- Easy to use, but also extremely versatile.
- Designed for both research and production.
- Full alignment tracking. Even with destructive normalization, it's always possible to get the part of the original sentence that corresponds to any token.
- Does all the pre-processing: Truncation, Padding, add the special tokens your model needs.
| 0 |
hf_public_repos/tokenizers/docs | hf_public_repos/tokenizers/docs/source-doc-builder/components.mdx | # Components
When building a Tokenizer, you can attach various types of components to
this Tokenizer in order to customize its behavior. This page lists most
provided components.
## Normalizers
A `Normalizer` is in charge of pre-processing the input string in order
to normalize it as relevant for a given use case. Some common examples
of normalization are the Unicode normalization algorithms (NFD, NFKD,
NFC & NFKC), lowercasing, etc. The specificity of `tokenizers` is that
we keep track of the alignment while normalizing. This is essential to
allow mapping from the generated tokens back to the input text.
The `Normalizer` is optional.
<tokenizerslangcontent>
<python>
| Name | Description | Example |
| :--- | :--- | :--- |
| NFD | NFD unicode normalization | |
| NFKD | NFKD unicode normalization | |
| NFC | NFC unicode normalization | |
| NFKC | NFKC unicode normalization | |
| Lowercase | Replaces all uppercase characters with lowercase | Input: `HELLO ὈΔΥΣΣΕΎΣ` <br> Output: `hello ὀδυσσεύς` |
| Strip | Removes all whitespace characters on the specified sides (left, right or both) of the input | Input: `" hi "` <br> Output: `"hi"` |
| StripAccents | Removes all accent symbols in unicode (to be used with NFD for consistency) | Input: `é` <br> Output: `e` |
| Replace | Replaces a custom string or regexp with the given content | `Replace("a", "e")` will behave like this: <br> Input: `"banana"` <br> Output: `"benene"` |
| BertNormalizer | Provides an implementation of the Normalizer used in the original BERT. Options that can be set are: <ul> <li>clean_text</li> <li>handle_chinese_chars</li> <li>strip_accents</li> <li>lowercase</li> </ul> | |
| Sequence | Composes multiple normalizers that will run in the provided order | `Sequence([NFKC(), Lowercase()])` |
</python>
<rust>
| Name | Description | Example |
| :--- | :--- | :--- |
| NFD | NFD unicode normalization | |
| NFKD | NFKD unicode normalization | |
| NFC | NFC unicode normalization | |
| NFKC | NFKC unicode normalization | |
| Lowercase | Replaces all uppercase characters with lowercase | Input: `HELLO ὈΔΥΣΣΕΎΣ` <br> Output: `hello ὀδυσσεύς` |
| Strip | Removes all whitespace characters on the specified sides (left, right or both) of the input | Input: `" hi "` <br> Output: `"hi"` |
| StripAccents | Removes all accent symbols in unicode (to be used with NFD for consistency) | Input: `é` <br> Output: `e` |
| Replace | Replaces a custom string or regexp with the given content | `Replace("a", "e")` will behave like this: <br> Input: `"banana"` <br> Output: `"benene"` |
| BertNormalizer | Provides an implementation of the Normalizer used in the original BERT. Options that can be set are: <ul> <li>clean_text</li> <li>handle_chinese_chars</li> <li>strip_accents</li> <li>lowercase</li> </ul> | |
| Sequence | Composes multiple normalizers that will run in the provided order | `Sequence::new(vec![NFKC, Lowercase])` |
</rust>
<node>
| Name | Description | Example |
| :--- | :--- | :--- |
| NFD | NFD unicode normalization | |
| NFKD | NFKD unicode normalization | |
| NFC | NFC unicode normalization | |
| NFKC | NFKC unicode normalization | |
| Lowercase | Replaces all uppercase characters with lowercase | Input: `HELLO ὈΔΥΣΣΕΎΣ` <br> Output: `hello ὀδυσσεύς` |
| Strip | Removes all whitespace characters on the specified sides (left, right or both) of the input | Input: `" hi "` <br> Output: `"hi"` |
| StripAccents | Removes all accent symbols in unicode (to be used with NFD for consistency) | Input: `é` <br> Output: `e` |
| Replace | Replaces a custom string or regexp with the given content | `Replace("a", "e")` will behave like this: <br> Input: `"banana"` <br> Output: `"benene"` |
| BertNormalizer | Provides an implementation of the Normalizer used in the original BERT. Options that can be set are: <ul> <li>cleanText</li> <li>handleChineseChars</li> <li>stripAccents</li> <li>lowercase</li> </ul> | |
| Sequence | Composes multiple normalizers that will run in the provided order | |
</node>
</tokenizerslangcontent>
## Pre-tokenizers
The `PreTokenizer` takes care of splitting the input according to a set
of rules. This pre-processing lets you ensure that the underlying
`Model` does not build tokens across multiple "splits". For example if
you don't want to have whitespaces inside a token, then you can have a
`PreTokenizer` that splits on these whitespaces.
You can easily combine multiple `PreTokenizer` together using a
`Sequence` (see below). The `PreTokenizer` is also allowed to modify the
string, just like a `Normalizer` does. This is necessary to allow some
complicated algorithms that require splitting before normalizing (e.g.
the ByteLevel).
<tokenizerslangcontent>
<python>
| Name | Description | Example |
| :--- | :--- | :--- |
| ByteLevel | Splits on whitespaces while remapping all the bytes to a set of visible characters. This technique has been introduced by OpenAI with GPT-2 and has some more or less nice properties: <ul> <li>Since it maps on bytes, a tokenizer using this only requires **256** characters as initial alphabet (the number of values a byte can have), as opposed to the 130,000+ Unicode characters.</li> <li>A consequence of the previous point is that it is absolutely unnecessary to have an unknown token using this since we can represent anything with 256 tokens (Youhou!! 🎉🎉)</li> <li>For non-ASCII characters, it gets completely unreadable, but it works nonetheless!</li> </ul> | Input: `"Hello my friend, how are you?"` <br> Output: `"Hello", "Ġmy", "Ġfriend", ",", "Ġhow", "Ġare", "Ġyou", "?"` |
| Whitespace | Splits on word boundaries (using the following regular expression: `\w+|[^\w\s]+`) | Input: `"Hello there!"` <br> Output: `"Hello", "there", "!"` |
| WhitespaceSplit | Splits on any whitespace character | Input: `"Hello there!"` <br> Output: `"Hello", "there!"` |
| Punctuation | Will isolate all punctuation characters | Input: `"Hello?"` <br> Output: `"Hello", "?"` |
| Metaspace | Splits on whitespaces and replaces them with a special char “▁” (U+2581) | Input: `"Hello there"` <br> Output: `"Hello", "▁there"` |
| CharDelimiterSplit | Splits on a given character | Example with `x`: <br> Input: `"Helloxthere"` <br> Output: `"Hello", "there"` |
| Digits | Splits the numbers from any other characters. | Input: `"Hello123there"` <br> Output: ``"Hello", "123", "there"`` |
| Split | Versatile pre-tokenizer that splits on provided pattern and according to provided behavior. The pattern can be inverted if necessary. <ul> <li>pattern should be either a custom string or regexp.</li> <li>behavior should be one of: <ul><li>removed</li><li>isolated</li><li>merged_with_previous</li><li>merged_with_next</li><li>contiguous</li></ul></li> <li>invert should be a boolean flag.</li> </ul> | Example with pattern = ` `, behavior = `"isolated"`, invert = `False`: <br> Input: `"Hello, how are you?"` <br> Output: `"Hello,", " ", "how", " ", "are", " ", "you?"` |
| Sequence | Lets you compose multiple `PreTokenizer` that will be run in the given order | `Sequence([Punctuation(), WhitespaceSplit()])` |
</python>
<rust>
| Name | Description | Example |
| :--- | :--- | :--- |
| ByteLevel | Splits on whitespaces while remapping all the bytes to a set of visible characters. This technique has been introduced by OpenAI with GPT-2 and has some more or less nice properties: <ul> <li>Since it maps on bytes, a tokenizer using this only requires **256** characters as initial alphabet (the number of values a byte can have), as opposed to the 130,000+ Unicode characters.</li> <li>A consequence of the previous point is that it is absolutely unnecessary to have an unknown token using this since we can represent anything with 256 tokens (Youhou!! 🎉🎉)</li> <li>For non-ASCII characters, it gets completely unreadable, but it works nonetheless!</li> </ul> | Input: `"Hello my friend, how are you?"` <br> Output: `"Hello", "Ġmy", "Ġfriend", ",", "Ġhow", "Ġare", "Ġyou", "?"` |
| Whitespace | Splits on word boundaries (using the following regular expression: `\w+|[^\w\s]+`) | Input: `"Hello there!"` <br> Output: `"Hello", "there", "!"` |
| WhitespaceSplit | Splits on any whitespace character | Input: `"Hello there!"` <br> Output: `"Hello", "there!"` |
| Punctuation | Will isolate all punctuation characters | Input: `"Hello?"` <br> Output: `"Hello", "?"` |
| Metaspace | Splits on whitespaces and replaces them with a special char “▁” (U+2581) | Input: `"Hello there"` <br> Output: `"Hello", "▁there"` |
| CharDelimiterSplit | Splits on a given character | Example with `x`: <br> Input: `"Helloxthere"` <br> Output: `"Hello", "there"` |
| Digits | Splits the numbers from any other characters. | Input: `"Hello123there"` <br> Output: ``"Hello", "123", "there"`` |
| Split | Versatile pre-tokenizer that splits on provided pattern and according to provided behavior. The pattern can be inverted if necessary. <ul> <li>pattern should be either a custom string or regexp.</li> <li>behavior should be one of: <ul><li>Removed</li><li>Isolated</li><li>MergedWithPrevious</li><li>MergedWithNext</li><li>Contiguous</li></ul></li> <li>invert should be a boolean flag.</li> </ul> | Example with pattern = ` `, behavior = `"isolated"`, invert = `False`: <br> Input: `"Hello, how are you?"` <br> Output: `"Hello,", " ", "how", " ", "are", " ", "you?"` |
| Sequence | Lets you compose multiple `PreTokenizer` that will be run in the given order | `Sequence::new(vec![Punctuation, WhitespaceSplit])` |
</rust>
<node>
| Name | Description | Example |
| :--- | :--- | :--- |
| ByteLevel | Splits on whitespaces while remapping all the bytes to a set of visible characters. This technique has been introduced by OpenAI with GPT-2 and has some more or less nice properties: <ul> <li>Since it maps on bytes, a tokenizer using this only requires **256** characters as initial alphabet (the number of values a byte can have), as opposed to the 130,000+ Unicode characters.</li> <li>A consequence of the previous point is that it is absolutely unnecessary to have an unknown token using this since we can represent anything with 256 tokens (Youhou!! 🎉🎉)</li> <li>For non-ASCII characters, it gets completely unreadable, but it works nonetheless!</li> </ul> | Input: `"Hello my friend, how are you?"` <br> Output: `"Hello", "Ġmy", "Ġfriend", ",", "Ġhow", "Ġare", "Ġyou", "?"` |
| Whitespace | Splits on word boundaries (using the following regular expression: `\w+|[^\w\s]+`) | Input: `"Hello there!"` <br> Output: `"Hello", "there", "!"` |
| WhitespaceSplit | Splits on any whitespace character | Input: `"Hello there!"` <br> Output: `"Hello", "there!"` |
| Punctuation | Will isolate all punctuation characters | Input: `"Hello?"` <br> Output: `"Hello", "?"` |
| Metaspace | Splits on whitespaces and replaces them with a special char “▁” (U+2581) | Input: `"Hello there"` <br> Output: `"Hello", "▁there"` |
| CharDelimiterSplit | Splits on a given character | Example with `x`: <br> Input: `"Helloxthere"` <br> Output: `"Hello", "there"` |
| Digits | Splits the numbers from any other characters. | Input: `"Hello123there"` <br> Output: ``"Hello", "123", "there"`` |
| Split | Versatile pre-tokenizer that splits on provided pattern and according to provided behavior. The pattern can be inverted if necessary. <ul> <li>pattern should be either a custom string or regexp.</li> <li>behavior should be one of: <ul><li>removed</li><li>isolated</li><li>mergedWithPrevious</li><li>mergedWithNext</li><li>contiguous</li></ul></li> <li>invert should be a boolean flag.</li> </ul> | Example with pattern = ` `, behavior = `"isolated"`, invert = `False`: <br> Input: `"Hello, how are you?"` <br> Output: `"Hello,", " ", "how", " ", "are", " ", "you?"` |
| Sequence | Lets you compose multiple `PreTokenizer` that will be run in the given order | |
</node>
</tokenizerslangcontent>
## Models
Models are the core algorithms used to actually tokenize, and therefore,
they are the only mandatory component of a Tokenizer.
| Name | Description |
| :--- | :--- |
| WordLevel | This is the “classic” tokenization algorithm. It lets you simply map words to IDs without anything fancy. This has the advantage of being really simple to use and understand, but it requires extremely large vocabularies for good coverage. Using this `Model` requires the use of a `PreTokenizer`. No choice will be made by this model directly; it simply maps input tokens to IDs. |
| BPE | One of the most popular subword tokenization algorithms. The Byte-Pair-Encoding works by starting with characters, while merging those that are the most frequently seen together, thus creating new tokens. It then works iteratively to build new tokens out of the most frequent pairs it sees in a corpus. BPE is able to build words it has never seen by using multiple subword tokens, and thus requires smaller vocabularies, with fewer chances of having “unk” (unknown) tokens. |
| WordPiece | This is a subword tokenization algorithm quite similar to BPE, used mainly by Google in models like BERT. It uses a greedy algorithm that tries to build long words first, splitting into multiple tokens when entire words don’t exist in the vocabulary. This is different from BPE, which starts from characters and builds tokens that are as big as possible. It uses the famous `##` prefix to identify tokens that are part of a word (i.e. not starting a word). |
| Unigram | Unigram is also a subword tokenization algorithm, and works by trying to identify the best set of subword tokens to maximize the probability for a given sentence. This is different from BPE in the way that this is not deterministic based on a set of rules applied sequentially. Instead Unigram will be able to compute multiple ways of tokenizing, while choosing the most probable one. |
## Post-Processors
After the whole pipeline, we sometimes want to insert some special
tokens before feeding a tokenized string into a model, like "[CLS] My
horse is amazing [SEP]". The `PostProcessor` is the component doing
just that.
| Name | Description | Example |
| :--- | :--- | :--- |
| TemplateProcessing | Lets you easily template the post-processing, adding special tokens, and specifying the `type_id` for each sequence/special token. The template is given two strings representing the single sequence and the pair of sequences, as well as a set of special tokens to use. | Example, when specifying a template with these values:<br> <ul> <li> single: `"[CLS] $A [SEP]"` </li> <li> pair: `"[CLS] $A [SEP] $B [SEP]"` </li> <li> special tokens: <ul> <li>`"[CLS]"`</li> <li>`"[SEP]"`</li> </ul> </li> </ul> <br> Input: `("I like this", "but not this")` <br> Output: `"[CLS] I like this [SEP] but not this [SEP]"` |
## Decoders
The Decoder knows how to go from the IDs used by the Tokenizer, back to
a readable piece of text. Some `Normalizer` and `PreTokenizer` use
special characters or identifiers that need to be reverted for example.
| Name | Description |
| :--- | :--- |
| ByteLevel | Reverts the ByteLevel PreTokenizer. This PreTokenizer encodes at the byte-level, using a set of visible Unicode characters to represent each byte, so we need a Decoder to revert this process and get something readable again. |
| Metaspace | Reverts the Metaspace PreTokenizer. This PreTokenizer uses a special identifier `▁` to identify whitespaces, and so this Decoder helps with decoding these. |
| WordPiece | Reverts the WordPiece Model. This model uses a special identifier `##` for continuing subwords, and so this Decoder helps with decoding these. |
| 0 |
hf_public_repos/tokenizers/docs | hf_public_repos/tokenizers/docs/source-doc-builder/_toctree.yml | - sections:
- local: index
title: 🤗 Tokenizers
- local: quicktour
title: Quicktour
- local: installation
title: Installation
- local: pipeline
title: The tokenization pipeline
- local: components
title: Components
- local: training_from_memory
title: Training from memory
title: Getting started
- sections:
- local: api/input-sequences
title: Input Sequences
- local: api/encode-inputs
title: Encode Inputs
- local: api/tokenizer
title: Tokenizer
- local: api/encoding
title: Encoding
- local: api/added-tokens
title: Added Tokens
- local: api/models
title: Models
- local: api/normalizers
title: Normalizers
- local: api/pre-tokenizers
title: Pre-tokenizers
- local: api/post-processors
title: Post-processors
- local: api/trainers
title: Trainers
- local: api/decoders
title: Decoders
- local: api/visualizer
title: Visualizer
title: API
| 0 |
hf_public_repos/tokenizers/docs | hf_public_repos/tokenizers/docs/source-doc-builder/pipeline.mdx | # The tokenization pipeline
When calling `Tokenizer.encode` or
`Tokenizer.encode_batch`, the input
text(s) go through the following pipeline:
- `normalization`
- `pre-tokenization`
- `model`
- `post-processing`
We'll see in detail what happens during each of those steps, as well as
when you want to [decode](#decoding) some token IDs, and how the 🤗
Tokenizers library allows you to customize each of those steps to your
needs. If you're already familiar with those steps and want to learn by
seeing some code, jump to our BERT from scratch example below.
For the examples that require a `Tokenizer`, we will use the tokenizer
we trained in the [Quicktour](quicktour), which you can load with:
<tokenizerslangcontent>
<python>
<literalinclude>
{"path": "../../bindings/python/tests/documentation/test_pipeline.py",
"language": "python",
"start-after": "START reload_tokenizer",
"end-before": "END reload_tokenizer",
"dedent": 12}
</literalinclude>
</python>
<rust>
<literalinclude>
{"path": "../../tokenizers/tests/documentation.rs",
"language": "rust",
"start-after": "START pipeline_reload_tokenizer",
"end-before": "END pipeline_reload_tokenizer",
"dedent": 4}
</literalinclude>
</rust>
<node>
<literalinclude>
{"path": "../../bindings/node/examples/documentation/pipeline.test.ts",
"language": "js",
"start-after": "START reload_tokenizer",
"end-before": "END reload_tokenizer",
"dedent": 8}
</literalinclude>
</node>
</tokenizerslangcontent>
## Normalization
Normalization is, in a nutshell, a set of operations you apply to a raw
string to make it less random or "cleaner". Common operations include
stripping whitespace, removing accented characters or lowercasing all
text. If you're familiar with [Unicode
normalization](https://unicode.org/reports/tr15), it is also a very
common normalization operation applied in most tokenizers.
Each normalization operation is represented in the 🤗 Tokenizers library
by a `Normalizer`, and you can combine
several of those by using a `normalizers.Sequence`. Here is a normalizer applying NFD Unicode normalization
and removing accents as an example:
<tokenizerslangcontent>
<python>
<literalinclude>
{"path": "../../bindings/python/tests/documentation/test_pipeline.py",
"language": "python",
"start-after": "START setup_normalizer",
"end-before": "END setup_normalizer",
"dedent": 8}
</literalinclude>
</python>
<rust>
<literalinclude>
{"path": "../../tokenizers/tests/documentation.rs",
"language": "rust",
"start-after": "START pipeline_setup_normalizer",
"end-before": "END pipeline_setup_normalizer",
"dedent": 4}
</literalinclude>
</rust>
<node>
<literalinclude>
{"path": "../../bindings/node/examples/documentation/pipeline.test.ts",
"language": "js",
"start-after": "START setup_normalizer",
"end-before": "END setup_normalizer",
"dedent": 8}
</literalinclude>
</node>
</tokenizerslangcontent>
You can manually test that normalizer by applying it to any string:
<tokenizerslangcontent>
<python>
<literalinclude>
{"path": "../../bindings/python/tests/documentation/test_pipeline.py",
"language": "python",
"start-after": "START test_normalizer",
"end-before": "END test_normalizer",
"dedent": 8}
</literalinclude>
</python>
<rust>
<literalinclude>
{"path": "../../tokenizers/tests/documentation.rs",
"language": "rust",
"start-after": "START pipeline_test_normalizer",
"end-before": "END pipeline_test_normalizer",
"dedent": 4}
</literalinclude>
</rust>
<node>
<literalinclude>
{"path": "../../bindings/node/examples/documentation/pipeline.test.ts",
"language": "js",
"start-after": "START test_normalizer",
"end-before": "END test_normalizer",
"dedent": 8}
</literalinclude>
</node>
</tokenizerslangcontent>
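For reference, a Python sketch of building and testing such a normalizer:

```python
from tokenizers import normalizers
from tokenizers.normalizers import NFD, StripAccents

normalizer = normalizers.Sequence([NFD(), StripAccents()])
print(normalizer.normalize_str("Héllò hôw are ü?"))
# Expected to print something like: "Hello how are u?"
```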
When building a `Tokenizer`, you can
customize its normalizer by just changing the corresponding attribute:
<tokenizerslangcontent>
<python>
<literalinclude>
{"path": "../../bindings/python/tests/documentation/test_pipeline.py",
"language": "python",
"start-after": "START replace_normalizer",
"end-before": "END replace_normalizer",
"dedent": 8}
</literalinclude>
</python>
<rust>
<literalinclude>
{"path": "../../tokenizers/tests/documentation.rs",
"language": "rust",
"start-after": "START pipeline_replace_normalizer",
"end-before": "END pipeline_replace_normalizer",
"dedent": 4}
</literalinclude>
</rust>
<node>
<literalinclude>
{"path": "../../bindings/node/examples/documentation/pipeline.test.ts",
"language": "js",
"start-after": "START replace_normalizer",
"end-before": "END replace_normalizer",
"dedent": 8}
</literalinclude>
</node>
</tokenizerslangcontent>
Of course, if you change the way a tokenizer applies normalization, you
should probably retrain it from scratch afterward.
## Pre-Tokenization
Pre-tokenization is the act of splitting a text into smaller objects
that give an upper bound to what your tokens will be at the end of
training. A good way to think of this is that the pre-tokenizer will
split your text into "words" and then, your final tokens will be parts
of those words.
An easy way to pre-tokenize inputs is to split on spaces and
punctuation, which is done by the
`pre_tokenizers.Whitespace`
pre-tokenizer:
<tokenizerslangcontent>
<python>
<literalinclude>
{"path": "../../bindings/python/tests/documentation/test_pipeline.py",
"language": "python",
"start-after": "START setup_pre_tokenizer",
"end-before": "END setup_pre_tokenizer",
"dedent": 8}
</literalinclude>
</python>
<rust>
<literalinclude>
{"path": "../../tokenizers/tests/documentation.rs",
"language": "rust",
"start-after": "START pipeline_setup_pre_tokenizer",
"end-before": "END pipeline_setup_pre_tokenizer",
"dedent": 4}
</literalinclude>
</rust>
<node>
<literalinclude>
{"path": "../../bindings/node/examples/documentation/pipeline.test.ts",
"language": "js",
"start-after": "START setup_pre_tokenizer",
"end-before": "END setup_pre_tokenizer",
"dedent": 8}
</literalinclude>
</node>
</tokenizerslangcontent>
The output is a list of tuples, with each tuple containing one word and
its span in the original sentence (which is used to determine the final
`offsets` of our `Encoding`). Note that splitting on
punctuation will split contractions like `"I'm"` in this example.
You can combine any number of `PreTokenizer`s together. For instance, here is a pre-tokenizer that will
split on space, punctuation and digits, separating numbers in their
individual digits:
<tokenizerslangcontent>
<python>
<literalinclude>
{"path": "../../bindings/python/tests/documentation/test_pipeline.py",
"language": "python",
"start-after": "START combine_pre_tokenizer",
"end-before": "END combine_pre_tokenizer",
"dedent": 8}
</literalinclude>
</python>
<rust>
<literalinclude>
{"path": "../../tokenizers/tests/documentation.rs",
"language": "rust",
"start-after": "START pipeline_combine_pre_tokenizer",
"end-before": "END pipeline_combine_pre_tokenizer",
"dedent": 4}
</literalinclude>
</rust>
<node>
<literalinclude>
{"path": "../../bindings/node/examples/documentation/pipeline.test.ts",
"language": "js",
"start-after": "START combine_pre_tokenizer",
"end-before": "END combine_pre_tokenizer",
"dedent": 8}
</literalinclude>
</node>
</tokenizerslangcontent>
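A Python sketch of such a combination (the input sentence is illustrative):

```python
from tokenizers import pre_tokenizers
from tokenizers.pre_tokenizers import Digits, Whitespace

pre_tokenizer = pre_tokenizers.Sequence([Whitespace(), Digits(individual_digits=True)])
print(pre_tokenizer.pre_tokenize_str("Call 911!"))
# Expected: [("Call", (0, 4)), ("9", (5, 6)), ("1", (6, 7)), ("1", (7, 8)), ("!", (8, 9))]
```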
As we saw in the [Quicktour](quicktour), you can
customize the pre-tokenizer of a `Tokenizer` by just changing the corresponding attribute:
<tokenizerslangcontent>
<python>
<literalinclude>
{"path": "../../bindings/python/tests/documentation/test_pipeline.py",
"language": "python",
"start-after": "START replace_pre_tokenizer",
"end-before": "END replace_pre_tokenizer",
"dedent": 8}
</literalinclude>
</python>
<rust>
<literalinclude>
{"path": "../../tokenizers/tests/documentation.rs",
"language": "rust",
"start-after": "START pipeline_replace_pre_tokenizer",
"end-before": "END pipeline_replace_pre_tokenizer",
"dedent": 4}
</literalinclude>
</rust>
<node>
<literalinclude>
{"path": "../../bindings/node/examples/documentation/pipeline.test.ts",
"language": "js",
"start-after": "START replace_pre_tokenizer",
"end-before": "END replace_pre_tokenizer",
"dedent": 8}
</literalinclude>
</node>
</tokenizerslangcontent>
Of course, if you change the pre-tokenizer, you should probably
retrain your tokenizer from scratch afterward.
## Model
Once the input texts are normalized and pre-tokenized, the
`Tokenizer` applies the model on the
pre-tokens. This is the part of the pipeline that needs training on your
corpus (or that has been trained if you are using a pretrained
tokenizer).
The role of the model is to split your "words" into tokens, using the
rules it has learned. It's also responsible for mapping those tokens to
their corresponding IDs in the vocabulary of the model.
This model is passed along when initializing the
`Tokenizer` so you already know how to
customize this part. Currently, the 🤗 Tokenizers library supports:
- `models.BPE`
- `models.Unigram`
- `models.WordLevel`
- `models.WordPiece`
For more details about each model and its behavior, you can check
[here](components#models)
## Post-Processing
Post-processing is the last step of the tokenization pipeline, to
perform any additional transformation to the
`Encoding` before it's returned, like
adding potential special tokens.
As we saw in the quick tour, we can customize the post processor of a
`Tokenizer` by setting the
corresponding attribute. For instance, here is how we can post-process
to make the inputs suitable for the BERT model:
<tokenizerslangcontent>
<python>
<literalinclude>
{"path": "../../bindings/python/tests/documentation/test_pipeline.py",
"language": "python",
"start-after": "START setup_processor",
"end-before": "END setup_processor",
"dedent": 8}
</literalinclude>
</python>
<rust>
<literalinclude>
{"path": "../../tokenizers/tests/documentation.rs",
"language": "rust",
"start-after": "START pipeline_setup_processor",
"end-before": "END pipeline_setup_processor",
"dedent": 4}
</literalinclude>
</rust>
<node>
<literalinclude>
{"path": "../../bindings/node/examples/documentation/pipeline.test.ts",
"language": "js",
"start-after": "START setup_processor",
"end-before": "END setup_processor",
"dedent": 8}
</literalinclude>
</node>
</tokenizerslangcontent>
Note that, contrary to the pre-tokenizer or the normalizer, you don't
need to retrain a tokenizer after changing its post-processor.
## All together: a BERT tokenizer from scratch
Let's put all those pieces together to build a BERT tokenizer. First,
BERT relies on WordPiece, so we instantiate a new
`Tokenizer` with this model:
<tokenizerslangcontent>
<python>
<literalinclude>
{"path": "../../bindings/python/tests/documentation/test_pipeline.py",
"language": "python",
"start-after": "START bert_setup_tokenizer",
"end-before": "END bert_setup_tokenizer",
"dedent": 8}
</literalinclude>
</python>
<rust>
<literalinclude>
{"path": "../../tokenizers/tests/documentation.rs",
"language": "rust",
"start-after": "START bert_setup_tokenizer",
"end-before": "END bert_setup_tokenizer",
"dedent": 4}
</literalinclude>
</rust>
<node>
<literalinclude>
{"path": "../../bindings/node/examples/documentation/pipeline.test.ts",
"language": "js",
"start-after": "START bert_setup_tokenizer",
"end-before": "END bert_setup_tokenizer",
"dedent": 8}
</literalinclude>
</node>
</tokenizerslangcontent>
Then we know that BERT preprocesses texts by removing accents and
lowercasing. We also use a Unicode normalizer:
<tokenizerslangcontent>
<python>
<literalinclude>
{"path": "../../bindings/python/tests/documentation/test_pipeline.py",
"language": "python",
"start-after": "START bert_setup_normalizer",
"end-before": "END bert_setup_normalizer",
"dedent": 8}
</literalinclude>
</python>
<rust>
<literalinclude>
{"path": "../../tokenizers/tests/documentation.rs",
"language": "rust",
"start-after": "START bert_setup_normalizer",
"end-before": "END bert_setup_normalizer",
"dedent": 4}
</literalinclude>
</rust>
<node>
<literalinclude>
{"path": "../../bindings/node/examples/documentation/pipeline.test.ts",
"language": "js",
"start-after": "START bert_setup_normalizer",
"end-before": "END bert_setup_normalizer",
"dedent": 8}
</literalinclude>
</node>
</tokenizerslangcontent>
The pre-tokenizer is just splitting on whitespace and punctuation:
<tokenizerslangcontent>
<python>
<literalinclude>
{"path": "../../bindings/python/tests/documentation/test_pipeline.py",
"language": "python",
"start-after": "START bert_setup_pre_tokenizer",
"end-before": "END bert_setup_pre_tokenizer",
"dedent": 8}
</literalinclude>
</python>
<rust>
<literalinclude>
{"path": "../../tokenizers/tests/documentation.rs",
"language": "rust",
"start-after": "START bert_setup_pre_tokenizer",
"end-before": "END bert_setup_pre_tokenizer",
"dedent": 4}
</literalinclude>
</rust>
<node>
<literalinclude>
{"path": "../../bindings/node/examples/documentation/pipeline.test.ts",
"language": "js",
"start-after": "START bert_setup_pre_tokenizer",
"end-before": "END bert_setup_pre_tokenizer",
"dedent": 8}
</literalinclude>
</node>
</tokenizerslangcontent>
And the post-processing uses the template we saw in the previous
section:
<tokenizerslangcontent>
<python>
<literalinclude>
{"path": "../../bindings/python/tests/documentation/test_pipeline.py",
"language": "python",
"start-after": "START bert_setup_processor",
"end-before": "END bert_setup_processor",
"dedent": 8}
</literalinclude>
</python>
<rust>
<literalinclude>
{"path": "../../tokenizers/tests/documentation.rs",
"language": "rust",
"start-after": "START bert_setup_processor",
"end-before": "END bert_setup_processor",
"dedent": 4}
</literalinclude>
</rust>
<node>
<literalinclude>
{"path": "../../bindings/node/examples/documentation/pipeline.test.ts",
"language": "js",
"start-after": "START bert_setup_processor",
"end-before": "END bert_setup_processor",
"dedent": 8}
</literalinclude>
</node>
</tokenizerslangcontent>
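Collecting the pieces above, here is a rough Python sketch of the whole BERT-style setup (the special-token IDs 1 and 2 assume the same ordering as in the Quicktour trainer):

```python
from tokenizers import Tokenizer, normalizers
from tokenizers.models import WordPiece
from tokenizers.normalizers import NFD, Lowercase, StripAccents
from tokenizers.pre_tokenizers import Whitespace
from tokenizers.processors import TemplateProcessing

bert_tokenizer = Tokenizer(WordPiece(unk_token="[UNK]"))
bert_tokenizer.normalizer = normalizers.Sequence([NFD(), Lowercase(), StripAccents()])
bert_tokenizer.pre_tokenizer = Whitespace()
bert_tokenizer.post_processor = TemplateProcessing(
    single="[CLS] $A [SEP]",
    pair="[CLS] $A [SEP] $B:1 [SEP]:1",
    special_tokens=[("[CLS]", 1), ("[SEP]", 2)],
)
```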
We can use this tokenizer and train it on wikitext like in the
[Quicktour](quicktour):
<tokenizerslangcontent>
<python>
<literalinclude>
{"path": "../../bindings/python/tests/documentation/test_pipeline.py",
"language": "python",
"start-after": "START bert_train_tokenizer",
"end-before": "END bert_train_tokenizer",
"dedent": 8}
</literalinclude>
</python>
<rust>
<literalinclude>
{"path": "../../tokenizers/tests/documentation.rs",
"language": "rust",
"start-after": "START bert_train_tokenizer",
"end-before": "END bert_train_tokenizer",
"dedent": 4}
</literalinclude>
</rust>
<node>
<literalinclude>
{"path": "../../bindings/node/examples/documentation/pipeline.test.ts",
"language": "js",
"start-after": "START bert_train_tokenizer",
"end-before": "END bert_train_tokenizer",
"dedent": 8}
</literalinclude>
</node>
</tokenizerslangcontent>
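Sketched out in Python, with the wikitext-103 file paths below being an assumption about where you downloaded the data:

```python
from tokenizers.trainers import WordPieceTrainer

trainer = WordPieceTrainer(
    vocab_size=30522,
    special_tokens=["[UNK]", "[CLS]", "[SEP]", "[PAD]", "[MASK]"],
)
# Assumed location of the raw wikitext-103 files.
files = [f"data/wikitext-103-raw/wiki.{split}.raw" for split in ["test", "train", "valid"]]
bert_tokenizer.train(files, trainer)
bert_tokenizer.save("data/bert-wiki.json")
```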
## Decoding
On top of encoding input texts, a `Tokenizer` also has an API for decoding, that is, converting the IDs
generated by your model back into text. This is done by the methods
`Tokenizer.decode` (for one predicted text) and `Tokenizer.decode_batch` (for a batch of predictions).
The `decoder` will first convert the IDs back to tokens
(using the tokenizer's vocabulary), remove all special tokens, and then
join those tokens with spaces:
<tokenizerslangcontent>
<python>
<literalinclude>
{"path": "../../bindings/python/tests/documentation/test_pipeline.py",
"language": "python",
"start-after": "START test_decoding",
"end-before": "END test_decoding",
"dedent": 8}
</literalinclude>
</python>
<rust>
<literalinclude>
{"path": "../../tokenizers/tests/documentation.rs",
"language": "rust",
"start-after": "START pipeline_test_decoding",
"end-before": "END pipeline_test_decoding",
"dedent": 4}
</literalinclude>
</rust>
<node>
<literalinclude>
{"path": "../../bindings/node/examples/documentation/pipeline.test.ts",
"language": "js",
"start-after": "START test_decoding",
"end-before": "END test_decoding",
"dedent": 8}
</literalinclude>
</node>
</tokenizerslangcontent>
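For example, assuming `tokenizer` is the BPE tokenizer trained in the `quicktour`, a round trip looks roughly like:

```python
output = tokenizer.encode("Hello, y'all! How are you 😁 ?")
print(tokenizer.decode(output.ids))
# Something like: "Hello , y ' all ! How are you ?"
```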
If you used a model that adds special characters to represent subtokens
of a given "word" (like the `"##"` in
WordPiece), you will need to customize the `decoder` to treat
them properly. If we take our previous `bert_tokenizer`, for instance, the
default decoding gives:
<tokenizerslangcontent>
<python>
<literalinclude>
{"path": "../../bindings/python/tests/documentation/test_pipeline.py",
"language": "python",
"start-after": "START bert_test_decoding",
"end-before": "END bert_test_decoding",
"dedent": 8}
</literalinclude>
</python>
<rust>
<literalinclude>
{"path": "../../tokenizers/tests/documentation.rs",
"language": "rust",
"start-after": "START bert_test_decoding",
"end-before": "END bert_test_decoding",
"dedent": 4}
</literalinclude>
</rust>
<node>
<literalinclude>
{"path": "../../bindings/node/examples/documentation/pipeline.test.ts",
"language": "js",
"start-after": "START bert_test_decoding",
"end-before": "END bert_test_decoding",
"dedent": 8}
</literalinclude>
</node>
</tokenizerslangcontent>
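A sketch of that default behaviour; the exact tokens depend on the vocabulary you trained, but the `##` continuation markers will show up in the decoded string:

```python
output = bert_tokenizer.encode("Welcome to the 🤗 Tokenizers library.")
print(output.tokens)
# e.g. ["[CLS]", "welcome", "to", "the", "[UNK]", "tok", "##eni", "##zer", "##s", "library", ".", "[SEP]"]
print(bert_tokenizer.decode(output.ids))
# e.g. "welcome to the tok ##eni ##zer ##s library ."
```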
But by changing it to a proper decoder, we get:
<tokenizerslangcontent>
<python>
<literalinclude>
{"path": "../../bindings/python/tests/documentation/test_pipeline.py",
"language": "python",
"start-after": "START bert_proper_decoding",
"end-before": "END bert_proper_decoding",
"dedent": 8}
</literalinclude>
</python>
<rust>
<literalinclude>
{"path": "../../tokenizers/tests/documentation.rs",
"language": "rust",
"start-after": "START bert_proper_decoding",
"end-before": "END bert_proper_decoding",
"dedent": 4}
</literalinclude>
</rust>
<node>
<literalinclude>
{"path": "../../bindings/node/examples/documentation/pipeline.test.ts",
"language": "js",
"start-after": "START bert_proper_decoding",
"end-before": "END bert_proper_decoding",
"dedent": 8}
</literalinclude>
</node>
</tokenizerslangcontent>
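That change is a one-liner in Python: plug in the `WordPiece` decoder so the `##` prefixes are merged back into full words (reusing the `output` from the snippet above):

```python
from tokenizers import decoders

bert_tokenizer.decoder = decoders.WordPiece()
print(bert_tokenizer.decode(output.ids))
# e.g. "welcome to the tokenizers library."
```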
| 0 |
hf_public_repos/tokenizers/docs/source-doc-builder | hf_public_repos/tokenizers/docs/source-doc-builder/api/visualizer.mdx | # Visualizer
<tokenizerslangcontent>
<python>
## Annotation
[[autodoc]] tokenizers.tools.Annotation
## EncodingVisualizer
[[autodoc]] tokenizers.tools.EncodingVisualizer
- __call__
</python>
<rust>
The Rust API Reference is available directly on the [Docs.rs](https://docs.rs/tokenizers/latest/tokenizers/) website.
</rust>
<node>
The node API has not been documented yet.
</node>
</tokenizerslangcontent> | 0 |
hf_public_repos/tokenizers/docs/source-doc-builder | hf_public_repos/tokenizers/docs/source-doc-builder/api/post-processors.mdx | # Post-processors
<tokenizerslangcontent>
<python>
## BertProcessing
[[autodoc]] tokenizers.processors.BertProcessing
## ByteLevel
[[autodoc]] tokenizers.processors.ByteLevel
## RobertaProcessing
[[autodoc]] tokenizers.processors.RobertaProcessing
## TemplateProcessing
[[autodoc]] tokenizers.processors.TemplateProcessing
</python>
<rust>
The Rust API Reference is available directly on the [Docs.rs](https://docs.rs/tokenizers/latest/tokenizers/) website.
</rust>
<node>
The node API has not been documented yet.
</node>
</tokenizerslangcontent> | 0 |
hf_public_repos/tokenizers/docs/source-doc-builder | hf_public_repos/tokenizers/docs/source-doc-builder/api/added-tokens.mdx | # Added Tokens
<tokenizerslangcontent>
<python>
## AddedToken
[[autodoc]] tokenizers.AddedToken
- content
- lstrip
- normalized
- rstrip
- single_word
</python>
<rust>
The Rust API Reference is available directly on the [Docs.rs](https://docs.rs/tokenizers/latest/tokenizers/) website.
</rust>
<node>
The node API has not been documented yet.
</node>
</tokenizerslangcontent> | 0 |
hf_public_repos/tokenizers/docs/source-doc-builder | hf_public_repos/tokenizers/docs/source-doc-builder/api/tokenizer.mdx | # Tokenizer
<tokenizerslangcontent>
<python>
## Tokenizer
[[autodoc]] tokenizers.Tokenizer
- all
- decoder
- model
- normalizer
- padding
- post_processor
- pre_tokenizer
- truncation
</python>
<rust>
The Rust API Reference is available directly on the [Docs.rs](https://docs.rs/tokenizers/latest/tokenizers/) website.
</rust>
<node>
The node API has not been documented yet.
</node>
</tokenizerslangcontent> | 0 |
hf_public_repos/tokenizers/docs/source-doc-builder | hf_public_repos/tokenizers/docs/source-doc-builder/api/models.mdx | # Models
<tokenizerslangcontent>
<python>
## BPE
[[autodoc]] tokenizers.models.BPE
## Model
[[autodoc]] tokenizers.models.Model
## Unigram
[[autodoc]] tokenizers.models.Unigram
## WordLevel
[[autodoc]] tokenizers.models.WordLevel
## WordPiece
[[autodoc]] tokenizers.models.WordPiece
</python>
<rust>
The Rust API Reference is available directly on the [Docs.rs](https://docs.rs/tokenizers/latest/tokenizers/) website.
</rust>
<node>
The node API has not been documented yet.
</node>
</tokenizerslangcontent> | 0 |
hf_public_repos/tokenizers/docs/source-doc-builder | hf_public_repos/tokenizers/docs/source-doc-builder/api/normalizers.mdx | # Normalizers
<tokenizerslangcontent>
<python>
## BertNormalizer
[[autodoc]] tokenizers.normalizers.BertNormalizer
## Lowercase
[[autodoc]] tokenizers.normalizers.Lowercase
## NFC
[[autodoc]] tokenizers.normalizers.NFC
## NFD
[[autodoc]] tokenizers.normalizers.NFD
## NFKC
[[autodoc]] tokenizers.normalizers.NFKC
## NFKD
[[autodoc]] tokenizers.normalizers.NFKD
## Nmt
[[autodoc]] tokenizers.normalizers.Nmt
## Normalizer
[[autodoc]] tokenizers.normalizers.Normalizer
## Precompiled
[[autodoc]] tokenizers.normalizers.Precompiled
## Replace
[[autodoc]] tokenizers.normalizers.Replace
## Sequence
[[autodoc]] tokenizers.normalizers.Sequence
## Strip
[[autodoc]] tokenizers.normalizers.Strip
## StripAccents
[[autodoc]] tokenizers.normalizers.StripAccents
</python>
<rust>
The Rust API Reference is available directly on the [Docs.rs](https://docs.rs/tokenizers/latest/tokenizers/) website.
</rust>
<node>
The node API has not been documented yet.
</node>
</tokenizerslangcontent> | 0 |
hf_public_repos/tokenizers/docs/source-doc-builder | hf_public_repos/tokenizers/docs/source-doc-builder/api/decoders.mdx | # Decoders
<tokenizerslangcontent>
<python>
## BPEDecoder
[[autodoc]] tokenizers.decoders.BPEDecoder
## ByteLevel
[[autodoc]] tokenizers.decoders.ByteLevel
## CTC
[[autodoc]] tokenizers.decoders.CTC
## Metaspace
[[autodoc]] tokenizers.decoders.Metaspace
## WordPiece
[[autodoc]] tokenizers.decoders.WordPiece
</python>
<rust>
The Rust API Reference is available directly on the [Docs.rs](https://docs.rs/tokenizers/latest/tokenizers/) website.
</rust>
<node>
The node API has not been documented yet.
</node>
</tokenizerslangcontent> | 0 |
hf_public_repos/tokenizers/docs/source-doc-builder | hf_public_repos/tokenizers/docs/source-doc-builder/api/encode-inputs.mdx | # Encode Inputs
<tokenizerslangcontent>
<python>
These types represent all the different kinds of input that a [`~tokenizers.Tokenizer`] accepts
when using [`~tokenizers.Tokenizer.encode_batch`].
## TextEncodeInput[[[[tokenizers.TextEncodeInput]]]]
<code>tokenizers.TextEncodeInput</code>
Represents a textual input for encoding. Can be either:
- A single sequence: [TextInputSequence](/docs/tokenizers/api/input-sequences#tokenizers.TextInputSequence)
- A pair of sequences:
- A Tuple of [TextInputSequence](/docs/tokenizers/api/input-sequences#tokenizers.TextInputSequence)
- Or a List of [TextInputSequence](/docs/tokenizers/api/input-sequences#tokenizers.TextInputSequence) of size 2
alias of `Union[str, Tuple[str, str], List[str]]`.
## PreTokenizedEncodeInput[[[[tokenizers.PreTokenizedEncodeInput]]]]
<code>tokenizers.PreTokenizedEncodeInput</code>
Represents a pre-tokenized input for encoding. Can be either:
- A single sequence: [PreTokenizedInputSequence](/docs/tokenizers/api/input-sequences#tokenizers.PreTokenizedInputSequence)
- A pair of sequences:
- A Tuple of [PreTokenizedInputSequence](/docs/tokenizers/api/input-sequences#tokenizers.PreTokenizedInputSequence)
- Or a List of [PreTokenizedInputSequence](/docs/tokenizers/api/input-sequences#tokenizers.PreTokenizedInputSequence) of size 2
alias of `Union[List[str], Tuple[str], Tuple[Union[List[str], Tuple[str]], Union[List[str], Tuple[str]]], List[Union[List[str], Tuple[str]]]]`.
## EncodeInput[[[[tokenizers.EncodeInput]]]]
<code>tokenizers.EncodeInput</code>
Represents all the possible types of input for encoding. Can be:
- When `is_pretokenized=False`: [TextEncodeInput](#tokenizers.TextEncodeInput)
- When `is_pretokenized=True`: [PreTokenizedEncodeInput](#tokenizers.PreTokenizedEncodeInput)
alias of `Union[str, Tuple[str, str], List[str], Tuple[str], Tuple[Union[List[str], Tuple[str]], Union[List[str], Tuple[str]]], List[Union[List[str], Tuple[str]]]]`.
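As an illustration (assuming `tokenizer` is an already-built [`~tokenizers.Tokenizer`]), both kinds of input go through [`~tokenizers.Tokenizer.encode_batch`]:

```python
# Raw text: single sequences and pairs can be mixed in one batch.
tokenizer.encode_batch(["Hello there!", ("A question?", "An answer.")])

# Pre-tokenized input: pass lists of words and set is_pretokenized=True.
tokenizer.encode_batch([["Hello", "there", "!"], ["General", "Kenobi"]], is_pretokenized=True)
```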
</python>
<rust>
The Rust API Reference is available directly on the [Docs.rs](https://docs.rs/tokenizers/latest/tokenizers/) website.
</rust>
<node>
The node API has not been documented yet.
</node>
</tokenizerslangcontent> | 0 |
hf_public_repos/tokenizers/docs/source-doc-builder | hf_public_repos/tokenizers/docs/source-doc-builder/api/trainers.mdx | # Trainers
<tokenizerslangcontent>
<python>
## BpeTrainer
[[autodoc]] tokenizers.trainers.BpeTrainer
## UnigramTrainer
[[autodoc]] tokenizers.trainers.UnigramTrainer
## WordLevelTrainer
[[autodoc]] tokenizers.trainers.WordLevelTrainer
## WordPieceTrainer
[[autodoc]] tokenizers.trainers.WordPieceTrainer
</python>
<rust>
The Rust API Reference is available directly on the [Docs.rs](https://docs.rs/tokenizers/latest/tokenizers/) website.
</rust>
<node>
The node API has not been documented yet.
</node>
</tokenizerslangcontent> | 0 |
hf_public_repos/tokenizers/docs/source-doc-builder | hf_public_repos/tokenizers/docs/source-doc-builder/api/pre-tokenizers.mdx | # Pre-tokenizers
<tokenizerslangcontent>
<python>
## BertPreTokenizer
[[autodoc]] tokenizers.pre_tokenizers.BertPreTokenizer
## ByteLevel
[[autodoc]] tokenizers.pre_tokenizers.ByteLevel
## CharDelimiterSplit
[[autodoc]] tokenizers.pre_tokenizers.CharDelimiterSplit
## Digits
[[autodoc]] tokenizers.pre_tokenizers.Digits
## Metaspace
[[autodoc]] tokenizers.pre_tokenizers.Metaspace
## PreTokenizer
[[autodoc]] tokenizers.pre_tokenizers.PreTokenizer
## Punctuation
[[autodoc]] tokenizers.pre_tokenizers.Punctuation
## Sequence
[[autodoc]] tokenizers.pre_tokenizers.Sequence
## Split
[[autodoc]] tokenizers.pre_tokenizers.Split
## UnicodeScripts
[[autodoc]] tokenizers.pre_tokenizers.UnicodeScripts
## Whitespace
[[autodoc]] tokenizers.pre_tokenizers.Whitespace
## WhitespaceSplit
[[autodoc]] tokenizers.pre_tokenizers.WhitespaceSplit
</python>
<rust>
The Rust API Reference is available directly on the [Docs.rs](https://docs.rs/tokenizers/latest/tokenizers/) website.
</rust>
<node>
The node API has not been documented yet.
</node>
</tokenizerslangcontent> | 0 |
hf_public_repos/tokenizers/docs/source-doc-builder | hf_public_repos/tokenizers/docs/source-doc-builder/api/encoding.mdx | # Encoding
<tokenizerslangcontent>
<python>
## Encoding
[[autodoc]] tokenizers.Encoding
- all
- attention_mask
- ids
- n_sequences
- offsets
- overflowing
- sequence_ids
- special_tokens_mask
- tokens
- type_ids
- word_ids
- words
</python>
<rust>
The Rust API Reference is available directly on the [Docs.rs](https://docs.rs/tokenizers/latest/tokenizers/) website.
</rust>
<node>
The node API has not been documented yet.
</node>
</tokenizerslangcontent> | 0 |
hf_public_repos/tokenizers/docs/source-doc-builder | hf_public_repos/tokenizers/docs/source-doc-builder/api/input-sequences.mdx | # Input Sequences
<tokenizerslangcontent>
<python>
These types represent all the different kinds of sequence that can be used as input of a Tokenizer.
In general, any sequence can be either a string or a list of strings, depending on the operating
mode of the tokenizer: `raw text` vs `pre-tokenized`.
## TextInputSequence[[tokenizers.TextInputSequence]]
<code>tokenizers.TextInputSequence</code>
A `str` that represents an input sequence
## PreTokenizedInputSequence[[tokenizers.PreTokenizedInputSequence]]
<code>tokenizers.PreTokenizedInputSequence</code>
A pre-tokenized input sequence. Can be one of:
- A `List` of `str`
- A `Tuple` of `str`
alias of `Union[List[str], Tuple[str]]`.
## InputSequence[[tokenizers.InputSequence]]
<code>tokenizers.InputSequence</code>
Represents all the possible types of input sequences for encoding. Can be:
- When `is_pretokenized=False`: [TextInputSequence](#tokenizers.TextInputSequence)
- When `is_pretokenized=True`: [PreTokenizedInputSequence](#tokenizers.PreTokenizedInputSequence)
alias of `Union[str, List[str], Tuple[str]]`.
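For example (assuming `tokenizer` is an already-built [`~tokenizers.Tokenizer`]):

```python
# Raw text mode: a single string.
tokenizer.encode("Hello, how are you?")

# Pre-tokenized mode: a list (or tuple) of words, with is_pretokenized=True.
tokenizer.encode(["Hello", ",", "how", "are", "you", "?"], is_pretokenized=True)
```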
</python>
<rust>
The Rust API Reference is available directly on the [Docs.rs](https://docs.rs/tokenizers/latest/tokenizers/) website.
</rust>
<node>
The node API has not been documented yet.
</node>
</tokenizerslangcontent> | 0 |
hf_public_repos | hf_public_repos/text-generation-inference/.dockerignore | aml
target
server/transformers
server/flash-attention
| 0 |
hf_public_repos | hf_public_repos/text-generation-inference/update_doc.py | import subprocess
import argparse
def main():
parser = argparse.ArgumentParser()
parser.add_argument("--check", action="store_true")
args = parser.parse_args()
output = subprocess.check_output(["text-generation-launcher", "--help"]).decode(
"utf-8"
)
wrap_code_blocks_flag = "<!-- WRAP CODE BLOCKS -->"
final_doc = f"# Text-generation-launcher arguments\n\n{wrap_code_blocks_flag}\n\n"
lines = output.split("\n")
header = ""
block = []
for line in lines:
        if line.startswith("  -") or line.startswith("    -"):
rendered_block = "\n".join(block)
if header:
final_doc += f"## {header}\n```shell\n{rendered_block}\n```\n"
else:
final_doc += f"```shell\n{rendered_block}\n```\n"
block = []
tokens = line.split("<")
if len(tokens) > 1:
header = tokens[-1][:-1]
else:
header = line.split("--")[-1]
header = header.upper().replace("-", "_")
block.append(line)
rendered_block = "\n".join(block)
final_doc += f"## {header}\n```shell\n{rendered_block}\n```\n"
block = []
filename = "docs/source/basic_tutorials/launcher.md"
if args.check:
with open(filename, "r") as f:
doc = f.read()
if doc != final_doc:
tmp = "launcher.md"
with open(tmp, "w") as g:
g.write(final_doc)
diff = subprocess.run(
["diff", tmp, filename], capture_output=True
).stdout.decode("utf-8")
print(diff)
raise Exception(
"Doc is not up-to-date, run `python update_doc.py` in order to update it"
)
else:
with open(filename, "w") as f:
f.write(final_doc)
if __name__ == "__main__":
main()
| 0 |
hf_public_repos | hf_public_repos/text-generation-inference/Dockerfile | # Rust builder
FROM lukemathwalker/cargo-chef:latest-rust-1.71 AS chef
WORKDIR /usr/src
ARG CARGO_REGISTRIES_CRATES_IO_PROTOCOL=sparse
FROM chef as planner
COPY Cargo.toml Cargo.toml
COPY rust-toolchain.toml rust-toolchain.toml
COPY proto proto
COPY benchmark benchmark
COPY router router
COPY launcher launcher
RUN cargo chef prepare --recipe-path recipe.json
FROM chef AS builder
ARG GIT_SHA
ARG DOCKER_LABEL
RUN PROTOC_ZIP=protoc-21.12-linux-x86_64.zip && \
curl -OL https://github.com/protocolbuffers/protobuf/releases/download/v21.12/$PROTOC_ZIP && \
unzip -o $PROTOC_ZIP -d /usr/local bin/protoc && \
unzip -o $PROTOC_ZIP -d /usr/local 'include/*' && \
rm -f $PROTOC_ZIP
COPY --from=planner /usr/src/recipe.json recipe.json
RUN cargo chef cook --release --recipe-path recipe.json
COPY Cargo.toml Cargo.toml
COPY rust-toolchain.toml rust-toolchain.toml
COPY proto proto
COPY benchmark benchmark
COPY router router
COPY launcher launcher
RUN cargo build --release
# Python builder
# Adapted from: https://github.com/pytorch/pytorch/blob/master/Dockerfile
FROM nvidia/cuda:12.1.0-devel-ubuntu20.04 as pytorch-install
ARG PYTORCH_VERSION=2.1.1
ARG PYTHON_VERSION=3.10
# Keep in sync with `server/pyproject.toml`
ARG CUDA_VERSION=12.1
ARG MAMBA_VERSION=23.3.1-1
ARG CUDA_CHANNEL=nvidia
ARG INSTALL_CHANNEL=pytorch
# Automatically set by buildx
ARG TARGETPLATFORM
ENV PATH /opt/conda/bin:$PATH
RUN apt-get update && DEBIAN_FRONTEND=noninteractive apt-get install -y --no-install-recommends \
build-essential \
ca-certificates \
ccache \
curl \
git && \
rm -rf /var/lib/apt/lists/*
# Install conda
# translating Docker's TARGETPLATFORM into mamba arches
RUN case ${TARGETPLATFORM} in \
"linux/arm64") MAMBA_ARCH=aarch64 ;; \
*) MAMBA_ARCH=x86_64 ;; \
esac && \
curl -fsSL -v -o ~/mambaforge.sh -O "https://github.com/conda-forge/miniforge/releases/download/${MAMBA_VERSION}/Mambaforge-${MAMBA_VERSION}-Linux-${MAMBA_ARCH}.sh"
RUN chmod +x ~/mambaforge.sh && \
bash ~/mambaforge.sh -b -p /opt/conda && \
rm ~/mambaforge.sh
# Install pytorch
# On arm64 we exit with an error code
RUN case ${TARGETPLATFORM} in \
"linux/arm64") exit 1 ;; \
*) /opt/conda/bin/conda update -y conda && \
/opt/conda/bin/conda install -c "${INSTALL_CHANNEL}" -c "${CUDA_CHANNEL}" -y "python=${PYTHON_VERSION}" "pytorch=$PYTORCH_VERSION" "pytorch-cuda=$(echo $CUDA_VERSION | cut -d'.' -f 1-2)" ;; \
esac && \
/opt/conda/bin/conda clean -ya
# CUDA kernels builder image
FROM pytorch-install as kernel-builder
ARG MAX_JOBS=8
RUN apt-get update && DEBIAN_FRONTEND=noninteractive apt-get install -y --no-install-recommends \
ninja-build \
&& rm -rf /var/lib/apt/lists/*
# Build Flash Attention CUDA kernels
FROM kernel-builder as flash-att-builder
WORKDIR /usr/src
COPY server/Makefile-flash-att Makefile
# Build specific version of flash attention
RUN make build-flash-attention
# Build Flash Attention v2 CUDA kernels
FROM kernel-builder as flash-att-v2-builder
WORKDIR /usr/src
COPY server/Makefile-flash-att-v2 Makefile
# Build specific version of flash attention v2
RUN make build-flash-attention-v2-cuda
# Build Transformers exllama kernels
FROM kernel-builder as exllama-kernels-builder
WORKDIR /usr/src
COPY server/exllama_kernels/ .
RUN TORCH_CUDA_ARCH_LIST="8.0;8.6+PTX" python setup.py build
# Build Transformers exllamav2 kernels
FROM kernel-builder as exllamav2-kernels-builder
WORKDIR /usr/src
COPY server/exllamav2_kernels/ .
# Build exllamav2 kernels
RUN TORCH_CUDA_ARCH_LIST="8.0;8.6+PTX" python setup.py build
# Build Transformers awq kernels
FROM kernel-builder as awq-kernels-builder
WORKDIR /usr/src
COPY server/Makefile-awq Makefile
# Build specific version of awq kernels
RUN TORCH_CUDA_ARCH_LIST="8.0;8.6+PTX" make build-awq
# Build eetq kernels
FROM kernel-builder as eetq-kernels-builder
WORKDIR /usr/src
COPY server/Makefile-eetq Makefile
# Build specific version of eetq kernels
RUN TORCH_CUDA_ARCH_LIST="8.0;8.6+PTX" make build-eetq
# Build Transformers CUDA kernels
FROM kernel-builder as custom-kernels-builder
WORKDIR /usr/src
COPY server/custom_kernels/ .
# Build custom kernels
RUN python setup.py build
# Build vllm CUDA kernels
FROM kernel-builder as vllm-builder
WORKDIR /usr/src
COPY server/Makefile-vllm Makefile
# Build specific version of vllm
RUN make build-vllm-cuda
# Build megablocks
FROM kernel-builder as megablocks-builder
RUN pip install git+https://github.com/OlivierDehaene/megablocks@181709df192de9a941fdf3a641cdc65a0462996e
# Text Generation Inference base image
FROM nvidia/cuda:12.1.0-base-ubuntu20.04 as base
# Conda env
ENV PATH=/opt/conda/bin:$PATH \
CONDA_PREFIX=/opt/conda
# Text Generation Inference base env
ENV HUGGINGFACE_HUB_CACHE=/data \
HF_HUB_ENABLE_HF_TRANSFER=1 \
PORT=80
WORKDIR /usr/src
RUN apt-get update && DEBIAN_FRONTEND=noninteractive apt-get install -y --no-install-recommends \
libssl-dev \
ca-certificates \
make \
curl \
&& rm -rf /var/lib/apt/lists/*
# Copy conda with PyTorch and Megablocks installed
COPY --from=megablocks-builder /opt/conda /opt/conda
# Copy build artifacts from flash attention builder
COPY --from=flash-att-builder /usr/src/flash-attention/build/lib.linux-x86_64-cpython-310 /opt/conda/lib/python3.10/site-packages
COPY --from=flash-att-builder /usr/src/flash-attention/csrc/layer_norm/build/lib.linux-x86_64-cpython-310 /opt/conda/lib/python3.10/site-packages
COPY --from=flash-att-builder /usr/src/flash-attention/csrc/rotary/build/lib.linux-x86_64-cpython-310 /opt/conda/lib/python3.10/site-packages
# Copy build artifacts from flash attention v2 builder
COPY --from=flash-att-v2-builder /usr/src/flash-attention-v2/build/lib.linux-x86_64-cpython-310 /opt/conda/lib/python3.10/site-packages
# Copy build artifacts from custom kernels builder
COPY --from=custom-kernels-builder /usr/src/build/lib.linux-x86_64-cpython-310 /opt/conda/lib/python3.10/site-packages
# Copy build artifacts from exllama kernels builder
COPY --from=exllama-kernels-builder /usr/src/build/lib.linux-x86_64-cpython-310 /opt/conda/lib/python3.10/site-packages
# Copy build artifacts from exllamav2 kernels builder
COPY --from=exllamav2-kernels-builder /usr/src/build/lib.linux-x86_64-cpython-310 /opt/conda/lib/python3.10/site-packages
# Copy build artifacts from awq kernels builder
COPY --from=awq-kernels-builder /usr/src/llm-awq/awq/kernels/build/lib.linux-x86_64-cpython-310 /opt/conda/lib/python3.10/site-packages
# Copy build artifacts from eetq kernels builder
COPY --from=eetq-kernels-builder /usr/src/eetq/build/lib.linux-x86_64-cpython-310 /opt/conda/lib/python3.10/site-packages
# Copy builds artifacts from vllm builder
COPY --from=vllm-builder /usr/src/vllm/build/lib.linux-x86_64-cpython-310 /opt/conda/lib/python3.10/site-packages
# Install flash-attention dependencies
RUN pip install einops --no-cache-dir
# Install server
COPY proto proto
COPY server server
COPY server/Makefile server/Makefile
RUN cd server && \
make gen-server && \
pip install -r requirements_cuda.txt && \
pip install ".[bnb, accelerate, quantize, peft]" --no-cache-dir
# Install benchmarker
COPY --from=builder /usr/src/target/release/text-generation-benchmark /usr/local/bin/text-generation-benchmark
# Install router
COPY --from=builder /usr/src/target/release/text-generation-router /usr/local/bin/text-generation-router
# Install launcher
COPY --from=builder /usr/src/target/release/text-generation-launcher /usr/local/bin/text-generation-launcher
RUN apt-get update && DEBIAN_FRONTEND=noninteractive apt-get install -y --no-install-recommends \
build-essential \
g++ \
&& rm -rf /var/lib/apt/lists/*
# AWS Sagemaker compatible image
FROM base as sagemaker
COPY sagemaker-entrypoint.sh entrypoint.sh
RUN chmod +x entrypoint.sh
ENTRYPOINT ["./entrypoint.sh"]
# Final image
FROM base
ENTRYPOINT ["text-generation-launcher"]
CMD ["--json-output"]
| 0 |
hf_public_repos | hf_public_repos/text-generation-inference/README.md | <div align="center">
<a href="https://www.youtube.com/watch?v=jlMAX2Oaht0">
<img width=560 width=315 alt="Making TGI deployment optimal" src="https://huggingface.co/datasets/Narsil/tgi_assets/resolve/main/thumbnail.png">
</a>
# Text Generation Inference
<a href="https://github.com/huggingface/text-generation-inference">
<img alt="GitHub Repo stars" src="https://img.shields.io/github/stars/huggingface/text-generation-inference?style=social">
</a>
<a href="https://huggingface.github.io/text-generation-inference">
<img alt="Swagger API documentation" src="https://img.shields.io/badge/API-Swagger-informational">
</a>
A Rust, Python and gRPC server for text generation inference. Used in production at [HuggingFace](https://huggingface.co)
to power Hugging Chat, the Inference API and Inference Endpoint.
</div>
## Table of contents
- [Get Started](#get-started)
- [API Documentation](#api-documentation)
- [Using a private or gated model](#using-a-private-or-gated-model)
- [A note on Shared Memory](#a-note-on-shared-memory-shm)
- [Distributed Tracing](#distributed-tracing)
- [Local Install](#local-install)
- [CUDA Kernels](#cuda-kernels)
- [Optimized architectures](#optimized-architectures)
- [Run Falcon](#run-falcon)
- [Run](#run)
- [Quantization](#quantization)
- [Develop](#develop)
- [Testing](#testing)
Text Generation Inference (TGI) is a toolkit for deploying and serving Large Language Models (LLMs). TGI enables high-performance text generation for the most popular open-source LLMs, including Llama, Falcon, StarCoder, BLOOM, GPT-NeoX, and [more](https://huggingface.co/docs/text-generation-inference/supported_models). TGI implements many features, such as:
- Simple launcher to serve most popular LLMs
- Production ready (distributed tracing with Open Telemetry, Prometheus metrics)
- Tensor Parallelism for faster inference on multiple GPUs
- Token streaming using Server-Sent Events (SSE)
- Continuous batching of incoming requests for increased total throughput
- Optimized transformers code for inference using [Flash Attention](https://github.com/HazyResearch/flash-attention) and [Paged Attention](https://github.com/vllm-project/vllm) on the most popular architectures
- Quantization with [bitsandbytes](https://github.com/TimDettmers/bitsandbytes) and [GPT-Q](https://arxiv.org/abs/2210.17323)
- [Safetensors](https://github.com/huggingface/safetensors) weight loading
- Watermarking with [A Watermark for Large Language Models](https://arxiv.org/abs/2301.10226)
- Logits warper (temperature scaling, top-p, top-k, repetition penalty, more details see [transformers.LogitsProcessor](https://huggingface.co/docs/transformers/internal/generation_utils#transformers.LogitsProcessor))
- Stop sequences
- Log probabilities
- Custom Prompt Generation: Easily generate text by providing custom prompts to guide the model's output
- Fine-tuning Support: Utilize fine-tuned models for specific tasks to achieve higher accuracy and performance
## Get Started
### Docker
For a detailed starting guide, please see the [Quick Tour](https://huggingface.co/docs/text-generation-inference/quicktour). The easiest way of getting started is using the official Docker container:
```shell
model=HuggingFaceH4/zephyr-7b-beta
volume=$PWD/data # share a volume with the Docker container to avoid downloading weights every run
docker run --gpus all --shm-size 1g -p 8080:80 -v $volume:/data ghcr.io/huggingface/text-generation-inference:1.3 --model-id $model
```
And then you can make requests like
```bash
curl 127.0.0.1:8080/generate \
-X POST \
-d '{"inputs":"What is Deep Learning?","parameters":{"max_new_tokens":20}}' \
-H 'Content-Type: application/json'
```
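The same request can be made from Python, for example with the third-party `requests` package (shown here as a sketch, not an official client):

```python
import requests

# Call the /generate route exposed by the running container.
response = requests.post(
    "http://127.0.0.1:8080/generate",
    json={"inputs": "What is Deep Learning?", "parameters": {"max_new_tokens": 20}},
)
print(response.json())
```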
**Note:** To use NVIDIA GPUs, you need to install the [NVIDIA Container Toolkit](https://docs.nvidia.com/datacenter/cloud-native/container-toolkit/install-guide.html). We also recommend using NVIDIA drivers with CUDA version 12.2 or higher. For running the Docker container on a machine with no GPUs or CUDA support, it is enough to remove the `--gpus all` flag and add `--disable-custom-kernels`, please note CPU is not the intended platform for this project, so performance might be subpar.
**Note:** TGI supports AMD Instinct MI210 and MI250 GPUs. Details can be found in the [Supported Hardware documentation](https://huggingface.co/docs/text-generation-inference/supported_models#supported-hardware). To use AMD GPUs, please use `docker run --device /dev/kfd --device /dev/dri --shm-size 1g -p 8080:80 -v $volume:/data ghcr.io/huggingface/text-generation-inference:1.3-rocm --model-id $model` instead of the command above.
To see all options to serve your models (in the [code](https://github.com/huggingface/text-generation-inference/blob/main/launcher/src/main.rs) or in the cli):
```
text-generation-launcher --help
```
### API documentation
You can consult the OpenAPI documentation of the `text-generation-inference` REST API using the `/docs` route.
The Swagger UI is also available at: [https://huggingface.github.io/text-generation-inference](https://huggingface.github.io/text-generation-inference).
### Using a private or gated model
You have the option to utilize the `HUGGING_FACE_HUB_TOKEN` environment variable for configuring the token employed by
`text-generation-inference`. This allows you to gain access to protected resources.
For example, if you want to serve the gated Llama V2 model variants:
1. Go to https://huggingface.co/settings/tokens
2. Copy your cli READ token
3. Export `HUGGING_FACE_HUB_TOKEN=<your cli READ token>`
or with Docker:
```shell
model=meta-llama/Llama-2-7b-chat-hf
volume=$PWD/data # share a volume with the Docker container to avoid downloading weights every run
token=<your cli READ token>
docker run --gpus all --shm-size 1g -e HUGGING_FACE_HUB_TOKEN=$token -p 8080:80 -v $volume:/data ghcr.io/huggingface/text-generation-inference:1.3 --model-id $model
```
### A note on Shared Memory (shm)
[`NCCL`](https://docs.nvidia.com/deeplearning/nccl/user-guide/docs/index.html) is a communication framework used by
`PyTorch` to do distributed training/inference. `text-generation-inference` makes
use of `NCCL` to enable Tensor Parallelism to dramatically speed up inference for large language models.
In order to share data between the different devices of a `NCCL` group, `NCCL` might fall back to using the host memory if
peer-to-peer using NVLink or PCI is not possible.
To allow the container to use 1G of Shared Memory and support SHM sharing, we add `--shm-size 1g` on the above command.
If you are running `text-generation-inference` inside `Kubernetes`, you can also add Shared Memory to the container by
creating a volume with:
```yaml
- name: shm
emptyDir:
medium: Memory
sizeLimit: 1Gi
```
and mounting it to `/dev/shm`.
Finally, you can also disable SHM sharing by using the `NCCL_SHM_DISABLE=1` environment variable. However, note that
this will impact performance.
### Distributed Tracing
`text-generation-inference` is instrumented with distributed tracing using OpenTelemetry. You can use this feature
by setting the address to an OTLP collector with the `--otlp-endpoint` argument.
### Architecture
![TGI architecture](https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/TGI.png)
### Local install
You can also opt to install `text-generation-inference` locally.
First [install Rust](https://rustup.rs/) and create a Python virtual environment with at least
Python 3.9, e.g. using `conda`:
```shell
curl --proto '=https' --tlsv1.2 -sSf https://sh.rustup.rs | sh
conda create -n text-generation-inference python=3.9
conda activate text-generation-inference
```
You may also need to install Protoc.
On Linux:
```shell
PROTOC_ZIP=protoc-21.12-linux-x86_64.zip
curl -OL https://github.com/protocolbuffers/protobuf/releases/download/v21.12/$PROTOC_ZIP
sudo unzip -o $PROTOC_ZIP -d /usr/local bin/protoc
sudo unzip -o $PROTOC_ZIP -d /usr/local 'include/*'
rm -f $PROTOC_ZIP
```
On MacOS, using Homebrew:
```shell
brew install protobuf
```
Then run:
```shell
BUILD_EXTENSIONS=True make install # Install repository and HF/transformer fork with CUDA kernels
make run-falcon-7b-instruct
```
**Note:** on some machines, you may also need the OpenSSL libraries and gcc. On Linux machines, run:
```shell
sudo apt-get install libssl-dev gcc -y
```
### CUDA Kernels
The custom CUDA kernels are only tested on NVIDIA A100, AMD MI210 and AMD MI250. If you have any installation or runtime issues, you can remove
the kernels by using the `DISABLE_CUSTOM_KERNELS=True` environment variable.
Be aware that the official Docker image has them enabled by default.
## Optimized architectures
TGI works out of the box to serve optimized models in [this list](https://huggingface.co/docs/text-generation-inference/supported_models).
Other architectures are supported on a best-effort basis using:
`AutoModelForCausalLM.from_pretrained(<model>, device_map="auto")`
or
`AutoModelForSeq2SeqLM.from_pretrained(<model>, device_map="auto")`
## Run Falcon
### Run
```shell
make run-falcon-7b-instruct
```
### Quantization
You can also quantize the weights with bitsandbytes to reduce the VRAM requirement:
```shell
make run-falcon-7b-instruct-quantize
```
4bit quantization is available using the [NF4 and FP4 data types from bitsandbytes](https://arxiv.org/pdf/2305.14314.pdf). It can be enabled by providing `--quantize bitsandbytes-nf4` or `--quantize bitsandbytes-fp4` as a command line argument to `text-generation-launcher`.
## Develop
```shell
make server-dev
make router-dev
```
## Testing
```shell
# python
make python-server-tests
make python-client-tests
# or both server and client tests
make python-tests
# rust cargo tests
make rust-tests
# integration tests
make integration-tests
```
| 0 |
hf_public_repos | hf_public_repos/text-generation-inference/rust-toolchain.toml | [toolchain]
channel = "1.70.0"
components = ["rustfmt", "clippy"] | 0 |
hf_public_repos | hf_public_repos/text-generation-inference/Dockerfile_amd | # Rust builder
FROM lukemathwalker/cargo-chef:latest-rust-1.71 AS chef
WORKDIR /usr/src
ARG CARGO_REGISTRIES_CRATES_IO_PROTOCOL=sparse
FROM chef as planner
COPY Cargo.toml Cargo.toml
COPY rust-toolchain.toml rust-toolchain.toml
COPY proto proto
COPY benchmark benchmark
COPY router router
COPY launcher launcher
RUN cargo chef prepare --recipe-path recipe.json
FROM chef AS builder
ARG GIT_SHA
ARG DOCKER_LABEL
RUN PROTOC_ZIP=protoc-21.12-linux-x86_64.zip && \
curl -OL https://github.com/protocolbuffers/protobuf/releases/download/v21.12/$PROTOC_ZIP && \
unzip -o $PROTOC_ZIP -d /usr/local bin/protoc && \
unzip -o $PROTOC_ZIP -d /usr/local 'include/*' && \
rm -f $PROTOC_ZIP
COPY --from=planner /usr/src/recipe.json recipe.json
RUN cargo chef cook --release --recipe-path recipe.json
COPY Cargo.toml Cargo.toml
COPY rust-toolchain.toml rust-toolchain.toml
COPY proto proto
COPY benchmark benchmark
COPY router router
COPY launcher launcher
RUN cargo build --release
# Text Generation Inference base image for RoCm
FROM rocm/dev-ubuntu-20.04:5.7 as base
RUN apt-get update && DEBIAN_FRONTEND=noninteractive apt-get install -y --no-install-recommends \
build-essential \
ca-certificates \
ccache \
curl \
git \
make \
libssl-dev \
g++ \
# Needed to build VLLM & flash.
rocthrust-dev \
hipsparse-dev \
hipblas-dev && \
rm -rf /var/lib/apt/lists/*
# Keep in sync with `server/pyproject.toml`
ARG MAMBA_VERSION=23.1.0-1
ARG PYTORCH_VERSION='2.2.0.dev0'
ARG ROCM_VERSION='5.7'
ARG PYTHON_VERSION='3.10.10'
# Automatically set by buildx
ARG TARGETPLATFORM
ENV PATH /opt/conda/bin:$PATH
# TGI seems to require libssl.so.1.1 instead of libssl.so.3, so we can't use Ubuntu 22.04. Ubuntu 20.04 has python==3.8, and TGI requires python>=3.9, hence the need for miniconda.
# Install mamba
# translating Docker's TARGETPLATFORM into mamba arches
RUN case ${TARGETPLATFORM} in \
"linux/arm64") MAMBA_ARCH=aarch64 ;; \
*) MAMBA_ARCH=x86_64 ;; \
esac && \
curl -fsSL -v -o ~/mambaforge.sh -O "https://github.com/conda-forge/miniforge/releases/download/${MAMBA_VERSION}/Mambaforge-${MAMBA_VERSION}-Linux-${MAMBA_ARCH}.sh"
RUN chmod +x ~/mambaforge.sh && \
bash ~/mambaforge.sh -b -p /opt/conda && \
mamba init && \
rm ~/mambaforge.sh
# Install PyTorch nightly (2.2.0.dev2023) compiled against RoCm 5.7, as VLLM can not be compiled with RoCm 5.6.
RUN pip install --pre torch==2.2.0.dev20231106 --index-url https://download.pytorch.org/whl/nightly/rocm5.7
FROM base AS kernel-builder
# Build vllm kernels
FROM kernel-builder AS vllm-builder
WORKDIR /usr/src
COPY server/Makefile-vllm Makefile
# Build specific version of vllm
RUN make build-vllm-rocm
# Build Flash Attention v2 kernels
FROM kernel-builder AS flash-att-v2-builder
WORKDIR /usr/src
COPY server/Makefile-flash-att-v2 Makefile
# Build specific version of flash attention v2
RUN make build-flash-attention-v2-rocm
# Build Transformers CUDA kernels (gpt-neox and bloom)
FROM kernel-builder as custom-kernels-builder
WORKDIR /usr/src
COPY server/custom_kernels/ .
RUN PYTORCH_ROCM_ARCH=gfx90a python setup.py build
FROM base as base-copy
# Text Generation Inference base env
ENV HUGGINGFACE_HUB_CACHE=/data \
HF_HUB_ENABLE_HF_TRANSFER=1 \
PORT=80
# Copy builds artifacts from vllm builder
COPY --from=vllm-builder /usr/src/vllm/build/lib.linux-x86_64-cpython-310 /opt/conda/lib/python3.10/site-packages
# Copy build artifacts from flash attention v2 builder
COPY --from=flash-att-v2-builder /usr/src/flash-attention-v2/build/lib.linux-x86_64-cpython-310 /opt/conda/lib/python3.10/site-packages
# Copy build artifacts from custom kernels builder
COPY --from=custom-kernels-builder /usr/src/build/lib.linux-x86_64-cpython-310 /opt/conda/lib/python3.10/site-packages
# Install flash-attention dependencies
RUN pip install einops --no-cache-dir
# Install server
COPY proto proto
COPY server server
COPY server/Makefile server/Makefile
RUN cd server && \
make gen-server && \
pip install -r requirements_rocm.txt && \
pip install ".[accelerate, peft]" --no-cache-dir
# Install benchmarker
COPY --from=builder /usr/src/target/release/text-generation-benchmark /usr/local/bin/text-generation-benchmark
# Install router
COPY --from=builder /usr/src/target/release/text-generation-router /usr/local/bin/text-generation-router
# Install launcher
COPY --from=builder /usr/src/target/release/text-generation-launcher /usr/local/bin/text-generation-launcher
# AWS Sagemaker compatible image
FROM base-copy as sagemaker
COPY sagemaker-entrypoint.sh entrypoint.sh
RUN chmod +x entrypoint.sh
ENTRYPOINT ["./entrypoint.sh"]
# Final image
FROM base-copy
ENTRYPOINT ["text-generation-launcher"]
CMD ["--json-output"]
| 0 |
hf_public_repos | hf_public_repos/text-generation-inference/sagemaker-entrypoint.sh | #!/bin/bash
if [[ -z "${HF_MODEL_ID}" ]]; then
echo "HF_MODEL_ID must be set"
exit 1
fi
export MODEL_ID="${HF_MODEL_ID}"
if [[ -n "${HF_MODEL_REVISION}" ]]; then
export REVISION="${HF_MODEL_REVISION}"
fi
if [[ -n "${SM_NUM_GPUS}" ]]; then
export NUM_SHARD="${SM_NUM_GPUS}"
fi
if [[ -n "${HF_MODEL_QUANTIZE}" ]]; then
export QUANTIZE="${HF_MODEL_QUANTIZE}"
fi
if [[ -n "${HF_MODEL_TRUST_REMOTE_CODE}" ]]; then
export TRUST_REMOTE_CODE="${HF_MODEL_TRUST_REMOTE_CODE}"
fi
text-generation-launcher --port 8080
| 0 |
hf_public_repos | hf_public_repos/text-generation-inference/Cargo.lock | # This file is automatically @generated by Cargo.
# It is not intended for manual editing.
version = 3
[[package]]
name = "addr2line"
version = "0.21.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "8a30b2e23b9e17a9f90641c7ab1549cd9b44f296d3ccbf309d2863cfe398a0cb"
dependencies = [
"gimli",
]
[[package]]
name = "adler"
version = "1.0.2"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "f26201604c87b1e01bd3d98f8d5d9a8fcbb815e8cedb41ffccbeb4bf593a35fe"
[[package]]
name = "ahash"
version = "0.8.6"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "91429305e9f0a25f6205c5b8e0d2db09e0708a7a6df0f42212bb56c32c8ac97a"
dependencies = [
"cfg-if",
"once_cell",
"version_check",
"zerocopy",
]
[[package]]
name = "aho-corasick"
version = "1.1.2"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "b2969dcb958b36655471fc61f7e416fa76033bdd4bfed0678d8fee1e2d07a1f0"
dependencies = [
"memchr",
]
[[package]]
name = "anstream"
version = "0.6.5"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "d664a92ecae85fd0a7392615844904654d1d5f5514837f471ddef4a057aba1b6"
dependencies = [
"anstyle",
"anstyle-parse",
"anstyle-query",
"anstyle-wincon",
"colorchoice",
"utf8parse",
]
[[package]]
name = "anstyle"
version = "1.0.4"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "7079075b41f533b8c61d2a4d073c4676e1f8b249ff94a393b0595db304e0dd87"
[[package]]
name = "anstyle-parse"
version = "0.2.3"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "c75ac65da39e5fe5ab759307499ddad880d724eed2f6ce5b5e8a26f4f387928c"
dependencies = [
"utf8parse",
]
[[package]]
name = "anstyle-query"
version = "1.0.2"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "e28923312444cdd728e4738b3f9c9cac739500909bb3d3c94b43551b16517648"
dependencies = [
"windows-sys 0.52.0",
]
[[package]]
name = "anstyle-wincon"
version = "3.0.2"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "1cd54b81ec8d6180e24654d0b371ad22fc3dd083b6ff8ba325b72e00c87660a7"
dependencies = [
"anstyle",
"windows-sys 0.52.0",
]
[[package]]
name = "anyhow"
version = "1.0.76"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "59d2a3357dde987206219e78ecfbbb6e8dad06cbb65292758d3270e6254f7355"
[[package]]
name = "arc-swap"
version = "1.6.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "bddcadddf5e9015d310179a59bb28c4d4b9920ad0f11e8e14dbadf654890c9a6"
[[package]]
name = "async-rustls"
version = "0.3.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "93b21a03b7c21702a0110f9f8d228763a533570deb376119042dabf33c37a01a"
dependencies = [
"futures-io",
"rustls 0.20.9",
"webpki",
]
[[package]]
name = "async-stream"
version = "0.3.5"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "cd56dd203fef61ac097dd65721a419ddccb106b2d2b70ba60a6b529f03961a51"
dependencies = [
"async-stream-impl",
"futures-core",
"pin-project-lite",
]
[[package]]
name = "async-stream-impl"
version = "0.3.5"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "16e62a023e7c117e27523144c5d2459f4397fcc3cab0085af8e2224f643a0193"
dependencies = [
"proc-macro2",
"quote",
"syn 2.0.42",
]
[[package]]
name = "async-trait"
version = "0.1.75"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "fdf6721fb0140e4f897002dd086c06f6c27775df19cfe1fccb21181a48fd2c98"
dependencies = [
"proc-macro2",
"quote",
"syn 2.0.42",
]
[[package]]
name = "autocfg"
version = "1.1.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "d468802bab17cbc0cc575e9b053f41e72aa36bfa6b7f55e3529ffa43161b97fa"
[[package]]
name = "average"
version = "0.14.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "6d804c74bb2d66e9b7047658d21af0f1c937d7d2466410cbf1aed3b0c04048d4"
dependencies = [
"easy-cast",
"float-ord",
"num-traits",
]
[[package]]
name = "awaitdrop"
version = "0.1.2"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "771051cdc7eec2dc1b23fbf870bb7fbb89136fe374227c875e377f1eed99a429"
dependencies = [
"futures",
"generational-arena",
"parking_lot",
"slotmap",
]
[[package]]
name = "axum"
version = "0.6.20"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "3b829e4e32b91e643de6eafe82b1d90675f5874230191a4ffbc1b336dec4d6bf"
dependencies = [
"async-trait",
"axum-core",
"bitflags 1.3.2",
"bytes",
"futures-util",
"http",
"http-body",
"hyper",
"itoa",
"matchit",
"memchr",
"mime",
"percent-encoding",
"pin-project-lite",
"rustversion",
"serde",
"serde_json",
"serde_path_to_error",
"serde_urlencoded",
"sync_wrapper",
"tokio",
"tower",
"tower-layer",
"tower-service",
]
[[package]]
name = "axum-core"
version = "0.3.4"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "759fa577a247914fd3f7f76d62972792636412fbfd634cd452f6a385a74d2d2c"
dependencies = [
"async-trait",
"bytes",
"futures-util",
"http",
"http-body",
"mime",
"rustversion",
"tower-layer",
"tower-service",
]
[[package]]
name = "axum-tracing-opentelemetry"
version = "0.14.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "06985105829f176e9a3f113b1c71cc24e08f600ef0df4e70cd90d144f889e19f"
dependencies = [
"axum",
"futures-core",
"futures-util",
"http",
"opentelemetry",
"pin-project-lite",
"tower",
"tracing",
"tracing-opentelemetry",
"tracing-opentelemetry-instrumentation-sdk",
]
[[package]]
name = "backtrace"
version = "0.3.69"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "2089b7e3f35b9dd2d0ed921ead4f6d318c27680d4a5bd167b3ee120edb105837"
dependencies = [
"addr2line",
"cc",
"cfg-if",
"libc",
"miniz_oxide",
"object",
"rustc-demangle",
]
[[package]]
name = "base64"
version = "0.13.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "9e1b586273c5702936fe7b7d6896644d8be71e6314cfe09d3167c95f712589e8"
[[package]]
name = "base64"
version = "0.21.5"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "35636a1494ede3b646cc98f74f8e62c773a38a659ebc777a2cf26b9b74171df9"
[[package]]
name = "bitflags"
version = "1.3.2"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "bef38d45163c2f1dde094a7dfd33ccf595c92905c8f8f4fdc18d06fb1037718a"
[[package]]
name = "bitflags"
version = "2.4.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "327762f6e5a765692301e5bb513e0d9fef63be86bbc14528052b1cd3e6f03e07"
[[package]]
name = "block-buffer"
version = "0.10.4"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "3078c7629b62d3f0439517fa394996acacc5cbc91c5a20d8c658e77abd503a71"
dependencies = [
"generic-array",
]
[[package]]
name = "bumpalo"
version = "3.14.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "7f30e7476521f6f8af1a1c4c0b8cc94f0bee37d91763d0ca2665f299b6cd8aec"
[[package]]
name = "bytecount"
version = "0.6.7"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "e1e5f035d16fc623ae5f74981db80a439803888314e3a555fd6f04acd51a3205"
[[package]]
name = "byteorder"
version = "1.5.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "1fd0f2584146f6f2ef48085050886acf353beff7305ebd1ae69500e27c67f64b"
[[package]]
name = "bytes"
version = "1.5.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "a2bd12c1caf447e69cd4528f47f94d203fd2582878ecb9e9465484c4148a8223"
[[package]]
name = "cassowary"
version = "0.3.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "df8670b8c7b9dae1793364eafadf7239c40d669904660c5960d74cfd80b46a53"
[[package]]
name = "cc"
version = "1.0.83"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "f1174fb0b6ec23863f8b971027804a42614e347eafb0a95bf0b12cdae21fc4d0"
dependencies = [
"libc",
]
[[package]]
name = "cfg-if"
version = "1.0.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "baf1de4339761588bc0619e3cbc0120ee582ebb74b53b4efbf79117bd2da40fd"
[[package]]
name = "clap"
version = "4.4.11"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "bfaff671f6b22ca62406885ece523383b9b64022e341e53e009a62ebc47a45f2"
dependencies = [
"clap_builder",
"clap_derive",
]
[[package]]
name = "clap_builder"
version = "4.4.11"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "a216b506622bb1d316cd51328dce24e07bdff4a6128a47c7e7fad11878d5adbb"
dependencies = [
"anstream",
"anstyle",
"clap_lex",
"strsim",
]
[[package]]
name = "clap_derive"
version = "4.4.7"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "cf9804afaaf59a91e75b022a30fb7229a7901f60c755489cc61c9b423b836442"
dependencies = [
"heck",
"proc-macro2",
"quote",
"syn 2.0.42",
]
[[package]]
name = "clap_lex"
version = "0.6.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "702fc72eb24e5a1e48ce58027a675bc24edd52096d5397d4aea7c6dd9eca0bd1"
[[package]]
name = "colorchoice"
version = "1.0.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "acbf1af155f9b9ef647e42cdc158db4b64a1b61f743629225fde6f3e0be2a7c7"
[[package]]
name = "console"
version = "0.15.7"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "c926e00cc70edefdc64d3a5ff31cc65bb97a3460097762bd23afb4d8145fccf8"
dependencies = [
"encode_unicode",
"lazy_static",
"libc",
"unicode-width",
"windows-sys 0.45.0",
]
[[package]]
name = "core-foundation"
version = "0.9.4"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "91e195e091a93c46f7102ec7818a2aa394e1e1771c3ab4825963fa03e45afb8f"
dependencies = [
"core-foundation-sys",
"libc",
]
[[package]]
name = "core-foundation-sys"
version = "0.8.6"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "06ea2b9bc92be3c2baa9334a323ebca2d6f074ff852cd1d7b11064035cd3868f"
[[package]]
name = "cpufeatures"
version = "0.2.11"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "ce420fe07aecd3e67c5f910618fe65e94158f6dcc0adf44e00d69ce2bdfe0fd0"
dependencies = [
"libc",
]
[[package]]
name = "crc32fast"
version = "1.3.2"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "b540bd8bc810d3885c6ea91e2018302f68baba2129ab3e88f32389ee9370880d"
dependencies = [
"cfg-if",
]
[[package]]
name = "crossbeam-channel"
version = "0.5.9"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "14c3242926edf34aec4ac3a77108ad4854bffaa2e4ddc1824124ce59231302d5"
dependencies = [
"cfg-if",
"crossbeam-utils",
]
[[package]]
name = "crossbeam-deque"
version = "0.8.4"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "fca89a0e215bab21874660c67903c5f143333cab1da83d041c7ded6053774751"
dependencies = [
"cfg-if",
"crossbeam-epoch",
"crossbeam-utils",
]
[[package]]
name = "crossbeam-epoch"
version = "0.9.16"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "2d2fe95351b870527a5d09bf563ed3c97c0cffb87cf1c78a591bf48bb218d9aa"
dependencies = [
"autocfg",
"cfg-if",
"crossbeam-utils",
"memoffset",
]
[[package]]
name = "crossbeam-utils"
version = "0.8.17"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "c06d96137f14f244c37f989d9fff8f95e6c18b918e71f36638f8c49112e4c78f"
dependencies = [
"cfg-if",
]
[[package]]
name = "crossterm"
version = "0.27.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "f476fe445d41c9e991fd07515a6f463074b782242ccf4a5b7b1d1012e70824df"
dependencies = [
"bitflags 2.4.1",
"crossterm_winapi",
"libc",
"mio",
"parking_lot",
"signal-hook",
"signal-hook-mio",
"winapi",
]
[[package]]
name = "crossterm_winapi"
version = "0.9.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "acdd7c62a3665c7f6830a51635d9ac9b23ed385797f70a83bb8bafe9c572ab2b"
dependencies = [
"winapi",
]
[[package]]
name = "crypto-common"
version = "0.1.6"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "1bfb12502f3fc46cca1bb51ac28df9d618d813cdc3d2f25b9fe775a34af26bb3"
dependencies = [
"generic-array",
"typenum",
]
[[package]]
name = "ctrlc"
version = "3.4.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "82e95fbd621905b854affdc67943b043a0fbb6ed7385fd5a25650d19a8a6cfdf"
dependencies = [
"nix",
"windows-sys 0.48.0",
]
[[package]]
name = "darling"
version = "0.14.4"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "7b750cb3417fd1b327431a470f388520309479ab0bf5e323505daf0290cd3850"
dependencies = [
"darling_core",
"darling_macro",
]
[[package]]
name = "darling_core"
version = "0.14.4"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "109c1ca6e6b7f82cc233a97004ea8ed7ca123a9af07a8230878fcfda9b158bf0"
dependencies = [
"fnv",
"ident_case",
"proc-macro2",
"quote",
"strsim",
"syn 1.0.109",
]
[[package]]
name = "darling_macro"
version = "0.14.4"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "a4aab4dbc9f7611d8b55048a3a16d2d010c2c8334e46304b40ac1cc14bf3b48e"
dependencies = [
"darling_core",
"quote",
"syn 1.0.109",
]
[[package]]
name = "deranged"
version = "0.3.10"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "8eb30d70a07a3b04884d2677f06bec33509dc67ca60d92949e5535352d3191dc"
dependencies = [
"powerfmt",
]
[[package]]
name = "derive_builder"
version = "0.12.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "8d67778784b508018359cbc8696edb3db78160bab2c2a28ba7f56ef6932997f8"
dependencies = [
"derive_builder_macro",
]
[[package]]
name = "derive_builder_core"
version = "0.12.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "c11bdc11a0c47bc7d37d582b5285da6849c96681023680b906673c5707af7b0f"
dependencies = [
"darling",
"proc-macro2",
"quote",
"syn 1.0.109",
]
[[package]]
name = "derive_builder_macro"
version = "0.12.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "ebcda35c7a396850a55ffeac740804b40ffec779b98fffbb1738f4033f0ee79e"
dependencies = [
"derive_builder_core",
"syn 1.0.109",
]
[[package]]
name = "digest"
version = "0.10.7"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "9ed9a281f7bc9b7576e61468ba615a66a5c8cfdff42420a70aa82701a3b1e292"
dependencies = [
"block-buffer",
"crypto-common",
]
[[package]]
name = "dirs"
version = "4.0.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "ca3aa72a6f96ea37bbc5aa912f6788242832f75369bdfdadcb0e38423f100059"
dependencies = [
"dirs-sys 0.3.7",
]
[[package]]
name = "dirs"
version = "5.0.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "44c45a9d03d6676652bcb5e724c7e988de1acad23a711b5217ab9cbecbec2225"
dependencies = [
"dirs-sys 0.4.1",
]
[[package]]
name = "dirs-sys"
version = "0.3.7"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "1b1d1d91c932ef41c0f2663aa8b0ca0342d444d842c06914aa0a7e352d0bada6"
dependencies = [
"libc",
"redox_users",
"winapi",
]
[[package]]
name = "dirs-sys"
version = "0.4.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "520f05a5cbd335fae5a99ff7a6ab8627577660ee5cfd6a94a6a929b52ff0321c"
dependencies = [
"libc",
"option-ext",
"redox_users",
"windows-sys 0.48.0",
]
[[package]]
name = "easy-cast"
version = "0.5.2"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "10936778145f3bea71fd9bf61332cce28c28e96a380714f7ab34838b80733fd6"
dependencies = [
"libm",
]
[[package]]
name = "either"
version = "1.9.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "a26ae43d7bcc3b814de94796a5e736d4029efb0ee900c12e2d54c993ad1a1e07"
[[package]]
name = "encode_unicode"
version = "0.3.6"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "a357d28ed41a50f9c765dbfe56cbc04a64e53e5fc58ba79fbc34c10ef3df831f"
[[package]]
name = "encoding_rs"
version = "0.8.33"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "7268b386296a025e474d5140678f75d6de9493ae55a5d709eeb9dd08149945e1"
dependencies = [
"cfg-if",
]
[[package]]
name = "equivalent"
version = "1.0.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "5443807d6dff69373d433ab9ef5378ad8df50ca6298caf15de6e52e24aaf54d5"
[[package]]
name = "errno"
version = "0.3.8"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "a258e46cdc063eb8519c00b9fc845fc47bcfca4130e2f08e88665ceda8474245"
dependencies = [
"libc",
"windows-sys 0.52.0",
]
[[package]]
name = "esaxx-rs"
version = "0.1.10"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "d817e038c30374a4bcb22f94d0a8a0e216958d4c3dcde369b1439fec4bdda6e6"
dependencies = [
"cc",
]
[[package]]
name = "fastrand"
version = "2.0.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "25cbce373ec4653f1a01a31e8a5e5ec0c622dc27ff9c4e6606eefef5cbbed4a5"
[[package]]
name = "fixedbitset"
version = "0.4.2"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "0ce7134b9999ecaf8bcd65542e436736ef32ddca1b3e06094cb6ec5755203b80"
[[package]]
name = "flate2"
version = "1.0.28"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "46303f565772937ffe1d394a4fac6f411c6013172fadde9dcdb1e147a086940e"
dependencies = [
"crc32fast",
"miniz_oxide",
]
[[package]]
name = "float-ord"
version = "0.3.2"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "8ce81f49ae8a0482e4c55ea62ebbd7e5a686af544c00b9d090bba3ff9be97b3d"
[[package]]
name = "float_eq"
version = "1.0.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "28a80e3145d8ad11ba0995949bbcf48b9df2be62772b3d351ef017dff6ecb853"
[[package]]
name = "fnv"
version = "1.0.7"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "3f9eec918d3f24069decb9af1554cad7c880e2da24a9afd88aca000531ab82c1"
[[package]]
name = "foreign-types"
version = "0.3.2"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "f6f339eb8adc052cd2ca78910fda869aefa38d22d5cb648e6485e4d3fc06f3b1"
dependencies = [
"foreign-types-shared",
]
[[package]]
name = "foreign-types-shared"
version = "0.1.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "00b0228411908ca8685dba7fc2cdd70ec9990a6e753e89b6ac91a84c40fbaf4b"
[[package]]
name = "form_urlencoded"
version = "1.2.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "e13624c2627564efccf4934284bdd98cbaa14e79b0b5a141218e507b3a823456"
dependencies = [
"percent-encoding",
]
[[package]]
name = "futures"
version = "0.3.29"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "da0290714b38af9b4a7b094b8a37086d1b4e61f2df9122c3cad2577669145335"
dependencies = [
"futures-channel",
"futures-core",
"futures-executor",
"futures-io",
"futures-sink",
"futures-task",
"futures-util",
]
[[package]]
name = "futures-channel"
version = "0.3.30"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "eac8f7d7865dcb88bd4373ab671c8cf4508703796caa2b1985a9ca867b3fcb78"
dependencies = [
"futures-core",
"futures-sink",
]
[[package]]
name = "futures-core"
version = "0.3.30"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "dfc6580bb841c5a68e9ef15c77ccc837b40a7504914d52e47b8b0e9bbda25a1d"
[[package]]
name = "futures-executor"
version = "0.3.29"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "0f4fb8693db0cf099eadcca0efe2a5a22e4550f98ed16aba6c48700da29597bc"
dependencies = [
"futures-core",
"futures-task",
"futures-util",
]
[[package]]
name = "futures-io"
version = "0.3.30"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "a44623e20b9681a318efdd71c299b6b222ed6f231972bfe2f224ebad6311f0c1"
[[package]]
name = "futures-macro"
version = "0.3.30"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "87750cf4b7a4c0625b1529e4c543c2182106e4dedc60a2a6455e00d212c489ac"
dependencies = [
"proc-macro2",
"quote",
"syn 2.0.42",
]
[[package]]
name = "futures-sink"
version = "0.3.30"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "9fb8e00e87438d937621c1c6269e53f536c14d3fbd6a042bb24879e57d474fb5"
[[package]]
name = "futures-task"
version = "0.3.30"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "38d84fa142264698cdce1a9f9172cf383a0c82de1bddcf3092901442c4097004"
[[package]]
name = "futures-util"
version = "0.3.30"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "3d6401deb83407ab3da39eba7e33987a73c3df0c82b4bb5813ee871c19c41d48"
dependencies = [
"futures-channel",
"futures-core",
"futures-io",
"futures-macro",
"futures-sink",
"futures-task",
"memchr",
"pin-project-lite",
"pin-utils",
"slab",
]
[[package]]
name = "generational-arena"
version = "0.2.9"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "877e94aff08e743b651baaea359664321055749b398adff8740a7399af7796e7"
dependencies = [
"cfg-if",
]
[[package]]
name = "generic-array"
version = "0.14.7"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "85649ca51fd72272d7821adaf274ad91c288277713d9c18820d8499a7ff69e9a"
dependencies = [
"typenum",
"version_check",
]
[[package]]
name = "getrandom"
version = "0.2.11"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "fe9006bed769170c11f845cf00c7c1e9092aeb3f268e007c3e760ac68008070f"
dependencies = [
"cfg-if",
"libc",
"wasi",
]
[[package]]
name = "gimli"
version = "0.28.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "4271d37baee1b8c7e4b708028c57d816cf9d2434acb33a549475f78c181f6253"
[[package]]
name = "grpc-metadata"
version = "0.1.0"
dependencies = [
"opentelemetry",
"tonic 0.10.2",
"tracing",
"tracing-opentelemetry",
]
[[package]]
name = "h2"
version = "0.3.22"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "4d6250322ef6e60f93f9a2162799302cd6f68f79f6e5d85c8c16f14d1d958178"
dependencies = [
"bytes",
"fnv",
"futures-core",
"futures-sink",
"futures-util",
"http",
"indexmap 2.1.0",
"slab",
"tokio",
"tokio-util",
"tracing",
]
[[package]]
name = "hashbrown"
version = "0.12.3"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "8a9ee70c43aaf417c914396645a0fa852624801b24ebb7ae78fe8272889ac888"
[[package]]
name = "hashbrown"
version = "0.13.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "33ff8ae62cd3a9102e5637afc8452c55acf3844001bd5374e0b0bd7b6616c038"
dependencies = [
"ahash",
]
[[package]]
name = "hashbrown"
version = "0.14.3"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "290f1a1d9242c78d09ce40a5e87e7554ee637af1351968159f4952f028f75604"
[[package]]
name = "heck"
version = "0.4.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "95505c38b4572b2d910cecb0281560f54b440a19336cbbcb27bf6ce6adc6f5a8"
[[package]]
name = "hermit-abi"
version = "0.3.3"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "d77f7ec81a6d05a3abb01ab6eb7590f6083d08449fe5a1c8b1e620283546ccb7"
[[package]]
name = "hf-hub"
version = "0.3.2"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "2b780635574b3d92f036890d8373433d6f9fc7abb320ee42a5c25897fc8ed732"
dependencies = [
"dirs 5.0.1",
"futures",
"indicatif",
"log",
"native-tls",
"num_cpus",
"rand",
"reqwest",
"serde",
"serde_json",
"thiserror",
"tokio",
"ureq",
]
[[package]]
name = "home"
version = "0.5.9"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "e3d1354bf6b7235cb4a0576c2619fd4ed18183f689b12b006a0ee7329eeff9a5"
dependencies = [
"windows-sys 0.52.0",
]
[[package]]
name = "hostname"
version = "0.3.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "3c731c3e10504cc8ed35cfe2f1db4c9274c3d35fa486e3b31df46f068ef3e867"
dependencies = [
"libc",
"match_cfg",
"winapi",
]
[[package]]
name = "http"
version = "0.2.11"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "8947b1a6fad4393052c7ba1f4cd97bed3e953a95c79c92ad9b051a04611d9fbb"
dependencies = [
"bytes",
"fnv",
"itoa",
]
[[package]]
name = "http-body"
version = "0.4.6"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "7ceab25649e9960c0311ea418d17bee82c0dcec1bd053b5f9a66e265a693bed2"
dependencies = [
"bytes",
"http",
"pin-project-lite",
]
[[package]]
name = "http-range-header"
version = "0.3.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "add0ab9360ddbd88cfeb3bd9574a1d85cfdfa14db10b3e21d3700dbc4328758f"
[[package]]
name = "httparse"
version = "1.8.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "d897f394bad6a705d5f4104762e116a75639e470d80901eed05a860a95cb1904"
[[package]]
name = "httpdate"
version = "1.0.3"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "df3b46402a9d5adb4c86a0cf463f42e19994e3ee891101b1841f30a545cb49a9"
[[package]]
name = "hyper"
version = "0.14.28"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "bf96e135eb83a2a8ddf766e426a841d8ddd7449d5f00d34ea02b41d2f19eef80"
dependencies = [
"bytes",
"futures-channel",
"futures-core",
"futures-util",
"h2",
"http",
"http-body",
"httparse",
"httpdate",
"itoa",
"pin-project-lite",
"socket2",
"tokio",
"tower-service",
"tracing",
"want",
]
[[package]]
name = "hyper-timeout"
version = "0.4.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "bbb958482e8c7be4bc3cf272a766a2b0bf1a6755e7a6ae777f017a31d11b13b1"
dependencies = [
"hyper",
"pin-project-lite",
"tokio",
"tokio-io-timeout",
]
[[package]]
name = "hyper-tls"
version = "0.5.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "d6183ddfa99b85da61a140bea0efc93fdf56ceaa041b37d553518030827f9905"
dependencies = [
"bytes",
"hyper",
"native-tls",
"tokio",
"tokio-native-tls",
]
[[package]]
name = "ident_case"
version = "1.0.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "b9e0384b61958566e926dc50660321d12159025e767c18e043daf26b70104c39"
[[package]]
name = "idna"
version = "0.5.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "634d9b1461af396cad843f47fdba5597a4f9e6ddd4bfb6ff5d85028c25cb12f6"
dependencies = [
"unicode-bidi",
"unicode-normalization",
]
[[package]]
name = "indexmap"
version = "1.9.3"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "bd070e393353796e801d209ad339e89596eb4c8d430d18ede6a1cced8fafbd99"
dependencies = [
"autocfg",
"hashbrown 0.12.3",
]
[[package]]
name = "indexmap"
version = "2.1.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "d530e1a18b1cb4c484e6e34556a0d948706958449fca0cab753d649f2bce3d1f"
dependencies = [
"equivalent",
"hashbrown 0.14.3",
"serde",
]
[[package]]
name = "indicatif"
version = "0.17.7"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "fb28741c9db9a713d93deb3bb9515c20788cef5815265bee4980e87bde7e0f25"
dependencies = [
"console",
"instant",
"number_prefix",
"portable-atomic",
"unicode-width",
]
[[package]]
name = "indoc"
version = "2.0.4"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "1e186cfbae8084e513daff4240b4797e342f988cecda4fb6c939150f96315fd8"
[[package]]
name = "init-tracing-opentelemetry"
version = "0.14.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "94bd26b1b737bc11f183620072e188d1c6ede67e0e78682228d66b49ec510e17"
dependencies = [
"opentelemetry",
"opentelemetry-otlp",
"thiserror",
"tracing",
"tracing-opentelemetry",
]
[[package]]
name = "instant"
version = "0.1.12"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "7a5bbe824c507c5da5956355e86a746d82e0e1464f65d862cc5e71da70e94b2c"
dependencies = [
"cfg-if",
]
[[package]]
name = "ipnet"
version = "2.9.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "8f518f335dce6725a761382244631d86cf0ccb2863413590b31338feb467f9c3"
[[package]]
name = "itertools"
version = "0.10.5"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "b0fd2260e829bddf4cb6ea802289de2f86d6a7a690192fbe91b3f46e0f2c8473"
dependencies = [
"either",
]
[[package]]
name = "itertools"
version = "0.11.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "b1c173a5686ce8bfa551b3563d0c2170bf24ca44da99c7ca4bfdab5418c3fe57"
dependencies = [
"either",
]
[[package]]
name = "itoa"
version = "1.0.10"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "b1a46d1a171d865aa5f83f92695765caa047a9b4cbae2cbf37dbd613a793fd4c"
[[package]]
name = "js-sys"
version = "0.3.66"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "cee9c64da59eae3b50095c18d3e74f8b73c0b86d2792824ff01bbce68ba229ca"
dependencies = [
"wasm-bindgen",
]
[[package]]
name = "lazy_static"
version = "1.4.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "e2abad23fbc42b3700f2f279844dc832adb2b2eb069b2df918f455c4e18cc646"
[[package]]
name = "libc"
version = "0.2.151"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "302d7ab3130588088d277783b1e2d2e10c9e9e4a16dd9050e6ec93fb3e7048f4"
[[package]]
name = "libm"
version = "0.2.8"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "4ec2a862134d2a7d32d7983ddcdd1c4923530833c9f2ea1a44fc5fa473989058"
[[package]]
name = "libredox"
version = "0.0.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "85c833ca1e66078851dba29046874e38f08b2c883700aa29a03ddd3b23814ee8"
dependencies = [
"bitflags 2.4.1",
"libc",
"redox_syscall",
]
[[package]]
name = "linux-raw-sys"
version = "0.4.12"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "c4cd1a83af159aa67994778be9070f0ae1bd732942279cabb14f86f986a21456"
[[package]]
name = "lock_api"
version = "0.4.11"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "3c168f8615b12bc01f9c17e2eb0cc07dcae1940121185446edc3744920e8ef45"
dependencies = [
"autocfg",
"scopeguard",
]
[[package]]
name = "log"
version = "0.4.20"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "b5e6163cb8c49088c2c36f57875e58ccd8c87c7427f7fbd50ea6710b2f3f2e8f"
[[package]]
name = "mach2"
version = "0.4.2"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "19b955cdeb2a02b9117f121ce63aa52d08ade45de53e48fe6a38b39c10f6f709"
dependencies = [
"libc",
]
[[package]]
name = "macro_rules_attribute"
version = "0.2.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "8a82271f7bc033d84bbca59a3ce3e4159938cb08a9c3aebbe54d215131518a13"
dependencies = [
"macro_rules_attribute-proc_macro",
"paste",
]
[[package]]
name = "macro_rules_attribute-proc_macro"
version = "0.2.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "b8dd856d451cc0da70e2ef2ce95a18e39a93b7558bedf10201ad28503f918568"
[[package]]
name = "match_cfg"
version = "0.1.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "ffbee8634e0d45d258acb448e7eaab3fce7a0a467395d4d9f228e3c1f01fb2e4"
[[package]]
name = "matchers"
version = "0.1.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "8263075bb86c5a1b1427b5ae862e8889656f126e9f77c484496e8b47cf5c5558"
dependencies = [
"regex-automata 0.1.10",
]
[[package]]
name = "matchit"
version = "0.7.3"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "0e7465ac9959cc2b1404e8e2367b43684a6d13790fe23056cc8c6c5a6b7bcb94"
[[package]]
name = "memchr"
version = "2.6.4"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "f665ee40bc4a3c5590afb1e9677db74a508659dfd71e126420da8274909a0167"
[[package]]
name = "memoffset"
version = "0.9.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "5a634b1c61a95585bd15607c6ab0c4e5b226e695ff2800ba0cdccddf208c406c"
dependencies = [
"autocfg",
]
[[package]]
name = "metrics"
version = "0.21.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "fde3af1a009ed76a778cb84fdef9e7dbbdf5775ae3e4cc1f434a6a307f6f76c5"
dependencies = [
"ahash",
"metrics-macros",
"portable-atomic",
]
[[package]]
name = "metrics-exporter-prometheus"
version = "0.12.2"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "1d4fa7ce7c4862db464a37b0b31d89bca874562f034bd7993895572783d02950"
dependencies = [
"base64 0.21.5",
"hyper",
"indexmap 1.9.3",
"ipnet",
"metrics",
"metrics-util",
"quanta",
"thiserror",
"tokio",
"tracing",
]
[[package]]
name = "metrics-macros"
version = "0.7.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "ddece26afd34c31585c74a4db0630c376df271c285d682d1e55012197830b6df"
dependencies = [
"proc-macro2",
"quote",
"syn 2.0.42",
]
[[package]]
name = "metrics-util"
version = "0.15.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "4de2ed6e491ed114b40b732e4d1659a9d53992ebd87490c44a6ffe23739d973e"
dependencies = [
"crossbeam-epoch",
"crossbeam-utils",
"hashbrown 0.13.1",
"metrics",
"num_cpus",
"quanta",
"sketches-ddsketch",
]
[[package]]
name = "mime"
version = "0.3.17"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "6877bb514081ee2a7ff5ef9de3281f14a4dd4bceac4c09388074a6b5df8a139a"
[[package]]
name = "mime_guess"
version = "2.0.4"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "4192263c238a5f0d0c6bfd21f336a313a4ce1c450542449ca191bb657b4642ef"
dependencies = [
"mime",
"unicase",
]
[[package]]
name = "minijinja"
version = "1.0.10"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "208758577ef2c86cf5dd3e85730d161413ec3284e2d73b2ef65d9a24d9971bcb"
dependencies = [
"serde",
]
[[package]]
name = "minimal-lexical"
version = "0.2.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "68354c5c6bd36d73ff3feceb05efa59b6acb7626617f4962be322a825e61f79a"
[[package]]
name = "miniz_oxide"
version = "0.7.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "e7810e0be55b428ada41041c41f32c9f1a42817901b4ccf45fa3d4b6561e74c7"
dependencies = [
"adler",
]
[[package]]
name = "mio"
version = "0.8.10"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "8f3d0b296e374a4e6f3c7b0a1f5a51d748a0d34c85e7dc48fc3fa9a87657fe09"
dependencies = [
"libc",
"log",
"wasi",
"windows-sys 0.48.0",
]
[[package]]
name = "monostate"
version = "0.1.10"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "e404e13820ea0df0eda93aa294e0c80de76a0daa6bec590d376fbec6d7810394"
dependencies = [
"monostate-impl",
"serde",
]
[[package]]
name = "monostate-impl"
version = "0.1.10"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "531c82a934da419bed3da09bd87d6e98c72f8d4aa755427b3b009c2b8b8c433c"
dependencies = [
"proc-macro2",
"quote",
"syn 2.0.42",
]
[[package]]
name = "multimap"
version = "0.8.3"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "e5ce46fe64a9d73be07dcbe690a38ce1b293be448fd8ce1e6c1b8062c9f72c6a"
[[package]]
name = "muxado"
version = "0.4.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "e92b89ac3127251efde6f5a9586e5aae99468d06fcf9f133b377f58d5ed66446"
dependencies = [
"async-trait",
"awaitdrop",
"bitflags 1.3.2",
"bytes",
"futures",
"pin-project",
"rand",
"thiserror",
"tokio",
"tokio-util",
"tracing",
]
[[package]]
name = "native-tls"
version = "0.2.11"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "07226173c32f2926027b63cce4bcd8076c3552846cbe7925f3aaffeac0a3b92e"
dependencies = [
"lazy_static",
"libc",
"log",
"openssl",
"openssl-probe",
"openssl-sys",
"schannel",
"security-framework",
"security-framework-sys",
"tempfile",
]
[[package]]
name = "ngrok"
version = "0.13.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "1454b1edbc5f2c8ff3242c237cb84388b50eced8eb26b4204e49698ed6511784"
dependencies = [
"arc-swap",
"async-rustls",
"async-trait",
"awaitdrop",
"axum",
"base64 0.13.1",
"bytes",
"futures",
"hostname",
"hyper",
"muxado",
"once_cell",
"parking_lot",
"regex",
"rustls-pemfile",
"serde",
"serde_json",
"thiserror",
"tokio",
"tokio-retry",
"tokio-util",
"tracing",
"windows-sys 0.45.0",
]
[[package]]
name = "nix"
version = "0.27.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "2eb04e9c688eff1c89d72b407f168cf79bb9e867a9d3323ed6c01519eb9cc053"
dependencies = [
"bitflags 2.4.1",
"cfg-if",
"libc",
]
[[package]]
name = "nohash-hasher"
version = "0.2.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "2bf50223579dc7cdcfb3bfcacf7069ff68243f8c363f62ffa99cf000a6b9c451"
[[package]]
name = "nom"
version = "7.1.3"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "d273983c5a657a70a3e8f2a01329822f3b8c8172b73826411a55751e404a0a4a"
dependencies = [
"memchr",
"minimal-lexical",
]
[[package]]
name = "ntapi"
version = "0.4.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "e8a3895c6391c39d7fe7ebc444a87eb2991b2a0bc718fdabd071eec617fc68e4"
dependencies = [
"winapi",
]
[[package]]
name = "nu-ansi-term"
version = "0.46.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "77a8165726e8236064dbb45459242600304b42a5ea24ee2948e18e023bf7ba84"
dependencies = [
"overload",
"winapi",
]
[[package]]
name = "num-traits"
version = "0.2.17"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "39e3200413f237f41ab11ad6d161bc7239c84dcb631773ccd7de3dfe4b5c267c"
dependencies = [
"autocfg",
"libm",
]
[[package]]
name = "num_cpus"
version = "1.16.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "4161fcb6d602d4d2081af7c3a45852d875a03dd337a6bfdd6e06407b61342a43"
dependencies = [
"hermit-abi",
"libc",
]
[[package]]
name = "num_threads"
version = "0.1.6"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "2819ce041d2ee131036f4fc9d6ae7ae125a3a40e97ba64d04fe799ad9dabbb44"
dependencies = [
"libc",
]
[[package]]
name = "number_prefix"
version = "0.4.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "830b246a0e5f20af87141b25c173cd1b609bd7779a4617d6ec582abaf90870f3"
[[package]]
name = "object"
version = "0.32.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "9cf5f9dd3933bd50a9e1f149ec995f39ae2c496d31fd772c1fd45ebc27e902b0"
dependencies = [
"memchr",
]
[[package]]
name = "once_cell"
version = "1.19.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "3fdb12b2476b595f9358c5161aa467c2438859caa136dec86c26fdd2efe17b92"
[[package]]
name = "onig"
version = "6.4.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "8c4b31c8722ad9171c6d77d3557db078cab2bd50afcc9d09c8b315c59df8ca4f"
dependencies = [
"bitflags 1.3.2",
"libc",
"once_cell",
"onig_sys",
]
[[package]]
name = "onig_sys"
version = "69.8.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "7b829e3d7e9cc74c7e315ee8edb185bf4190da5acde74afd7fc59c35b1f086e7"
dependencies = [
"cc",
"pkg-config",
]
[[package]]
name = "openssl"
version = "0.10.62"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "8cde4d2d9200ad5909f8dac647e29482e07c3a35de8a13fce7c9c7747ad9f671"
dependencies = [
"bitflags 2.4.1",
"cfg-if",
"foreign-types",
"libc",
"once_cell",
"openssl-macros",
"openssl-sys",
]
[[package]]
name = "openssl-macros"
version = "0.1.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "a948666b637a0f465e8564c73e89d4dde00d72d4d473cc972f390fc3dcee7d9c"
dependencies = [
"proc-macro2",
"quote",
"syn 2.0.42",
]
[[package]]
name = "openssl-probe"
version = "0.1.5"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "ff011a302c396a5197692431fc1948019154afc178baf7d8e37367442a4601cf"
[[package]]
name = "openssl-sys"
version = "0.9.98"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "c1665caf8ab2dc9aef43d1c0023bd904633a6a05cb30b0ad59bec2ae986e57a7"
dependencies = [
"cc",
"libc",
"pkg-config",
"vcpkg",
]
[[package]]
name = "opentelemetry"
version = "0.20.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "9591d937bc0e6d2feb6f71a559540ab300ea49955229c347a517a28d27784c54"
dependencies = [
"opentelemetry_api",
"opentelemetry_sdk",
]
[[package]]
name = "opentelemetry-http"
version = "0.9.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "c7594ec0e11d8e33faf03530a4c49af7064ebba81c1480e01be67d90b356508b"
dependencies = [
"async-trait",
"bytes",
"http",
"opentelemetry_api",
]
[[package]]
name = "opentelemetry-otlp"
version = "0.13.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "7e5e5a5c4135864099f3faafbe939eb4d7f9b80ebf68a8448da961b32a7c1275"
dependencies = [
"async-trait",
"futures-core",
"http",
"opentelemetry-proto",
"opentelemetry-semantic-conventions",
"opentelemetry_api",
"opentelemetry_sdk",
"prost 0.11.9",
"thiserror",
"tokio",
"tonic 0.9.2",
]
[[package]]
name = "opentelemetry-proto"
version = "0.3.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "b1e3f814aa9f8c905d0ee4bde026afd3b2577a97c10e1699912e3e44f0c4cbeb"
dependencies = [
"opentelemetry_api",
"opentelemetry_sdk",
"prost 0.11.9",
"tonic 0.9.2",
]
[[package]]
name = "opentelemetry-semantic-conventions"
version = "0.12.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "73c9f9340ad135068800e7f1b24e9e09ed9e7143f5bf8518ded3d3ec69789269"
dependencies = [
"opentelemetry",
]
[[package]]
name = "opentelemetry_api"
version = "0.20.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "8a81f725323db1b1206ca3da8bb19874bbd3f57c3bcd59471bfb04525b265b9b"
dependencies = [
"futures-channel",
"futures-util",
"indexmap 1.9.3",
"js-sys",
"once_cell",
"pin-project-lite",
"thiserror",
"urlencoding",
]
[[package]]
name = "opentelemetry_sdk"
version = "0.20.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "fa8e705a0612d48139799fcbaba0d4a90f06277153e43dd2bdc16c6f0edd8026"
dependencies = [
"async-trait",
"crossbeam-channel",
"futures-channel",
"futures-executor",
"futures-util",
"once_cell",
"opentelemetry_api",
"ordered-float",
"percent-encoding",
"rand",
"regex",
"serde_json",
"thiserror",
"tokio",
"tokio-stream",
]
[[package]]
name = "option-ext"
version = "0.2.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "04744f49eae99ab78e0d5c0b603ab218f515ea8cfe5a456d7629ad883a3b6e7d"
[[package]]
name = "ordered-float"
version = "3.9.2"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "f1e1c390732d15f1d48471625cd92d154e66db2c56645e29a9cd26f4699f72dc"
dependencies = [
"num-traits",
]
[[package]]
name = "overload"
version = "0.1.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "b15813163c1d831bf4a13c3610c05c0d03b39feb07f7e09fa234dac9b15aaf39"
[[package]]
name = "papergrid"
version = "0.10.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "a2ccbe15f2b6db62f9a9871642746427e297b0ceb85f9a7f1ee5ff47d184d0c8"
dependencies = [
"bytecount",
"fnv",
"unicode-width",
]
[[package]]
name = "parking_lot"
version = "0.12.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "3742b2c103b9f06bc9fff0a37ff4912935851bee6d36f3c02bcc755bcfec228f"
dependencies = [
"lock_api",
"parking_lot_core",
]
[[package]]
name = "parking_lot_core"
version = "0.9.9"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "4c42a9226546d68acdd9c0a280d17ce19bfe27a46bf68784e4066115788d008e"
dependencies = [
"cfg-if",
"libc",
"redox_syscall",
"smallvec",
"windows-targets 0.48.5",
]
[[package]]
name = "paste"
version = "1.0.14"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "de3145af08024dea9fa9914f381a17b8fc6034dfb00f3a84013f7ff43f29ed4c"
[[package]]
name = "percent-encoding"
version = "2.3.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "e3148f5046208a5d56bcfc03053e3ca6334e51da8dfb19b6cdc8b306fae3283e"
[[package]]
name = "petgraph"
version = "0.6.4"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "e1d3afd2628e69da2be385eb6f2fd57c8ac7977ceeff6dc166ff1657b0e386a9"
dependencies = [
"fixedbitset",
"indexmap 2.1.0",
]
[[package]]
name = "pin-project"
version = "1.1.3"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "fda4ed1c6c173e3fc7a83629421152e01d7b1f9b7f65fb301e490e8cfc656422"
dependencies = [
"pin-project-internal",
]
[[package]]
name = "pin-project-internal"
version = "1.1.3"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "4359fd9c9171ec6e8c62926d6faaf553a8dc3f64e1507e76da7911b4f6a04405"
dependencies = [
"proc-macro2",
"quote",
"syn 2.0.42",
]
[[package]]
name = "pin-project-lite"
version = "0.2.13"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "8afb450f006bf6385ca15ef45d71d2288452bc3683ce2e2cacc0d18e4be60b58"
[[package]]
name = "pin-utils"
version = "0.1.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "8b870d8c151b6f2fb93e84a13146138f05d02ed11c7e7c54f8826aaaf7c9f184"
[[package]]
name = "pkg-config"
version = "0.3.28"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "69d3587f8a9e599cc7ec2c00e331f71c4e69a5f9a4b8a6efd5b07466b9736f9a"
[[package]]
name = "portable-atomic"
version = "1.6.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "7170ef9988bc169ba16dd36a7fa041e5c4cbeb6a35b76d4c03daded371eae7c0"
[[package]]
name = "powerfmt"
version = "0.2.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "439ee305def115ba05938db6eb1644ff94165c5ab5e9420d1c1bcedbba909391"
[[package]]
name = "ppv-lite86"
version = "0.2.17"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "5b40af805b3121feab8a3c29f04d8ad262fa8e0561883e7653e024ae4479e6de"
[[package]]
name = "prettyplease"
version = "0.2.15"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "ae005bd773ab59b4725093fd7df83fd7892f7d8eafb48dbd7de6e024e4215f9d"
dependencies = [
"proc-macro2",
"syn 2.0.42",
]
[[package]]
name = "proc-macro-error"
version = "1.0.4"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "da25490ff9892aab3fcf7c36f08cfb902dd3e71ca0f9f9517bea02a73a5ce38c"
dependencies = [
"proc-macro-error-attr",
"proc-macro2",
"quote",
"syn 1.0.109",
"version_check",
]
[[package]]
name = "proc-macro-error-attr"
version = "1.0.4"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "a1be40180e52ecc98ad80b184934baf3d0d29f979574e439af5a55274b35f869"
dependencies = [
"proc-macro2",
"quote",
"version_check",
]
[[package]]
name = "proc-macro2"
version = "1.0.71"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "75cb1540fadbd5b8fbccc4dddad2734eba435053f725621c070711a14bb5f4b8"
dependencies = [
"unicode-ident",
]
[[package]]
name = "prost"
version = "0.11.9"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "0b82eaa1d779e9a4bc1c3217db8ffbeabaae1dca241bf70183242128d48681cd"
dependencies = [
"bytes",
"prost-derive 0.11.9",
]
[[package]]
name = "prost"
version = "0.12.3"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "146c289cda302b98a28d40c8b3b90498d6e526dd24ac2ecea73e4e491685b94a"
dependencies = [
"bytes",
"prost-derive 0.12.3",
]
[[package]]
name = "prost-build"
version = "0.12.3"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "c55e02e35260070b6f716a2423c2ff1c3bb1642ddca6f99e1f26d06268a0e2d2"
dependencies = [
"bytes",
"heck",
"itertools 0.11.0",
"log",
"multimap",
"once_cell",
"petgraph",
"prettyplease",
"prost 0.12.3",
"prost-types",
"regex",
"syn 2.0.42",
"tempfile",
"which",
]
[[package]]
name = "prost-derive"
version = "0.11.9"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "e5d2d8d10f3c6ded6da8b05b5fb3b8a5082514344d56c9f871412d29b4e075b4"
dependencies = [
"anyhow",
"itertools 0.10.5",
"proc-macro2",
"quote",
"syn 1.0.109",
]
[[package]]
name = "prost-derive"
version = "0.12.3"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "efb6c9a1dd1def8e2124d17e83a20af56f1570d6c2d2bd9e266ccb768df3840e"
dependencies = [
"anyhow",
"itertools 0.11.0",
"proc-macro2",
"quote",
"syn 2.0.42",
]
[[package]]
name = "prost-types"
version = "0.12.3"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "193898f59edcf43c26227dcd4c8427f00d99d61e95dcde58dabd49fa291d470e"
dependencies = [
"prost 0.12.3",
]
[[package]]
name = "quanta"
version = "0.11.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "a17e662a7a8291a865152364c20c7abc5e60486ab2001e8ec10b24862de0b9ab"
dependencies = [
"crossbeam-utils",
"libc",
"mach2",
"once_cell",
"raw-cpuid",
"wasi",
"web-sys",
"winapi",
]
[[package]]
name = "quote"
version = "1.0.33"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "5267fca4496028628a95160fc423a33e8b2e6af8a5302579e322e4b520293cae"
dependencies = [
"proc-macro2",
]
[[package]]
name = "rand"
version = "0.8.5"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "34af8d1a0e25924bc5b7c43c079c942339d8f0a8b57c39049bef581b46327404"
dependencies = [
"libc",
"rand_chacha",
"rand_core",
]
[[package]]
name = "rand_chacha"
version = "0.3.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "e6c10a63a0fa32252be49d21e7709d4d4baf8d231c2dbce1eaa8141b9b127d88"
dependencies = [
"ppv-lite86",
"rand_core",
]
[[package]]
name = "rand_core"
version = "0.6.4"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "ec0be4795e2f6a28069bec0b5ff3e2ac9bafc99e6a9a7dc3547996c5c816922c"
dependencies = [
"getrandom",
]
[[package]]
name = "ratatui"
version = "0.23.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "2e2e4cd95294a85c3b4446e63ef054eea43e0205b1fd60120c16b74ff7ff96ad"
dependencies = [
"bitflags 2.4.1",
"cassowary",
"crossterm",
"indoc",
"itertools 0.11.0",
"paste",
"strum",
"unicode-segmentation",
"unicode-width",
]
[[package]]
name = "raw-cpuid"
version = "10.7.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "6c297679cb867470fa8c9f67dbba74a78d78e3e98d7cf2b08d6d71540f797332"
dependencies = [
"bitflags 1.3.2",
]
[[package]]
name = "rayon"
version = "1.8.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "9c27db03db7734835b3f53954b534c91069375ce6ccaa2e065441e07d9b6cdb1"
dependencies = [
"either",
"rayon-core",
]
[[package]]
name = "rayon-cond"
version = "0.3.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "059f538b55efd2309c9794130bc149c6a553db90e9d99c2030785c82f0bd7df9"
dependencies = [
"either",
"itertools 0.11.0",
"rayon",
]
[[package]]
name = "rayon-core"
version = "1.12.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "5ce3fb6ad83f861aac485e76e1985cd109d9a3713802152be56c3b1f0e0658ed"
dependencies = [
"crossbeam-deque",
"crossbeam-utils",
]
[[package]]
name = "redox_syscall"
version = "0.4.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "4722d768eff46b75989dd134e5c353f0d6296e5aaa3132e776cbdb56be7731aa"
dependencies = [
"bitflags 1.3.2",
]
[[package]]
name = "redox_users"
version = "0.4.4"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "a18479200779601e498ada4e8c1e1f50e3ee19deb0259c25825a98b5603b2cb4"
dependencies = [
"getrandom",
"libredox",
"thiserror",
]
[[package]]
name = "regex"
version = "1.10.2"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "380b951a9c5e80ddfd6136919eef32310721aa4aacd4889a8d39124b026ab343"
dependencies = [
"aho-corasick",
"memchr",
"regex-automata 0.4.3",
"regex-syntax 0.8.2",
]
[[package]]
name = "regex-automata"
version = "0.1.10"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "6c230d73fb8d8c1b9c0b3135c5142a8acee3a0558fb8db5cf1cb65f8d7862132"
dependencies = [
"regex-syntax 0.6.29",
]
[[package]]
name = "regex-automata"
version = "0.4.3"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "5f804c7828047e88b2d32e2d7fe5a105da8ee3264f01902f796c8e067dc2483f"
dependencies = [
"aho-corasick",
"memchr",
"regex-syntax 0.8.2",
]
[[package]]
name = "regex-syntax"
version = "0.6.29"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "f162c6dd7b008981e4d40210aca20b4bd0f9b60ca9271061b07f78537722f2e1"
[[package]]
name = "regex-syntax"
version = "0.7.5"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "dbb5fb1acd8a1a18b3dd5be62d25485eb770e05afb408a9627d14d451bae12da"
[[package]]
name = "regex-syntax"
version = "0.8.2"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "c08c74e62047bb2de4ff487b251e4a92e24f48745648451635cec7d591162d9f"
[[package]]
name = "reqwest"
version = "0.11.23"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "37b1ae8d9ac08420c66222fb9096fc5de435c3c48542bc5336c51892cffafb41"
dependencies = [
"base64 0.21.5",
"bytes",
"encoding_rs",
"futures-core",
"futures-util",
"h2",
"http",
"http-body",
"hyper",
"hyper-tls",
"ipnet",
"js-sys",
"log",
"mime",
"native-tls",
"once_cell",
"percent-encoding",
"pin-project-lite",
"serde",
"serde_json",
"serde_urlencoded",
"system-configuration",
"tokio",
"tokio-native-tls",
"tower-service",
"url",
"wasm-bindgen",
"wasm-bindgen-futures",
"web-sys",
"winreg",
]
[[package]]
name = "ring"
version = "0.16.20"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "3053cf52e236a3ed746dfc745aa9cacf1b791d846bdaf412f60a8d7d6e17c8fc"
dependencies = [
"cc",
"libc",
"once_cell",
"spin 0.5.2",
"untrusted 0.7.1",
"web-sys",
"winapi",
]
[[package]]
name = "ring"
version = "0.17.7"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "688c63d65483050968b2a8937f7995f443e27041a0f7700aa59b0822aedebb74"
dependencies = [
"cc",
"getrandom",
"libc",
"spin 0.9.8",
"untrusted 0.9.0",
"windows-sys 0.48.0",
]
[[package]]
name = "rust-embed"
version = "6.8.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "a36224c3276f8c4ebc8c20f158eca7ca4359c8db89991c4925132aaaf6702661"
dependencies = [
"rust-embed-impl",
"rust-embed-utils",
"walkdir",
]
[[package]]
name = "rust-embed-impl"
version = "6.8.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "49b94b81e5b2c284684141a2fb9e2a31be90638caf040bf9afbc5a0416afe1ac"
dependencies = [
"proc-macro2",
"quote",
"rust-embed-utils",
"shellexpand",
"syn 2.0.42",
"walkdir",
]
[[package]]
name = "rust-embed-utils"
version = "7.8.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "9d38ff6bf570dc3bb7100fce9f7b60c33fa71d80e88da3f2580df4ff2bdded74"
dependencies = [
"sha2",
"walkdir",
]
[[package]]
name = "rustc-demangle"
version = "0.1.23"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "d626bb9dae77e28219937af045c257c28bfd3f69333c512553507f5f9798cb76"
[[package]]
name = "rustc_version"
version = "0.4.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "bfa0f585226d2e68097d4f95d113b15b83a82e819ab25717ec0590d9584ef366"
dependencies = [
"semver",
]
[[package]]
name = "rustix"
version = "0.38.28"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "72e572a5e8ca657d7366229cdde4bd14c4eb5499a9573d4d366fe1b599daa316"
dependencies = [
"bitflags 2.4.1",
"errno",
"libc",
"linux-raw-sys",
"windows-sys 0.52.0",
]
[[package]]
name = "rustls"
version = "0.20.9"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "1b80e3dec595989ea8510028f30c408a4630db12c9cbb8de34203b89d6577e99"
dependencies = [
"log",
"ring 0.16.20",
"sct",
"webpki",
]
[[package]]
name = "rustls"
version = "0.21.10"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "f9d5a6813c0759e4609cd494e8e725babae6a2ca7b62a5536a13daaec6fcb7ba"
dependencies = [
"log",
"ring 0.17.7",
"rustls-webpki",
"sct",
]
[[package]]
name = "rustls-pemfile"
version = "1.0.4"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "1c74cae0a4cf6ccbbf5f359f08efdf8ee7e1dc532573bf0db71968cb56b1448c"
dependencies = [
"base64 0.21.5",
]
[[package]]
name = "rustls-webpki"
version = "0.101.7"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "8b6275d1ee7a1cd780b64aca7726599a1dbc893b1e64144529e55c3c2f745765"
dependencies = [
"ring 0.17.7",
"untrusted 0.9.0",
]
[[package]]
name = "rustversion"
version = "1.0.14"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "7ffc183a10b4478d04cbbbfc96d0873219d962dd5accaff2ffbd4ceb7df837f4"
[[package]]
name = "ryu"
version = "1.0.16"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "f98d2aa92eebf49b69786be48e4477826b256916e84a57ff2a4f21923b48eb4c"
[[package]]
name = "same-file"
version = "1.0.6"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "93fc1dc3aaa9bfed95e02e6eadabb4baf7e3078b0bd1b4d7b6b0b68378900502"
dependencies = [
"winapi-util",
]
[[package]]
name = "schannel"
version = "0.1.22"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "0c3733bf4cf7ea0880754e19cb5a462007c4a8c1914bff372ccc95b464f1df88"
dependencies = [
"windows-sys 0.48.0",
]
[[package]]
name = "scopeguard"
version = "1.2.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "94143f37725109f92c262ed2cf5e59bce7498c01bcc1502d7b9afe439a4e9f49"
[[package]]
name = "sct"
version = "0.7.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "da046153aa2352493d6cb7da4b6e5c0c057d8a1d0a9aa8560baffdd945acd414"
dependencies = [
"ring 0.17.7",
"untrusted 0.9.0",
]
[[package]]
name = "security-framework"
version = "2.9.2"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "05b64fb303737d99b81884b2c63433e9ae28abebe5eb5045dcdd175dc2ecf4de"
dependencies = [
"bitflags 1.3.2",
"core-foundation",
"core-foundation-sys",
"libc",
"security-framework-sys",
]
[[package]]
name = "security-framework-sys"
version = "2.9.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "e932934257d3b408ed8f30db49d85ea163bfe74961f017f405b025af298f0c7a"
dependencies = [
"core-foundation-sys",
"libc",
]
[[package]]
name = "semver"
version = "1.0.20"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "836fa6a3e1e547f9a2c4040802ec865b5d85f4014efe00555d7090a3dcaa1090"
[[package]]
name = "serde"
version = "1.0.193"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "25dd9975e68d0cb5aa1120c288333fc98731bd1dd12f561e468ea4728c042b89"
dependencies = [
"serde_derive",
]
[[package]]
name = "serde_derive"
version = "1.0.193"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "43576ca501357b9b071ac53cdc7da8ef0cbd9493d8df094cd821777ea6e894d3"
dependencies = [
"proc-macro2",
"quote",
"syn 2.0.42",
]
[[package]]
name = "serde_json"
version = "1.0.108"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "3d1c7e3eac408d115102c4c24ad393e0821bb3a5df4d506a80f85f7a742a526b"
dependencies = [
"itoa",
"ryu",
"serde",
]
[[package]]
name = "serde_path_to_error"
version = "0.1.14"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "4beec8bce849d58d06238cb50db2e1c417cfeafa4c63f692b15c82b7c80f8335"
dependencies = [
"itoa",
"serde",
]
[[package]]
name = "serde_urlencoded"
version = "0.7.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "d3491c14715ca2294c4d6a88f15e84739788c1d030eed8c110436aafdaa2f3fd"
dependencies = [
"form_urlencoded",
"itoa",
"ryu",
"serde",
]
[[package]]
name = "sha2"
version = "0.10.8"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "793db75ad2bcafc3ffa7c68b215fee268f537982cd901d132f89c6343f3a3dc8"
dependencies = [
"cfg-if",
"cpufeatures",
"digest",
]
[[package]]
name = "sharded-slab"
version = "0.1.7"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "f40ca3c46823713e0d4209592e8d6e826aa57e928f09752619fc696c499637f6"
dependencies = [
"lazy_static",
]
[[package]]
name = "shellexpand"
version = "2.1.2"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "7ccc8076840c4da029af4f87e4e8daeb0fca6b87bbb02e10cb60b791450e11e4"
dependencies = [
"dirs 4.0.0",
]
[[package]]
name = "signal-hook"
version = "0.3.17"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "8621587d4798caf8eb44879d42e56b9a93ea5dcd315a6487c357130095b62801"
dependencies = [
"libc",
"signal-hook-registry",
]
[[package]]
name = "signal-hook-mio"
version = "0.2.3"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "29ad2e15f37ec9a6cc544097b78a1ec90001e9f71b81338ca39f430adaca99af"
dependencies = [
"libc",
"mio",
"signal-hook",
]
[[package]]
name = "signal-hook-registry"
version = "1.4.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "d8229b473baa5980ac72ef434c4415e70c4b5e71b423043adb4ba059f89c99a1"
dependencies = [
"libc",
]
[[package]]
name = "sketches-ddsketch"
version = "0.2.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "68a406c1882ed7f29cd5e248c9848a80e7cb6ae0fea82346d2746f2f941c07e1"
[[package]]
name = "slab"
version = "0.4.9"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "8f92a496fb766b417c996b9c5e57daf2f7ad3b0bebe1ccfca4856390e3d3bb67"
dependencies = [
"autocfg",
]
[[package]]
name = "slotmap"
version = "1.0.7"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "dbff4acf519f630b3a3ddcfaea6c06b42174d9a44bc70c620e9ed1649d58b82a"
dependencies = [
"version_check",
]
[[package]]
name = "smallvec"
version = "1.11.2"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "4dccd0940a2dcdf68d092b8cbab7dc0ad8fa938bf95787e1b916b0e3d0e8e970"
[[package]]
name = "socket2"
version = "0.5.5"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "7b5fac59a5cb5dd637972e5fca70daf0523c9067fcdc4842f053dae04a18f8e9"
dependencies = [
"libc",
"windows-sys 0.48.0",
]
[[package]]
name = "spin"
version = "0.5.2"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "6e63cff320ae2c57904679ba7cb63280a3dc4613885beafb148ee7bf9aa9042d"
[[package]]
name = "spin"
version = "0.9.8"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "6980e8d7511241f8acf4aebddbb1ff938df5eebe98691418c4468d0b72a96a67"
[[package]]
name = "spm_precompiled"
version = "0.1.4"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "5851699c4033c63636f7ea4cf7b7c1f1bf06d0cc03cfb42e711de5a5c46cf326"
dependencies = [
"base64 0.13.1",
"nom",
"serde",
"unicode-segmentation",
]
[[package]]
name = "strsim"
version = "0.10.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "73473c0e59e6d5812c5dfe2a064a6444949f089e20eec9a2e5506596494e4623"
[[package]]
name = "strum"
version = "0.25.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "290d54ea6f91c969195bdbcd7442c8c2a2ba87da8bf60a7ee86a235d4bc1e125"
dependencies = [
"strum_macros",
]
[[package]]
name = "strum_macros"
version = "0.25.3"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "23dc1fa9ac9c169a78ba62f0b841814b7abae11bdd047b9c58f893439e309ea0"
dependencies = [
"heck",
"proc-macro2",
"quote",
"rustversion",
"syn 2.0.42",
]
[[package]]
name = "syn"
version = "1.0.109"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "72b64191b275b66ffe2469e8af2c1cfe3bafa67b529ead792a6d0160888b4237"
dependencies = [
"proc-macro2",
"quote",
"unicode-ident",
]
[[package]]
name = "syn"
version = "2.0.42"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "5b7d0a2c048d661a1a59fcd7355baa232f7ed34e0ee4df2eef3c1c1c0d3852d8"
dependencies = [
"proc-macro2",
"quote",
"unicode-ident",
]
[[package]]
name = "sync_wrapper"
version = "0.1.2"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "2047c6ded9c721764247e62cd3b03c09ffc529b2ba5b10ec482ae507a4a70160"
[[package]]
name = "sysinfo"
version = "0.29.11"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "cd727fc423c2060f6c92d9534cef765c65a6ed3f428a03d7def74a8c4348e666"
dependencies = [
"cfg-if",
"core-foundation-sys",
"libc",
"ntapi",
"once_cell",
"winapi",
]
[[package]]
name = "system-configuration"
version = "0.5.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "ba3a3adc5c275d719af8cb4272ea1c4a6d668a777f37e115f6d11ddbc1c8e0e7"
dependencies = [
"bitflags 1.3.2",
"core-foundation",
"system-configuration-sys",
]
[[package]]
name = "system-configuration-sys"
version = "0.5.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "a75fb188eb626b924683e3b95e3a48e63551fcfb51949de2f06a9d91dbee93c9"
dependencies = [
"core-foundation-sys",
"libc",
]
[[package]]
name = "tabled"
version = "0.14.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "dfe9c3632da101aba5131ed63f9eed38665f8b3c68703a6bb18124835c1a5d22"
dependencies = [
"papergrid",
"tabled_derive",
"unicode-width",
]
[[package]]
name = "tabled_derive"
version = "0.6.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "99f688a08b54f4f02f0a3c382aefdb7884d3d69609f785bd253dc033243e3fe4"
dependencies = [
"heck",
"proc-macro-error",
"proc-macro2",
"quote",
"syn 1.0.109",
]
[[package]]
name = "tempfile"
version = "3.8.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "7ef1adac450ad7f4b3c28589471ade84f25f731a7a0fe30d71dfa9f60fd808e5"
dependencies = [
"cfg-if",
"fastrand",
"redox_syscall",
"rustix",
"windows-sys 0.48.0",
]
[[package]]
name = "text-generation-benchmark"
version = "1.3.4"
dependencies = [
"average",
"clap",
"crossterm",
"float-ord",
"hf-hub",
"ratatui",
"serde",
"serde_json",
"tabled",
"text-generation-client",
"thiserror",
"tokenizers",
"tokio",
"tracing",
"tracing-subscriber",
]
[[package]]
name = "text-generation-client"
version = "1.3.4"
dependencies = [
"futures",
"grpc-metadata",
"prost 0.12.3",
"prost-build",
"thiserror",
"tokio",
"tonic 0.10.2",
"tonic-build",
"tower",
"tracing",
]
[[package]]
name = "text-generation-launcher"
version = "1.3.4"
dependencies = [
"clap",
"ctrlc",
"float_eq",
"nix",
"reqwest",
"serde",
"serde_json",
"tracing",
"tracing-subscriber",
"vergen",
]
[[package]]
name = "text-generation-router"
version = "1.3.4"
dependencies = [
"async-stream",
"axum",
"axum-tracing-opentelemetry",
"clap",
"futures",
"futures-util",
"hf-hub",
"init-tracing-opentelemetry",
"metrics",
"metrics-exporter-prometheus",
"minijinja",
"ngrok",
"nohash-hasher",
"opentelemetry",
"opentelemetry-otlp",
"rand",
"reqwest",
"serde",
"serde_json",
"text-generation-client",
"thiserror",
"tokenizers",
"tokio",
"tokio-stream",
"tower-http",
"tracing",
"tracing-opentelemetry",
"tracing-subscriber",
"utoipa",
"utoipa-swagger-ui",
"vergen",
]
[[package]]
name = "thiserror"
version = "1.0.51"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "f11c217e1416d6f036b870f14e0413d480dbf28edbee1f877abaf0206af43bb7"
dependencies = [
"thiserror-impl",
]
[[package]]
name = "thiserror-impl"
version = "1.0.51"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "01742297787513b79cf8e29d1056ede1313e2420b7b3b15d0a768b4921f549df"
dependencies = [
"proc-macro2",
"quote",
"syn 2.0.42",
]
[[package]]
name = "thread_local"
version = "1.1.7"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "3fdd6f064ccff2d6567adcb3873ca630700f00b5ad3f060c25b5dcfd9a4ce152"
dependencies = [
"cfg-if",
"once_cell",
]
[[package]]
name = "time"
version = "0.3.31"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "f657ba42c3f86e7680e53c8cd3af8abbe56b5491790b46e22e19c0d57463583e"
dependencies = [
"deranged",
"itoa",
"libc",
"num_threads",
"powerfmt",
"serde",
"time-core",
"time-macros",
]
[[package]]
name = "time-core"
version = "0.1.2"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "ef927ca75afb808a4d64dd374f00a2adf8d0fcff8e7b184af886c3c87ec4a3f3"
[[package]]
name = "time-macros"
version = "0.2.16"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "26197e33420244aeb70c3e8c78376ca46571bc4e701e4791c2cd9f57dcb3a43f"
dependencies = [
"time-core",
]
[[package]]
name = "tinyvec"
version = "1.6.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "87cc5ceb3875bb20c2890005a4e226a4651264a5c75edb2421b52861a0a0cb50"
dependencies = [
"tinyvec_macros",
]
[[package]]
name = "tinyvec_macros"
version = "0.1.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "1f3ccbac311fea05f86f61904b462b55fb3df8837a366dfc601a0161d0532f20"
[[package]]
name = "tokenizers"
version = "0.14.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "d9be88c795d8b9f9c4002b3a8f26a6d0876103a6f523b32ea3bac52d8560c17c"
dependencies = [
"aho-corasick",
"clap",
"derive_builder",
"esaxx-rs",
"getrandom",
"hf-hub",
"indicatif",
"itertools 0.11.0",
"lazy_static",
"log",
"macro_rules_attribute",
"monostate",
"onig",
"paste",
"rand",
"rayon",
"rayon-cond",
"regex",
"regex-syntax 0.7.5",
"serde",
"serde_json",
"spm_precompiled",
"thiserror",
"unicode-normalization-alignments",
"unicode-segmentation",
"unicode_categories",
]
[[package]]
name = "tokio"
version = "1.35.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "c89b4efa943be685f629b149f53829423f8f5531ea21249408e8e2f8671ec104"
dependencies = [
"backtrace",
"bytes",
"libc",
"mio",
"num_cpus",
"parking_lot",
"pin-project-lite",
"signal-hook-registry",
"socket2",
"tokio-macros",
"windows-sys 0.48.0",
]
[[package]]
name = "tokio-io-timeout"
version = "1.2.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "30b74022ada614a1b4834de765f9bb43877f910cc8ce4be40e89042c9223a8bf"
dependencies = [
"pin-project-lite",
"tokio",
]
[[package]]
name = "tokio-macros"
version = "2.2.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "5b8a1e28f2deaa14e508979454cb3a223b10b938b45af148bc0986de36f1923b"
dependencies = [
"proc-macro2",
"quote",
"syn 2.0.42",
]
[[package]]
name = "tokio-native-tls"
version = "0.3.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "bbae76ab933c85776efabc971569dd6119c580d8f5d448769dec1764bf796ef2"
dependencies = [
"native-tls",
"tokio",
]
[[package]]
name = "tokio-retry"
version = "0.3.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "7f57eb36ecbe0fc510036adff84824dd3c24bb781e21bfa67b69d556aa85214f"
dependencies = [
"pin-project",
"rand",
"tokio",
]
[[package]]
name = "tokio-stream"
version = "0.1.14"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "397c988d37662c7dda6d2208364a706264bf3d6138b11d436cbac0ad38832842"
dependencies = [
"futures-core",
"pin-project-lite",
"tokio",
]
[[package]]
name = "tokio-util"
version = "0.7.10"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "5419f34732d9eb6ee4c3578b7989078579b7f039cbbb9ca2c4da015749371e15"
dependencies = [
"bytes",
"futures-core",
"futures-io",
"futures-sink",
"pin-project-lite",
"tokio",
"tracing",
]
[[package]]
name = "tonic"
version = "0.9.2"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "3082666a3a6433f7f511c7192923fa1fe07c69332d3c6a2e6bb040b569199d5a"
dependencies = [
"async-trait",
"axum",
"base64 0.21.5",
"bytes",
"futures-core",
"futures-util",
"h2",
"http",
"http-body",
"hyper",
"hyper-timeout",
"percent-encoding",
"pin-project",
"prost 0.11.9",
"tokio",
"tokio-stream",
"tower",
"tower-layer",
"tower-service",
"tracing",
]
[[package]]
name = "tonic"
version = "0.10.2"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "d560933a0de61cf715926b9cac824d4c883c2c43142f787595e48280c40a1d0e"
dependencies = [
"async-stream",
"async-trait",
"axum",
"base64 0.21.5",
"bytes",
"h2",
"http",
"http-body",
"hyper",
"hyper-timeout",
"percent-encoding",
"pin-project",
"prost 0.12.3",
"tokio",
"tokio-stream",
"tower",
"tower-layer",
"tower-service",
"tracing",
]
[[package]]
name = "tonic-build"
version = "0.10.2"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "9d021fc044c18582b9a2408cd0dd05b1596e3ecdb5c4df822bb0183545683889"
dependencies = [
"prettyplease",
"proc-macro2",
"prost-build",
"quote",
"syn 2.0.42",
]
[[package]]
name = "tower"
version = "0.4.13"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "b8fa9be0de6cf49e536ce1851f987bd21a43b771b09473c3549a6c853db37c1c"
dependencies = [
"futures-core",
"futures-util",
"indexmap 1.9.3",
"pin-project",
"pin-project-lite",
"rand",
"slab",
"tokio",
"tokio-util",
"tower-layer",
"tower-service",
"tracing",
]
[[package]]
name = "tower-http"
version = "0.4.4"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "61c5bb1d698276a2443e5ecfabc1008bf15a36c12e6a7176e7bf089ea9131140"
dependencies = [
"bitflags 2.4.1",
"bytes",
"futures-core",
"futures-util",
"http",
"http-body",
"http-range-header",
"pin-project-lite",
"tower-layer",
"tower-service",
]
[[package]]
name = "tower-layer"
version = "0.3.2"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "c20c8dbed6283a09604c3e69b4b7eeb54e298b8a600d4d5ecb5ad39de609f1d0"
[[package]]
name = "tower-service"
version = "0.3.2"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "b6bc1c9ce2b5135ac7f93c72918fc37feb872bdc6a5533a8b85eb4b86bfdae52"
[[package]]
name = "tracing"
version = "0.1.40"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "c3523ab5a71916ccf420eebdf5521fcef02141234bbc0b8a49f2fdc4544364ef"
dependencies = [
"log",
"pin-project-lite",
"tracing-attributes",
"tracing-core",
]
[[package]]
name = "tracing-attributes"
version = "0.1.27"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "34704c8d6ebcbc939824180af020566b01a7c01f80641264eba0999f6c2b6be7"
dependencies = [
"proc-macro2",
"quote",
"syn 2.0.42",
]
[[package]]
name = "tracing-core"
version = "0.1.32"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "c06d3da6113f116aaee68e4d601191614c9053067f9ab7f6edbcb161237daa54"
dependencies = [
"once_cell",
"valuable",
]
[[package]]
name = "tracing-log"
version = "0.1.4"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "f751112709b4e791d8ce53e32c4ed2d353565a795ce84da2285393f41557bdf2"
dependencies = [
"log",
"once_cell",
"tracing-core",
]
[[package]]
name = "tracing-log"
version = "0.2.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "ee855f1f400bd0e5c02d150ae5de3840039a3f54b025156404e34c23c03f47c3"
dependencies = [
"log",
"once_cell",
"tracing-core",
]
[[package]]
name = "tracing-opentelemetry"
version = "0.21.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "75327c6b667828ddc28f5e3f169036cb793c3f588d83bf0f262a7f062ffed3c8"
dependencies = [
"once_cell",
"opentelemetry",
"opentelemetry_sdk",
"smallvec",
"tracing",
"tracing-core",
"tracing-log 0.1.4",
"tracing-subscriber",
]
[[package]]
name = "tracing-opentelemetry-instrumentation-sdk"
version = "0.14.2"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "f523eba1b52bb854b804d43a039aafeaee5a623015065adbfef8016825319c15"
dependencies = [
"http",
"opentelemetry-http",
"opentelemetry_api",
"tracing",
"tracing-opentelemetry",
]
[[package]]
name = "tracing-serde"
version = "0.1.3"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "bc6b213177105856957181934e4920de57730fc69bf42c37ee5bb664d406d9e1"
dependencies = [
"serde",
"tracing-core",
]
[[package]]
name = "tracing-subscriber"
version = "0.3.18"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "ad0f048c97dbd9faa9b7df56362b8ebcaa52adb06b498c050d2f4e32f90a7a8b"
dependencies = [
"matchers",
"nu-ansi-term",
"once_cell",
"regex",
"serde",
"serde_json",
"sharded-slab",
"smallvec",
"thread_local",
"tracing",
"tracing-core",
"tracing-log 0.2.0",
"tracing-serde",
]
[[package]]
name = "try-lock"
version = "0.2.5"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "e421abadd41a4225275504ea4d6566923418b7f05506fbc9c0fe86ba7396114b"
[[package]]
name = "typenum"
version = "1.17.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "42ff0bf0c66b8238c6f3b578df37d0b7848e55df8577b3f74f92a69acceeb825"
[[package]]
name = "unicase"
version = "2.7.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "f7d2d4dafb69621809a81864c9c1b864479e1235c0dd4e199924b9742439ed89"
dependencies = [
"version_check",
]
[[package]]
name = "unicode-bidi"
version = "0.3.14"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "6f2528f27a9eb2b21e69c95319b30bd0efd85d09c379741b0f78ea1d86be2416"
[[package]]
name = "unicode-ident"
version = "1.0.12"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "3354b9ac3fae1ff6755cb6db53683adb661634f67557942dea4facebec0fee4b"
[[package]]
name = "unicode-normalization"
version = "0.1.22"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "5c5713f0fc4b5db668a2ac63cdb7bb4469d8c9fed047b1d0292cc7b0ce2ba921"
dependencies = [
"tinyvec",
]
[[package]]
name = "unicode-normalization-alignments"
version = "0.1.12"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "43f613e4fa046e69818dd287fdc4bc78175ff20331479dab6e1b0f98d57062de"
dependencies = [
"smallvec",
]
[[package]]
name = "unicode-segmentation"
version = "1.10.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "1dd624098567895118886609431a7c3b8f516e41d30e0643f03d94592a147e36"
[[package]]
name = "unicode-width"
version = "0.1.11"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "e51733f11c9c4f72aa0c160008246859e340b00807569a0da0e7a1079b27ba85"
[[package]]
name = "unicode_categories"
version = "0.1.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "39ec24b3121d976906ece63c9daad25b85969647682eee313cb5779fdd69e14e"
[[package]]
name = "untrusted"
version = "0.7.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "a156c684c91ea7d62626509bce3cb4e1d9ed5c4d978f7b4352658f96a4c26b4a"
[[package]]
name = "untrusted"
version = "0.9.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "8ecb6da28b8a351d773b68d5825ac39017e680750f980f3a1a85cd8dd28a47c1"
[[package]]
name = "ureq"
version = "2.9.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "f8cdd25c339e200129fe4de81451814e5228c9b771d57378817d6117cc2b3f97"
dependencies = [
"base64 0.21.5",
"flate2",
"log",
"native-tls",
"once_cell",
"rustls 0.21.10",
"rustls-webpki",
"serde",
"serde_json",
"url",
"webpki-roots",
]
[[package]]
name = "url"
version = "2.5.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "31e6302e3bb753d46e83516cae55ae196fc0c309407cf11ab35cc51a4c2a4633"
dependencies = [
"form_urlencoded",
"idna",
"percent-encoding",
]
[[package]]
name = "urlencoding"
version = "2.1.3"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "daf8dba3b7eb870caf1ddeed7bc9d2a049f3cfdfae7cb521b087cc33ae4c49da"
[[package]]
name = "utf8parse"
version = "0.2.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "711b9620af191e0cdc7468a8d14e709c3dcdb115b36f838e601583af800a370a"
[[package]]
name = "utoipa"
version = "3.5.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "d82b1bc5417102a73e8464c686eef947bdfb99fcdfc0a4f228e81afa9526470a"
dependencies = [
"indexmap 2.1.0",
"serde",
"serde_json",
"utoipa-gen",
]
[[package]]
name = "utoipa-gen"
version = "3.5.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "05d96dcd6fc96f3df9b3280ef480770af1b7c5d14bc55192baa9b067976d920c"
dependencies = [
"proc-macro-error",
"proc-macro2",
"quote",
"regex",
"syn 2.0.42",
]
[[package]]
name = "utoipa-swagger-ui"
version = "3.1.5"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "84614caa239fb25b2bb373a52859ffd94605ceb256eeb1d63436325cf81e3653"
dependencies = [
"axum",
"mime_guess",
"regex",
"rust-embed",
"serde",
"serde_json",
"utoipa",
"zip",
]
[[package]]
name = "valuable"
version = "0.1.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "830b7e5d4d90034032940e4ace0d9a9a057e7a45cd94e6c007832e39edb82f6d"
[[package]]
name = "vcpkg"
version = "0.2.15"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "accd4ea62f7bb7a82fe23066fb0957d48ef677f6eeb8215f372f52e48bb32426"
[[package]]
name = "vergen"
version = "8.2.6"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "1290fd64cc4e7d3c9b07d7f333ce0ce0007253e32870e632624835cc80b83939"
dependencies = [
"anyhow",
"rustc_version",
"rustversion",
"sysinfo",
"time",
]
[[package]]
name = "version_check"
version = "0.9.4"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "49874b5167b65d7193b8aba1567f5c7d93d001cafc34600cee003eda787e483f"
[[package]]
name = "walkdir"
version = "2.4.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "d71d857dc86794ca4c280d616f7da00d2dbfd8cd788846559a6813e6aa4b54ee"
dependencies = [
"same-file",
"winapi-util",
]
[[package]]
name = "want"
version = "0.3.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "bfa7760aed19e106de2c7c0b581b509f2f25d3dacaf737cb82ac61bc6d760b0e"
dependencies = [
"try-lock",
]
[[package]]
name = "wasi"
version = "0.11.0+wasi-snapshot-preview1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "9c8d87e72b64a3b4db28d11ce29237c246188f4f51057d65a7eab63b7987e423"
[[package]]
name = "wasm-bindgen"
version = "0.2.89"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "0ed0d4f68a3015cc185aff4db9506a015f4b96f95303897bfa23f846db54064e"
dependencies = [
"cfg-if",
"wasm-bindgen-macro",
]
[[package]]
name = "wasm-bindgen-backend"
version = "0.2.89"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "1b56f625e64f3a1084ded111c4d5f477df9f8c92df113852fa5a374dbda78826"
dependencies = [
"bumpalo",
"log",
"once_cell",
"proc-macro2",
"quote",
"syn 2.0.42",
"wasm-bindgen-shared",
]
[[package]]
name = "wasm-bindgen-futures"
version = "0.4.39"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "ac36a15a220124ac510204aec1c3e5db8a22ab06fd6706d881dc6149f8ed9a12"
dependencies = [
"cfg-if",
"js-sys",
"wasm-bindgen",
"web-sys",
]
[[package]]
name = "wasm-bindgen-macro"
version = "0.2.89"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "0162dbf37223cd2afce98f3d0785506dcb8d266223983e4b5b525859e6e182b2"
dependencies = [
"quote",
"wasm-bindgen-macro-support",
]
[[package]]
name = "wasm-bindgen-macro-support"
version = "0.2.89"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "f0eb82fcb7930ae6219a7ecfd55b217f5f0893484b7a13022ebb2b2bf20b5283"
dependencies = [
"proc-macro2",
"quote",
"syn 2.0.42",
"wasm-bindgen-backend",
"wasm-bindgen-shared",
]
[[package]]
name = "wasm-bindgen-shared"
version = "0.2.89"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "7ab9b36309365056cd639da3134bf87fa8f3d86008abf99e612384a6eecd459f"
[[package]]
name = "web-sys"
version = "0.3.66"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "50c24a44ec86bb68fbecd1b3efed7e85ea5621b39b35ef2766b66cd984f8010f"
dependencies = [
"js-sys",
"wasm-bindgen",
]
[[package]]
name = "webpki"
version = "0.22.4"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "ed63aea5ce73d0ff405984102c42de94fc55a6b75765d621c65262469b3c9b53"
dependencies = [
"ring 0.17.7",
"untrusted 0.9.0",
]
[[package]]
name = "webpki-roots"
version = "0.25.3"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "1778a42e8b3b90bff8d0f5032bf22250792889a5cdc752aa0020c84abe3aaf10"
[[package]]
name = "which"
version = "4.4.2"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "87ba24419a2078cd2b0f2ede2691b6c66d8e47836da3b6db8265ebad47afbfc7"
dependencies = [
"either",
"home",
"once_cell",
"rustix",
]
[[package]]
name = "winapi"
version = "0.3.9"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "5c839a674fcd7a98952e593242ea400abe93992746761e38641405d28b00f419"
dependencies = [
"winapi-i686-pc-windows-gnu",
"winapi-x86_64-pc-windows-gnu",
]
[[package]]
name = "winapi-i686-pc-windows-gnu"
version = "0.4.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "ac3b87c63620426dd9b991e5ce0329eff545bccbbb34f3be09ff6fb6ab51b7b6"
[[package]]
name = "winapi-util"
version = "0.1.6"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "f29e6f9198ba0d26b4c9f07dbe6f9ed633e1f3d5b8b414090084349e46a52596"
dependencies = [
"winapi",
]
[[package]]
name = "winapi-x86_64-pc-windows-gnu"
version = "0.4.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "712e227841d057c1ee1cd2fb22fa7e5a5461ae8e48fa2ca79ec42cfc1931183f"
[[package]]
name = "windows-sys"
version = "0.45.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "75283be5efb2831d37ea142365f009c02ec203cd29a3ebecbc093d52315b66d0"
dependencies = [
"windows-targets 0.42.2",
]
[[package]]
name = "windows-sys"
version = "0.48.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "677d2418bec65e3338edb076e806bc1ec15693c5d0104683f2efe857f61056a9"
dependencies = [
"windows-targets 0.48.5",
]
[[package]]
name = "windows-sys"
version = "0.52.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "282be5f36a8ce781fad8c8ae18fa3f9beff57ec1b52cb3de0789201425d9a33d"
dependencies = [
"windows-targets 0.52.0",
]
[[package]]
name = "windows-targets"
version = "0.42.2"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "8e5180c00cd44c9b1c88adb3693291f1cd93605ded80c250a75d472756b4d071"
dependencies = [
"windows_aarch64_gnullvm 0.42.2",
"windows_aarch64_msvc 0.42.2",
"windows_i686_gnu 0.42.2",
"windows_i686_msvc 0.42.2",
"windows_x86_64_gnu 0.42.2",
"windows_x86_64_gnullvm 0.42.2",
"windows_x86_64_msvc 0.42.2",
]
[[package]]
name = "windows-targets"
version = "0.48.5"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "9a2fa6e2155d7247be68c096456083145c183cbbbc2764150dda45a87197940c"
dependencies = [
"windows_aarch64_gnullvm 0.48.5",
"windows_aarch64_msvc 0.48.5",
"windows_i686_gnu 0.48.5",
"windows_i686_msvc 0.48.5",
"windows_x86_64_gnu 0.48.5",
"windows_x86_64_gnullvm 0.48.5",
"windows_x86_64_msvc 0.48.5",
]
[[package]]
name = "windows-targets"
version = "0.52.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "8a18201040b24831fbb9e4eb208f8892e1f50a37feb53cc7ff887feb8f50e7cd"
dependencies = [
"windows_aarch64_gnullvm 0.52.0",
"windows_aarch64_msvc 0.52.0",
"windows_i686_gnu 0.52.0",
"windows_i686_msvc 0.52.0",
"windows_x86_64_gnu 0.52.0",
"windows_x86_64_gnullvm 0.52.0",
"windows_x86_64_msvc 0.52.0",
]
[[package]]
name = "windows_aarch64_gnullvm"
version = "0.42.2"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "597a5118570b68bc08d8d59125332c54f1ba9d9adeedeef5b99b02ba2b0698f8"
[[package]]
name = "windows_aarch64_gnullvm"
version = "0.48.5"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "2b38e32f0abccf9987a4e3079dfb67dcd799fb61361e53e2882c3cbaf0d905d8"
[[package]]
name = "windows_aarch64_gnullvm"
version = "0.52.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "cb7764e35d4db8a7921e09562a0304bf2f93e0a51bfccee0bd0bb0b666b015ea"
[[package]]
name = "windows_aarch64_msvc"
version = "0.42.2"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "e08e8864a60f06ef0d0ff4ba04124db8b0fb3be5776a5cd47641e942e58c4d43"
[[package]]
name = "windows_aarch64_msvc"
version = "0.48.5"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "dc35310971f3b2dbbf3f0690a219f40e2d9afcf64f9ab7cc1be722937c26b4bc"
[[package]]
name = "windows_aarch64_msvc"
version = "0.52.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "bbaa0368d4f1d2aaefc55b6fcfee13f41544ddf36801e793edbbfd7d7df075ef"
[[package]]
name = "windows_i686_gnu"
version = "0.42.2"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "c61d927d8da41da96a81f029489353e68739737d3beca43145c8afec9a31a84f"
[[package]]
name = "windows_i686_gnu"
version = "0.48.5"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "a75915e7def60c94dcef72200b9a8e58e5091744960da64ec734a6c6e9b3743e"
[[package]]
name = "windows_i686_gnu"
version = "0.52.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "a28637cb1fa3560a16915793afb20081aba2c92ee8af57b4d5f28e4b3e7df313"
[[package]]
name = "windows_i686_msvc"
version = "0.42.2"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "44d840b6ec649f480a41c8d80f9c65108b92d89345dd94027bfe06ac444d1060"
[[package]]
name = "windows_i686_msvc"
version = "0.48.5"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "8f55c233f70c4b27f66c523580f78f1004e8b5a8b659e05a4eb49d4166cca406"
[[package]]
name = "windows_i686_msvc"
version = "0.52.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "ffe5e8e31046ce6230cc7215707b816e339ff4d4d67c65dffa206fd0f7aa7b9a"
[[package]]
name = "windows_x86_64_gnu"
version = "0.42.2"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "8de912b8b8feb55c064867cf047dda097f92d51efad5b491dfb98f6bbb70cb36"
[[package]]
name = "windows_x86_64_gnu"
version = "0.48.5"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "53d40abd2583d23e4718fddf1ebec84dbff8381c07cae67ff7768bbf19c6718e"
[[package]]
name = "windows_x86_64_gnu"
version = "0.52.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "3d6fa32db2bc4a2f5abeacf2b69f7992cd09dca97498da74a151a3132c26befd"
[[package]]
name = "windows_x86_64_gnullvm"
version = "0.42.2"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "26d41b46a36d453748aedef1486d5c7a85db22e56aff34643984ea85514e94a3"
[[package]]
name = "windows_x86_64_gnullvm"
version = "0.48.5"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "0b7b52767868a23d5bab768e390dc5f5c55825b6d30b86c844ff2dc7414044cc"
[[package]]
name = "windows_x86_64_gnullvm"
version = "0.52.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "1a657e1e9d3f514745a572a6846d3c7aa7dbe1658c056ed9c3344c4109a6949e"
[[package]]
name = "windows_x86_64_msvc"
version = "0.42.2"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "9aec5da331524158c6d1a4ac0ab1541149c0b9505fde06423b02f5ef0106b9f0"
[[package]]
name = "windows_x86_64_msvc"
version = "0.48.5"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "ed94fce61571a4006852b7389a063ab983c02eb1bb37b47f8272ce92d06d9538"
[[package]]
name = "windows_x86_64_msvc"
version = "0.52.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "dff9641d1cd4be8d1a070daf9e3773c5f67e78b4d9d42263020c057706765c04"
[[package]]
name = "winreg"
version = "0.50.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "524e57b2c537c0f9b1e69f1965311ec12182b4122e45035b1508cd24d2adadb1"
dependencies = [
"cfg-if",
"windows-sys 0.48.0",
]
[[package]]
name = "zerocopy"
version = "0.7.32"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "74d4d3961e53fa4c9a25a8637fc2bfaf2595b3d3ae34875568a5cf64787716be"
dependencies = [
"zerocopy-derive",
]
[[package]]
name = "zerocopy-derive"
version = "0.7.32"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "9ce1b18ccd8e73a9321186f97e46f9f04b778851177567b1975109d26a08d2a6"
dependencies = [
"proc-macro2",
"quote",
"syn 2.0.42",
]
[[package]]
name = "zip"
version = "0.6.6"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "760394e246e4c28189f19d488c058bf16f564016aefac5d32bb1f3b51d5e9261"
dependencies = [
"byteorder",
"crc32fast",
"crossbeam-utils",
"flate2",
]
| 0 |
hf_public_repos | hf_public_repos/text-generation-inference/Makefile | install-server:
cd server && make install
install-custom-kernels:
if [ "$$BUILD_EXTENSIONS" = "True" ]; then cd server/custom_kernels && python setup.py install; else echo "Custom kernels are disabled, you need to set the BUILD_EXTENSIONS environment variable to 'True' in order to build them. (Please read the docs, kernels might not work on all hardware)"; fi
install-integration-tests:
cd integration-tests && pip install -r requirements.txt
cd clients/python && pip install .
install-router:
cd router && cargo install --path .
install-launcher:
cd launcher && cargo install --path .
install-benchmark:
cd benchmark && cargo install --path .
install: install-server install-router install-launcher install-custom-kernels
server-dev:
cd server && make run-dev
router-dev:
cd router && cargo run -- --port 8080
rust-tests: install-router install-launcher
cargo test
integration-tests: install-integration-tests
pytest -s -vv -m "not private" integration-tests
update-integration-tests: install-integration-tests
pytest -s -vv --snapshot-update integration-tests
python-server-tests:
HF_HUB_ENABLE_HF_TRANSFER=1 pytest -s -vv -m "not private" server/tests
python-client-tests:
pytest clients/python/tests
python-tests: python-server-tests python-client-tests
run-falcon-7b-instruct:
text-generation-launcher --model-id tiiuae/falcon-7b-instruct --port 8080
run-falcon-7b-instruct-quantize:
text-generation-launcher --model-id tiiuae/falcon-7b-instruct --quantize bitsandbytes --port 8080
clean:
rm -rf target aml
| 0 |
hf_public_repos | hf_public_repos/text-generation-inference/Cargo.toml | [workspace]
members = [
"benchmark",
"router",
"router/client",
"router/grpc-metadata",
"launcher"
]
[workspace.package]
version = "1.3.4"
edition = "2021"
authors = ["Olivier Dehaene"]
homepage = "https://github.com/huggingface/text-generation-inference"
[profile.release]
debug = 1
incremental = true
lto = "off"
panic = "abort"
| 0 |
hf_public_repos | hf_public_repos/text-generation-inference/LICENSE | Hugging Face Optimized Inference License 1.0 (HFOILv1.0)
This License Agreement governs the use of the Software and its Modifications. It is a
binding agreement between the Licensor and You.
This License Agreement shall be referred to as Hugging Face Optimized Inference License
1.0 or HFOILv1.0. We may publish revised versions of this License Agreement from time to
time. Each version will be given a distinguished number.
By downloading, accessing, modifying, distributing or otherwise using the Software, You
consent to all of the terms and conditions below. So, if You do not agree with those,
please do not download, access, modify, distribute, or use the Software.
1. PERMISSIONS
You may use, modify and distribute the Software pursuant to the following terms and
conditions:
Copyright License. Subject to the terms and conditions of this License Agreement and where
and as applicable, each Contributor hereby grants You a perpetual, worldwide,
non-exclusive, royalty-free, copyright license to reproduce, prepare, publicly display,
publicly perform, sublicense under the terms herein, and distribute the Software and
Modifications of the Software.
Patent License. Subject to the terms and conditions of this License Agreement and where
and as applicable, each Contributor hereby grants You a perpetual, worldwide,
non-exclusive, royalty-free patent license to make, have made, Use, offer to sell, sell,
import, and otherwise transfer the Software, where such license applies only to those
patent claims licensable by such Contributor that are necessarily infringed by their
Contribution(s) alone or by combination of their Contribution(s) with the Software to
which such Contribution(s) was submitted. If You institute patent litigation against any
entity (including a cross-claim or counterclaim in a lawsuit) alleging that the Software
or a Contribution incorporated within the Software constitutes direct or contributory
patent infringement, then any rights granted to You under this License Agreement for the
Software shall terminate as of the date such litigation is filed.
No other rights. All rights not expressly granted herein are retained.
2. RESTRICTIONS
You may not distribute the Software as a hosted or managed, and paid service, where the
service grants users access to any substantial set of the features or functionality of the
Software. If you wish to do so, You will need to be granted additional rights from the
Licensor which will be subject to a separate mutually agreed agreement.
You may not sublicense the Software under any other terms than those listed in this
License.
3. OBLIGATIONS
When You modify the Software, You agree to: - attach a notice stating the Modifications of
the Software You made; and - attach a notice stating that the Modifications of the
Software are released under this License Agreement.
When You distribute the Software or Modifications of the Software, You agree to: - give
any recipients of the Software a copy of this License Agreement; - retain all Explanatory
Documentation; and if sharing the Modifications of the Software, add Explanatory
Documentation documenting the changes made to create the Modifications of the Software; -
retain all copyright, patent, trademark and attribution notices.
4. MISCELLANEOUS
Termination. Licensor reserves the right to restrict Use of the Software in violation of
this License Agreement, upon which Your licenses will automatically terminate.
Contributions. Unless You explicitly state otherwise, any Contribution intentionally
submitted for inclusion in the Software by You to the Licensor shall be under the terms
and conditions of this License, without any additional terms or conditions.
Notwithstanding the above, nothing herein shall supersede or modify the terms of any
separate license agreement you may have executed with Licensor regarding such
Contributions.
Trademarks and related. Nothing in this License Agreement permits You (i) to make Use of
Licensors’ trademarks, trade names, or logos, (ii) otherwise suggest endorsement by
Licensor, or (iii) misrepresent the relationship between the parties; and any rights not
expressly granted herein are reserved by the Licensors.
Output You generate. Licensor claims no rights in the Output. You agree not to contravene
any provision as stated in the License Agreement with your Use of the Output.
Disclaimer of Warranty. Except as expressly provided otherwise herein, and to the fullest
extent permitted by law, Licensor provides the Software (and each Contributor provides its
Contributions) AS IS, and Licensor disclaims all warranties or guarantees of any kind,
express or implied, whether arising under any law or from any usage in trade, or otherwise
including but not limited to the implied warranties of merchantability, non-infringement,
quiet enjoyment, fitness for a particular purpose, or otherwise. You are solely
responsible for determining the appropriateness of the Software and Modifications of the
Software for your purposes (including your use or distribution of the Software and
Modifications of the Software), and assume any risks associated with Your exercise of
permissions under this License Agreement.
Limitation of Liability. In no event and under no legal theory, whether in tort (including
negligence), contract, or otherwise, unless required by applicable law (such as deliberate
and grossly negligent acts) or agreed to in writing, shall any Contributor be liable to
You for damages, including any direct, indirect, special, incidental, or consequential
damages of any character arising as a result of this License Agreement or out of the Use
or inability to Use the Software (including but not limited to damages for loss of
goodwill, work stoppage, computer failure or malfunction, model failure or malfunction, or
any and all other commercial damages or losses), even if such Contributor has been advised
of the possibility of such damages.
Accepting Warranty or Additional Liability. While sharing the Software or Modifications of
the Software thereof, You may choose to offer and charge a fee for, acceptance of support,
warranty, indemnity, or other liability obligations and/or rights consistent with this
License Agreement. However, in accepting such obligations, You may act only on Your own
behalf and on Your sole responsibility, not on behalf of Licensor or any other
Contributor, and you hereby agree to indemnify, defend, and hold Licensor and each other
Contributor (and their successors or assigns) harmless for any liability incurred by, or
claims asserted against, such Licensor or Contributor (and their successors or assigns) by
reason of your accepting any such warranty or additional liability.
Severability. This License Agreement is a license of copyright and patent rights and an
agreement in contract between You and the Licensor. If any provision of this License
Agreement is held to be invalid, illegal or unenforceable, the remaining provisions shall
be unaffected thereby and remain valid as if such provision had not been set forth herein.
5. DEFINITIONS
“Contribution” refers to any work of authorship, including the original version of the
Software and any Modifications of the Software that is intentionally submitted to Licensor
for inclusion in the Software by the copyright owner or by an individual or entity
authorized to submit on behalf of the copyright owner. For the purposes of this
definition, “submitted” means any form of electronic, verbal, or written communication
sent to the Licensor or its representatives, including but not limited to communication on
electronic mailing lists, source code control systems, and issue tracking systems that are
managed by, or on behalf of, the Licensor for the purpose of discussing and improving the
Software, but excluding communication that is conspicuously marked or otherwise designated
in writing by the copyright owner as “Not a Contribution.”
“Contributor” refers to Licensor and any individual or entity on behalf of whom a
Contribution has been received by Licensor and subsequently incorporated within the
Software.
“Data” refers to a collection of information extracted from the dataset used with the
Model, including to train, pretrain, or otherwise evaluate the Model. The Data is not
licensed under this License Agreement.
“Explanatory Documentation” refers to any documentation or related information including
but not limited to model cards or data cards dedicated to inform the public about the
characteristics of the Software. Explanatory documentation is not licensed under this
License.
"License Agreement" refers to these terms and conditions.
“Licensor” refers to the rights owners or entity authorized by the rights owners that are
granting the terms and conditions of this License Agreement.
“Model” refers to machine-learning based assemblies (including checkpoints), consisting of
learnt weights and parameters (including optimizer states), corresponding to a model
architecture as embodied in Software source code. Source code is not licensed under this
License Agreement.
“Modifications of the Software” refers to all changes to the Software, including without
limitation derivative works of the Software.
“Output” refers to the results of operating the Software.
“Share” refers to any transmission, reproduction, publication or other sharing of the
Software or Modifications of the Software to a third party, including providing the
Software as a hosted service made available by electronic or other remote means,
including - but not limited to - API-based or web access.
“Software” refers to the software and Model (or parts of either) that Licensor makes
available under this License Agreement.
“Third Parties” refers to individuals or legal entities that are not under common control
with Licensor or You.
“Use” refers to anything You or your representatives do with the Software, including but
not limited to generating any Output, fine tuning, updating, running, training, evaluating
and/or reparametrizing the Model.
"You" (or "Your") refers to an individual or Legal Entity exercising permissions granted
by this License Agreement and/or making Use of the Software for whichever purpose and in
any field of Use.
| 0 |
hf_public_repos/text-generation-inference | hf_public_repos/text-generation-inference/load_tests/vllm.js | import { get_options, run } from "./common.js";
const reference_latency_ms = 22;
const host = __ENV.HOST || '127.0.0.1:8000';
const max_new_tokens = 50;
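// Build a request body for vLLM's native API: the prompt is the first turn of a
// ShareGPT conversation, and ignore_eos keeps the server generating past EOS.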
function generate_payload(gpt){
const input = gpt["conversations"][0]["value"];
return {"prompt": input, "temperature": 0.5, "ignore_eos": true}
}
export const options = get_options(reference_latency_ms);
export default function(){
run(host, generate_payload, max_new_tokens);
}
| 0 |
hf_public_repos/text-generation-inference | hf_public_repos/text-generation-inference/load_tests/common.js | import { check, randomSeed } from 'k6';
import http from 'k6/http';
import { Trend, Counter } from 'k6/metrics';
import { randomItem } from 'https://jslib.k6.io/k6-utils/1.2.0/index.js';
const seed = 0;
const host = __ENV.HOST || '127.0.0.1:8000';
const timePerToken = new Trend('time_per_token', true);
const tokens = new Counter('tokens');
const new_tokens = new Counter('new_tokens');
const input_tokens = new Counter('input_tokens');
randomSeed(seed);
// const shareGPT = JSON.parse(open("ShareGPT_V3_unfiltered_cleaned_split.json"))
const shareGPT = JSON.parse(open("small.json"))
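// get_options builds the k6 test configuration: the run fails fast if any request
// errors, or if the median time per token climbs above 5x the reference latency of
// the backend under test; load is a constant arrival rate of 10 requests/s for 60s.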
export function get_options(reference_latency_ms){
return {
thresholds: {
http_req_failed: ['rate==0'],
time_per_token: [{
threshold: `p(50)<${5 * reference_latency_ms}`,
abortOnFail: true,
delayAbortEval: '10s'
}],
},
scenarios: {
load_test: {
executor: 'constant-arrival-rate',
duration: '60s',
preAllocatedVUs: 10,
rate: 10,
timeUnit: '1s',
},
},
};
}
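// run picks a random ShareGPT sample, POSTs it to the backend's /generate route and,
// on a 200 response, records per-token latency plus counters for input, new and
// total tokens. 4xx responses (e.g. prompts rejected by validation) are skipped.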
export function run(host, generate_payload, max_new_tokens) {
const headers = {'Content-Type': 'application/json'};
const query = randomItem(shareGPT);
const payload = JSON.stringify(generate_payload(query));
const res = http.post(`http://${host}/generate`, payload, {
headers,
});
if(res.status >= 400 && res.status < 500){
return;
}
check(res, {
        'Post status is 200': (r) => r.status === 200,
});
const duration = res.timings.duration;
if (res.status === 200) {
const body = res.json();
const n_tokens = body.details.tokens.length;
const latency_ms_per_token = duration / n_tokens;
timePerToken.add(latency_ms_per_token);
const latency_in_s = latency_ms_per_token / 1000;
const individual_throughput = 1 / latency_in_s;
const _input_tokens = body.details.prefill.length;
tokens.add(n_tokens + _input_tokens);
input_tokens.add(_input_tokens);
new_tokens.add(n_tokens);
}
}
| 0 |
hf_public_repos/text-generation-inference | hf_public_repos/text-generation-inference/load_tests/tgi.js | import { get_options, run } from "./common.js";
const reference_latency_ms = 70;
const host = __ENV.HOST || '127.0.0.1:8000';
const max_new_tokens = 50;
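// Build a request body for TGI's /generate endpoint: the prompt is the first turn
// of a ShareGPT conversation, max_new_tokens caps the generation length, and
// decoder_input_details asks for prefill token details so common.js can count
// input tokens.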
function generate_payload(gpt){
const input = gpt["conversations"][0]["value"];
return {"inputs": input, "parameters": {"max_new_tokens": max_new_tokens, "decoder_input_details": true}}
}
export const options = get_options(reference_latency_ms);
export default function(){
run(host, generate_payload, max_new_tokens);
}
| 0 |
hf_public_repos/text-generation-inference | hf_public_repos/text-generation-inference/load_tests/starcoder_load.js | import {check} from 'k6';
import http from 'k6/http';
import {Trend} from 'k6/metrics';
const host = __ENV.HOST || '127.0.0.1:3000';
const totalTime = new Trend('total_time', true);
const validationTime = new Trend('validation_time', true);
const queueTime = new Trend('queue_time', true);
const inferenceTime = new Trend('inference_time', true);
const timePerToken = new Trend('time_per_token', true);
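// Each custom Trend mirrors one of the X-* timing headers that TGI attaches to
// /generate responses, so server-side timings show up in the k6 summary.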
const example = {
payload: JSON.stringify({
inputs: '# This is a fibonacci function written in the Python programming language.' +
'def fibonacci',
parameters: {
details: true,
max_new_tokens: 60,
temperature: 0.2,
top_p: 0.95,
seed: 0,
},
}),
generated_tokens: 60
};
export const options = {
thresholds: {
http_req_failed: ['rate==0'],
time_per_token: ['p(95)<90'],
queue_time: ['p(95)<1500'],
},
scenarios: {
load_test: {
executor: 'constant-arrival-rate',
duration: '60s',
preAllocatedVUs: 100,
rate: 10,
timeUnit: '1s',
},
},
};
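// Each iteration sends the fixed StarCoder-style prompt above, checks that the
// request succeeded and produced exactly the expected number of tokens, and then
// records the server-side timing headers.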
export default function () {
const headers = {'Content-Type': 'application/json'};
const res = http.post(`http://${host}/generate`, example.payload, {
headers,
});
check(res, {
        'Post status is 200': (r) => r.status === 200,
        'Post response generated tokens': (r) => r.status === 200 && r.json().details.generated_tokens === example.generated_tokens,
});
if (res.status === 200) {
totalTime.add(res.headers["X-Total-Time"]);
validationTime.add(res.headers["X-Validation-Time"]);
queueTime.add(res.headers["X-Queue-Time"]);
inferenceTime.add(res.headers["X-Inference-Time"]);
timePerToken.add(res.headers["X-Time-Per-Token"]);
}
} | 0 |
hf_public_repos/text-generation-inference | hf_public_repos/text-generation-inference/integration-tests/pyproject.toml | [tool.poetry]
name = "text-generation-integration-tests"
version = "1.3.4"
description = "Text Generation Inference integration tests"
authors = ["Nicolas Patry <nicolas@huggingface.co>"]
[tool.poetry.dependencies]
python = ">=3.9,<3.13"
syrupy = "4.0.1"
text-generation = "^0.6.0"
pytest = "^7.4.0"
pytest-asyncio = "^0.21.1"
docker = "^6.1.3"
| 0 |
hf_public_repos/text-generation-inference | hf_public_repos/text-generation-inference/integration-tests/poetry.lock | # This file is automatically @generated by Poetry 1.5.1 and should not be changed by hand.
[[package]]
name = "aiohttp"
version = "3.8.5"
description = "Async http client/server framework (asyncio)"
optional = false
python-versions = ">=3.6"
files = [
{file = "aiohttp-3.8.5-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:a94159871304770da4dd371f4291b20cac04e8c94f11bdea1c3478e557fbe0d8"},
{file = "aiohttp-3.8.5-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:13bf85afc99ce6f9ee3567b04501f18f9f8dbbb2ea11ed1a2e079670403a7c84"},
{file = "aiohttp-3.8.5-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:2ce2ac5708501afc4847221a521f7e4b245abf5178cf5ddae9d5b3856ddb2f3a"},
{file = "aiohttp-3.8.5-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:96943e5dcc37a6529d18766597c491798b7eb7a61d48878611298afc1fca946c"},
{file = "aiohttp-3.8.5-cp310-cp310-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:2ad5c3c4590bb3cc28b4382f031f3783f25ec223557124c68754a2231d989e2b"},
{file = "aiohttp-3.8.5-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:0c413c633d0512df4dc7fd2373ec06cc6a815b7b6d6c2f208ada7e9e93a5061d"},
{file = "aiohttp-3.8.5-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:df72ac063b97837a80d80dec8d54c241af059cc9bb42c4de68bd5b61ceb37caa"},
{file = "aiohttp-3.8.5-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:c48c5c0271149cfe467c0ff8eb941279fd6e3f65c9a388c984e0e6cf57538e14"},
{file = "aiohttp-3.8.5-cp310-cp310-musllinux_1_1_aarch64.whl", hash = "sha256:368a42363c4d70ab52c2c6420a57f190ed3dfaca6a1b19afda8165ee16416a82"},
{file = "aiohttp-3.8.5-cp310-cp310-musllinux_1_1_i686.whl", hash = "sha256:7607ec3ce4993464368505888af5beb446845a014bc676d349efec0e05085905"},
{file = "aiohttp-3.8.5-cp310-cp310-musllinux_1_1_ppc64le.whl", hash = "sha256:0d21c684808288a98914e5aaf2a7c6a3179d4df11d249799c32d1808e79503b5"},
{file = "aiohttp-3.8.5-cp310-cp310-musllinux_1_1_s390x.whl", hash = "sha256:312fcfbacc7880a8da0ae8b6abc6cc7d752e9caa0051a53d217a650b25e9a691"},
{file = "aiohttp-3.8.5-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:ad093e823df03bb3fd37e7dec9d4670c34f9e24aeace76808fc20a507cace825"},
{file = "aiohttp-3.8.5-cp310-cp310-win32.whl", hash = "sha256:33279701c04351a2914e1100b62b2a7fdb9a25995c4a104259f9a5ead7ed4802"},
{file = "aiohttp-3.8.5-cp310-cp310-win_amd64.whl", hash = "sha256:6e4a280e4b975a2e7745573e3fc9c9ba0d1194a3738ce1cbaa80626cc9b4f4df"},
{file = "aiohttp-3.8.5-cp311-cp311-macosx_10_9_universal2.whl", hash = "sha256:ae871a964e1987a943d83d6709d20ec6103ca1eaf52f7e0d36ee1b5bebb8b9b9"},
{file = "aiohttp-3.8.5-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:461908b2578955045efde733719d62f2b649c404189a09a632d245b445c9c975"},
{file = "aiohttp-3.8.5-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:72a860c215e26192379f57cae5ab12b168b75db8271f111019509a1196dfc780"},
{file = "aiohttp-3.8.5-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:cc14be025665dba6202b6a71cfcdb53210cc498e50068bc088076624471f8bb9"},
{file = "aiohttp-3.8.5-cp311-cp311-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:8af740fc2711ad85f1a5c034a435782fbd5b5f8314c9a3ef071424a8158d7f6b"},
{file = "aiohttp-3.8.5-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:841cd8233cbd2111a0ef0a522ce016357c5e3aff8a8ce92bcfa14cef890d698f"},
{file = "aiohttp-3.8.5-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:5ed1c46fb119f1b59304b5ec89f834f07124cd23ae5b74288e364477641060ff"},
{file = "aiohttp-3.8.5-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:84f8ae3e09a34f35c18fa57f015cc394bd1389bce02503fb30c394d04ee6b938"},
{file = "aiohttp-3.8.5-cp311-cp311-musllinux_1_1_aarch64.whl", hash = "sha256:62360cb771707cb70a6fd114b9871d20d7dd2163a0feafe43fd115cfe4fe845e"},
{file = "aiohttp-3.8.5-cp311-cp311-musllinux_1_1_i686.whl", hash = "sha256:23fb25a9f0a1ca1f24c0a371523546366bb642397c94ab45ad3aedf2941cec6a"},
{file = "aiohttp-3.8.5-cp311-cp311-musllinux_1_1_ppc64le.whl", hash = "sha256:b0ba0d15164eae3d878260d4c4df859bbdc6466e9e6689c344a13334f988bb53"},
{file = "aiohttp-3.8.5-cp311-cp311-musllinux_1_1_s390x.whl", hash = "sha256:5d20003b635fc6ae3f96d7260281dfaf1894fc3aa24d1888a9b2628e97c241e5"},
{file = "aiohttp-3.8.5-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:0175d745d9e85c40dcc51c8f88c74bfbaef9e7afeeeb9d03c37977270303064c"},
{file = "aiohttp-3.8.5-cp311-cp311-win32.whl", hash = "sha256:2e1b1e51b0774408f091d268648e3d57f7260c1682e7d3a63cb00d22d71bb945"},
{file = "aiohttp-3.8.5-cp311-cp311-win_amd64.whl", hash = "sha256:043d2299f6dfdc92f0ac5e995dfc56668e1587cea7f9aa9d8a78a1b6554e5755"},
{file = "aiohttp-3.8.5-cp36-cp36m-macosx_10_9_x86_64.whl", hash = "sha256:cae533195e8122584ec87531d6df000ad07737eaa3c81209e85c928854d2195c"},
{file = "aiohttp-3.8.5-cp36-cp36m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:4f21e83f355643c345177a5d1d8079f9f28b5133bcd154193b799d380331d5d3"},
{file = "aiohttp-3.8.5-cp36-cp36m-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:a7a75ef35f2df54ad55dbf4b73fe1da96f370e51b10c91f08b19603c64004acc"},
{file = "aiohttp-3.8.5-cp36-cp36m-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:2e2e9839e14dd5308ee773c97115f1e0a1cb1d75cbeeee9f33824fa5144c7634"},
{file = "aiohttp-3.8.5-cp36-cp36m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:c44e65da1de4403d0576473e2344828ef9c4c6244d65cf4b75549bb46d40b8dd"},
{file = "aiohttp-3.8.5-cp36-cp36m-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:78d847e4cde6ecc19125ccbc9bfac4a7ab37c234dd88fbb3c5c524e8e14da543"},
{file = "aiohttp-3.8.5-cp36-cp36m-musllinux_1_1_aarch64.whl", hash = "sha256:c7a815258e5895d8900aec4454f38dca9aed71085f227537208057853f9d13f2"},
{file = "aiohttp-3.8.5-cp36-cp36m-musllinux_1_1_i686.whl", hash = "sha256:8b929b9bd7cd7c3939f8bcfffa92fae7480bd1aa425279d51a89327d600c704d"},
{file = "aiohttp-3.8.5-cp36-cp36m-musllinux_1_1_ppc64le.whl", hash = "sha256:5db3a5b833764280ed7618393832e0853e40f3d3e9aa128ac0ba0f8278d08649"},
{file = "aiohttp-3.8.5-cp36-cp36m-musllinux_1_1_s390x.whl", hash = "sha256:a0215ce6041d501f3155dc219712bc41252d0ab76474615b9700d63d4d9292af"},
{file = "aiohttp-3.8.5-cp36-cp36m-musllinux_1_1_x86_64.whl", hash = "sha256:fd1ed388ea7fbed22c4968dd64bab0198de60750a25fe8c0c9d4bef5abe13824"},
{file = "aiohttp-3.8.5-cp36-cp36m-win32.whl", hash = "sha256:6e6783bcc45f397fdebc118d772103d751b54cddf5b60fbcc958382d7dd64f3e"},
{file = "aiohttp-3.8.5-cp36-cp36m-win_amd64.whl", hash = "sha256:b5411d82cddd212644cf9360879eb5080f0d5f7d809d03262c50dad02f01421a"},
{file = "aiohttp-3.8.5-cp37-cp37m-macosx_10_9_x86_64.whl", hash = "sha256:01d4c0c874aa4ddfb8098e85d10b5e875a70adc63db91f1ae65a4b04d3344cda"},
{file = "aiohttp-3.8.5-cp37-cp37m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:e5980a746d547a6ba173fd5ee85ce9077e72d118758db05d229044b469d9029a"},
{file = "aiohttp-3.8.5-cp37-cp37m-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:2a482e6da906d5e6e653be079b29bc173a48e381600161c9932d89dfae5942ef"},
{file = "aiohttp-3.8.5-cp37-cp37m-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:80bd372b8d0715c66c974cf57fe363621a02f359f1ec81cba97366948c7fc873"},
{file = "aiohttp-3.8.5-cp37-cp37m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:c1161b345c0a444ebcf46bf0a740ba5dcf50612fd3d0528883fdc0eff578006a"},
{file = "aiohttp-3.8.5-cp37-cp37m-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:cd56db019015b6acfaaf92e1ac40eb8434847d9bf88b4be4efe5bfd260aee692"},
{file = "aiohttp-3.8.5-cp37-cp37m-musllinux_1_1_aarch64.whl", hash = "sha256:153c2549f6c004d2754cc60603d4668899c9895b8a89397444a9c4efa282aaf4"},
{file = "aiohttp-3.8.5-cp37-cp37m-musllinux_1_1_i686.whl", hash = "sha256:4a01951fabc4ce26ab791da5f3f24dca6d9a6f24121746eb19756416ff2d881b"},
{file = "aiohttp-3.8.5-cp37-cp37m-musllinux_1_1_ppc64le.whl", hash = "sha256:bfb9162dcf01f615462b995a516ba03e769de0789de1cadc0f916265c257e5d8"},
{file = "aiohttp-3.8.5-cp37-cp37m-musllinux_1_1_s390x.whl", hash = "sha256:7dde0009408969a43b04c16cbbe252c4f5ef4574ac226bc8815cd7342d2028b6"},
{file = "aiohttp-3.8.5-cp37-cp37m-musllinux_1_1_x86_64.whl", hash = "sha256:4149d34c32f9638f38f544b3977a4c24052042affa895352d3636fa8bffd030a"},
{file = "aiohttp-3.8.5-cp37-cp37m-win32.whl", hash = "sha256:68c5a82c8779bdfc6367c967a4a1b2aa52cd3595388bf5961a62158ee8a59e22"},
{file = "aiohttp-3.8.5-cp37-cp37m-win_amd64.whl", hash = "sha256:2cf57fb50be5f52bda004b8893e63b48530ed9f0d6c96c84620dc92fe3cd9b9d"},
{file = "aiohttp-3.8.5-cp38-cp38-macosx_10_9_universal2.whl", hash = "sha256:eca4bf3734c541dc4f374ad6010a68ff6c6748f00451707f39857f429ca36ced"},
{file = "aiohttp-3.8.5-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:1274477e4c71ce8cfe6c1ec2f806d57c015ebf84d83373676036e256bc55d690"},
{file = "aiohttp-3.8.5-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:28c543e54710d6158fc6f439296c7865b29e0b616629767e685a7185fab4a6b9"},
{file = "aiohttp-3.8.5-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:910bec0c49637d213f5d9877105d26e0c4a4de2f8b1b29405ff37e9fc0ad52b8"},
{file = "aiohttp-3.8.5-cp38-cp38-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:5443910d662db951b2e58eb70b0fbe6b6e2ae613477129a5805d0b66c54b6cb7"},
{file = "aiohttp-3.8.5-cp38-cp38-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:2e460be6978fc24e3df83193dc0cc4de46c9909ed92dd47d349a452ef49325b7"},
{file = "aiohttp-3.8.5-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:fb1558def481d84f03b45888473fc5a1f35747b5f334ef4e7a571bc0dfcb11f8"},
{file = "aiohttp-3.8.5-cp38-cp38-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:34dd0c107799dcbbf7d48b53be761a013c0adf5571bf50c4ecad5643fe9cfcd0"},
{file = "aiohttp-3.8.5-cp38-cp38-musllinux_1_1_aarch64.whl", hash = "sha256:aa1990247f02a54185dc0dff92a6904521172a22664c863a03ff64c42f9b5410"},
{file = "aiohttp-3.8.5-cp38-cp38-musllinux_1_1_i686.whl", hash = "sha256:0e584a10f204a617d71d359fe383406305a4b595b333721fa50b867b4a0a1548"},
{file = "aiohttp-3.8.5-cp38-cp38-musllinux_1_1_ppc64le.whl", hash = "sha256:a3cf433f127efa43fee6b90ea4c6edf6c4a17109d1d037d1a52abec84d8f2e42"},
{file = "aiohttp-3.8.5-cp38-cp38-musllinux_1_1_s390x.whl", hash = "sha256:c11f5b099adafb18e65c2c997d57108b5bbeaa9eeee64a84302c0978b1ec948b"},
{file = "aiohttp-3.8.5-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:84de26ddf621d7ac4c975dbea4c945860e08cccde492269db4e1538a6a6f3c35"},
{file = "aiohttp-3.8.5-cp38-cp38-win32.whl", hash = "sha256:ab88bafedc57dd0aab55fa728ea10c1911f7e4d8b43e1d838a1739f33712921c"},
{file = "aiohttp-3.8.5-cp38-cp38-win_amd64.whl", hash = "sha256:5798a9aad1879f626589f3df0f8b79b3608a92e9beab10e5fda02c8a2c60db2e"},
{file = "aiohttp-3.8.5-cp39-cp39-macosx_10_9_universal2.whl", hash = "sha256:a6ce61195c6a19c785df04e71a4537e29eaa2c50fe745b732aa937c0c77169f3"},
{file = "aiohttp-3.8.5-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:773dd01706d4db536335fcfae6ea2440a70ceb03dd3e7378f3e815b03c97ab51"},
{file = "aiohttp-3.8.5-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:f83a552443a526ea38d064588613aca983d0ee0038801bc93c0c916428310c28"},
{file = "aiohttp-3.8.5-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:1f7372f7341fcc16f57b2caded43e81ddd18df53320b6f9f042acad41f8e049a"},
{file = "aiohttp-3.8.5-cp39-cp39-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:ea353162f249c8097ea63c2169dd1aa55de1e8fecbe63412a9bc50816e87b761"},
{file = "aiohttp-3.8.5-cp39-cp39-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:e5d47ae48db0b2dcf70bc8a3bc72b3de86e2a590fc299fdbbb15af320d2659de"},
{file = "aiohttp-3.8.5-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:d827176898a2b0b09694fbd1088c7a31836d1a505c243811c87ae53a3f6273c1"},
{file = "aiohttp-3.8.5-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:3562b06567c06439d8b447037bb655ef69786c590b1de86c7ab81efe1c9c15d8"},
{file = "aiohttp-3.8.5-cp39-cp39-musllinux_1_1_aarch64.whl", hash = "sha256:4e874cbf8caf8959d2adf572a78bba17cb0e9d7e51bb83d86a3697b686a0ab4d"},
{file = "aiohttp-3.8.5-cp39-cp39-musllinux_1_1_i686.whl", hash = "sha256:6809a00deaf3810e38c628e9a33271892f815b853605a936e2e9e5129762356c"},
{file = "aiohttp-3.8.5-cp39-cp39-musllinux_1_1_ppc64le.whl", hash = "sha256:33776e945d89b29251b33a7e7d006ce86447b2cfd66db5e5ded4e5cd0340585c"},
{file = "aiohttp-3.8.5-cp39-cp39-musllinux_1_1_s390x.whl", hash = "sha256:eaeed7abfb5d64c539e2db173f63631455f1196c37d9d8d873fc316470dfbacd"},
{file = "aiohttp-3.8.5-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:e91d635961bec2d8f19dfeb41a539eb94bd073f075ca6dae6c8dc0ee89ad6f91"},
{file = "aiohttp-3.8.5-cp39-cp39-win32.whl", hash = "sha256:00ad4b6f185ec67f3e6562e8a1d2b69660be43070bd0ef6fcec5211154c7df67"},
{file = "aiohttp-3.8.5-cp39-cp39-win_amd64.whl", hash = "sha256:c0a9034379a37ae42dea7ac1e048352d96286626251862e448933c0f59cbd79c"},
{file = "aiohttp-3.8.5.tar.gz", hash = "sha256:b9552ec52cc147dbf1944ac7ac98af7602e51ea2dcd076ed194ca3c0d1c7d0bc"},
]
[package.dependencies]
aiosignal = ">=1.1.2"
async-timeout = ">=4.0.0a3,<5.0"
attrs = ">=17.3.0"
charset-normalizer = ">=2.0,<4.0"
frozenlist = ">=1.1.1"
multidict = ">=4.5,<7.0"
yarl = ">=1.0,<2.0"
[package.extras]
speedups = ["Brotli", "aiodns", "cchardet"]
[[package]]
name = "aiosignal"
version = "1.3.1"
description = "aiosignal: a list of registered asynchronous callbacks"
optional = false
python-versions = ">=3.7"
files = [
{file = "aiosignal-1.3.1-py3-none-any.whl", hash = "sha256:f8376fb07dd1e86a584e4fcdec80b36b7f81aac666ebc724e2c090300dd83b17"},
{file = "aiosignal-1.3.1.tar.gz", hash = "sha256:54cd96e15e1649b75d6c87526a6ff0b6c1b0dd3459f43d9ca11d48c339b68cfc"},
]
[package.dependencies]
frozenlist = ">=1.1.0"
[[package]]
name = "async-timeout"
version = "4.0.3"
description = "Timeout context manager for asyncio programs"
optional = false
python-versions = ">=3.7"
files = [
{file = "async-timeout-4.0.3.tar.gz", hash = "sha256:4640d96be84d82d02ed59ea2b7105a0f7b33abe8703703cd0ab0bf87c427522f"},
{file = "async_timeout-4.0.3-py3-none-any.whl", hash = "sha256:7405140ff1230c310e51dc27b3145b9092d659ce68ff733fb0cefe3ee42be028"},
]
[[package]]
name = "attrs"
version = "23.1.0"
description = "Classes Without Boilerplate"
optional = false
python-versions = ">=3.7"
files = [
{file = "attrs-23.1.0-py3-none-any.whl", hash = "sha256:1f28b4522cdc2fb4256ac1a020c78acf9cba2c6b461ccd2c126f3aa8e8335d04"},
{file = "attrs-23.1.0.tar.gz", hash = "sha256:6279836d581513a26f1bf235f9acd333bc9115683f14f7e8fae46c98fc50e015"},
]
[package.extras]
cov = ["attrs[tests]", "coverage[toml] (>=5.3)"]
dev = ["attrs[docs,tests]", "pre-commit"]
docs = ["furo", "myst-parser", "sphinx", "sphinx-notfound-page", "sphinxcontrib-towncrier", "towncrier", "zope-interface"]
tests = ["attrs[tests-no-zope]", "zope-interface"]
tests-no-zope = ["cloudpickle", "hypothesis", "mypy (>=1.1.1)", "pympler", "pytest (>=4.3.0)", "pytest-mypy-plugins", "pytest-xdist[psutil]"]
[[package]]
name = "certifi"
version = "2023.7.22"
description = "Python package for providing Mozilla's CA Bundle."
optional = false
python-versions = ">=3.6"
files = [
{file = "certifi-2023.7.22-py3-none-any.whl", hash = "sha256:92d6037539857d8206b8f6ae472e8b77db8058fec5937a1ef3f54304089edbb9"},
{file = "certifi-2023.7.22.tar.gz", hash = "sha256:539cc1d13202e33ca466e88b2807e29f4c13049d6d87031a3c110744495cb082"},
]
[[package]]
name = "charset-normalizer"
version = "3.2.0"
description = "The Real First Universal Charset Detector. Open, modern and actively maintained alternative to Chardet."
optional = false
python-versions = ">=3.7.0"
files = [
{file = "charset-normalizer-3.2.0.tar.gz", hash = "sha256:3bb3d25a8e6c0aedd251753a79ae98a093c7e7b471faa3aa9a93a81431987ace"},
{file = "charset_normalizer-3.2.0-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:0b87549028f680ca955556e3bd57013ab47474c3124dc069faa0b6545b6c9710"},
{file = "charset_normalizer-3.2.0-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:7c70087bfee18a42b4040bb9ec1ca15a08242cf5867c58726530bdf3945672ed"},
{file = "charset_normalizer-3.2.0-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:a103b3a7069b62f5d4890ae1b8f0597618f628b286b03d4bc9195230b154bfa9"},
{file = "charset_normalizer-3.2.0-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:94aea8eff76ee6d1cdacb07dd2123a68283cb5569e0250feab1240058f53b623"},
{file = "charset_normalizer-3.2.0-cp310-cp310-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:db901e2ac34c931d73054d9797383d0f8009991e723dab15109740a63e7f902a"},
{file = "charset_normalizer-3.2.0-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:b0dac0ff919ba34d4df1b6131f59ce95b08b9065233446be7e459f95554c0dc8"},
{file = "charset_normalizer-3.2.0-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:193cbc708ea3aca45e7221ae58f0fd63f933753a9bfb498a3b474878f12caaad"},
{file = "charset_normalizer-3.2.0-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:09393e1b2a9461950b1c9a45d5fd251dc7c6f228acab64da1c9c0165d9c7765c"},
{file = "charset_normalizer-3.2.0-cp310-cp310-musllinux_1_1_aarch64.whl", hash = "sha256:baacc6aee0b2ef6f3d308e197b5d7a81c0e70b06beae1f1fcacffdbd124fe0e3"},
{file = "charset_normalizer-3.2.0-cp310-cp310-musllinux_1_1_i686.whl", hash = "sha256:bf420121d4c8dce6b889f0e8e4ec0ca34b7f40186203f06a946fa0276ba54029"},
{file = "charset_normalizer-3.2.0-cp310-cp310-musllinux_1_1_ppc64le.whl", hash = "sha256:c04a46716adde8d927adb9457bbe39cf473e1e2c2f5d0a16ceb837e5d841ad4f"},
{file = "charset_normalizer-3.2.0-cp310-cp310-musllinux_1_1_s390x.whl", hash = "sha256:aaf63899c94de41fe3cf934601b0f7ccb6b428c6e4eeb80da72c58eab077b19a"},
{file = "charset_normalizer-3.2.0-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:d62e51710986674142526ab9f78663ca2b0726066ae26b78b22e0f5e571238dd"},
{file = "charset_normalizer-3.2.0-cp310-cp310-win32.whl", hash = "sha256:04e57ab9fbf9607b77f7d057974694b4f6b142da9ed4a199859d9d4d5c63fe96"},
{file = "charset_normalizer-3.2.0-cp310-cp310-win_amd64.whl", hash = "sha256:48021783bdf96e3d6de03a6e39a1171ed5bd7e8bb93fc84cc649d11490f87cea"},
{file = "charset_normalizer-3.2.0-cp311-cp311-macosx_10_9_universal2.whl", hash = "sha256:4957669ef390f0e6719db3613ab3a7631e68424604a7b448f079bee145da6e09"},
{file = "charset_normalizer-3.2.0-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:46fb8c61d794b78ec7134a715a3e564aafc8f6b5e338417cb19fe9f57a5a9bf2"},
{file = "charset_normalizer-3.2.0-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:f779d3ad205f108d14e99bb3859aa7dd8e9c68874617c72354d7ecaec2a054ac"},
{file = "charset_normalizer-3.2.0-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:f25c229a6ba38a35ae6e25ca1264621cc25d4d38dca2942a7fce0b67a4efe918"},
{file = "charset_normalizer-3.2.0-cp311-cp311-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:2efb1bd13885392adfda4614c33d3b68dee4921fd0ac1d3988f8cbb7d589e72a"},
{file = "charset_normalizer-3.2.0-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:1f30b48dd7fa1474554b0b0f3fdfdd4c13b5c737a3c6284d3cdc424ec0ffff3a"},
{file = "charset_normalizer-3.2.0-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:246de67b99b6851627d945db38147d1b209a899311b1305dd84916f2b88526c6"},
{file = "charset_normalizer-3.2.0-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:9bd9b3b31adcb054116447ea22caa61a285d92e94d710aa5ec97992ff5eb7cf3"},
{file = "charset_normalizer-3.2.0-cp311-cp311-musllinux_1_1_aarch64.whl", hash = "sha256:8c2f5e83493748286002f9369f3e6607c565a6a90425a3a1fef5ae32a36d749d"},
{file = "charset_normalizer-3.2.0-cp311-cp311-musllinux_1_1_i686.whl", hash = "sha256:3170c9399da12c9dc66366e9d14da8bf7147e1e9d9ea566067bbce7bb74bd9c2"},
{file = "charset_normalizer-3.2.0-cp311-cp311-musllinux_1_1_ppc64le.whl", hash = "sha256:7a4826ad2bd6b07ca615c74ab91f32f6c96d08f6fcc3902ceeedaec8cdc3bcd6"},
{file = "charset_normalizer-3.2.0-cp311-cp311-musllinux_1_1_s390x.whl", hash = "sha256:3b1613dd5aee995ec6d4c69f00378bbd07614702a315a2cf6c1d21461fe17c23"},
{file = "charset_normalizer-3.2.0-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:9e608aafdb55eb9f255034709e20d5a83b6d60c054df0802fa9c9883d0a937aa"},
{file = "charset_normalizer-3.2.0-cp311-cp311-win32.whl", hash = "sha256:f2a1d0fd4242bd8643ce6f98927cf9c04540af6efa92323e9d3124f57727bfc1"},
{file = "charset_normalizer-3.2.0-cp311-cp311-win_amd64.whl", hash = "sha256:681eb3d7e02e3c3655d1b16059fbfb605ac464c834a0c629048a30fad2b27489"},
{file = "charset_normalizer-3.2.0-cp37-cp37m-macosx_10_9_x86_64.whl", hash = "sha256:c57921cda3a80d0f2b8aec7e25c8aa14479ea92b5b51b6876d975d925a2ea346"},
{file = "charset_normalizer-3.2.0-cp37-cp37m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:41b25eaa7d15909cf3ac4c96088c1f266a9a93ec44f87f1d13d4a0e86c81b982"},
{file = "charset_normalizer-3.2.0-cp37-cp37m-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:f058f6963fd82eb143c692cecdc89e075fa0828db2e5b291070485390b2f1c9c"},
{file = "charset_normalizer-3.2.0-cp37-cp37m-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:a7647ebdfb9682b7bb97e2a5e7cb6ae735b1c25008a70b906aecca294ee96cf4"},
{file = "charset_normalizer-3.2.0-cp37-cp37m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:eef9df1eefada2c09a5e7a40991b9fc6ac6ef20b1372abd48d2794a316dc0449"},
{file = "charset_normalizer-3.2.0-cp37-cp37m-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:e03b8895a6990c9ab2cdcd0f2fe44088ca1c65ae592b8f795c3294af00a461c3"},
{file = "charset_normalizer-3.2.0-cp37-cp37m-musllinux_1_1_aarch64.whl", hash = "sha256:ee4006268ed33370957f55bf2e6f4d263eaf4dc3cfc473d1d90baff6ed36ce4a"},
{file = "charset_normalizer-3.2.0-cp37-cp37m-musllinux_1_1_i686.whl", hash = "sha256:c4983bf937209c57240cff65906b18bb35e64ae872da6a0db937d7b4af845dd7"},
{file = "charset_normalizer-3.2.0-cp37-cp37m-musllinux_1_1_ppc64le.whl", hash = "sha256:3bb7fda7260735efe66d5107fb7e6af6a7c04c7fce9b2514e04b7a74b06bf5dd"},
{file = "charset_normalizer-3.2.0-cp37-cp37m-musllinux_1_1_s390x.whl", hash = "sha256:72814c01533f51d68702802d74f77ea026b5ec52793c791e2da806a3844a46c3"},
{file = "charset_normalizer-3.2.0-cp37-cp37m-musllinux_1_1_x86_64.whl", hash = "sha256:70c610f6cbe4b9fce272c407dd9d07e33e6bf7b4aa1b7ffb6f6ded8e634e3592"},
{file = "charset_normalizer-3.2.0-cp37-cp37m-win32.whl", hash = "sha256:a401b4598e5d3f4a9a811f3daf42ee2291790c7f9d74b18d75d6e21dda98a1a1"},
{file = "charset_normalizer-3.2.0-cp37-cp37m-win_amd64.whl", hash = "sha256:c0b21078a4b56965e2b12f247467b234734491897e99c1d51cee628da9786959"},
{file = "charset_normalizer-3.2.0-cp38-cp38-macosx_10_9_universal2.whl", hash = "sha256:95eb302ff792e12aba9a8b8f8474ab229a83c103d74a750ec0bd1c1eea32e669"},
{file = "charset_normalizer-3.2.0-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:1a100c6d595a7f316f1b6f01d20815d916e75ff98c27a01ae817439ea7726329"},
{file = "charset_normalizer-3.2.0-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:6339d047dab2780cc6220f46306628e04d9750f02f983ddb37439ca47ced7149"},
{file = "charset_normalizer-3.2.0-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:e4b749b9cc6ee664a3300bb3a273c1ca8068c46be705b6c31cf5d276f8628a94"},
{file = "charset_normalizer-3.2.0-cp38-cp38-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:a38856a971c602f98472050165cea2cdc97709240373041b69030be15047691f"},
{file = "charset_normalizer-3.2.0-cp38-cp38-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:f87f746ee241d30d6ed93969de31e5ffd09a2961a051e60ae6bddde9ec3583aa"},
{file = "charset_normalizer-3.2.0-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:89f1b185a01fe560bc8ae5f619e924407efca2191b56ce749ec84982fc59a32a"},
{file = "charset_normalizer-3.2.0-cp38-cp38-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:e1c8a2f4c69e08e89632defbfabec2feb8a8d99edc9f89ce33c4b9e36ab63037"},
{file = "charset_normalizer-3.2.0-cp38-cp38-musllinux_1_1_aarch64.whl", hash = "sha256:2f4ac36d8e2b4cc1aa71df3dd84ff8efbe3bfb97ac41242fbcfc053c67434f46"},
{file = "charset_normalizer-3.2.0-cp38-cp38-musllinux_1_1_i686.whl", hash = "sha256:a386ebe437176aab38c041de1260cd3ea459c6ce5263594399880bbc398225b2"},
{file = "charset_normalizer-3.2.0-cp38-cp38-musllinux_1_1_ppc64le.whl", hash = "sha256:ccd16eb18a849fd8dcb23e23380e2f0a354e8daa0c984b8a732d9cfaba3a776d"},
{file = "charset_normalizer-3.2.0-cp38-cp38-musllinux_1_1_s390x.whl", hash = "sha256:e6a5bf2cba5ae1bb80b154ed68a3cfa2fa00fde979a7f50d6598d3e17d9ac20c"},
{file = "charset_normalizer-3.2.0-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:45de3f87179c1823e6d9e32156fb14c1927fcc9aba21433f088fdfb555b77c10"},
{file = "charset_normalizer-3.2.0-cp38-cp38-win32.whl", hash = "sha256:1000fba1057b92a65daec275aec30586c3de2401ccdcd41f8a5c1e2c87078706"},
{file = "charset_normalizer-3.2.0-cp38-cp38-win_amd64.whl", hash = "sha256:8b2c760cfc7042b27ebdb4a43a4453bd829a5742503599144d54a032c5dc7e9e"},
{file = "charset_normalizer-3.2.0-cp39-cp39-macosx_10_9_universal2.whl", hash = "sha256:855eafa5d5a2034b4621c74925d89c5efef61418570e5ef9b37717d9c796419c"},
{file = "charset_normalizer-3.2.0-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:203f0c8871d5a7987be20c72442488a0b8cfd0f43b7973771640fc593f56321f"},
{file = "charset_normalizer-3.2.0-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:e857a2232ba53ae940d3456f7533ce6ca98b81917d47adc3c7fd55dad8fab858"},
{file = "charset_normalizer-3.2.0-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:5e86d77b090dbddbe78867a0275cb4df08ea195e660f1f7f13435a4649e954e5"},
{file = "charset_normalizer-3.2.0-cp39-cp39-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:c4fb39a81950ec280984b3a44f5bd12819953dc5fa3a7e6fa7a80db5ee853952"},
{file = "charset_normalizer-3.2.0-cp39-cp39-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:2dee8e57f052ef5353cf608e0b4c871aee320dd1b87d351c28764fc0ca55f9f4"},
{file = "charset_normalizer-3.2.0-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:8700f06d0ce6f128de3ccdbc1acaea1ee264d2caa9ca05daaf492fde7c2a7200"},
{file = "charset_normalizer-3.2.0-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:1920d4ff15ce893210c1f0c0e9d19bfbecb7983c76b33f046c13a8ffbd570252"},
{file = "charset_normalizer-3.2.0-cp39-cp39-musllinux_1_1_aarch64.whl", hash = "sha256:c1c76a1743432b4b60ab3358c937a3fe1341c828ae6194108a94c69028247f22"},
{file = "charset_normalizer-3.2.0-cp39-cp39-musllinux_1_1_i686.whl", hash = "sha256:f7560358a6811e52e9c4d142d497f1a6e10103d3a6881f18d04dbce3729c0e2c"},
{file = "charset_normalizer-3.2.0-cp39-cp39-musllinux_1_1_ppc64le.whl", hash = "sha256:c8063cf17b19661471ecbdb3df1c84f24ad2e389e326ccaf89e3fb2484d8dd7e"},
{file = "charset_normalizer-3.2.0-cp39-cp39-musllinux_1_1_s390x.whl", hash = "sha256:cd6dbe0238f7743d0efe563ab46294f54f9bc8f4b9bcf57c3c666cc5bc9d1299"},
{file = "charset_normalizer-3.2.0-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:1249cbbf3d3b04902ff081ffbb33ce3377fa6e4c7356f759f3cd076cc138d020"},
{file = "charset_normalizer-3.2.0-cp39-cp39-win32.whl", hash = "sha256:6c409c0deba34f147f77efaa67b8e4bb83d2f11c8806405f76397ae5b8c0d1c9"},
{file = "charset_normalizer-3.2.0-cp39-cp39-win_amd64.whl", hash = "sha256:7095f6fbfaa55defb6b733cfeb14efaae7a29f0b59d8cf213be4e7ca0b857b80"},
{file = "charset_normalizer-3.2.0-py3-none-any.whl", hash = "sha256:8e098148dd37b4ce3baca71fb394c81dc5d9c7728c95df695d2dca218edf40e6"},
]

[[package]]
name = "colorama"
version = "0.4.6"
description = "Cross-platform colored terminal text."
optional = false
python-versions = "!=3.0.*,!=3.1.*,!=3.2.*,!=3.3.*,!=3.4.*,!=3.5.*,!=3.6.*,>=2.7"
files = [
{file = "colorama-0.4.6-py2.py3-none-any.whl", hash = "sha256:4f1d9991f5acc0ca119f9d443620b77f9d6b33703e51011c16baf57afb285fc6"},
{file = "colorama-0.4.6.tar.gz", hash = "sha256:08695f5cb7ed6e0531a20572697297273c47b8cae5a63ffc6d6ed5c201be6e44"},
]

[[package]]
name = "colored"
version = "1.4.4"
description = "Simple library for color and formatting to terminal"
optional = false
python-versions = "*"
files = [
{file = "colored-1.4.4.tar.gz", hash = "sha256:04ff4d4dd514274fe3b99a21bb52fb96f2688c01e93fba7bef37221e7cb56ce0"},
]

[[package]]
name = "docker"
version = "6.1.3"
description = "A Python library for the Docker Engine API."
optional = false
python-versions = ">=3.7"
files = [
{file = "docker-6.1.3-py3-none-any.whl", hash = "sha256:aecd2277b8bf8e506e484f6ab7aec39abe0038e29fa4a6d3ba86c3fe01844ed9"},
{file = "docker-6.1.3.tar.gz", hash = "sha256:aa6d17830045ba5ef0168d5eaa34d37beeb113948c413affe1d5991fc11f9a20"},
]

[package.dependencies]
packaging = ">=14.0"
pywin32 = {version = ">=304", markers = "sys_platform == \"win32\""}
requests = ">=2.26.0"
urllib3 = ">=1.26.0"
websocket-client = ">=0.32.0"

[package.extras]
ssh = ["paramiko (>=2.4.3)"]

[[package]]
name = "exceptiongroup"
version = "1.1.3"
description = "Backport of PEP 654 (exception groups)"
optional = false
python-versions = ">=3.7"
files = [
{file = "exceptiongroup-1.1.3-py3-none-any.whl", hash = "sha256:343280667a4585d195ca1cf9cef84a4e178c4b6cf2274caef9859782b567d5e3"},
{file = "exceptiongroup-1.1.3.tar.gz", hash = "sha256:097acd85d473d75af5bb98e41b61ff7fe35efe6675e4f9370ec6ec5126d160e9"},
]

[package.extras]
test = ["pytest (>=6)"]

[[package]]
name = "filelock"
version = "3.12.3"
description = "A platform independent file lock."
optional = false
python-versions = ">=3.8"
files = [
{file = "filelock-3.12.3-py3-none-any.whl", hash = "sha256:f067e40ccc40f2b48395a80fcbd4728262fab54e232e090a4063ab804179efeb"},
{file = "filelock-3.12.3.tar.gz", hash = "sha256:0ecc1dd2ec4672a10c8550a8182f1bd0c0a5088470ecd5a125e45f49472fac3d"},
]

[package.dependencies]
typing-extensions = {version = ">=4.7.1", markers = "python_version < \"3.11\""}

[package.extras]
docs = ["furo (>=2023.7.26)", "sphinx (>=7.1.2)", "sphinx-autodoc-typehints (>=1.24)"]
testing = ["covdefaults (>=2.3)", "coverage (>=7.3)", "diff-cover (>=7.7)", "pytest (>=7.4)", "pytest-cov (>=4.1)", "pytest-mock (>=3.11.1)", "pytest-timeout (>=2.1)"]

[[package]]
name = "frozenlist"
version = "1.4.0"
description = "A list-like structure which implements collections.abc.MutableSequence"
optional = false
python-versions = ">=3.8"
files = [
{file = "frozenlist-1.4.0-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:764226ceef3125e53ea2cb275000e309c0aa5464d43bd72abd661e27fffc26ab"},
{file = "frozenlist-1.4.0-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:d6484756b12f40003c6128bfcc3fa9f0d49a687e171186c2d85ec82e3758c559"},
{file = "frozenlist-1.4.0-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:9ac08e601308e41eb533f232dbf6b7e4cea762f9f84f6357136eed926c15d12c"},
{file = "frozenlist-1.4.0-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:d081f13b095d74b67d550de04df1c756831f3b83dc9881c38985834387487f1b"},
{file = "frozenlist-1.4.0-cp310-cp310-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:71932b597f9895f011f47f17d6428252fc728ba2ae6024e13c3398a087c2cdea"},
{file = "frozenlist-1.4.0-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:981b9ab5a0a3178ff413bca62526bb784249421c24ad7381e39d67981be2c326"},
{file = "frozenlist-1.4.0-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:e41f3de4df3e80de75845d3e743b3f1c4c8613c3997a912dbf0229fc61a8b963"},
{file = "frozenlist-1.4.0-cp310-cp310-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:6918d49b1f90821e93069682c06ffde41829c346c66b721e65a5c62b4bab0300"},
{file = "frozenlist-1.4.0-cp310-cp310-musllinux_1_1_aarch64.whl", hash = "sha256:0e5c8764c7829343d919cc2dfc587a8db01c4f70a4ebbc49abde5d4b158b007b"},
{file = "frozenlist-1.4.0-cp310-cp310-musllinux_1_1_i686.whl", hash = "sha256:8d0edd6b1c7fb94922bf569c9b092ee187a83f03fb1a63076e7774b60f9481a8"},
{file = "frozenlist-1.4.0-cp310-cp310-musllinux_1_1_ppc64le.whl", hash = "sha256:e29cda763f752553fa14c68fb2195150bfab22b352572cb36c43c47bedba70eb"},
{file = "frozenlist-1.4.0-cp310-cp310-musllinux_1_1_s390x.whl", hash = "sha256:0c7c1b47859ee2cac3846fde1c1dc0f15da6cec5a0e5c72d101e0f83dcb67ff9"},
{file = "frozenlist-1.4.0-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:901289d524fdd571be1c7be054f48b1f88ce8dddcbdf1ec698b27d4b8b9e5d62"},
{file = "frozenlist-1.4.0-cp310-cp310-win32.whl", hash = "sha256:1a0848b52815006ea6596c395f87449f693dc419061cc21e970f139d466dc0a0"},
{file = "frozenlist-1.4.0-cp310-cp310-win_amd64.whl", hash = "sha256:b206646d176a007466358aa21d85cd8600a415c67c9bd15403336c331a10d956"},
{file = "frozenlist-1.4.0-cp311-cp311-macosx_10_9_universal2.whl", hash = "sha256:de343e75f40e972bae1ef6090267f8260c1446a1695e77096db6cfa25e759a95"},
{file = "frozenlist-1.4.0-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:ad2a9eb6d9839ae241701d0918f54c51365a51407fd80f6b8289e2dfca977cc3"},
{file = "frozenlist-1.4.0-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:bd7bd3b3830247580de99c99ea2a01416dfc3c34471ca1298bccabf86d0ff4dc"},
{file = "frozenlist-1.4.0-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:bdf1847068c362f16b353163391210269e4f0569a3c166bc6a9f74ccbfc7e839"},
{file = "frozenlist-1.4.0-cp311-cp311-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:38461d02d66de17455072c9ba981d35f1d2a73024bee7790ac2f9e361ef1cd0c"},
{file = "frozenlist-1.4.0-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:d5a32087d720c608f42caed0ef36d2b3ea61a9d09ee59a5142d6070da9041b8f"},
{file = "frozenlist-1.4.0-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:dd65632acaf0d47608190a71bfe46b209719bf2beb59507db08ccdbe712f969b"},
{file = "frozenlist-1.4.0-cp311-cp311-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:261b9f5d17cac914531331ff1b1d452125bf5daa05faf73b71d935485b0c510b"},
{file = "frozenlist-1.4.0-cp311-cp311-musllinux_1_1_aarch64.whl", hash = "sha256:b89ac9768b82205936771f8d2eb3ce88503b1556324c9f903e7156669f521472"},
{file = "frozenlist-1.4.0-cp311-cp311-musllinux_1_1_i686.whl", hash = "sha256:008eb8b31b3ea6896da16c38c1b136cb9fec9e249e77f6211d479db79a4eaf01"},
{file = "frozenlist-1.4.0-cp311-cp311-musllinux_1_1_ppc64le.whl", hash = "sha256:e74b0506fa5aa5598ac6a975a12aa8928cbb58e1f5ac8360792ef15de1aa848f"},
{file = "frozenlist-1.4.0-cp311-cp311-musllinux_1_1_s390x.whl", hash = "sha256:490132667476f6781b4c9458298b0c1cddf237488abd228b0b3650e5ecba7467"},
{file = "frozenlist-1.4.0-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:76d4711f6f6d08551a7e9ef28c722f4a50dd0fc204c56b4bcd95c6cc05ce6fbb"},
{file = "frozenlist-1.4.0-cp311-cp311-win32.whl", hash = "sha256:a02eb8ab2b8f200179b5f62b59757685ae9987996ae549ccf30f983f40602431"},
{file = "frozenlist-1.4.0-cp311-cp311-win_amd64.whl", hash = "sha256:515e1abc578dd3b275d6a5114030b1330ba044ffba03f94091842852f806f1c1"},
{file = "frozenlist-1.4.0-cp38-cp38-macosx_10_9_universal2.whl", hash = "sha256:f0ed05f5079c708fe74bf9027e95125334b6978bf07fd5ab923e9e55e5fbb9d3"},
{file = "frozenlist-1.4.0-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:ca265542ca427bf97aed183c1676e2a9c66942e822b14dc6e5f42e038f92a503"},
{file = "frozenlist-1.4.0-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:491e014f5c43656da08958808588cc6c016847b4360e327a62cb308c791bd2d9"},
{file = "frozenlist-1.4.0-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:17ae5cd0f333f94f2e03aaf140bb762c64783935cc764ff9c82dff626089bebf"},
{file = "frozenlist-1.4.0-cp38-cp38-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:1e78fb68cf9c1a6aa4a9a12e960a5c9dfbdb89b3695197aa7064705662515de2"},
{file = "frozenlist-1.4.0-cp38-cp38-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:d5655a942f5f5d2c9ed93d72148226d75369b4f6952680211972a33e59b1dfdc"},
{file = "frozenlist-1.4.0-cp38-cp38-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:c11b0746f5d946fecf750428a95f3e9ebe792c1ee3b1e96eeba145dc631a9672"},
{file = "frozenlist-1.4.0-cp38-cp38-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:e66d2a64d44d50d2543405fb183a21f76b3b5fd16f130f5c99187c3fb4e64919"},
{file = "frozenlist-1.4.0-cp38-cp38-musllinux_1_1_aarch64.whl", hash = "sha256:88f7bc0fcca81f985f78dd0fa68d2c75abf8272b1f5c323ea4a01a4d7a614efc"},
{file = "frozenlist-1.4.0-cp38-cp38-musllinux_1_1_i686.whl", hash = "sha256:5833593c25ac59ede40ed4de6d67eb42928cca97f26feea219f21d0ed0959b79"},
{file = "frozenlist-1.4.0-cp38-cp38-musllinux_1_1_ppc64le.whl", hash = "sha256:fec520865f42e5c7f050c2a79038897b1c7d1595e907a9e08e3353293ffc948e"},
{file = "frozenlist-1.4.0-cp38-cp38-musllinux_1_1_s390x.whl", hash = "sha256:b826d97e4276750beca7c8f0f1a4938892697a6bcd8ec8217b3312dad6982781"},
{file = "frozenlist-1.4.0-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:ceb6ec0a10c65540421e20ebd29083c50e6d1143278746a4ef6bcf6153171eb8"},
{file = "frozenlist-1.4.0-cp38-cp38-win32.whl", hash = "sha256:2b8bcf994563466db019fab287ff390fffbfdb4f905fc77bc1c1d604b1c689cc"},
{file = "frozenlist-1.4.0-cp38-cp38-win_amd64.whl", hash = "sha256:a6c8097e01886188e5be3e6b14e94ab365f384736aa1fca6a0b9e35bd4a30bc7"},
{file = "frozenlist-1.4.0-cp39-cp39-macosx_10_9_universal2.whl", hash = "sha256:6c38721585f285203e4b4132a352eb3daa19121a035f3182e08e437cface44bf"},
{file = "frozenlist-1.4.0-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:a0c6da9aee33ff0b1a451e867da0c1f47408112b3391dd43133838339e410963"},
{file = "frozenlist-1.4.0-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:93ea75c050c5bb3d98016b4ba2497851eadf0ac154d88a67d7a6816206f6fa7f"},
{file = "frozenlist-1.4.0-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:f61e2dc5ad442c52b4887f1fdc112f97caeff4d9e6ebe78879364ac59f1663e1"},
{file = "frozenlist-1.4.0-cp39-cp39-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:aa384489fefeb62321b238e64c07ef48398fe80f9e1e6afeff22e140e0850eef"},
{file = "frozenlist-1.4.0-cp39-cp39-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:10ff5faaa22786315ef57097a279b833ecab1a0bfb07d604c9cbb1c4cdc2ed87"},
{file = "frozenlist-1.4.0-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:007df07a6e3eb3e33e9a1fe6a9db7af152bbd8a185f9aaa6ece10a3529e3e1c6"},
{file = "frozenlist-1.4.0-cp39-cp39-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:7f4f399d28478d1f604c2ff9119907af9726aed73680e5ed1ca634d377abb087"},
{file = "frozenlist-1.4.0-cp39-cp39-musllinux_1_1_aarch64.whl", hash = "sha256:c5374b80521d3d3f2ec5572e05adc94601985cc526fb276d0c8574a6d749f1b3"},
{file = "frozenlist-1.4.0-cp39-cp39-musllinux_1_1_i686.whl", hash = "sha256:ce31ae3e19f3c902de379cf1323d90c649425b86de7bbdf82871b8a2a0615f3d"},
{file = "frozenlist-1.4.0-cp39-cp39-musllinux_1_1_ppc64le.whl", hash = "sha256:7211ef110a9194b6042449431e08c4d80c0481e5891e58d429df5899690511c2"},
{file = "frozenlist-1.4.0-cp39-cp39-musllinux_1_1_s390x.whl", hash = "sha256:556de4430ce324c836789fa4560ca62d1591d2538b8ceb0b4f68fb7b2384a27a"},
{file = "frozenlist-1.4.0-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:7645a8e814a3ee34a89c4a372011dcd817964ce8cb273c8ed6119d706e9613e3"},
{file = "frozenlist-1.4.0-cp39-cp39-win32.whl", hash = "sha256:19488c57c12d4e8095a922f328df3f179c820c212940a498623ed39160bc3c2f"},
{file = "frozenlist-1.4.0-cp39-cp39-win_amd64.whl", hash = "sha256:6221d84d463fb110bdd7619b69cb43878a11d51cbb9394ae3105d082d5199167"},
{file = "frozenlist-1.4.0.tar.gz", hash = "sha256:09163bdf0b2907454042edb19f887c6d33806adc71fbd54afc14908bfdc22251"},
]

[[package]]
name = "fsspec"
version = "2023.6.0"
description = "File-system specification"
optional = false
python-versions = ">=3.8"
files = [
{file = "fsspec-2023.6.0-py3-none-any.whl", hash = "sha256:1cbad1faef3e391fba6dc005ae9b5bdcbf43005c9167ce78c915549c352c869a"},
{file = "fsspec-2023.6.0.tar.gz", hash = "sha256:d0b2f935446169753e7a5c5c55681c54ea91996cc67be93c39a154fb3a2742af"},
]

[package.extras]
abfs = ["adlfs"]
adl = ["adlfs"]
arrow = ["pyarrow (>=1)"]
dask = ["dask", "distributed"]
devel = ["pytest", "pytest-cov"]
dropbox = ["dropbox", "dropboxdrivefs", "requests"]
full = ["adlfs", "aiohttp (!=4.0.0a0,!=4.0.0a1)", "dask", "distributed", "dropbox", "dropboxdrivefs", "fusepy", "gcsfs", "libarchive-c", "ocifs", "panel", "paramiko", "pyarrow (>=1)", "pygit2", "requests", "s3fs", "smbprotocol", "tqdm"]
fuse = ["fusepy"]
gcs = ["gcsfs"]
git = ["pygit2"]
github = ["requests"]
gs = ["gcsfs"]
gui = ["panel"]
hdfs = ["pyarrow (>=1)"]
http = ["aiohttp (!=4.0.0a0,!=4.0.0a1)", "requests"]
libarchive = ["libarchive-c"]
oci = ["ocifs"]
s3 = ["s3fs"]
sftp = ["paramiko"]
smb = ["smbprotocol"]
ssh = ["paramiko"]
tqdm = ["tqdm"]

[[package]]
name = "huggingface-hub"
version = "0.16.4"
description = "Client library to download and publish models, datasets and other repos on the huggingface.co hub"
optional = false
python-versions = ">=3.7.0"
files = [
{file = "huggingface_hub-0.16.4-py3-none-any.whl", hash = "sha256:0d3df29932f334fead024afc7cb4cc5149d955238b8b5e42dcf9740d6995a349"},
{file = "huggingface_hub-0.16.4.tar.gz", hash = "sha256:608c7d4f3d368b326d1747f91523dbd1f692871e8e2e7a4750314a2dd8b63e14"},
]

[package.dependencies]
filelock = "*"
fsspec = "*"
packaging = ">=20.9"
pyyaml = ">=5.1"
requests = "*"
tqdm = ">=4.42.1"
typing-extensions = ">=3.7.4.3"

[package.extras]
all = ["InquirerPy (==0.3.4)", "Jinja2", "Pillow", "aiohttp", "black (>=23.1,<24.0)", "gradio", "jedi", "mypy (==0.982)", "numpy", "pydantic", "pytest", "pytest-asyncio", "pytest-cov", "pytest-env", "pytest-vcr", "pytest-xdist", "ruff (>=0.0.241)", "soundfile", "types-PyYAML", "types-requests", "types-simplejson", "types-toml", "types-tqdm", "types-urllib3", "urllib3 (<2.0)"]
cli = ["InquirerPy (==0.3.4)"]
dev = ["InquirerPy (==0.3.4)", "Jinja2", "Pillow", "aiohttp", "black (>=23.1,<24.0)", "gradio", "jedi", "mypy (==0.982)", "numpy", "pydantic", "pytest", "pytest-asyncio", "pytest-cov", "pytest-env", "pytest-vcr", "pytest-xdist", "ruff (>=0.0.241)", "soundfile", "types-PyYAML", "types-requests", "types-simplejson", "types-toml", "types-tqdm", "types-urllib3", "urllib3 (<2.0)"]
fastai = ["fastai (>=2.4)", "fastcore (>=1.3.27)", "toml"]
inference = ["aiohttp", "pydantic"]
quality = ["black (>=23.1,<24.0)", "mypy (==0.982)", "ruff (>=0.0.241)"]
tensorflow = ["graphviz", "pydot", "tensorflow"]
testing = ["InquirerPy (==0.3.4)", "Jinja2", "Pillow", "aiohttp", "gradio", "jedi", "numpy", "pydantic", "pytest", "pytest-asyncio", "pytest-cov", "pytest-env", "pytest-vcr", "pytest-xdist", "soundfile", "urllib3 (<2.0)"]
torch = ["torch"]
typing = ["pydantic", "types-PyYAML", "types-requests", "types-simplejson", "types-toml", "types-tqdm", "types-urllib3"]

[[package]]
name = "idna"
version = "3.4"
description = "Internationalized Domain Names in Applications (IDNA)"
optional = false
python-versions = ">=3.5"
files = [
{file = "idna-3.4-py3-none-any.whl", hash = "sha256:90b77e79eaa3eba6de819a0c442c0b4ceefc341a7a2ab77d7562bf49f425c5c2"},
{file = "idna-3.4.tar.gz", hash = "sha256:814f528e8dead7d329833b91c5faa87d60bf71824cd12a7530b5526063d02cb4"},
]

[[package]]
name = "iniconfig"
version = "2.0.0"
description = "brain-dead simple config-ini parsing"
optional = false
python-versions = ">=3.7"
files = [
{file = "iniconfig-2.0.0-py3-none-any.whl", hash = "sha256:b6a85871a79d2e3b22d2d1b94ac2824226a63c6b741c88f7ae975f18b6778374"},
{file = "iniconfig-2.0.0.tar.gz", hash = "sha256:2d91e135bf72d31a410b17c16da610a82cb55f6b0477d1a902134b24a455b8b3"},
]

[[package]]
name = "multidict"
version = "6.0.4"
description = "multidict implementation"
optional = false
python-versions = ">=3.7"
files = [
{file = "multidict-6.0.4-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:0b1a97283e0c85772d613878028fec909f003993e1007eafa715b24b377cb9b8"},
{file = "multidict-6.0.4-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:eeb6dcc05e911516ae3d1f207d4b0520d07f54484c49dfc294d6e7d63b734171"},
{file = "multidict-6.0.4-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:d6d635d5209b82a3492508cf5b365f3446afb65ae7ebd755e70e18f287b0adf7"},
{file = "multidict-6.0.4-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:c048099e4c9e9d615545e2001d3d8a4380bd403e1a0578734e0d31703d1b0c0b"},
{file = "multidict-6.0.4-cp310-cp310-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:ea20853c6dbbb53ed34cb4d080382169b6f4554d394015f1bef35e881bf83547"},
{file = "multidict-6.0.4-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:16d232d4e5396c2efbbf4f6d4df89bfa905eb0d4dc5b3549d872ab898451f569"},
{file = "multidict-6.0.4-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:36c63aaa167f6c6b04ef2c85704e93af16c11d20de1d133e39de6a0e84582a93"},
{file = "multidict-6.0.4-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:64bdf1086b6043bf519869678f5f2757f473dee970d7abf6da91ec00acb9cb98"},
{file = "multidict-6.0.4-cp310-cp310-musllinux_1_1_aarch64.whl", hash = "sha256:43644e38f42e3af682690876cff722d301ac585c5b9e1eacc013b7a3f7b696a0"},
{file = "multidict-6.0.4-cp310-cp310-musllinux_1_1_i686.whl", hash = "sha256:7582a1d1030e15422262de9f58711774e02fa80df0d1578995c76214f6954988"},
{file = "multidict-6.0.4-cp310-cp310-musllinux_1_1_ppc64le.whl", hash = "sha256:ddff9c4e225a63a5afab9dd15590432c22e8057e1a9a13d28ed128ecf047bbdc"},
{file = "multidict-6.0.4-cp310-cp310-musllinux_1_1_s390x.whl", hash = "sha256:ee2a1ece51b9b9e7752e742cfb661d2a29e7bcdba2d27e66e28a99f1890e4fa0"},
{file = "multidict-6.0.4-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:a2e4369eb3d47d2034032a26c7a80fcb21a2cb22e1173d761a162f11e562caa5"},
{file = "multidict-6.0.4-cp310-cp310-win32.whl", hash = "sha256:574b7eae1ab267e5f8285f0fe881f17efe4b98c39a40858247720935b893bba8"},
{file = "multidict-6.0.4-cp310-cp310-win_amd64.whl", hash = "sha256:4dcbb0906e38440fa3e325df2359ac6cb043df8e58c965bb45f4e406ecb162cc"},
{file = "multidict-6.0.4-cp311-cp311-macosx_10_9_universal2.whl", hash = "sha256:0dfad7a5a1e39c53ed00d2dd0c2e36aed4650936dc18fd9a1826a5ae1cad6f03"},
{file = "multidict-6.0.4-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:64da238a09d6039e3bd39bb3aee9c21a5e34f28bfa5aa22518581f910ff94af3"},
{file = "multidict-6.0.4-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:ff959bee35038c4624250473988b24f846cbeb2c6639de3602c073f10410ceba"},
{file = "multidict-6.0.4-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:01a3a55bd90018c9c080fbb0b9f4891db37d148a0a18722b42f94694f8b6d4c9"},
{file = "multidict-6.0.4-cp311-cp311-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:c5cb09abb18c1ea940fb99360ea0396f34d46566f157122c92dfa069d3e0e982"},
{file = "multidict-6.0.4-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:666daae833559deb2d609afa4490b85830ab0dfca811a98b70a205621a6109fe"},
{file = "multidict-6.0.4-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:11bdf3f5e1518b24530b8241529d2050014c884cf18b6fc69c0c2b30ca248710"},
{file = "multidict-6.0.4-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:7d18748f2d30f94f498e852c67d61261c643b349b9d2a581131725595c45ec6c"},
{file = "multidict-6.0.4-cp311-cp311-musllinux_1_1_aarch64.whl", hash = "sha256:458f37be2d9e4c95e2d8866a851663cbc76e865b78395090786f6cd9b3bbf4f4"},
{file = "multidict-6.0.4-cp311-cp311-musllinux_1_1_i686.whl", hash = "sha256:b1a2eeedcead3a41694130495593a559a668f382eee0727352b9a41e1c45759a"},
{file = "multidict-6.0.4-cp311-cp311-musllinux_1_1_ppc64le.whl", hash = "sha256:7d6ae9d593ef8641544d6263c7fa6408cc90370c8cb2bbb65f8d43e5b0351d9c"},
{file = "multidict-6.0.4-cp311-cp311-musllinux_1_1_s390x.whl", hash = "sha256:5979b5632c3e3534e42ca6ff856bb24b2e3071b37861c2c727ce220d80eee9ed"},
{file = "multidict-6.0.4-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:dcfe792765fab89c365123c81046ad4103fcabbc4f56d1c1997e6715e8015461"},
{file = "multidict-6.0.4-cp311-cp311-win32.whl", hash = "sha256:3601a3cece3819534b11d4efc1eb76047488fddd0c85a3948099d5da4d504636"},
{file = "multidict-6.0.4-cp311-cp311-win_amd64.whl", hash = "sha256:81a4f0b34bd92df3da93315c6a59034df95866014ac08535fc819f043bfd51f0"},
{file = "multidict-6.0.4-cp37-cp37m-macosx_10_9_x86_64.whl", hash = "sha256:67040058f37a2a51ed8ea8f6b0e6ee5bd78ca67f169ce6122f3e2ec80dfe9b78"},
{file = "multidict-6.0.4-cp37-cp37m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:853888594621e6604c978ce2a0444a1e6e70c8d253ab65ba11657659dcc9100f"},
{file = "multidict-6.0.4-cp37-cp37m-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:39ff62e7d0f26c248b15e364517a72932a611a9b75f35b45be078d81bdb86603"},
{file = "multidict-6.0.4-cp37-cp37m-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:af048912e045a2dc732847d33821a9d84ba553f5c5f028adbd364dd4765092ac"},
{file = "multidict-6.0.4-cp37-cp37m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:b1e8b901e607795ec06c9e42530788c45ac21ef3aaa11dbd0c69de543bfb79a9"},
{file = "multidict-6.0.4-cp37-cp37m-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:62501642008a8b9871ddfccbf83e4222cf8ac0d5aeedf73da36153ef2ec222d2"},
{file = "multidict-6.0.4-cp37-cp37m-musllinux_1_1_aarch64.whl", hash = "sha256:99b76c052e9f1bc0721f7541e5e8c05db3941eb9ebe7b8553c625ef88d6eefde"},
{file = "multidict-6.0.4-cp37-cp37m-musllinux_1_1_i686.whl", hash = "sha256:509eac6cf09c794aa27bcacfd4d62c885cce62bef7b2c3e8b2e49d365b5003fe"},
{file = "multidict-6.0.4-cp37-cp37m-musllinux_1_1_ppc64le.whl", hash = "sha256:21a12c4eb6ddc9952c415f24eef97e3e55ba3af61f67c7bc388dcdec1404a067"},
{file = "multidict-6.0.4-cp37-cp37m-musllinux_1_1_s390x.whl", hash = "sha256:5cad9430ab3e2e4fa4a2ef4450f548768400a2ac635841bc2a56a2052cdbeb87"},
{file = "multidict-6.0.4-cp37-cp37m-musllinux_1_1_x86_64.whl", hash = "sha256:ab55edc2e84460694295f401215f4a58597f8f7c9466faec545093045476327d"},
{file = "multidict-6.0.4-cp37-cp37m-win32.whl", hash = "sha256:5a4dcf02b908c3b8b17a45fb0f15b695bf117a67b76b7ad18b73cf8e92608775"},
{file = "multidict-6.0.4-cp37-cp37m-win_amd64.whl", hash = "sha256:6ed5f161328b7df384d71b07317f4d8656434e34591f20552c7bcef27b0ab88e"},
{file = "multidict-6.0.4-cp38-cp38-macosx_10_9_universal2.whl", hash = "sha256:5fc1b16f586f049820c5c5b17bb4ee7583092fa0d1c4e28b5239181ff9532e0c"},
{file = "multidict-6.0.4-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:1502e24330eb681bdaa3eb70d6358e818e8e8f908a22a1851dfd4e15bc2f8161"},
{file = "multidict-6.0.4-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:b692f419760c0e65d060959df05f2a531945af31fda0c8a3b3195d4efd06de11"},
{file = "multidict-6.0.4-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:45e1ecb0379bfaab5eef059f50115b54571acfbe422a14f668fc8c27ba410e7e"},
{file = "multidict-6.0.4-cp38-cp38-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:ddd3915998d93fbcd2566ddf9cf62cdb35c9e093075f862935573d265cf8f65d"},
{file = "multidict-6.0.4-cp38-cp38-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:59d43b61c59d82f2effb39a93c48b845efe23a3852d201ed2d24ba830d0b4cf2"},
{file = "multidict-6.0.4-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:cc8e1d0c705233c5dd0c5e6460fbad7827d5d36f310a0fadfd45cc3029762258"},
{file = "multidict-6.0.4-cp38-cp38-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:d6aa0418fcc838522256761b3415822626f866758ee0bc6632c9486b179d0b52"},
{file = "multidict-6.0.4-cp38-cp38-musllinux_1_1_aarch64.whl", hash = "sha256:6748717bb10339c4760c1e63da040f5f29f5ed6e59d76daee30305894069a660"},
{file = "multidict-6.0.4-cp38-cp38-musllinux_1_1_i686.whl", hash = "sha256:4d1a3d7ef5e96b1c9e92f973e43aa5e5b96c659c9bc3124acbbd81b0b9c8a951"},
{file = "multidict-6.0.4-cp38-cp38-musllinux_1_1_ppc64le.whl", hash = "sha256:4372381634485bec7e46718edc71528024fcdc6f835baefe517b34a33c731d60"},
{file = "multidict-6.0.4-cp38-cp38-musllinux_1_1_s390x.whl", hash = "sha256:fc35cb4676846ef752816d5be2193a1e8367b4c1397b74a565a9d0389c433a1d"},
{file = "multidict-6.0.4-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:4b9d9e4e2b37daddb5c23ea33a3417901fa7c7b3dee2d855f63ee67a0b21e5b1"},
{file = "multidict-6.0.4-cp38-cp38-win32.whl", hash = "sha256:e41b7e2b59679edfa309e8db64fdf22399eec4b0b24694e1b2104fb789207779"},
{file = "multidict-6.0.4-cp38-cp38-win_amd64.whl", hash = "sha256:d6c254ba6e45d8e72739281ebc46ea5eb5f101234f3ce171f0e9f5cc86991480"},
{file = "multidict-6.0.4-cp39-cp39-macosx_10_9_universal2.whl", hash = "sha256:16ab77bbeb596e14212e7bab8429f24c1579234a3a462105cda4a66904998664"},
{file = "multidict-6.0.4-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:bc779e9e6f7fda81b3f9aa58e3a6091d49ad528b11ed19f6621408806204ad35"},
{file = "multidict-6.0.4-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:4ceef517eca3e03c1cceb22030a3e39cb399ac86bff4e426d4fc6ae49052cc60"},
{file = "multidict-6.0.4-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:281af09f488903fde97923c7744bb001a9b23b039a909460d0f14edc7bf59706"},
{file = "multidict-6.0.4-cp39-cp39-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:52f2dffc8acaba9a2f27174c41c9e57f60b907bb9f096b36b1a1f3be71c6284d"},
{file = "multidict-6.0.4-cp39-cp39-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:b41156839806aecb3641f3208c0dafd3ac7775b9c4c422d82ee2a45c34ba81ca"},
{file = "multidict-6.0.4-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:d5e3fc56f88cc98ef8139255cf8cd63eb2c586531e43310ff859d6bb3a6b51f1"},
{file = "multidict-6.0.4-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:8316a77808c501004802f9beebde51c9f857054a0c871bd6da8280e718444449"},
{file = "multidict-6.0.4-cp39-cp39-musllinux_1_1_aarch64.whl", hash = "sha256:f70b98cd94886b49d91170ef23ec5c0e8ebb6f242d734ed7ed677b24d50c82cf"},
{file = "multidict-6.0.4-cp39-cp39-musllinux_1_1_i686.whl", hash = "sha256:bf6774e60d67a9efe02b3616fee22441d86fab4c6d335f9d2051d19d90a40063"},
{file = "multidict-6.0.4-cp39-cp39-musllinux_1_1_ppc64le.whl", hash = "sha256:e69924bfcdda39b722ef4d9aa762b2dd38e4632b3641b1d9a57ca9cd18f2f83a"},
{file = "multidict-6.0.4-cp39-cp39-musllinux_1_1_s390x.whl", hash = "sha256:6b181d8c23da913d4ff585afd1155a0e1194c0b50c54fcfe286f70cdaf2b7176"},
{file = "multidict-6.0.4-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:52509b5be062d9eafc8170e53026fbc54cf3b32759a23d07fd935fb04fc22d95"},
{file = "multidict-6.0.4-cp39-cp39-win32.whl", hash = "sha256:27c523fbfbdfd19c6867af7346332b62b586eed663887392cff78d614f9ec313"},
{file = "multidict-6.0.4-cp39-cp39-win_amd64.whl", hash = "sha256:33029f5734336aa0d4c0384525da0387ef89148dc7191aae00ca5fb23d7aafc2"},
{file = "multidict-6.0.4.tar.gz", hash = "sha256:3666906492efb76453c0e7b97f2cf459b0682e7402c0489a95484965dbc1da49"},
]

[[package]]
name = "packaging"
version = "23.1"
description = "Core utilities for Python packages"
optional = false
python-versions = ">=3.7"
files = [
{file = "packaging-23.1-py3-none-any.whl", hash = "sha256:994793af429502c4ea2ebf6bf664629d07c1a9fe974af92966e4b8d2df7edc61"},
{file = "packaging-23.1.tar.gz", hash = "sha256:a392980d2b6cffa644431898be54b0045151319d1e7ec34f0cfed48767dd334f"},
]

[[package]]
name = "pluggy"
version = "1.3.0"
description = "plugin and hook calling mechanisms for python"
optional = false
python-versions = ">=3.8"
files = [
{file = "pluggy-1.3.0-py3-none-any.whl", hash = "sha256:d89c696a773f8bd377d18e5ecda92b7a3793cbe66c87060a6fb58c7b6e1061f7"},
{file = "pluggy-1.3.0.tar.gz", hash = "sha256:cf61ae8f126ac6f7c451172cf30e3e43d3ca77615509771b3a984a0730651e12"},
]

[package.extras]
dev = ["pre-commit", "tox"]
testing = ["pytest", "pytest-benchmark"]

[[package]]
name = "pydantic"
version = "1.10.12"
description = "Data validation and settings management using python type hints"
optional = false
python-versions = ">=3.7"
files = [
{file = "pydantic-1.10.12-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:a1fcb59f2f355ec350073af41d927bf83a63b50e640f4dbaa01053a28b7a7718"},
{file = "pydantic-1.10.12-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:b7ccf02d7eb340b216ec33e53a3a629856afe1c6e0ef91d84a4e6f2fb2ca70fe"},
{file = "pydantic-1.10.12-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:8fb2aa3ab3728d950bcc885a2e9eff6c8fc40bc0b7bb434e555c215491bcf48b"},
{file = "pydantic-1.10.12-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:771735dc43cf8383959dc9b90aa281f0b6092321ca98677c5fb6125a6f56d58d"},
{file = "pydantic-1.10.12-cp310-cp310-musllinux_1_1_i686.whl", hash = "sha256:ca48477862372ac3770969b9d75f1bf66131d386dba79506c46d75e6b48c1e09"},
{file = "pydantic-1.10.12-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:a5e7add47a5b5a40c49b3036d464e3c7802f8ae0d1e66035ea16aa5b7a3923ed"},
{file = "pydantic-1.10.12-cp310-cp310-win_amd64.whl", hash = "sha256:e4129b528c6baa99a429f97ce733fff478ec955513630e61b49804b6cf9b224a"},
{file = "pydantic-1.10.12-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:b0d191db0f92dfcb1dec210ca244fdae5cbe918c6050b342d619c09d31eea0cc"},
{file = "pydantic-1.10.12-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:795e34e6cc065f8f498c89b894a3c6da294a936ee71e644e4bd44de048af1405"},
{file = "pydantic-1.10.12-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:69328e15cfda2c392da4e713443c7dbffa1505bc9d566e71e55abe14c97ddc62"},
{file = "pydantic-1.10.12-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:2031de0967c279df0d8a1c72b4ffc411ecd06bac607a212892757db7462fc494"},
{file = "pydantic-1.10.12-cp311-cp311-musllinux_1_1_i686.whl", hash = "sha256:ba5b2e6fe6ca2b7e013398bc7d7b170e21cce322d266ffcd57cca313e54fb246"},
{file = "pydantic-1.10.12-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:2a7bac939fa326db1ab741c9d7f44c565a1d1e80908b3797f7f81a4f86bc8d33"},
{file = "pydantic-1.10.12-cp311-cp311-win_amd64.whl", hash = "sha256:87afda5539d5140cb8ba9e8b8c8865cb5b1463924d38490d73d3ccfd80896b3f"},
{file = "pydantic-1.10.12-cp37-cp37m-macosx_10_9_x86_64.whl", hash = "sha256:549a8e3d81df0a85226963611950b12d2d334f214436a19537b2efed61b7639a"},
{file = "pydantic-1.10.12-cp37-cp37m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:598da88dfa127b666852bef6d0d796573a8cf5009ffd62104094a4fe39599565"},
{file = "pydantic-1.10.12-cp37-cp37m-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:ba5c4a8552bff16c61882db58544116d021d0b31ee7c66958d14cf386a5b5350"},
{file = "pydantic-1.10.12-cp37-cp37m-musllinux_1_1_i686.whl", hash = "sha256:c79e6a11a07da7374f46970410b41d5e266f7f38f6a17a9c4823db80dadf4303"},
{file = "pydantic-1.10.12-cp37-cp37m-musllinux_1_1_x86_64.whl", hash = "sha256:ab26038b8375581dc832a63c948f261ae0aa21f1d34c1293469f135fa92972a5"},
{file = "pydantic-1.10.12-cp37-cp37m-win_amd64.whl", hash = "sha256:e0a16d274b588767602b7646fa05af2782576a6cf1022f4ba74cbb4db66f6ca8"},
{file = "pydantic-1.10.12-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:6a9dfa722316f4acf4460afdf5d41d5246a80e249c7ff475c43a3a1e9d75cf62"},
{file = "pydantic-1.10.12-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:a73f489aebd0c2121ed974054cb2759af8a9f747de120acd2c3394cf84176ccb"},
{file = "pydantic-1.10.12-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:6b30bcb8cbfccfcf02acb8f1a261143fab622831d9c0989707e0e659f77a18e0"},
{file = "pydantic-1.10.12-cp38-cp38-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:2fcfb5296d7877af406ba1547dfde9943b1256d8928732267e2653c26938cd9c"},
{file = "pydantic-1.10.12-cp38-cp38-musllinux_1_1_i686.whl", hash = "sha256:2f9a6fab5f82ada41d56b0602606a5506aab165ca54e52bc4545028382ef1c5d"},
{file = "pydantic-1.10.12-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:dea7adcc33d5d105896401a1f37d56b47d443a2b2605ff8a969a0ed5543f7e33"},
{file = "pydantic-1.10.12-cp38-cp38-win_amd64.whl", hash = "sha256:1eb2085c13bce1612da8537b2d90f549c8cbb05c67e8f22854e201bde5d98a47"},
{file = "pydantic-1.10.12-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:ef6c96b2baa2100ec91a4b428f80d8f28a3c9e53568219b6c298c1125572ebc6"},
{file = "pydantic-1.10.12-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:6c076be61cd0177a8433c0adcb03475baf4ee91edf5a4e550161ad57fc90f523"},
{file = "pydantic-1.10.12-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:2d5a58feb9a39f481eda4d5ca220aa8b9d4f21a41274760b9bc66bfd72595b86"},
{file = "pydantic-1.10.12-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:e5f805d2d5d0a41633651a73fa4ecdd0b3d7a49de4ec3fadf062fe16501ddbf1"},
{file = "pydantic-1.10.12-cp39-cp39-musllinux_1_1_i686.whl", hash = "sha256:1289c180abd4bd4555bb927c42ee42abc3aee02b0fb2d1223fb7c6e5bef87dbe"},
{file = "pydantic-1.10.12-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:5d1197e462e0364906cbc19681605cb7c036f2475c899b6f296104ad42b9f5fb"},
{file = "pydantic-1.10.12-cp39-cp39-win_amd64.whl", hash = "sha256:fdbdd1d630195689f325c9ef1a12900524dceb503b00a987663ff4f58669b93d"},
{file = "pydantic-1.10.12-py3-none-any.whl", hash = "sha256:b749a43aa51e32839c9d71dc67eb1e4221bb04af1033a32e3923d46f9effa942"},
{file = "pydantic-1.10.12.tar.gz", hash = "sha256:0fe8a415cea8f340e7a9af9c54fc71a649b43e8ca3cc732986116b3cb135d303"},
]

[package.dependencies]
typing-extensions = ">=4.2.0"

[package.extras]
dotenv = ["python-dotenv (>=0.10.4)"]
email = ["email-validator (>=1.0.3)"]

[[package]]
name = "pytest"
version = "7.4.0"
description = "pytest: simple powerful testing with Python"
optional = false
python-versions = ">=3.7"
files = [
{file = "pytest-7.4.0-py3-none-any.whl", hash = "sha256:78bf16451a2eb8c7a2ea98e32dc119fd2aa758f1d5d66dbf0a59d69a3969df32"},
{file = "pytest-7.4.0.tar.gz", hash = "sha256:b4bf8c45bd59934ed84001ad51e11b4ee40d40a1229d2c79f9c592b0a3f6bd8a"},
]

[package.dependencies]
colorama = {version = "*", markers = "sys_platform == \"win32\""}
exceptiongroup = {version = ">=1.0.0rc8", markers = "python_version < \"3.11\""}
iniconfig = "*"
packaging = "*"
pluggy = ">=0.12,<2.0"
tomli = {version = ">=1.0.0", markers = "python_version < \"3.11\""}

[package.extras]
testing = ["argcomplete", "attrs (>=19.2.0)", "hypothesis (>=3.56)", "mock", "nose", "pygments (>=2.7.2)", "requests", "setuptools", "xmlschema"]

[[package]]
name = "pytest-asyncio"
version = "0.21.1"
description = "Pytest support for asyncio"
optional = false
python-versions = ">=3.7"
files = [
{file = "pytest-asyncio-0.21.1.tar.gz", hash = "sha256:40a7eae6dded22c7b604986855ea48400ab15b069ae38116e8c01238e9eeb64d"},
{file = "pytest_asyncio-0.21.1-py3-none-any.whl", hash = "sha256:8666c1c8ac02631d7c51ba282e0c69a8a452b211ffedf2599099845da5c5c37b"},
]

[package.dependencies]
pytest = ">=7.0.0"

[package.extras]
docs = ["sphinx (>=5.3)", "sphinx-rtd-theme (>=1.0)"]
testing = ["coverage (>=6.2)", "flaky (>=3.5.0)", "hypothesis (>=5.7.1)", "mypy (>=0.931)", "pytest-trio (>=0.7.0)"]

[[package]]
name = "pywin32"
version = "306"
description = "Python for Window Extensions"
optional = false
python-versions = "*"
files = [
{file = "pywin32-306-cp310-cp310-win32.whl", hash = "sha256:06d3420a5155ba65f0b72f2699b5bacf3109f36acbe8923765c22938a69dfc8d"},
{file = "pywin32-306-cp310-cp310-win_amd64.whl", hash = "sha256:84f4471dbca1887ea3803d8848a1616429ac94a4a8d05f4bc9c5dcfd42ca99c8"},
{file = "pywin32-306-cp311-cp311-win32.whl", hash = "sha256:e65028133d15b64d2ed8f06dd9fbc268352478d4f9289e69c190ecd6818b6407"},
{file = "pywin32-306-cp311-cp311-win_amd64.whl", hash = "sha256:a7639f51c184c0272e93f244eb24dafca9b1855707d94c192d4a0b4c01e1100e"},
{file = "pywin32-306-cp311-cp311-win_arm64.whl", hash = "sha256:70dba0c913d19f942a2db25217d9a1b726c278f483a919f1abfed79c9cf64d3a"},
{file = "pywin32-306-cp312-cp312-win32.whl", hash = "sha256:383229d515657f4e3ed1343da8be101000562bf514591ff383ae940cad65458b"},
{file = "pywin32-306-cp312-cp312-win_amd64.whl", hash = "sha256:37257794c1ad39ee9be652da0462dc2e394c8159dfd913a8a4e8eb6fd346da0e"},
{file = "pywin32-306-cp312-cp312-win_arm64.whl", hash = "sha256:5821ec52f6d321aa59e2db7e0a35b997de60c201943557d108af9d4ae1ec7040"},
{file = "pywin32-306-cp37-cp37m-win32.whl", hash = "sha256:1c73ea9a0d2283d889001998059f5eaaba3b6238f767c9cf2833b13e6a685f65"},
{file = "pywin32-306-cp37-cp37m-win_amd64.whl", hash = "sha256:72c5f621542d7bdd4fdb716227be0dd3f8565c11b280be6315b06ace35487d36"},
{file = "pywin32-306-cp38-cp38-win32.whl", hash = "sha256:e4c092e2589b5cf0d365849e73e02c391c1349958c5ac3e9d5ccb9a28e017b3a"},
{file = "pywin32-306-cp38-cp38-win_amd64.whl", hash = "sha256:e8ac1ae3601bee6ca9f7cb4b5363bf1c0badb935ef243c4733ff9a393b1690c0"},
{file = "pywin32-306-cp39-cp39-win32.whl", hash = "sha256:e25fd5b485b55ac9c057f67d94bc203f3f6595078d1fb3b458c9c28b7153a802"},
{file = "pywin32-306-cp39-cp39-win_amd64.whl", hash = "sha256:39b61c15272833b5c329a2989999dcae836b1eed650252ab1b7bfbe1d59f30f4"},
]

[[package]]
name = "pyyaml"
version = "6.0.1"
description = "YAML parser and emitter for Python"
optional = false
python-versions = ">=3.6"
files = [
{file = "PyYAML-6.0.1-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:d858aa552c999bc8a8d57426ed01e40bef403cd8ccdd0fc5f6f04a00414cac2a"},
{file = "PyYAML-6.0.1-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:fd66fc5d0da6d9815ba2cebeb4205f95818ff4b79c3ebe268e75d961704af52f"},
{file = "PyYAML-6.0.1-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:69b023b2b4daa7548bcfbd4aa3da05b3a74b772db9e23b982788168117739938"},
{file = "PyYAML-6.0.1-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:81e0b275a9ecc9c0c0c07b4b90ba548307583c125f54d5b6946cfee6360c733d"},
{file = "PyYAML-6.0.1-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:ba336e390cd8e4d1739f42dfe9bb83a3cc2e80f567d8805e11b46f4a943f5515"},
{file = "PyYAML-6.0.1-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:326c013efe8048858a6d312ddd31d56e468118ad4cdeda36c719bf5bb6192290"},
{file = "PyYAML-6.0.1-cp310-cp310-win32.whl", hash = "sha256:bd4af7373a854424dabd882decdc5579653d7868b8fb26dc7d0e99f823aa5924"},
{file = "PyYAML-6.0.1-cp310-cp310-win_amd64.whl", hash = "sha256:fd1592b3fdf65fff2ad0004b5e363300ef59ced41c2e6b3a99d4089fa8c5435d"},
{file = "PyYAML-6.0.1-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:6965a7bc3cf88e5a1c3bd2e0b5c22f8d677dc88a455344035f03399034eb3007"},
{file = "PyYAML-6.0.1-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:f003ed9ad21d6a4713f0a9b5a7a0a79e08dd0f221aff4525a2be4c346ee60aab"},
{file = "PyYAML-6.0.1-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:42f8152b8dbc4fe7d96729ec2b99c7097d656dc1213a3229ca5383f973a5ed6d"},
{file = "PyYAML-6.0.1-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:062582fca9fabdd2c8b54a3ef1c978d786e0f6b3a1510e0ac93ef59e0ddae2bc"},
{file = "PyYAML-6.0.1-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:d2b04aac4d386b172d5b9692e2d2da8de7bfb6c387fa4f801fbf6fb2e6ba4673"},
{file = "PyYAML-6.0.1-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:e7d73685e87afe9f3b36c799222440d6cf362062f78be1013661b00c5c6f678b"},
{file = "PyYAML-6.0.1-cp311-cp311-win32.whl", hash = "sha256:1635fd110e8d85d55237ab316b5b011de701ea0f29d07611174a1b42f1444741"},
{file = "PyYAML-6.0.1-cp311-cp311-win_amd64.whl", hash = "sha256:bf07ee2fef7014951eeb99f56f39c9bb4af143d8aa3c21b1677805985307da34"},
{file = "PyYAML-6.0.1-cp312-cp312-macosx_10_9_x86_64.whl", hash = "sha256:855fb52b0dc35af121542a76b9a84f8d1cd886ea97c84703eaa6d88e37a2ad28"},
{file = "PyYAML-6.0.1-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:40df9b996c2b73138957fe23a16a4f0ba614f4c0efce1e9406a184b6d07fa3a9"},
{file = "PyYAML-6.0.1-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:6c22bec3fbe2524cde73d7ada88f6566758a8f7227bfbf93a408a9d86bcc12a0"},
{file = "PyYAML-6.0.1-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:8d4e9c88387b0f5c7d5f281e55304de64cf7f9c0021a3525bd3b1c542da3b0e4"},
{file = "PyYAML-6.0.1-cp312-cp312-win32.whl", hash = "sha256:d483d2cdf104e7c9fa60c544d92981f12ad66a457afae824d146093b8c294c54"},
{file = "PyYAML-6.0.1-cp312-cp312-win_amd64.whl", hash = "sha256:0d3304d8c0adc42be59c5f8a4d9e3d7379e6955ad754aa9d6ab7a398b59dd1df"},
{file = "PyYAML-6.0.1-cp36-cp36m-macosx_10_9_x86_64.whl", hash = "sha256:50550eb667afee136e9a77d6dc71ae76a44df8b3e51e41b77f6de2932bfe0f47"},
{file = "PyYAML-6.0.1-cp36-cp36m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:1fe35611261b29bd1de0070f0b2f47cb6ff71fa6595c077e42bd0c419fa27b98"},
{file = "PyYAML-6.0.1-cp36-cp36m-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:704219a11b772aea0d8ecd7058d0082713c3562b4e271b849ad7dc4a5c90c13c"},
{file = "PyYAML-6.0.1-cp36-cp36m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:afd7e57eddb1a54f0f1a974bc4391af8bcce0b444685d936840f125cf046d5bd"},
{file = "PyYAML-6.0.1-cp36-cp36m-win32.whl", hash = "sha256:fca0e3a251908a499833aa292323f32437106001d436eca0e6e7833256674585"},
{file = "PyYAML-6.0.1-cp36-cp36m-win_amd64.whl", hash = "sha256:f22ac1c3cac4dbc50079e965eba2c1058622631e526bd9afd45fedd49ba781fa"},
{file = "PyYAML-6.0.1-cp37-cp37m-macosx_10_9_x86_64.whl", hash = "sha256:b1275ad35a5d18c62a7220633c913e1b42d44b46ee12554e5fd39c70a243d6a3"},
{file = "PyYAML-6.0.1-cp37-cp37m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:18aeb1bf9a78867dc38b259769503436b7c72f7a1f1f4c93ff9a17de54319b27"},
{file = "PyYAML-6.0.1-cp37-cp37m-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:596106435fa6ad000c2991a98fa58eeb8656ef2325d7e158344fb33864ed87e3"},
{file = "PyYAML-6.0.1-cp37-cp37m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:baa90d3f661d43131ca170712d903e6295d1f7a0f595074f151c0aed377c9b9c"},
{file = "PyYAML-6.0.1-cp37-cp37m-win32.whl", hash = "sha256:9046c58c4395dff28dd494285c82ba00b546adfc7ef001486fbf0324bc174fba"},
{file = "PyYAML-6.0.1-cp37-cp37m-win_amd64.whl", hash = "sha256:4fb147e7a67ef577a588a0e2c17b6db51dda102c71de36f8549b6816a96e1867"},
{file = "PyYAML-6.0.1-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:1d4c7e777c441b20e32f52bd377e0c409713e8bb1386e1099c2415f26e479595"},
{file = "PyYAML-6.0.1-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:a0cd17c15d3bb3fa06978b4e8958dcdc6e0174ccea823003a106c7d4d7899ac5"},
{file = "PyYAML-6.0.1-cp38-cp38-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:28c119d996beec18c05208a8bd78cbe4007878c6dd15091efb73a30e90539696"},
{file = "PyYAML-6.0.1-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:7e07cbde391ba96ab58e532ff4803f79c4129397514e1413a7dc761ccd755735"},
{file = "PyYAML-6.0.1-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:49a183be227561de579b4a36efbb21b3eab9651dd81b1858589f796549873dd6"},
{file = "PyYAML-6.0.1-cp38-cp38-win32.whl", hash = "sha256:184c5108a2aca3c5b3d3bf9395d50893a7ab82a38004c8f61c258d4428e80206"},
{file = "PyYAML-6.0.1-cp38-cp38-win_amd64.whl", hash = "sha256:1e2722cc9fbb45d9b87631ac70924c11d3a401b2d7f410cc0e3bbf249f2dca62"},
{file = "PyYAML-6.0.1-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:9eb6caa9a297fc2c2fb8862bc5370d0303ddba53ba97e71f08023b6cd73d16a8"},
{file = "PyYAML-6.0.1-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:c8098ddcc2a85b61647b2590f825f3db38891662cfc2fc776415143f599bb859"},
{file = "PyYAML-6.0.1-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:5773183b6446b2c99bb77e77595dd486303b4faab2b086e7b17bc6bef28865f6"},
{file = "PyYAML-6.0.1-cp39-cp39-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:b786eecbdf8499b9ca1d697215862083bd6d2a99965554781d0d8d1ad31e13a0"},
{file = "PyYAML-6.0.1-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:bc1bf2925a1ecd43da378f4db9e4f799775d6367bdb94671027b73b393a7c42c"},
{file = "PyYAML-6.0.1-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:04ac92ad1925b2cff1db0cfebffb6ffc43457495c9b3c39d3fcae417d7125dc5"},
{file = "PyYAML-6.0.1-cp39-cp39-win32.whl", hash = "sha256:faca3bdcf85b2fc05d06ff3fbc1f83e1391b3e724afa3feba7d13eeab355484c"},
{file = "PyYAML-6.0.1-cp39-cp39-win_amd64.whl", hash = "sha256:510c9deebc5c0225e8c96813043e62b680ba2f9c50a08d3724c7f28a747d1486"},
{file = "PyYAML-6.0.1.tar.gz", hash = "sha256:bfdf460b1736c775f2ba9f6a92bca30bc2095067b8a9d77876d1fad6cc3b4a43"},
]

[[package]]
name = "requests"
version = "2.31.0"
description = "Python HTTP for Humans."
optional = false
python-versions = ">=3.7"
files = [
{file = "requests-2.31.0-py3-none-any.whl", hash = "sha256:58cd2187c01e70e6e26505bca751777aa9f2ee0b7f4300988b709f44e013003f"},
{file = "requests-2.31.0.tar.gz", hash = "sha256:942c5a758f98d790eaed1a29cb6eefc7ffb0d1cf7af05c3d2791656dbd6ad1e1"},
]

[package.dependencies]
certifi = ">=2017.4.17"
charset-normalizer = ">=2,<4"
idna = ">=2.5,<4"
urllib3 = ">=1.21.1,<3"

[package.extras]
socks = ["PySocks (>=1.5.6,!=1.5.7)"]
use-chardet-on-py3 = ["chardet (>=3.0.2,<6)"]

[[package]]
name = "syrupy"
version = "4.0.1"
description = "Pytest Snapshot Test Utility"
optional = false
python-versions = ">=3.8.1,<4"
files = [
{file = "syrupy-4.0.1-py3-none-any.whl", hash = "sha256:53d3107cc5e18a5def189c721879cea2cdafdee34b879f602133ca08837d0e4b"},
{file = "syrupy-4.0.1.tar.gz", hash = "sha256:60e3e94782444e0f978cd3b207de32f6da3199b15a2db32eab02f83cebb63ae8"},
]

[package.dependencies]
colored = ">=1.3.92,<2.0.0"
pytest = ">=7.0.0,<8.0.0"

[[package]]
name = "text-generation"
version = "0.6.0"
description = "Hugging Face Text Generation Python Client"
optional = false
python-versions = ">=3.7,<4.0"
files = [
{file = "text-generation-0.6.0.tar.gz", hash = "sha256:48560e7a67b9a88b38335382d357f66e23b5a75f53971ccd436fc6f696a00815"},
{file = "text_generation-0.6.0-py3-none-any.whl", hash = "sha256:42ae7f7c9ff11f3a6c9d210f94fe708fe693eede79c6776da727456da1606ef9"},
]

[package.dependencies]
aiohttp = ">=3.8,<4.0"
huggingface-hub = ">=0.12,<1.0"
pydantic = ">=1.10,<2.0"

[[package]]
name = "tomli"
version = "2.0.1"
description = "A lil' TOML parser"
optional = false
python-versions = ">=3.7"
files = [
{file = "tomli-2.0.1-py3-none-any.whl", hash = "sha256:939de3e7a6161af0c887ef91b7d41a53e7c5a1ca976325f429cb46ea9bc30ecc"},
{file = "tomli-2.0.1.tar.gz", hash = "sha256:de526c12914f0c550d15924c62d72abc48d6fe7364aa87328337a31007fe8a4f"},
]

[[package]]
name = "tqdm"
version = "4.66.1"
description = "Fast, Extensible Progress Meter"
optional = false
python-versions = ">=3.7"
files = [
{file = "tqdm-4.66.1-py3-none-any.whl", hash = "sha256:d302b3c5b53d47bce91fea46679d9c3c6508cf6332229aa1e7d8653723793386"},
{file = "tqdm-4.66.1.tar.gz", hash = "sha256:d88e651f9db8d8551a62556d3cff9e3034274ca5d66e93197cf2490e2dcb69c7"},
]

[package.dependencies]
colorama = {version = "*", markers = "platform_system == \"Windows\""}

[package.extras]
dev = ["pytest (>=6)", "pytest-cov", "pytest-timeout", "pytest-xdist"]
notebook = ["ipywidgets (>=6)"]
slack = ["slack-sdk"]
telegram = ["requests"]

[[package]]
name = "typing-extensions"
version = "4.7.1"
description = "Backported and Experimental Type Hints for Python 3.7+"
optional = false
python-versions = ">=3.7"
files = [
{file = "typing_extensions-4.7.1-py3-none-any.whl", hash = "sha256:440d5dd3af93b060174bf433bccd69b0babc3b15b1a8dca43789fd7f61514b36"},
{file = "typing_extensions-4.7.1.tar.gz", hash = "sha256:b75ddc264f0ba5615db7ba217daeb99701ad295353c45f9e95963337ceeeffb2"},
]

[[package]]
name = "urllib3"
version = "2.0.4"
description = "HTTP library with thread-safe connection pooling, file post, and more."
optional = false
python-versions = ">=3.7"
files = [
{file = "urllib3-2.0.4-py3-none-any.whl", hash = "sha256:de7df1803967d2c2a98e4b11bb7d6bd9210474c46e8a0401514e3a42a75ebde4"},
{file = "urllib3-2.0.4.tar.gz", hash = "sha256:8d22f86aae8ef5e410d4f539fde9ce6b2113a001bb4d189e0aed70642d602b11"},
]

[package.extras]
brotli = ["brotli (>=1.0.9)", "brotlicffi (>=0.8.0)"]
secure = ["certifi", "cryptography (>=1.9)", "idna (>=2.0.0)", "pyopenssl (>=17.1.0)", "urllib3-secure-extra"]
socks = ["pysocks (>=1.5.6,!=1.5.7,<2.0)"]
zstd = ["zstandard (>=0.18.0)"]

[[package]]
name = "websocket-client"
version = "1.6.2"
description = "WebSocket client for Python with low level API options"
optional = false
python-versions = ">=3.8"
files = [
{file = "websocket-client-1.6.2.tar.gz", hash = "sha256:53e95c826bf800c4c465f50093a8c4ff091c7327023b10bfaff40cf1ef170eaa"},
{file = "websocket_client-1.6.2-py3-none-any.whl", hash = "sha256:ce54f419dfae71f4bdba69ebe65bf7f0a93fe71bc009ad3a010aacc3eebad537"},
]

[package.extras]
docs = ["Sphinx (>=6.0)", "sphinx-rtd-theme (>=1.1.0)"]
optional = ["python-socks", "wsaccel"]
test = ["websockets"]

[[package]]
name = "yarl"
version = "1.9.2"
description = "Yet another URL library"
optional = false
python-versions = ">=3.7"
files = [
{file = "yarl-1.9.2-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:8c2ad583743d16ddbdf6bb14b5cd76bf43b0d0006e918809d5d4ddf7bde8dd82"},
{file = "yarl-1.9.2-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:82aa6264b36c50acfb2424ad5ca537a2060ab6de158a5bd2a72a032cc75b9eb8"},
{file = "yarl-1.9.2-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:c0c77533b5ed4bcc38e943178ccae29b9bcf48ffd1063f5821192f23a1bd27b9"},
{file = "yarl-1.9.2-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:ee4afac41415d52d53a9833ebae7e32b344be72835bbb589018c9e938045a560"},
{file = "yarl-1.9.2-cp310-cp310-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:9bf345c3a4f5ba7f766430f97f9cc1320786f19584acc7086491f45524a551ac"},
{file = "yarl-1.9.2-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:2a96c19c52ff442a808c105901d0bdfd2e28575b3d5f82e2f5fd67e20dc5f4ea"},
{file = "yarl-1.9.2-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:891c0e3ec5ec881541f6c5113d8df0315ce5440e244a716b95f2525b7b9f3608"},
{file = "yarl-1.9.2-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:c3a53ba34a636a256d767c086ceb111358876e1fb6b50dfc4d3f4951d40133d5"},
{file = "yarl-1.9.2-cp310-cp310-musllinux_1_1_aarch64.whl", hash = "sha256:566185e8ebc0898b11f8026447eacd02e46226716229cea8db37496c8cdd26e0"},
{file = "yarl-1.9.2-cp310-cp310-musllinux_1_1_i686.whl", hash = "sha256:2b0738fb871812722a0ac2154be1f049c6223b9f6f22eec352996b69775b36d4"},
{file = "yarl-1.9.2-cp310-cp310-musllinux_1_1_ppc64le.whl", hash = "sha256:32f1d071b3f362c80f1a7d322bfd7b2d11e33d2adf395cc1dd4df36c9c243095"},
{file = "yarl-1.9.2-cp310-cp310-musllinux_1_1_s390x.whl", hash = "sha256:e9fdc7ac0d42bc3ea78818557fab03af6181e076a2944f43c38684b4b6bed8e3"},
{file = "yarl-1.9.2-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:56ff08ab5df8429901ebdc5d15941b59f6253393cb5da07b4170beefcf1b2528"},
{file = "yarl-1.9.2-cp310-cp310-win32.whl", hash = "sha256:8ea48e0a2f931064469bdabca50c2f578b565fc446f302a79ba6cc0ee7f384d3"},
{file = "yarl-1.9.2-cp310-cp310-win_amd64.whl", hash = "sha256:50f33040f3836e912ed16d212f6cc1efb3231a8a60526a407aeb66c1c1956dde"},
{file = "yarl-1.9.2-cp311-cp311-macosx_10_9_universal2.whl", hash = "sha256:646d663eb2232d7909e6601f1a9107e66f9791f290a1b3dc7057818fe44fc2b6"},
{file = "yarl-1.9.2-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:aff634b15beff8902d1f918012fc2a42e0dbae6f469fce134c8a0dc51ca423bb"},
{file = "yarl-1.9.2-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:a83503934c6273806aed765035716216cc9ab4e0364f7f066227e1aaea90b8d0"},
{file = "yarl-1.9.2-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:b25322201585c69abc7b0e89e72790469f7dad90d26754717f3310bfe30331c2"},
{file = "yarl-1.9.2-cp311-cp311-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:22a94666751778629f1ec4280b08eb11815783c63f52092a5953faf73be24191"},
{file = "yarl-1.9.2-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:8ec53a0ea2a80c5cd1ab397925f94bff59222aa3cf9c6da938ce05c9ec20428d"},
{file = "yarl-1.9.2-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:159d81f22d7a43e6eabc36d7194cb53f2f15f498dbbfa8edc8a3239350f59fe7"},
{file = "yarl-1.9.2-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:832b7e711027c114d79dffb92576acd1bd2decc467dec60e1cac96912602d0e6"},
{file = "yarl-1.9.2-cp311-cp311-musllinux_1_1_aarch64.whl", hash = "sha256:95d2ecefbcf4e744ea952d073c6922e72ee650ffc79028eb1e320e732898d7e8"},
{file = "yarl-1.9.2-cp311-cp311-musllinux_1_1_i686.whl", hash = "sha256:d4e2c6d555e77b37288eaf45b8f60f0737c9efa3452c6c44626a5455aeb250b9"},
{file = "yarl-1.9.2-cp311-cp311-musllinux_1_1_ppc64le.whl", hash = "sha256:783185c75c12a017cc345015ea359cc801c3b29a2966c2655cd12b233bf5a2be"},
{file = "yarl-1.9.2-cp311-cp311-musllinux_1_1_s390x.whl", hash = "sha256:b8cc1863402472f16c600e3e93d542b7e7542a540f95c30afd472e8e549fc3f7"},
{file = "yarl-1.9.2-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:822b30a0f22e588b32d3120f6d41e4ed021806418b4c9f0bc3048b8c8cb3f92a"},
{file = "yarl-1.9.2-cp311-cp311-win32.whl", hash = "sha256:a60347f234c2212a9f0361955007fcf4033a75bf600a33c88a0a8e91af77c0e8"},
{file = "yarl-1.9.2-cp311-cp311-win_amd64.whl", hash = "sha256:be6b3fdec5c62f2a67cb3f8c6dbf56bbf3f61c0f046f84645cd1ca73532ea051"},
{file = "yarl-1.9.2-cp37-cp37m-macosx_10_9_x86_64.whl", hash = "sha256:38a3928ae37558bc1b559f67410df446d1fbfa87318b124bf5032c31e3447b74"},
{file = "yarl-1.9.2-cp37-cp37m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:ac9bb4c5ce3975aeac288cfcb5061ce60e0d14d92209e780c93954076c7c4367"},
{file = "yarl-1.9.2-cp37-cp37m-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:3da8a678ca8b96c8606bbb8bfacd99a12ad5dd288bc6f7979baddd62f71c63ef"},
{file = "yarl-1.9.2-cp37-cp37m-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:13414591ff516e04fcdee8dc051c13fd3db13b673c7a4cb1350e6b2ad9639ad3"},
{file = "yarl-1.9.2-cp37-cp37m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:bf74d08542c3a9ea97bb8f343d4fcbd4d8f91bba5ec9d5d7f792dbe727f88938"},
{file = "yarl-1.9.2-cp37-cp37m-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:6e7221580dc1db478464cfeef9b03b95c5852cc22894e418562997df0d074ccc"},
{file = "yarl-1.9.2-cp37-cp37m-musllinux_1_1_aarch64.whl", hash = "sha256:494053246b119b041960ddcd20fd76224149cfea8ed8777b687358727911dd33"},
{file = "yarl-1.9.2-cp37-cp37m-musllinux_1_1_i686.whl", hash = "sha256:52a25809fcbecfc63ac9ba0c0fb586f90837f5425edfd1ec9f3372b119585e45"},
{file = "yarl-1.9.2-cp37-cp37m-musllinux_1_1_ppc64le.whl", hash = "sha256:e65610c5792870d45d7b68c677681376fcf9cc1c289f23e8e8b39c1485384185"},
{file = "yarl-1.9.2-cp37-cp37m-musllinux_1_1_s390x.whl", hash = "sha256:1b1bba902cba32cdec51fca038fd53f8beee88b77efc373968d1ed021024cc04"},
{file = "yarl-1.9.2-cp37-cp37m-musllinux_1_1_x86_64.whl", hash = "sha256:662e6016409828ee910f5d9602a2729a8a57d74b163c89a837de3fea050c7582"},
{file = "yarl-1.9.2-cp37-cp37m-win32.whl", hash = "sha256:f364d3480bffd3aa566e886587eaca7c8c04d74f6e8933f3f2c996b7f09bee1b"},
{file = "yarl-1.9.2-cp37-cp37m-win_amd64.whl", hash = "sha256:6a5883464143ab3ae9ba68daae8e7c5c95b969462bbe42e2464d60e7e2698368"},
{file = "yarl-1.9.2-cp38-cp38-macosx_10_9_universal2.whl", hash = "sha256:5610f80cf43b6202e2c33ba3ec2ee0a2884f8f423c8f4f62906731d876ef4fac"},
{file = "yarl-1.9.2-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:b9a4e67ad7b646cd6f0938c7ebfd60e481b7410f574c560e455e938d2da8e0f4"},
{file = "yarl-1.9.2-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:83fcc480d7549ccebe9415d96d9263e2d4226798c37ebd18c930fce43dfb9574"},
{file = "yarl-1.9.2-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:5fcd436ea16fee7d4207c045b1e340020e58a2597301cfbcfdbe5abd2356c2fb"},
{file = "yarl-1.9.2-cp38-cp38-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:84e0b1599334b1e1478db01b756e55937d4614f8654311eb26012091be109d59"},
{file = "yarl-1.9.2-cp38-cp38-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:3458a24e4ea3fd8930e934c129b676c27452e4ebda80fbe47b56d8c6c7a63a9e"},
{file = "yarl-1.9.2-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:838162460b3a08987546e881a2bfa573960bb559dfa739e7800ceeec92e64417"},
{file = "yarl-1.9.2-cp38-cp38-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:f4e2d08f07a3d7d3e12549052eb5ad3eab1c349c53ac51c209a0e5991bbada78"},
{file = "yarl-1.9.2-cp38-cp38-musllinux_1_1_aarch64.whl", hash = "sha256:de119f56f3c5f0e2fb4dee508531a32b069a5f2c6e827b272d1e0ff5ac040333"},
{file = "yarl-1.9.2-cp38-cp38-musllinux_1_1_i686.whl", hash = "sha256:149ddea5abf329752ea5051b61bd6c1d979e13fbf122d3a1f9f0c8be6cb6f63c"},
{file = "yarl-1.9.2-cp38-cp38-musllinux_1_1_ppc64le.whl", hash = "sha256:674ca19cbee4a82c9f54e0d1eee28116e63bc6fd1e96c43031d11cbab8b2afd5"},
{file = "yarl-1.9.2-cp38-cp38-musllinux_1_1_s390x.whl", hash = "sha256:9b3152f2f5677b997ae6c804b73da05a39daa6a9e85a512e0e6823d81cdad7cc"},
{file = "yarl-1.9.2-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:5415d5a4b080dc9612b1b63cba008db84e908b95848369aa1da3686ae27b6d2b"},
{file = "yarl-1.9.2-cp38-cp38-win32.whl", hash = "sha256:f7a3d8146575e08c29ed1cd287068e6d02f1c7bdff8970db96683b9591b86ee7"},
{file = "yarl-1.9.2-cp38-cp38-win_amd64.whl", hash = "sha256:63c48f6cef34e6319a74c727376e95626f84ea091f92c0250a98e53e62c77c72"},
{file = "yarl-1.9.2-cp39-cp39-macosx_10_9_universal2.whl", hash = "sha256:75df5ef94c3fdc393c6b19d80e6ef1ecc9ae2f4263c09cacb178d871c02a5ba9"},
{file = "yarl-1.9.2-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:c027a6e96ef77d401d8d5a5c8d6bc478e8042f1e448272e8d9752cb0aff8b5c8"},
{file = "yarl-1.9.2-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:f3b078dbe227f79be488ffcfc7a9edb3409d018e0952cf13f15fd6512847f3f7"},
{file = "yarl-1.9.2-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:59723a029760079b7d991a401386390c4be5bfec1e7dd83e25a6a0881859e716"},
{file = "yarl-1.9.2-cp39-cp39-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:b03917871bf859a81ccb180c9a2e6c1e04d2f6a51d953e6a5cdd70c93d4e5a2a"},
{file = "yarl-1.9.2-cp39-cp39-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:c1012fa63eb6c032f3ce5d2171c267992ae0c00b9e164efe4d73db818465fac3"},
{file = "yarl-1.9.2-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:a74dcbfe780e62f4b5a062714576f16c2f3493a0394e555ab141bf0d746bb955"},
{file = "yarl-1.9.2-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:8c56986609b057b4839968ba901944af91b8e92f1725d1a2d77cbac6972b9ed1"},
{file = "yarl-1.9.2-cp39-cp39-musllinux_1_1_aarch64.whl", hash = "sha256:2c315df3293cd521033533d242d15eab26583360b58f7ee5d9565f15fee1bef4"},
{file = "yarl-1.9.2-cp39-cp39-musllinux_1_1_i686.whl", hash = "sha256:b7232f8dfbd225d57340e441d8caf8652a6acd06b389ea2d3222b8bc89cbfca6"},
{file = "yarl-1.9.2-cp39-cp39-musllinux_1_1_ppc64le.whl", hash = "sha256:53338749febd28935d55b41bf0bcc79d634881195a39f6b2f767870b72514caf"},
{file = "yarl-1.9.2-cp39-cp39-musllinux_1_1_s390x.whl", hash = "sha256:066c163aec9d3d073dc9ffe5dd3ad05069bcb03fcaab8d221290ba99f9f69ee3"},
{file = "yarl-1.9.2-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:8288d7cd28f8119b07dd49b7230d6b4562f9b61ee9a4ab02221060d21136be80"},
{file = "yarl-1.9.2-cp39-cp39-win32.whl", hash = "sha256:b124e2a6d223b65ba8768d5706d103280914d61f5cae3afbc50fc3dfcc016623"},
{file = "yarl-1.9.2-cp39-cp39-win_amd64.whl", hash = "sha256:61016e7d582bc46a5378ffdd02cd0314fb8ba52f40f9cf4d9a5e7dbef88dee18"},
{file = "yarl-1.9.2.tar.gz", hash = "sha256:04ab9d4b9f587c06d801c2abfe9317b77cdf996c65a90d5e84ecc45010823571"},
]

[package.dependencies]
idna = ">=2.0"
multidict = ">=4.0"

[metadata]
lock-version = "2.0"
python-versions = ">=3.9,<3.13"
content-hash = "bdad1d22d29138010cd6b11e1b92dc0630b35634422413a8456dc85a15bee05e"
| 0 |
hf_public_repos/text-generation-inference | hf_public_repos/text-generation-inference/integration-tests/requirements.txt | aiohttp==3.8.5 ; python_version >= "3.9" and python_version < "3.13"
aiosignal==1.3.1 ; python_version >= "3.9" and python_version < "3.13"
async-timeout==4.0.3 ; python_version >= "3.9" and python_version < "3.13"
attrs==23.1.0 ; python_version >= "3.9" and python_version < "3.13"
certifi==2023.7.22 ; python_version >= "3.9" and python_version < "3.13"
charset-normalizer==3.2.0 ; python_version >= "3.9" and python_version < "3.13"
colorama==0.4.6 ; python_version >= "3.9" and python_version < "3.13" and (sys_platform == "win32" or platform_system == "Windows")
colored==1.4.4 ; python_version >= "3.9" and python_version < "3.13"
docker==6.1.3 ; python_version >= "3.9" and python_version < "3.13"
exceptiongroup==1.1.3 ; python_version >= "3.9" and python_version < "3.11"
filelock==3.12.3 ; python_version >= "3.9" and python_version < "3.13"
frozenlist==1.4.0 ; python_version >= "3.9" and python_version < "3.13"
fsspec==2023.6.0 ; python_version >= "3.9" and python_version < "3.13"
huggingface-hub==0.16.4 ; python_version >= "3.9" and python_version < "3.13"
idna==3.4 ; python_version >= "3.9" and python_version < "3.13"
iniconfig==2.0.0 ; python_version >= "3.9" and python_version < "3.13"
multidict==6.0.4 ; python_version >= "3.9" and python_version < "3.13"
packaging==23.1 ; python_version >= "3.9" and python_version < "3.13"
pluggy==1.3.0 ; python_version >= "3.9" and python_version < "3.13"
pydantic==1.10.12 ; python_version >= "3.9" and python_version < "3.13"
pytest-asyncio==0.21.1 ; python_version >= "3.9" and python_version < "3.13"
pytest==7.4.0 ; python_version >= "3.9" and python_version < "3.13"
pywin32==306 ; python_version >= "3.9" and python_version < "3.13" and sys_platform == "win32"
pyyaml==6.0.1 ; python_version >= "3.9" and python_version < "3.13"
requests==2.31.0 ; python_version >= "3.9" and python_version < "3.13"
syrupy==4.0.1 ; python_version >= "3.9" and python_version < "3.13"
text-generation==0.6.0 ; python_version >= "3.9" and python_version < "3.13"
tomli==2.0.1 ; python_version >= "3.9" and python_version < "3.11"
tqdm==4.66.1 ; python_version >= "3.9" and python_version < "3.13"
typing-extensions==4.7.1 ; python_version >= "3.9" and python_version < "3.13"
urllib3==2.0.4 ; python_version >= "3.9" and python_version < "3.13"
websocket-client==1.6.2 ; python_version >= "3.9" and python_version < "3.13"
yarl==1.9.2 ; python_version >= "3.9" and python_version < "3.13"
| 0 |
hf_public_repos/text-generation-inference | hf_public_repos/text-generation-inference/integration-tests/pytest.ini | [pytest]
addopts = --snapshot-warn-unused
asyncio_mode = auto
markers =
private: marks tests as requiring an admin hf token (deselect with '-m "not private"') | 0 |