Spaces: Running on Zero

SunderAli17 committed
Commit • 6802c18
Parent(s): 6caf646

Create text_utils.py

Files changed: utils/text_utils.py (+76, -0)
utils/text_utils.py (ADDED)
@@ -0,0 +1,76 @@
import torch


def tokenize_prompt(tokenizer, prompt):
    text_inputs = tokenizer(
        prompt,
        padding="max_length",
        max_length=tokenizer.model_max_length,
        truncation=True,
        return_tensors="pt",
    )
    text_input_ids = text_inputs.input_ids
    return text_input_ids
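For context, `tokenize_prompt` is a thin wrapper that pads and truncates every prompt to the tokenizer's fixed context length so batches are uniform. A minimal usage sketch; the checkpoint name below is an illustrative assumption, not part of this commit:

# Hypothetical usage; the checkpoint name is an assumption for illustration.
from transformers import CLIPTokenizer

clip_tokenizer = CLIPTokenizer.from_pretrained("openai/clip-vit-large-patch14")
ids = tokenize_prompt(clip_tokenizer, "a photo of a cat")
print(ids.shape)  # torch.Size([1, 77]); CLIP pads/truncates to its 77-token context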

# Adapted from pipelines.StableDiffusionXLPipeline.encode_prompt
def encode_prompt(text_encoders, tokenizers, prompt, text_input_ids_list=None):
    prompt_embeds_list = []

    for i, text_encoder in enumerate(text_encoders):
        if tokenizers is not None:
            tokenizer = tokenizers[i]
            text_input_ids = tokenize_prompt(tokenizer, prompt)
        else:
            assert text_input_ids_list is not None
            text_input_ids = text_input_ids_list[i]

        prompt_embeds = text_encoder(
            text_input_ids.to(text_encoder.device),
            output_hidden_states=True,
        )

        # We are only interested in the pooled output of the final text encoder;
        # this is overwritten each iteration, so the last encoder's value wins.
        pooled_prompt_embeds = prompt_embeds[0]
        # Use the penultimate hidden state, following the SDXL convention.
        prompt_embeds = prompt_embeds.hidden_states[-2]
        bs_embed, seq_len, _ = prompt_embeds.shape
        prompt_embeds = prompt_embeds.view(bs_embed, seq_len, -1)
        prompt_embeds_list.append(prompt_embeds)

    # Concatenate the per-encoder embeddings along the channel dimension.
    prompt_embeds = torch.concat(prompt_embeds_list, dim=-1)
    pooled_prompt_embeds = pooled_prompt_embeds.view(bs_embed, -1)
    return prompt_embeds, pooled_prompt_embeds
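`encode_prompt` mirrors the SDXL setup: each encoder contributes its penultimate hidden state, the per-encoder embeddings are concatenated channel-wise, and the pooled vector comes from the last encoder. A minimal sketch of calling it with SDXL's two text encoders; the model id and subfolders follow the stock SDXL repository layout and are assumptions here, not part of this commit:

# Hypothetical usage; model id and subfolders follow the stock SDXL layout.
from transformers import CLIPTextModel, CLIPTextModelWithProjection, CLIPTokenizer

base = "stabilityai/stable-diffusion-xl-base-1.0"
tokenizers = [
    CLIPTokenizer.from_pretrained(base, subfolder="tokenizer"),
    CLIPTokenizer.from_pretrained(base, subfolder="tokenizer_2"),
]
text_encoders = [
    CLIPTextModel.from_pretrained(base, subfolder="text_encoder"),
    CLIPTextModelWithProjection.from_pretrained(base, subfolder="text_encoder_2"),
]

prompt_embeds, pooled_prompt_embeds = encode_prompt(text_encoders, tokenizers, "a photo of a cat")
# prompt_embeds: [1, 77, 2048] (768 + 1280 channels); pooled_prompt_embeds: [1, 1280]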

def add_tokens(tokenizers, tokens, text_encoders):
    new_token_indices = {}
    for idx, tokenizer in enumerate(tokenizers):
        for token in tokens:
            num_added_tokens = tokenizer.add_tokens(token)
            if num_added_tokens == 0:
                raise ValueError(
                    f"The tokenizer already contains the token {token}. Please pass a different"
                    " `placeholder_token` that is not already in the tokenizer."
                )

            new_token_indices[f"{idx}_{token}"] = num_added_tokens
        # Resize the embedding layers to avoid a crash; the new rows are never actually used.
        text_encoders[idx].resize_token_embeddings(len(tokenizer), pad_to_multiple_of=128)

    return new_token_indices


def patch_embedding_forward(embedding_layer, new_tokens, new_embeddings):

    def new_forward(input):
        # Standard embedding lookup, identical to the layer's original forward.
        embedded_text = torch.nn.functional.embedding(
            input, embedding_layer.weight, embedding_layer.padding_idx, embedding_layer.max_norm,
            embedding_layer.norm_type, embedding_layer.scale_grad_by_freq, embedding_layer.sparse)

        # Wherever the input contains the new token id(s), substitute the supplied embeddings.
        replace_indices = (input == new_tokens)

        if torch.count_nonzero(replace_indices) > 0:
            embedded_text[replace_indices] = new_embeddings

        return embedded_text

    # Monkey-patch the layer so every lookup routes through the replacement logic.
    embedding_layer.forward = new_forward
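`add_tokens` and `patch_embedding_forward` work as a pair: the first registers placeholder tokens (resizing the embedding tables only so lookups do not index out of range), and the second patches the embedding layer so those token ids resolve to externally supplied vectors rather than the resized table's unused rows. A minimal sketch, reusing the `tokenizers`/`text_encoders` lists from the previous example; the placeholder string and the random stand-in vector are assumptions for illustration:

# Hypothetical usage; placeholder name and random vector are illustrative stand-ins.
placeholder = "<new-concept>"
add_tokens(tokenizers, [placeholder], text_encoders)

for tokenizer, text_encoder in zip(tokenizers, text_encoders):
    token_id = tokenizer.convert_tokens_to_ids(placeholder)
    embedding_layer = text_encoder.get_input_embeddings()
    learned_vector = torch.randn(embedding_layer.weight.shape[1])  # stand-in for a trained embedding
    patch_embedding_forward(embedding_layer, token_id, learned_vector)

# Subsequent calls to encode_prompt(...) now embed "<new-concept>" with learned_vector.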