Naksh786 committed on
Commit
09a9211
•
1 Parent(s): 142a702

Update app.py

Files changed (1)
  1. app.py +190 -14
app.py CHANGED
@@ -1,20 +1,196 @@
  import torch
- from transformers import SpeechT5Processor, SpeechT5ForTextToSpeech
- import soundfile as sf

- # Initialize the model and processor from Hugging Face
- processor = SpeechT5Processor.from_pretrained("microsoft/speecht5_tts")
- model = SpeechT5ForTextToSpeech.from_pretrained("microsoft/speecht5_tts")

- def text_to_speech(text):
-     # Process the input text into tokens
-     inputs = processor(text, return_tensors="pt")

-     # Generate speech
-     with torch.no_grad():
-         speech = model.generate_speech(inputs.input_ids)

-     # Save the generated speech as a WAV file
-     sf.write('output.wav', speech.squeeze().cpu().numpy(), 16000)

-     return "output.wav"
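The removed helper calls generate_speech() without a speaker embedding or a vocoder, so as written it returns a mel spectrogram rather than a playable waveform. For reference, a minimal sketch of the documented SpeechT5 pipeline, assuming the microsoft/speecht5_hifigan vocoder and a speaker x-vector from the Matthijs/cmu-arctic-xvectors dataset; the input text, x-vector index, and output path are illustrative:

import torch
import soundfile as sf
from datasets import load_dataset
from transformers import SpeechT5Processor, SpeechT5ForTextToSpeech, SpeechT5HifiGan

processor = SpeechT5Processor.from_pretrained("microsoft/speecht5_tts")
model = SpeechT5ForTextToSpeech.from_pretrained("microsoft/speecht5_tts")
vocoder = SpeechT5HifiGan.from_pretrained("microsoft/speecht5_hifigan")

# Speaker x-vector taken from the CMU ARCTIC embeddings dataset (index chosen arbitrarily).
embeddings = load_dataset("Matthijs/cmu-arctic-xvectors", split="validation")
speaker_embedding = torch.tensor(embeddings[7306]["xvector"]).unsqueeze(0)

inputs = processor(text="Hello, this is a test.", return_tensors="pt")
with torch.no_grad():
    # Passing the vocoder makes generate_speech return a 16 kHz waveform directly.
    speech = model.generate_speech(inputs["input_ids"], speaker_embedding, vocoder=vocoder)
sf.write("output.wav", speech.numpy(), samplerate=16000)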
 
 
+ import spaces
+ import gradio as gr
  import torch
+ from transformers.models.speecht5.number_normalizer import EnglishNumberNormalizer
+ from string import punctuation
+ import re


+ from parler_tts import ParlerTTSForConditionalGeneration
+ from transformers import AutoTokenizer, AutoFeatureExtractor, set_seed
+
+ device = "cuda:0" if torch.cuda.is_available() else "cpu"
+
+
+ repo_id = "parler-tts/parler-tts-mini-v1"
+ repo_id_large = "parler-tts/parler-tts-large-v1"
+
+ model = ParlerTTSForConditionalGeneration.from_pretrained(repo_id).to(device)
+ model_large = ParlerTTSForConditionalGeneration.from_pretrained(repo_id_large).to(device)
+ tokenizer = AutoTokenizer.from_pretrained(repo_id)
+ feature_extractor = AutoFeatureExtractor.from_pretrained(repo_id)
+
+
+ SAMPLE_RATE = feature_extractor.sampling_rate
+ SEED = 42
+
+ default_text = "All of the data, pre-processing, training code, and weights are released publicly under a permissive license, enabling the community to build on our work and develop their own powerful models."
+ default_description = "Laura's voice is monotone yet slightly fast in delivery, with a very close recording that almost has no background noise."
+ examples = [
+     [
+         "This version introduces speaker consistency across generations, characterized by their name. For example, Jon, Lea, Gary, Jenna, Mike and Laura.",
+         "Gary's voice is monotone yet slightly fast in delivery, with a very close recording that has no background noise.",
+         None,
+     ],
+     [
+         '''There's 34 speakers. To take advantage of this, simply adapt your text description to specify which speaker to use: "Mike speaks animatedly...".''',
+         "Gary speaks slightly animatedly and slightly slowly in delivery, with a very close recording that has no background noise.",
+         None
+     ],
+     [
+         "'This is the best time of my life, Bartley,' she said happily.",
+         "A female speaker delivers a slightly expressive and animated speech with a moderate speed. The recording features a low-pitch voice and slight background noise, creating a close-sounding audio experience.",
+         None,
+     ],
+     [
+         "Montrose also, after having experienced still more variety of good and bad fortune, threw down his arms, and retired out of the kingdom.",
+         "A man voice speaks slightly slowly with very noisy background, carrying a low-pitch tone and displaying a touch of expressiveness and animation. The sound is very distant, adding an air of intrigue.",
+         None
+     ],
+     [
+         "Once upon a time, in the depth of winter, when the flakes of snow fell like feathers from the clouds, a queen sat sewing at her pal-ace window, which had a carved frame of black wood.",
+         "In a very poor recording quality, a female speaker delivers her slightly expressive and animated words with a fast pace. There's high level of background noise and a very distant-sounding reverberation. Her voice is slightly higher pitched than average.",
+         None,
+     ],
+ ]
+
+ number_normalizer = EnglishNumberNormalizer()
+
+ def preprocess(text):
+     text = number_normalizer(text).strip()
+     text = text.replace("-", " ")
+     if text[-1] not in punctuation:
+         text = f"{text}."
+
+     abbreviations_pattern = r'\b[A-Z][A-Z\.]+\b'

+     def separate_abb(chunk):
+         chunk = chunk.replace(".","")
+         print(chunk)
+         return " ".join(chunk)

+     abbreviations = re.findall(abbreviations_pattern, text)
+     for abv in abbreviations:
+         if abv in text:
+             text = text.replace(abv, separate_abb(abv))
+     return text
+
+ @spaces.GPU
+ def gen_tts(text, description, use_large=False):
+     inputs = tokenizer(description.strip(), return_tensors="pt").to(device)
+     prompt = tokenizer(preprocess(text), return_tensors="pt").to(device)
+
+     set_seed(SEED)
+     if use_large:
+         generation = model_large.generate(
+             input_ids=inputs.input_ids, prompt_input_ids=prompt.input_ids, attention_mask=inputs.attention_mask, prompt_attention_mask=prompt.attention_mask, do_sample=True, temperature=1.0
+         )
+     else:
+         generation = model.generate(
+             input_ids=inputs.input_ids, prompt_input_ids=prompt.input_ids, attention_mask=inputs.attention_mask, prompt_attention_mask=prompt.attention_mask, do_sample=True, temperature=1.0
+         )
+     audio_arr = generation.cpu().numpy().squeeze()
+
+     return SAMPLE_RATE, audio_arr
+
+
+ css = """
+         #share-btn-container {
+             display: flex;
+             padding-left: 0.5rem !important;
+             padding-right: 0.5rem !important;
+             background-color: #000000;
+             justify-content: center;
+             align-items: center;
+             border-radius: 9999px !important;
+             width: 13rem;
+             margin-top: 10px;
+             margin-left: auto;
+             flex: unset !important;
+         }
+         #share-btn {
+             all: initial;
+             color: #ffffff;
+             font-weight: 600;
+             cursor: pointer;
+             font-family: 'IBM Plex Sans', sans-serif;
+             margin-left: 0.5rem !important;
+             padding-top: 0.25rem !important;
+             padding-bottom: 0.25rem !important;
+             right:0;
+         }
+         #share-btn * {
+             all: unset !important;
+         }
+         #share-btn-container div:nth-child(-n+2){
+             width: auto !important;
+             min-height: 0px !important;
+         }
+         #share-btn-container .wrap {
+             display: none !important;
+         }
+ """
+ with gr.Blocks(css=css) as block:
+     gr.HTML(
+         """
+         <div style="text-align: center; max-width: 700px; margin: 0 auto;">
+           <div
+             style="
+               display: inline-flex; align-items: center; gap: 0.8rem; font-size: 1.75rem;
+             "
+           >
+             <h1 style="font-weight: 900; margin-bottom: 7px; line-height: normal;">
+               Parler-TTS 🗣️
+             </h1>
+           </div>
+         </div>
+         """
+     )
+     gr.HTML(
+         f"""
+         <p><a href="https://github.com/huggingface/parler-tts"> Parler-TTS</a> is a training and inference library for
+         high-fidelity text-to-speech (TTS) models.</p>
+         <p>The models demonstrated here, Parler-TTS <a href="https://huggingface.co/parler-tts/parler-tts-mini-v1">Mini v1</a> and <a href="https://huggingface.co/parler-tts/parler-tts-large-v1">Large v1</a>,
+         are trained using 45k hours of narrated English audiobooks. They generate high-quality speech
+         with features that can be controlled using a simple text prompt (e.g. gender, background noise, speaking rate, pitch and reverberation).</p>
+
+         <p>By default, Parler-TTS generates a 🎲 random voice. To ensure 🎯 <b>speaker consistency</b> across generations, these checkpoints were also trained on 34 speakers, characterized by name (e.g. Jon, Lea, Gary, Jenna, Mike, Laura). Learn more about this <a href="https://github.com/huggingface/parler-tts/blob/main/INFERENCE.md#speaker-consistency"> here</a>.</p>
+
+         <p>To take advantage of this, simply adapt your text description to specify which speaker to use: `Jon's voice is monotone...`</p>
+         """
+     )
+     with gr.Row():
+         with gr.Column():
+             input_text = gr.Textbox(label="Input Text", lines=2, value=default_text, elem_id="input_text")
+             description = gr.Textbox(label="Description", lines=2, value=default_description, elem_id="input_description")
+             use_large = gr.Checkbox(value=False, label="Use Large checkpoint", info="Generate with Parler-TTS Large v1 instead of Mini v1 - better, but much slower.")
+             run_button = gr.Button("Generate Audio", variant="primary")
+         with gr.Column():
+             audio_out = gr.Audio(label="Parler-TTS generation", type="numpy", elem_id="audio_out")
+
+     inputs = [input_text, description, use_large]
+     outputs = [audio_out]
+     run_button.click(fn=gen_tts, inputs=inputs, outputs=outputs, queue=True)
+     gr.Examples(examples=examples, fn=gen_tts, inputs=inputs, outputs=outputs, cache_examples=True)
+     gr.HTML(
+         """
+         <p>Tips for ensuring good generation:
+         <ul>
+             <li>Include the term "very clear audio" to generate the highest quality audio, and "very noisy audio" for high levels of background noise</li>
+             <li>Punctuation can be used to control the prosody of the generations, e.g. use commas to add small breaks in speech</li>
+             <li>The remaining speech features (gender, speaking rate, pitch and reverberation) can be controlled directly through the prompt</li>
+         </ul>
+         </p>
+
+         <p>Parler-TTS can be much faster. We give some tips on how to generate much more quickly in this <a href="https://github.com/huggingface/parler-tts/blob/main/INFERENCE.md"> inference guide</a>. Think SDPA, torch.compile, batching and streaming!</p>
+
+         <p>If you want to find out more about how this model was trained and even fine-tune it yourself, check out the
+         <a href="https://github.com/huggingface/parler-tts"> Parler-TTS</a> repository on GitHub.</p>
+
+         <p>The Parler-TTS codebase and its associated checkpoints are licensed under <a href='https://github.com/huggingface/parler-tts?tab=Apache-2.0-1-ov-file#readme'> Apache 2.0</a>.</p>
+         """
+     )
+

+ block.queue()
+ block.launch(share=True)
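The new app wraps generation in Gradio and Spaces ZeroGPU. For quick local testing outside the UI, a minimal sketch that mirrors the gen_tts() path with the Mini v1 checkpoint, assuming the parler_tts and soundfile packages are installed; the prompt, description, and output filename are illustrative:

import torch
import soundfile as sf
from parler_tts import ParlerTTSForConditionalGeneration
from transformers import AutoTokenizer, set_seed

device = "cuda:0" if torch.cuda.is_available() else "cpu"
repo_id = "parler-tts/parler-tts-mini-v1"

model = ParlerTTSForConditionalGeneration.from_pretrained(repo_id).to(device)
tokenizer = AutoTokenizer.from_pretrained(repo_id)

prompt = "Hey, how are you doing today?"
description = "Laura's voice is monotone yet slightly fast in delivery, with a very close recording that almost has no background noise."

# The description conditions the voice; the prompt is the text to be spoken.
input_ids = tokenizer(description, return_tensors="pt").input_ids.to(device)
prompt_input_ids = tokenizer(prompt, return_tensors="pt").input_ids.to(device)

set_seed(42)
generation = model.generate(input_ids=input_ids, prompt_input_ids=prompt_input_ids)
audio_arr = generation.cpu().numpy().squeeze()
sf.write("parler_tts_out.wav", audio_arr, model.config.sampling_rate)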