mrfakename committed
Commit 7924f09
1 Parent(s): 6f4d564
Update app.py
app.py
CHANGED
@@ -20,9 +20,14 @@ from transformers import pipeline
 import spaces
 import librosa
 from txtsplit import txtsplit
+from detoxify import Detoxify
+
 
 device = "cuda" if torch.cuda.is_available() else "mps" if torch.backends.mps.is_available() else "cpu"
 
+model = Detoxify('original', device=device)
+
+
 pipe = pipeline(
     "automatic-speech-recognition",
     model="openai/whisper-large-v3-turbo",
@@ -80,6 +85,9 @@ E2TTS_ema_model, E2TTS_base_model = load_model("E2TTS_Base", UNetT, E2TTS_model_
 @spaces.GPU
 def infer(ref_audio_orig, ref_text, gen_text, exp_name, remove_silence, progress = gr.Progress()):
     print(gen_text)
+    if model.predict(gen_text)['toxicity'] > 0.8:
+        print("Flagged for toxicity:", gen_text)
+        raise gr.Error("Your text was flagged for toxicity, please try again with a different text.")
     gr.Info("Converting audio...")
     with tempfile.NamedTemporaryFile(delete=False, suffix=".wav") as f:
         aseg = AudioSegment.from_file(ref_audio_orig)