Lenylvt committed on
Commit f537405
1 Parent(s): 0121f2d

Use torch to determine whether Whisper uses GPU or CPU

Files changed (1): app.py (+3 -2)
app.py CHANGED
@@ -6,6 +6,7 @@ import pysrt
 import pandas as pd
 from transformers import MarianMTModel, MarianTokenizer
 import ffmpeg
+import torch
 
 # Initial configuration and data loading
 url = "https://huggingface.co/Lenylvt/LanguageISO/resolve/main/iso.md"
@@ -77,8 +78,8 @@ def translate_srt(input_file_path, source_language_code, target_language_code, p
 
 # Function to transcribe a video's audio to text
 def transcribe(audio_file_path, model_size="base"):
-    device = "cpu"
-    compute_type = "int8"
+    device = "cuda" if torch.cuda.is_available() else "cpu"
+    compute_type = "float16" if device == "cuda" else "int8"
     model = WhisperModel(model_size, device=device, compute_type=compute_type)
    segments, _ = model.transcribe(audio_file_path)
    transcription_with_timestamps = [
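For reference, a minimal sketch of the transcribe function after this change, assuming faster-whisper's WhisperModel API; the example call and file name at the end are illustrative, not part of the commit:

import torch
from faster_whisper import WhisperModel

def transcribe(audio_file_path, model_size="base"):
    # Run on the GPU with float16 when CUDA is available, otherwise fall back to int8 on the CPU.
    device = "cuda" if torch.cuda.is_available() else "cpu"
    compute_type = "float16" if device == "cuda" else "int8"
    model = WhisperModel(model_size, device=device, compute_type=compute_type)
    segments, _ = model.transcribe(audio_file_path)
    # Collect (start, end, text) for each transcribed segment.
    return [(segment.start, segment.end, segment.text) for segment in segments]

# Illustrative usage:
# transcribe("video_audio.wav", model_size="base")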