Chris Bracegirdle committed on
Commit
a370880
1 Parent(s): ac5e313

Another go

Files changed (1): app.py (+3, -2)
app.py CHANGED
@@ -15,14 +15,15 @@ YT_LENGTH_LIMIT_S = 3600 # limit to 1 hour YouTube files
 from transformers import AutoTokenizer
 
 tokenizer = AutoTokenizer.from_pretrained("openai/whisper-large-v3")
-assert tokenizer.is_fast
-tokenizer.save_pretrained("...")
+# assert tokenizer.is_fast
+# tokenizer.save_pretrained("...")
 
 
 device = 0 if torch.cuda.is_available() else "cpu"
 
 pipe = pipeline(
     task="automatic-speech-recognition",
+    tokenizer=tokenizer,
     model=MODEL_NAME,
     chunk_length_s=30,
     device=device,
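
For context, here is a minimal sketch of how the relevant section of app.py reads after this commit: the tokenizer is loaded explicitly and handed to the pipeline rather than being resolved from the model name alone. The MODEL_NAME value shown is an assumption (it is defined elsewhere in app.py), and the commented-out assert/save_pretrained lines are omitted.

# Sketch of the transcription pipeline setup after this commit (assumptions noted).
import torch
from transformers import AutoTokenizer, pipeline

MODEL_NAME = "openai/whisper-large-v3"  # assumed; defined earlier in app.py

# Load the tokenizer explicitly so it can be passed to the pipeline.
tokenizer = AutoTokenizer.from_pretrained("openai/whisper-large-v3")

device = 0 if torch.cuda.is_available() else "cpu"

pipe = pipeline(
    task="automatic-speech-recognition",
    tokenizer=tokenizer,   # the explicitly loaded tokenizer (the change in this commit)
    model=MODEL_NAME,
    chunk_length_s=30,     # process audio in 30-second chunks
    device=device,
)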