Lum4yx committed
Commit 24006f5 · verified · Parent: b08b4ee

Update app.py

Files changed (1): app.py (+2, -2)
app.py CHANGED
@@ -1,6 +1,6 @@
 import gradio as gr
 from textblob import TextBlob
-from transformers import AutoModelForSpeechSeqSeq, AutoProcessor, pipeline
+from transformers import AutoModelForSpeechSeq2Seq, AutoProcessor, pipeline
 import torch
 import base64
 import numpy as np
@@ -16,7 +16,7 @@ torch_dtype = torch.float16 if torch.cuda.is_available() else torch.float32
 model_id = "openai/whisper-small"
 
 # 3. Load the model from pretrained weights
-model = AutoModelForSpeechSeqSeq.from_pretrained(
+model = AutoModelForSpeechSeq2Seq.from_pretrained(
     model_id, torch_dtype=torch_dtype, low_cpu_mem_usage=True, use_safetensors=True
 )
 model.to(device)
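
For context: `AutoModelForSpeechSeqSeq` does not exist in transformers, so the original import would fail; `AutoModelForSpeechSeq2Seq` is the correct class for Whisper-style encoder-decoder speech models. Below is a minimal sketch of how the corrected class is typically used to build a transcription pipeline. Only the import and `from_pretrained` lines appear in this diff, so the processor and pipeline construction are assumptions modeled on the standard transformers Whisper usage, not the actual contents of app.py.

```python
import torch
from transformers import AutoModelForSpeechSeq2Seq, AutoProcessor, pipeline

device = "cuda:0" if torch.cuda.is_available() else "cpu"
torch_dtype = torch.float16 if torch.cuda.is_available() else torch.float32
model_id = "openai/whisper-small"

# Load the seq2seq speech model (note the "2" in Seq2Seq, which is what this
# commit fixes) and its matching processor.
model = AutoModelForSpeechSeq2Seq.from_pretrained(
    model_id, torch_dtype=torch_dtype, low_cpu_mem_usage=True, use_safetensors=True
)
model.to(device)
processor = AutoProcessor.from_pretrained(model_id)

# Assumed usage: wrap model + processor in an ASR pipeline; app.py's actual
# pipeline arguments may differ.
asr = pipeline(
    "automatic-speech-recognition",
    model=model,
    tokenizer=processor.tokenizer,
    feature_extractor=processor.feature_extractor,
    torch_dtype=torch_dtype,
    device=device,
)

# Example call on a local audio file (hypothetical path):
# result = asr("sample.wav")
# print(result["text"])
```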