dynamicmortal committed
Commit c6d8763
1 Parent(s): 66a26ce

Update app.py

Files changed (1)
  1. app.py +46 -39
app.py CHANGED
@@ -1,45 +1,52 @@
- from flask import Flask, render_template, jsonify

  app = Flask(__name__)

- # Replace with your AssemblyAI API key
- ASSEMBLYAI_API_KEY = "67883cd71f0d4a58a27a34e058f0d924"
-
- # URL of the file to transcribe
- FILE_URL = "/content/call.mp3"
-
- # You can also transcribe a local file by passing in a file path
- # FILE_URL = './path/to/file.mp3'
-
- @app.route('/analyze', methods=['GET'])
- def analyze():
-     # Transcribe audio to text
-     transcriber = aai.Transcriber(api_key=ASSEMBLYAI_API_KEY)
-     transcript = transcriber.transcribe(FILE_URL)
-     text = transcript.text
-
-     # Perform sentiment analysis
-     sentiment_analyzer = pipeline("sentiment-analysis")
-     sentiment = sentiment_analyzer(text)
-
-     # Perform emotion analysis
-     emotion_analyzer = pipeline("text-classification", model="j-hartmann/emotion-english-distilroberta-base", return_all_scores=True)
-     emotions = emotion_analyzer(text)
-
-     # Format results
-     result = {
-         "transcript": text,
-         "sentiment": {
-             "score": sentiment[0]['score'],
-             "label": sentiment[0]['label']
-         },
-         "emotion": [{
-             "label": emotion['label'],
-             "score": emotion['score']
-         } for emotion in emotions[0]]
-     }
-
-     return render_template('analyze.html', result=result)

  if __name__ == '__main__':
      app.run(debug=True)
 
+ from flask import Flask, render_template, request, jsonify
+ import torch
+ from transformers import pipeline
+ import gradio as gr

  app = Flask(__name__)

+ # Load the automatic speech recognition model
+ pipe = pipeline("automatic-speech-recognition",
+                 "openai/whisper-large-v3",
+                 torch_dtype=torch.float16,
+                 device="cuda:0")
+
+ # Load the emotion classification model
+ emotion_classifier = pipeline(
+     "text-classification",
+     model="j-hartmann/emotion-english-distilroberta-base",
+     return_all_scores=True
+ )
+
+ def transcribe(audio_file, task):
+     if audio_file is None:
+         return "Please upload or record an audio file."
+
+     # Check if the audio file is in bytes format (drag-and-drop file)
+     if isinstance(audio_file, bytes):
+         text = pipe(audio_file, generate_kwargs={"task": task}, return_timestamps=True)["text"]
+     else:
+         # Handle the case where the file is uploaded using the file uploader
+         text = pipe(audio_file.name, generate_kwargs={"task": task}, return_timestamps=True)["text"]
+
+     return text
+
+ @app.route('/')
+ def index():
+     return render_template('index.html')
+
+ @app.route('/transcribe', methods=['POST'])
+ def transcribe_endpoint():
+     audio_file = request.files.get('audio_file')
+     task = request.form.get('task')
+     text = transcribe(audio_file, task)
+     return jsonify({'text': text})
+
+ @app.route('/classify_emotion', methods=['POST'])
+ def classify_emotion_endpoint():
+     text = request.form.get('text')
+     result = emotion_classifier(text)
+     return jsonify(result)

  if __name__ == '__main__':
      app.run(debug=True)
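
A minimal sketch of how the endpoints added in this commit could be exercised from a client, assuming the Flask dev server is running at its default address http://127.0.0.1:5000 and that a local file named sample.wav exists (both are assumptions, not part of the commit):

import requests

BASE_URL = "http://127.0.0.1:5000"  # Flask's default dev address (assumption)

# POST an audio file plus the Whisper task ("transcribe" or "translate")
# to the /transcribe endpoint; field names match the commit above.
with open("sample.wav", "rb") as f:  # sample.wav is a placeholder file name
    resp = requests.post(
        f"{BASE_URL}/transcribe",
        files={"audio_file": f},
        data={"task": "transcribe"},
    )
text = resp.json()["text"]

# Send the transcript to /classify_emotion to get per-label emotion scores.
resp = requests.post(f"{BASE_URL}/classify_emotion", data={"text": text})
print(resp.json())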