import gradio as gr
from transformers import pipeline, AutoModelForSequenceClassification, AutoTokenizer
import torch

MODEL_NAME = "quocviethere/imdb-roberta"

# Load the fine-tuned model and tokenizer, then wrap them in a sentiment-analysis pipeline.
try:
    tokenizer = AutoTokenizer.from_pretrained(MODEL_NAME)
    model = AutoModelForSequenceClassification.from_pretrained(MODEL_NAME)
    sentiment_pipeline = pipeline("sentiment-analysis", model=model, tokenizer=tokenizer)

    # id2label maps class indices to the label strings the pipeline will return.
    label_mapping = model.config.id2label
    print(f"Model label mapping: {label_mapping}")
except Exception as e:
    print(f"Error loading model: {e}")
    raise
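
# Optional sanity check (not part of the original script): print one raw pipeline
# result so the label strings and score format are visible at startup. The pipeline
# is expected to return a list of dicts such as [{'label': 'POSITIVE', 'score': 0.99}],
# with the exact label names depending on the checkpoint's id2label mapping above.
# print(sentiment_pipeline("A delightful surprise from start to finish.")[0])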


def analyze_sentiment(text):
    try:
        # Run the review through the pipeline; it returns a list with one result dict.
        result = sentiment_pipeline(text)[0]
        label = result['label']
        score = result['score']

        # Map the model's label to a human-readable sentiment. Depending on how the
        # checkpoint was saved, the pipeline may emit "POSITIVE"/"NEGATIVE" or the
        # generic "LABEL_1"/"LABEL_0" names.
        if label in label_mapping.values():
            sentiment = "Positive 😊" if label in ("POSITIVE", "LABEL_1") else "Negative 😞"
        else:
            sentiment = label
            print(f"Unexpected label received: {label}")

        confidence = f"Confidence: {round(score * 100, 2)}%"
        return sentiment, confidence
    except Exception as e:
        print(f"Error during sentiment analysis: {e}")
        return "Error", "Could not process the input."


# Build the Gradio UI: one text input, two text outputs (sentiment and confidence).
iface = gr.Interface(
    fn=analyze_sentiment,
    inputs=gr.Textbox(
        lines=5,
        placeholder="Enter a movie review here...",
        label="Movie Review"
    ),
    outputs=[
        gr.Textbox(label="Sentiment"),
        gr.Textbox(label="Confidence")
    ],
    title="IMDb Sentiment Analysis with RoBERTa",
    description="Analyze the sentiment of movie reviews using a fine-tuned RoBERTa model.",
    examples=[
        ["I loved the cinematography and the story was captivating."],
        ["The movie was a complete waste of time. Poor acting and boring plot."]
    ],
    theme="default"
)

iface.launch()
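
# launch() also accepts deployment options such as share=True (temporary public URL),
# server_name="0.0.0.0" and server_port=7860 (useful in containers); the call above
# keeps Gradio's defaults. Alternative to the line above, not part of the original
# script:
# iface.launch(share=True, server_name="0.0.0.0", server_port=7860)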