import gradio as gr
from transformers import AutoTokenizer, AutoModelForSeq2SeqLM

# Load the tokenizer and model
tokenizer = AutoTokenizer.from_pretrained("mohamedtolba/franco-arabic")
model = AutoModelForSeq2SeqLM.from_pretrained("mohamedtolba/franco-arabic")

def translate_text(text):
    # Tokenize the input text
    inputs = tokenizer(text, return_tensors="pt", truncation=True)
    # Generate the output text
    outputs = model.generate(**inputs)
    # Decode the generated text
    translated_text = tokenizer.decode(outputs[0], skip_special_tokens=True)
    return translated_text

# Create the Gradio interface
iface = gr.Interface(
    fn=translate_text,
    inputs=gr.Textbox(lines=2, placeholder="Enter Franco-Arabic text here..."),
    outputs="text",
    title="Franco-Arabic to Arabic Translator",
    description="Translate Franco-Arabic text to Arabic using the mohamedtolba/franco-arabic model.",
)

if __name__ == "__main__":
    iface.launch()
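
# Optional: quick sanity check without the Gradio UI. The sample string below is
# a hypothetical Franco-Arabic (Arabizi) input; uncomment to translate it directly.
# print(translate_text("ezayak, 3amel eh?"))
#
# To expose the app through a temporary public URL, launch with share=True instead:
# iface.launch(share=True)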