File size: 936 Bytes
b60996a
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
import gradio as gr
from transformers import pipeline, AutoModelForCausalLM, AutoTokenizer

# Load the model and tokenizer from the repository.
# NOTE: this downloads weights at import time; trust_remote_code=True executes
# custom modeling code shipped with the repo — only safe for a trusted repo.
model_name = "Dumele/autotrain-shhsb-57a2l"
model = AutoModelForCausalLM.from_pretrained(model_name, trust_remote_code=True)
tokenizer = AutoTokenizer.from_pretrained(model_name, trust_remote_code=True)

# Lazily-built text-generation pipeline, cached at module level so the model
# wrapper is constructed once instead of on every request.
_text_gen_pipe = None


# Define the text generation function
def generate_text(prompt):
    """Generate a text continuation for *prompt*.

    Uses a cached ``text-generation`` pipeline built from the module-level
    ``model``/``tokenizer`` (max_length=200 covers prompt + completion).

    Args:
        prompt: The input text to continue.

    Returns:
        The generated text (prompt included, as returned by the pipeline).
    """
    global _text_gen_pipe
    if _text_gen_pipe is None:
        # Building the pipeline is expensive — do it only on the first call.
        _text_gen_pipe = pipeline(
            task="text-generation",
            model=model,
            tokenizer=tokenizer,
            max_length=200,
        )
    result = _text_gen_pipe(prompt)
    return result[0]['generated_text']

# Create the Gradio interface.
# FIX: `gr.inputs.Textbox` belongs to the pre-3.0 Gradio API and was removed;
# components now live directly on the top-level `gr` namespace.
iface = gr.Interface(
    fn=generate_text,
    inputs=gr.Textbox(lines=2, placeholder="Enter your prompt here..."),
    outputs="text",
    title="Text Generation with Mistral-7B",
    description="Generate text using the fine-tuned Mistral-7B model from the Dumele repository."
)

# Launch the Gradio interface (blocks and serves the app).
iface.launch()