import os

# Force CPU usage if you're having GPU issues (set before torch initializes CUDA)
os.environ['CUDA_VISIBLE_DEVICES'] = ''

import gradio as gr
import torch
from transformers import AutoTokenizer, AutoModelForCausalLM

# Load model and tokenizer
model_name = "gpt2-medium"  # You can change this to your fine-tuned model
tokenizer = AutoTokenizer.from_pretrained(model_name)
model = AutoModelForCausalLM.from_pretrained(model_name)

# GPT-2 has no dedicated padding token, so reuse the end-of-sequence token
tokenizer.pad_token = tokenizer.eos_token
model.config.pad_token_id = model.config.eos_token_id

# Sample prompts for different styles
style_prompts = {
    "Classic": "Here's a romantic pickup line: ",
    "Nerdy": "Here's a tech-related pickup line: ",
    "Flirty": "Here's a flirty pickup line: ",
    "Funny": "Here's a humorous pickup line: "
}

# Sample sentiment adjustments
sentiment_adjustments = {
    "Sweet": " Make it sweet and genuine.",
    "Funny": " Make it lighthearted and humorous.",
    "Sarcastic": " Add a touch of playful sarcasm."
}

def generate_pickup_line(style, sentiment, temperature=0.7, max_length=50):
    """Generate a pickup line based on the selected style and sentiment."""
    try:
        prompt = style_prompts[style] + sentiment_adjustments[sentiment]

        # Tokenize the prompt
        inputs = tokenizer(prompt, return_tensors="pt", padding=True, truncation=True)

        # Generate text (pass the attention mask explicitly to avoid a warning)
        with torch.no_grad():
            outputs = model.generate(
                inputs["input_ids"],
                attention_mask=inputs["attention_mask"],
                max_length=max_length,
                temperature=temperature,
                num_return_sequences=1,
                pad_token_id=tokenizer.pad_token_id,
                do_sample=True,
                no_repeat_ngram_size=2
            )

        # Decode and strip the prompt from the generated text
        generated_text = tokenizer.decode(outputs[0], skip_special_tokens=True)
        pickup_line = generated_text.replace(prompt, "").strip()
        return pickup_line if pickup_line else "Please try again!"
    except Exception as e:
        return f"An error occurred. Please try again! Error: {str(e)}"

def copy_to_clipboard(text):
    """Return the generated line; actual clipboard access happens client-side in the browser."""
    return text

# Create the Gradio interface
def create_interface():
    with gr.Blocks(css="footer {visibility: hidden}") as interface:
        gr.Markdown("# AI Pickup Line Generator")
        gr.Markdown("Generate creative pickup lines with different styles and moods!")

        with gr.Row():
            with gr.Column():
                style = gr.Dropdown(
                    choices=list(style_prompts.keys()),
                    value="Classic",
                    label="Pickup Line Style"
                )
                sentiment = gr.Dropdown(
                    choices=list(sentiment_adjustments.keys()),
                    value="Sweet",
                    label="Tone"
                )
                generate_btn = gr.Button("Generate Pickup Line", variant="primary")

            with gr.Column():
                output = gr.Textbox(
                    label="Your Generated Pickup Line",
                    lines=3,
                    interactive=False,
                    show_copy_button=True  # built-in copy icon (available in recent Gradio versions)
                )
                copy_btn = gr.Button("Copy to Clipboard")

        # Handle generation
        generate_btn.click(
            fn=generate_pickup_line,
            inputs=[style, sentiment],
            outputs=output
        )

        # Handle copying (runs server-side only; the textbox's copy icon does the
        # actual clipboard copy)
        copy_btn.click(
            fn=copy_to_clipboard,
            inputs=output,
            outputs=None
        )
gr.Markdown(""" | |
### Tips: | |
- Try different styles and tones to get varied results | |
- Click 'Generate' multiple times to get different lines | |
- Use the copy button to easily share your favorite lines | |
""") | |
return interface | |

# Launch the interface
if __name__ == "__main__":
    interface = create_interface()
    interface.launch()
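    # If you run this outside Spaces (e.g. locally), launch(share=True) creates a
    # temporary public link; the app only needs gradio, torch, and transformers installed.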