|
import tensorflow |
|
import torch |
|
import gradio as gr |
|
from transformers import pipeline, AutoModelForCausalLM, AutoTokenizer |
|
|
|
|
|
# Text-classification pipeline used by generate_mini_apps to bucket the theme.
# NOTE(review): with no explicit model, `pipeline("text-classification")`
# falls back to a default sentiment model whose labels are POSITIVE/NEGATIVE —
# it will likely never emit 'Productivity'/'Creativity'/'Well-being'.
# Confirm the intended (e.g. zero-shot or custom) model and pin it here.
classifier = pipeline("text-classification")
|
|
|
|
|
def generate_mini_apps(theme):
    """Suggest a list of AI mini-app names for a user-supplied theme.

    The theme is run through the module-level ``classifier`` pipeline and the
    top prediction's label selects one of three curated app lists.

    Args:
        theme: Free-text theme/hobby/interest entered by the user.

    Returns:
        list[str]: Mini-app names for the predicted category, or a
        single-element fallback message when the label matches no category.
    """
    classification = classifier(theme)
    # Hoist the label once instead of re-indexing the pipeline result
    # in every branch.
    label = classification[0]['label']

    # Dispatch table keeps the category -> apps mapping in one place.
    # NOTE(review): the default text-classification pipeline is a sentiment
    # model (labels POSITIVE/NEGATIVE), so these category labels may never
    # occur — confirm a zero-shot or custom classifier is intended upstream.
    apps_by_label = {
        'Productivity': [
            'Idea-to-Codebase Generator',
            'Automated GitHub Repo Guardian Angel',
            'AI-Powered IDE',
        ],
        'Creativity': [
            'Brainstorming Assistant',
            'Mood Board Generator',
            'Writing Assistant',
        ],
        'Well-being': [
            'Meditation Guide',
            'Mood Tracker',
            'Sleep Tracker',
        ],
    }
    return apps_by_label.get(
        label, ["No matching mini-apps found. Try a different theme."]
    )
|
|
|
|
|
# Causal LM and its tokenizer, loaded from the current working directory
# ("./") — presumably a local checkpoint shipped alongside this script;
# verify against the deployment layout.
# NOTE(review): trust_remote_code=True executes arbitrary Python bundled with
# the model files — ensure the local checkpoint is from a trusted source.
model = AutoModelForCausalLM.from_pretrained("./", trust_remote_code=True)
tokenizer = AutoTokenizer.from_pretrained("./")
|
|
|
|
|
def generate_text(input_text):
    """Generate a short continuation of *input_text* with the local causal LM.

    Args:
        input_text: Prompt string to continue.

    Returns:
        str: Decoded model output (prompt included, up to 50 tokens total),
        with special tokens stripped.
    """
    inputs = tokenizer(input_text, return_tensors="pt")
    # Inference only: disabling autograd skips graph construction, cutting
    # memory use and compute during generation without changing the output.
    with torch.no_grad():
        output = model.generate(**inputs, max_length=50, num_return_sequences=1)
    return tokenizer.decode(output[0], skip_special_tokens=True)
|
|
|
|
|
# Main UI: maps a single theme textbox through generate_mini_apps.
# NOTE(review): generate_mini_apps returns a list[str]; a Textbox output
# renders its str() form — a gr.JSON or gr.List component may display the
# app names more cleanly. Confirm the intended presentation.
demo = gr.Interface(
    fn=generate_mini_apps,
    inputs=gr.Textbox(label="Enter a theme for your life"),
    outputs=gr.Textbox(label="Generated Mini-Apps"),
    title="AI4ME: Personalized AI Tools",
    description="Enter your hobby/interest/job and we'll generate a set of AI-powered mini-apps tailored to your specific needs."
)
|
|
|
|
|
# Secondary UI: free-form text generation backed by the local causal LM.
with gr.Blocks() as demo_text:
    gr.Markdown("## Text Generation")
    input_text = gr.Textbox(label="Enter your text")
    output_text = gr.Textbox(label="Generated Text")
    # Pressing Enter in the input box runs generate_text and writes the
    # result to the output box.
    input_text.submit(generate_text, inputs=input_text, outputs=output_text)
|
|
|
|
|
# BUG FIX: Interface.launch() blocks the calling thread by default, so the
# second launch below was unreachable while the first server was running.
# prevent_thread_lock=True starts the first server without blocking, letting
# both apps come up.
demo.launch(share=True, prevent_thread_lock=True)
demo_text.launch(share=True)