"""Streamlit app: text generation with the DarkIdol Llama-3.1-8B model."""

import streamlit as st
from transformers import AutoModelForCausalLM, AutoTokenizer

# Hugging Face repository of the model to serve.
MODEL_NAME = "aifeifei798/DarkIdol-Llama-3.1-8B-Instruct-1.2-Uncensored"


@st.cache_resource(show_spinner=f"Loading model {MODEL_NAME}...")
def load_model():
    """Load the tokenizer and model once per server process.

    Streamlit re-executes the whole script on every widget interaction;
    without ``st.cache_resource`` the 8B-parameter model would be
    re-downloaded/re-loaded on every rerun.

    Returns:
        tuple: ``(tokenizer, model)`` ready for inference.
    """
    tokenizer = AutoTokenizer.from_pretrained(MODEL_NAME)
    model = AutoModelForCausalLM.from_pretrained(MODEL_NAME)
    return tokenizer, model


def generate_text(prompt: str, max_new_tokens: int = 100) -> str:
    """Generate a completion for *prompt* with the cached model.

    Args:
        prompt: User-supplied input text.
        max_new_tokens: Upper bound on generated tokens (default 100,
            matching the original behavior).

    Returns:
        The decoded model output with special tokens stripped.
    """
    tokenizer, model = load_model()
    inputs = tokenizer(prompt, return_tensors="pt")
    outputs = model.generate(**inputs, max_new_tokens=max_new_tokens)
    return tokenizer.decode(outputs[0], skip_special_tokens=True)


# --- UI ---
st.title("DarkIdol Text Generation")
user_input = st.text_area("Enter your prompt:")

if st.button("Generate"):
    # Guard against generating from an empty prompt.
    if not user_input.strip():
        st.warning("Please enter a prompt first.")
    else:
        with st.spinner("Generating..."):
            output_text = generate_text(user_input)
        st.write("Generated Text:")
        st.write(output_text)