# Streamlit demo app (Hugging Face Space): GPT-2 text generation.
"""Streamlit demo: GPT-2 text generation with user-configurable seed and max length."""
import os

import streamlit as st
import torch  # noqa: F401  # kept from original; transformers uses it as a backend
import transformers  # noqa: F401
from huggingface_hub import HfFolder
from transformers import pipeline, set_seed

# Persist the Hugging Face token (if provided) so model downloads authenticate;
# the app still works anonymously without one.
HF_TOKEN = os.getenv('HF_TOKEN')
if HF_TOKEN:
    HfFolder.save_token(HF_TOKEN)
else:
    st.warning("HF_TOKEN is not set. Proceeding without a token.")


@st.cache_resource
def _load_generator():
    """Load the text-generation pipeline once per server process.

    Streamlit re-runs this script top-to-bottom on every widget interaction;
    without caching, the large gpt2-large model would be reloaded on each rerun.
    """
    return pipeline('text-generation', model='gpt2-large')


generator = _load_generator()

st.title("Text Generation")
st.write("Enter your text below.")
text = st.text_area("Your input")
st.write("Enter seed.")
seed_input = st.text_area("Set seed")
st.write("Enter max length.")
max_length_input = st.text_area("max length")

# Parse each numeric input independently so one bad/empty value falls back to
# its own default instead of resetting both.
try:
    seed = int(seed_input)
except ValueError:
    seed = 1
try:
    max_length = int(max_length_input)
except ValueError:
    max_length = 100

if st.button("Generate Text"):
    # Seed immediately before generation so each click is reproducible.
    set_seed(seed)
    if text and max_length:
        out = generator(text, max_length=max_length, num_return_sequences=1)
        st.json(out)
        st.write(f"Reply: {out[0]['generated_text']}")