import torch
from transformers import AutoTokenizer, AutoModelForCausalLM
import streamlit as st

# Hugging Face model id used for causal-LM text generation.
MODEL_NAME = "Richieburundi/Ariginalmodel"


@st.cache_resource
def load_model():
    """Load the tokenizer and model exactly once.

    Streamlit re-executes the whole script on every UI interaction; without
    caching, the weights would be re-loaded on each button press.
    """
    tokenizer = AutoTokenizer.from_pretrained(MODEL_NAME)
    model = AutoModelForCausalLM.from_pretrained(MODEL_NAME)
    model.eval()  # inference only — disable dropout etc.
    return tokenizer, model


tokenizer, model = load_model()


def generate_text(input_text, max_length=50):
    """Generate a sampled continuation of *input_text*.

    Args:
        input_text: Prompt string to condition the model on.
        max_length: Maximum total sequence length in tokens
            (prompt + generated continuation).

    Returns:
        The decoded output string with special tokens stripped.
    """
    inputs = tokenizer(input_text, return_tensors="pt")
    # inference_mode: generation needs no autograd bookkeeping.
    with torch.inference_mode():
        # BUG FIX: the original passed both do_sample=True (with top_k/top_p)
        # AND num_beams=5. Beam search and nucleus/top-k sampling are
        # conflicting decoding strategies — transformers warns and the
        # sampling parameters do not act as intended. The top_k/top_p values
        # indicate sampling was the intent, so num_beams is dropped.
        outputs = model.generate(
            **inputs,
            max_length=max_length,
            num_return_sequences=1,
            do_sample=True,
            top_k=50,
            top_p=0.95,
        )
    return tokenizer.decode(outputs[0], skip_special_tokens=True)


st.title("AI Text Generation")
st.write("Enter some text, and the AI will generate a response.")

input_text = st.text_area("Input Text", height=200)

if st.button("Generate Text"):
    if not input_text.strip():
        # Robustness: an empty prompt would just sample noise; tell the user.
        st.warning("Please enter some input text first.")
    else:
        try:
            generated_text = generate_text(input_text)
            st.write("Generated Text:")
            st.write(generated_text)
        except Exception as e:
            # UI boundary: surface the failure instead of crashing the app.
            st.error(f"Error generating text: {e}")