Alfasign committed
Commit c5cfb64
1 Parent(s): 6a58a75

Update app.py

Files changed (1)
  app.py +23 -8
app.py CHANGED
@@ -1,13 +1,28 @@
+
+import streamlit as st
 from transformers import GPT2LMHeadModel, GPT2Tokenizer
 
-MODEL_NAME = "gpt2"  # Change this as needed
+@st.cache(allow_output_mutation=True)
+def load_model():
+    MODEL_NAME = "gpt2"  # Change this as needed
+    tokenizer = GPT2Tokenizer.from_pretrained(MODEL_NAME)
+    model = GPT2LMHeadModel.from_pretrained(MODEL_NAME)
+    return model, tokenizer
 
-tokenizer = GPT2Tokenizer.from_pretrained(MODEL_NAME)
-model = GPT2LMHeadModel.from_pretrained(MODEL_NAME)
+def generate_text(prompt, model, tokenizer):
+    inputs = tokenizer.encode(prompt, return_tensors="pt")
+    outputs = model.generate(inputs, max_length=200, num_return_sequences=5)
+    generated_text = [tokenizer.decode(output) for output in outputs]
+    return generated_text
 
-prompt = "Was ist künstliche Intelligenz?"  # Change this as needed
-inputs = tokenizer.encode(prompt, return_tensors="pt")
-outputs = model.generate(inputs, max_length=200, num_return_sequences=5)
+model, tokenizer = load_model()
 
-for i, output in enumerate(outputs):
-    print(f"Output {i+1}: {tokenizer.decode(output)}")
+st.title("Textgenerierung mit GPT-2")
+prompt = st.text_input("Geben Sie einen Prompt ein:")
+if prompt:
+    with st.spinner("Generieren von Text..."):
+        generated_text = generate_text(prompt, model, tokenizer)
+    st.header("Generierter Text:")
+    for i, text in enumerate(generated_text):
+        st.subheader(f"Option {i+1}:")
+        st.write(text)