RLealz committed on
Commit
5b2b81d
1 Parent(s): 50da1ce

Update app.py

Files changed (1)
app.py +17 -9
app.py CHANGED
@@ -1,18 +1,26 @@
 import streamlit as st
 from transformers import AutoModelForCausalLM, AutoTokenizer
 
-# Load the model and tokenizer
+# Load the model and tokenizer from the Hugging Face repository
 model_name = "aifeifei798/DarkIdol-Llama-3.1-8B-Instruct-1.2-Uncensored"
+st.write(f"Loading model {model_name}...")
+
+# The model and tokenizer will be downloaded and cached locally
 tokenizer = AutoTokenizer.from_pretrained(model_name)
 model = AutoModelForCausalLM.from_pretrained(model_name)
 
-# Define the Streamlit app
-st.title("Text Generation with DarkIdol-Llama")
-prompt = st.text_input("Enter your prompt:")
-
-if st.button("Generate"):
+# Function to generate text based on user input
+def generate_text(prompt):
     inputs = tokenizer(prompt, return_tensors="pt")
     outputs = model.generate(**inputs, max_new_tokens=100)
-    response = tokenizer.decode(outputs[0], skip_special_tokens=True)
-    st.write("Response:")
-    st.write(response)
+    return tokenizer.decode(outputs[0], skip_special_tokens=True)
+
+# Streamlit UI
+st.title("DarkIdol Text Generation")
+user_input = st.text_area("Enter your prompt:")
+
+if st.button("Generate"):
+    with st.spinner("Generating..."):
+        output_text = generate_text(user_input)
+        st.write("Generated Text:")
+        st.write(output_text)
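Note on this revision: Streamlit re-executes app.py from the top on every widget interaction, so the from_pretrained calls above run again on each rerun. A minimal sketch of one way to avoid that, assuming Streamlit's st.cache_resource decorator (Streamlit >= 1.18) and the accelerate package for device_map="auto"; the float16 dtype and device placement are illustrative assumptions, not part of this commit:

import streamlit as st
import torch
from transformers import AutoModelForCausalLM, AutoTokenizer

model_name = "aifeifei798/DarkIdol-Llama-3.1-8B-Instruct-1.2-Uncensored"

@st.cache_resource  # assumption: load the weights once per server process, not per rerun
def load_model():
    tokenizer = AutoTokenizer.from_pretrained(model_name)
    # Half precision roughly halves memory for the 8B checkpoint; device_map="auto"
    # (via accelerate) places layers on the available GPU(s) or falls back to CPU.
    model = AutoModelForCausalLM.from_pretrained(
        model_name,
        torch_dtype=torch.float16,
        device_map="auto",
    )
    return tokenizer, model

tokenizer, model = load_model()

def generate_text(prompt):
    # Move the token tensors to the same device the model weights landed on
    inputs = tokenizer(prompt, return_tensors="pt").to(model.device)
    outputs = model.generate(**inputs, max_new_tokens=100)
    return tokenizer.decode(outputs[0], skip_special_tokens=True)

With the resource cache in place, the download and load happen once per server process; later reruns reuse the same tokenizer and model objects.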