Spaces:
Sleeping
Sleeping
brandyguillory
committed on
Commit
•
f1f5f03
1
Parent(s):
7ea2c94
Create app.py
Browse files
app.py
ADDED
@@ -0,0 +1,55 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
import os
import warnings

import streamlit as st
from transformers import pipeline
# Page heading shown at the top of the app.
st.title("Test Large Language Models from Hugging Face")

# Hugging Face model identifiers offered in the dropdown.
models = [
    "meta-llama/Llama-3.2-3B-Instruct",
    "mistralai/Mistral-7B-Instruct-v0.3",
    "ibm-granite/granite-3.0-8b-instruct",
    "bartowski/Meta-Llama-3.1-8B-Claude-GGUF",
]

# Widget state read later by the generate-button handler:
# the chosen model id and the raw prompt text.
selected_model = st.selectbox("Select a model to test:", models)
user_input = st.text_area("Enter your text prompt:", "Type something here...")
# Load the selected model using the pipeline. st.cache_resource keeps one
# pipeline instance per model name across Streamlit reruns, so weights are
# downloaded/loaded only once per model.
@st.cache_resource
def load_model(model_name):
    """Return a cached text-generation pipeline for ``model_name``.

    The Hugging Face access token is read from the ``HF_ACCESS_TOKEN``
    environment variable (required for gated models such as Llama).
    Transformers emits many informational warnings while loading; they are
    suppressed so they do not clutter the Streamlit log.

    NOTE(review): the original code kept ``framework="tf"``; several of the
    listed models ship only PyTorch/GGUF weights, so TF loading may fail —
    confirm against the deployment environment.
    """
    with warnings.catch_warnings():
        warnings.simplefilter("ignore")
        # Environment variable so the token is never hard-coded in source.
        access_token = os.getenv("HF_ACCESS_TOKEN")
        # ``use_auth_token`` is the spelling the original code used; newer
        # transformers versions prefer ``token=``.
        return pipeline(
            "text-generation",
            model=model_name,
            framework="tf",
            use_auth_token=access_token,
        )
# Run generation when the user clicks the button.
if st.button("Generate Response"):
    if not user_input:
        # Guard clause: nothing to generate from.
        st.warning("Please enter a text prompt.")
    else:
        try:
            # Resolve the cached pipeline for the chosen model.
            generator = load_model(selected_model)

            # Generate a single completion, truncating over-long prompts.
            with st.spinner("Generating response..."):
                result = generator(
                    user_input,
                    max_length=100,
                    num_return_sequences=1,
                    truncation=True,
                )

            # Show the generated text back to the user.
            st.subheader("Generated Response:")
            st.write(result[0]["generated_text"])
        except Exception as e:
            # Surface load/generation failures in the UI instead of crashing.
            st.error(f"An error occurred: {e}")