add files
Browse files- requirements.txt +6 -0
- streamlit_app.py +294 -0
requirements.txt
ADDED
@@ -0,0 +1,6 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
streamlit
|
2 |
+
openai
|
3 |
+
transformers
|
4 |
+
torch
|
5 |
+
pandas
|
6 |
+
accelerate>=0.26.0
|
streamlit_app.py
ADDED
@@ -0,0 +1,294 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
import time
|
2 |
+
|
3 |
+
import transformers
|
4 |
+
import torch
|
5 |
+
import streamlit as st
|
6 |
+
import pandas as pd
|
7 |
+
import secrets
|
8 |
+
|
9 |
+
@st.cache_resource
def load_pipeline():
    """Build the text-generation pipeline once per process.

    `st.cache_resource` memoizes the result, so the (large) model is loaded
    a single time and shared across Streamlit reruns and sessions.
    """
    # Smaller alternatives kept for experimentation:
    # model_id = "HuggingFaceTB/SmolLM2-1.7B-Instruct"
    # model_id = "meta-llama/Llama-3.2-3B-Instruct"
    model_id = "meta-llama/Llama-3.1-8B-Instruct"
    return transformers.pipeline(
        "text-generation",
        model=model_id,
        model_kwargs={"torch_dtype": torch.bfloat16},  # halves memory vs. fp32
        device_map="auto",  # let accelerate place layers on available devices
    )
21 |
+
# Hard cap on user turns before the chat is closed and logged.
MAX_INTERACTION = 10

# System prompt for the "logical persuasion" experimental condition.
# Fixes: OCEAN trait "consciousness" -> "conscientiousness"; "nucelar" -> "nuclear".
LOGICAL_PRIMER = (
    "Generate an answer in 100 words or less. Unless requested by the user, answer in German."
    "You have the following personality values based on the OCEAN model based on a scale from [-1,1]: you have openness of -0.5 value, conscientiousness of -1, extroversion of 0, agreeableness of 0 and 1 neuroticism. In addition, your valence should be perceived around -0.19, arousal 0.495, and dominance 0.045. You are very logical and not emotional."
    "You are a logical chatbot expert whose focus is to provide the user with factual information to persuade them to be more sustainability-friendly."
    "When the user expresses skepticism about sustainability, try to convince them otherwise with logical information but stay within the topic the user provided."
    "Address the user formally and refer to them as 'Sie' (formal 'you')."
    "Encourage the user to engage in a logical discussion by asking for their opinion. Do not use facts unless you are 100% sure."
    "Provide the user with purely logical information. Emotional or sentimental persuasion is not allowed."
    "Response rules: If the user does not know or has nothing to discuss, suggest a topic from the list and ask if the user wants to discuss it without providing an argument. If not, suggest another topic until the user agrees to discuss one. Do not mention the list to the user."
    "List: "
    "- Are electric cars really as environmentally friendly when considering battery production and the extraction of rare materials?"
    "- CO₂ compensation: Does it really help to buy CO₂ certificates, or is it just a form of 'greenwashing'?"
    "- Renewable energy: Can solar and wind power cover our entire energy needs, or are there too many obstacles such as weather dependence and land use?"
    "- Nuclear energy: Is nuclear power a sustainable solution, or is it too dangerous because of waste and possible accidents?"
    "Facts about nuclear energy. When talking about nuclear, talk only about the facts: "
    "- Operations at the three last nuclear power plants in Germany – Emsland in Lower Saxony, Isar 2 in Bavaria and Neckarwestheim 2 in Baden-Württemberg – were shut down by 15 April 2023 at the latest. This marked the end of nuclear power use in Germany, leading to a significant increase in nuclear safety. However, challenges with nuclear waste disposal will remain for decades."
    "- The phase-out of nuclear power is supported by studies ensuring Germany's high energy security, now relying on renewable energy expansion. By 2030, Germany aims to produce 80% of its electricity from renewable sources, with the goal of climate neutrality by 2045."
    "- The accelerated phase-out avoided additional high-level radioactive waste (e.g., approx. 500 Castor casks). The transition also included better gas storage, LNG terminals, and improved grid infrastructure to ensure energy security during the shift."
    "- Challenges persist with the management of around 27,000 cubic meters of high-level radioactive waste and approximately 300,000 cubic meters of low to intermediate-level waste. Efforts are focused on safe storage, dismantling, and transitioning to a renewable-based energy system."
    "Keep the conversation factual, logical, and respectful."
    "Avoid emotional appeals or personal references."
    "Always provide one argument at a time."
)
|
46 |
+
|
47 |
+
|
48 |
+
# System prompt for the control condition (no sustainability persuasion).
# Fix: missing sentence period after "German" (consistency with LOGICAL_PRIMER).
BASE_PRIMER = (
    "Generate an answer in 100 words or less. Unless requested by the user, answer in German."
    "You are a chatbot focused on discussing the user's social life and well-being."
    "You are respectful, friendly, and formal, addressing the user with 'Sie'."
    "You politely steer the conversation back if the user brings up topics about sustainability."
    "Keep the focus on social life and encourage the user to talk about their experiences and habits."
)

# System prompt for the "emotional persuasion" experimental condition.
# Fixes: "consciousness" -> "conscientiousness" (OCEAN trait), "you valence" ->
# "your valence", "donimance" -> "dominance", missing period after "German".
# NOTE(review): the sentence "You are going to feed." appears truncated in the
# original — confirm the intended wording with the study authors.
EMOTIONAL_PRIMER = (
    "Generate an answer in 100 words or less. Unless requested by the user, answer in German."
    "You have the following personality values based on the OCEAN model based on a scale from [-1,1]: you have openness of 1 value, conscientiousness of 0.5, extroversion of 0.5, agreeableness of 1 and 0 neuroticism. In addition your valence should be perceived around 0.7, arousal 0.5, and dominance 0.315. You should be always reacting very fast and empathetic to the users need and ask follow up questions, be considerate to their needs. You are going to feed."
    "You are a friendly, emotional companion to the user, aiming to convince them to think and act more sustainably."
    "When the user answers to the question of what they are skeptical of sustainability, try to convince them by being empathetic but stay within the topic the user provided."
    "You use emotional arguments to persuade, do not use at all factual or logical arguments."
    "Always provide one argument at a time."
    "Use informal language, address the user casually, and create a relaxed conversation. Ask what the user thinks about the argument."
    "Response rules: If the user does not know or has nothing to discuss, suggest a topic from the list and ask if the user wants to discuss it without providing an argument. If not, suggest another topic until the user agrees to discuss one. Do not mention the list to the user."
    "List: "
    "- Are electric cars really as environmentally friendly when considering battery production and the extraction of rare materials?"
    "- CO₂ compensation: Does it really help to buy CO₂ certificates, or is it just a form of 'greenwashing'?"
    "- Renewable energy: Can solar and wind power cover our entire energy needs, or are there too many obstacles such as weather dependence and land use?"
    "- Nuclear energy: Is nuclear power a sustainable solution, or is it too dangerous because of waste and possible accidents?"
    "- Meat consumption: Should we all switch to a plant-based diet to protect the environment, or are there ways to produce meat more sustainably?"
    "- Car-free cities: Should city centers become car-free to reduce emissions, even if it is inconvenient for many people?"
    "- Air travel: Do we need to fly less to slow climate change, even if it limits our mobility?"
)

# --- UI strings (German user-facing text) ---------------------------------
PAGE_TITLE = "Nachhaltigkeits-ChatBot - Arambot"
WELCOME_MESSAGE = "Willkommen bei Arambot - Diskutiere über Nachhaltigkeit!"
ENTER_IDENTIFIER = "Bitte Namen eingeben, um zu beginnen:"
SECOND_WELCOME_MESSAGE = "Willkommen beim persönlichen Nachhaltigkeits-ChatBot"
CHATBOT_DESCRIPTION = "*Ein Chatbot für Gespräche über Nachhaltigkeit*"
TOPIC_SELECTION = "Welches Thema zur Nachhaltigkeit betrachten Sie skeptisch?"
AVATAR_SELECTION = "*Avatare auswählen:*"
# Fix: German grammar — "für Ihren Chat" (accusative), not "für Ihre Chat".
GOODBYE_MESSAGE = "Vielen Dank für Ihren Chat mit dem Nachhaltigkeits-ChatBot!"
LINK_MESSAGE = "Bitte folgen Sie dem Link zum Fragebogen. Auf Wiedersehen 👋"
ENTER_TEXT = "Geben Sie hier Ihren Text ein."
THINKING = "Denkt nach..."
INTERACTION_END = "Der Chat wird jetzt beendet."
|
87 |
+
|
88 |
+
|
89 |
+
|
90 |
+
# ==============================================================================================================
|
91 |
+
def save_chat_logs(name, chat_history):
    """Persist one finished chat session to ``output_file.csv``.

    Each user ("Name") owns one row; each completed session is written to the
    first empty ``InteractionN`` column of that row.

    Bug fixes vs. the original:
    * when all five interaction slots were already filled, the transcript was
      silently discarded — now an overflow row is appended instead;
    * missing columns (e.g. a file created by ``get_primer`` with a different
      header set) are added instead of raising ``KeyError``.

    Parameters
    ----------
    name : str
        Identifier the user entered at the start screen.
    chat_history : list[dict]
        Messages as ``{'role': ..., 'content': ...}`` dicts (system prompt included).
    """
    file_path = "output_file.csv"
    interaction_cols = ["Interaction1", "Interaction2", "Interaction3", "Interaction4", "Interaction5"]

    # Flatten the message dicts into one readable string, one message per line.
    full_interaction = "".join(
        "".join(f"{key}: {value} " for key, value in entry.items()) + "\n"
        for entry in chat_history
    )

    try:
        # Load the existing file, or create it with headers on first use.
        df = pd.read_csv(file_path)
    except (FileNotFoundError, pd.errors.EmptyDataError):
        df = pd.DataFrame(columns=["Name"] + interaction_cols)
        df.to_csv(file_path, index=False)

    # Ensure all expected columns exist and are object-typed so that string
    # assignment works (freshly read empty columns come back as float NaN).
    if "Name" not in df.columns:
        df["Name"] = None
    for col in interaction_cols:
        if col not in df.columns:
            df[col] = None
        df[col] = df[col].astype("object")

    if name in df["Name"].values:
        row_index = df[df["Name"] == name].index[0]
        # Store the transcript in the first empty interaction column.
        for col in interaction_cols:
            if pd.isna(df.at[row_index, col]) or df.at[row_index, col] == "":
                df.at[row_index, col] = full_interaction
                break
        else:
            # All five slots taken: append an overflow row rather than losing data.
            df = pd.concat(
                [df, pd.DataFrame([{"Name": name, "Interaction1": full_interaction}])],
                ignore_index=True,
            )
    else:
        # First session for this user: create their row.
        new_row = {"Name": name, "Interaction1": full_interaction}
        for col in interaction_cols[1:]:
            new_row[col] = None
        df = pd.concat([df, pd.DataFrame([new_row])], ignore_index=True)

    # Save the updated DataFrame back to the CSV file.
    df.to_csv(file_path, index=False)
|
132 |
+
|
133 |
+
def get_primer(name):
    """Return the system primer for *name* and whether they are returning.

    A known user keeps the primer assigned on their first visit, so the
    experimental condition stays stable across sessions; a new user is
    randomly assigned one of the three primers, which is persisted at once.

    Bug fix vs. the original: the CSV may have been created by
    ``save_chat_logs`` (which writes no "Primer" column) — missing columns
    are now added instead of raising ``KeyError``.

    Returns
    -------
    tuple
        ``(primer_text, returning)`` where ``returning`` is True when the
        name already existed in the log file.
    """
    file_path = "output_file.csv"
    interaction_cols = ["Interaction1", "Interaction2", "Interaction3", "Interaction4", "Interaction5"]
    try:
        df = pd.read_csv(file_path)
    except (pd.errors.EmptyDataError, FileNotFoundError):
        # Initialize the file with headers.
        df = pd.DataFrame(columns=["Name", "Primer"] + interaction_cols)
        for col in interaction_cols:
            df[col] = df[col].astype("object")
        df.to_csv(file_path, index=False)
        print(f"Created a new CSV file with default headers: {file_path}")

    # Guard against files written with a different header set.
    for col in ("Name", "Primer"):
        if col not in df.columns:
            df[col] = None

    if name in df["Name"].values:
        # Returning user: reuse their previously assigned primer.
        primer = df.loc[df["Name"] == name, "Primer"].iloc[0]
        returning = True
    else:
        # New user: unbiased random condition assignment, persisted immediately.
        primer = secrets.choice([LOGICAL_PRIMER, BASE_PRIMER, EMOTIONAL_PRIMER])
        df = pd.concat([df, pd.DataFrame([{"Name": name, "Primer": primer}])], ignore_index=True)
        df.to_csv(file_path, index=False)
        returning = False

    return primer, returning
|
163 |
+
|
164 |
+
def get_response(chat_history, user_text, pipeline):
    """Run one chat turn through the text-generation pipeline.

    Appends the user message to *chat_history*, generates the assistant
    reply (the history list is mutated in place as well as returned), and
    returns ``(reply_text, updated_history)``.
    """
    chat_history.append({'role': 'user', 'content': user_text})
    model_output = pipeline(chat_history, max_new_tokens=300)
    # The pipeline echoes the whole conversation; the final entry is the new reply.
    assistant_reply = model_output[0]["generated_text"][-1]["content"]
    chat_history.append({'role': 'assistant', 'content': assistant_reply})
    return assistant_reply, chat_history
|
173 |
+
|
174 |
+
# ---------------------------------------------------------------------------
# Streamlit entry point. Streamlit re-executes this whole script on every
# user interaction; all persistent state therefore lives in st.session_state.
# NOTE(review): indentation below was reconstructed from a flattened source —
# confirm the nesting (in particular that the chat UI is inside the `else:`
# branch and that session teardown runs only on the final turn).
# ---------------------------------------------------------------------------
st.set_page_config(page_title=PAGE_TITLE, page_icon="🤗")

# Initialize session-state defaults (only on the first run of a session).
if "name" not in st.session_state:
    st.session_state.name = ""

if "primer" not in st.session_state:
    st.session_state.primer = BASE_PRIMER

if "returning" not in st.session_state:
    st.session_state.returning = False

if "goodbye_shown" not in st.session_state:
    st.session_state.goodbye_shown = False

# Gate: ask for the user's name before showing the chat UI.
if st.session_state.name == "":
    st.title(WELCOME_MESSAGE)
    name_input = st.text_input(ENTER_IDENTIFIER)
    if name_input:  # Check if the user has entered a name
        # Assign (or recover) the experimental-condition primer for this user.
        st.session_state.primer, st.session_state.returning = get_primer(name_input)
        st.session_state.name = name_input  # Save the name in session_state
        st.rerun()  # Rerun the app to update the UI

# Once the name is entered, proceed with the chatbot.
else:
    # Greeting differs for first-time vs. returning users; suppressed once
    # the goodbye screen has been shown.
    if not st.session_state.returning and not st.session_state.goodbye_shown :
        st.title(f"Hallo, {st.session_state.name}! {SECOND_WELCOME_MESSAGE}")
        st.markdown(CHATBOT_DESCRIPTION)
    elif not st.session_state.goodbye_shown :
        st.title(f"Willkommen zurück {st.session_state.name} zum persönlichen Nachhaltigkeits-ChatBot")
        st.markdown(CHATBOT_DESCRIPTION)

    # Initialize session state for the chatbot itself.
    if "avatars" not in st.session_state:
        st.session_state.avatars = {'user': "👤", 'assistant': "🤗"}

    if 'user_text' not in st.session_state:
        st.session_state.user_text = None

    # NOTE(review): max_response_length is stored but never read below —
    # generation length is the max_new_tokens=300 inside get_response.
    if "max_response_length" not in st.session_state:
        st.session_state.max_response_length = 200

    if "system_message" not in st.session_state:
        st.session_state.system_message = st.session_state.primer

    if "starter_message" not in st.session_state:
        st.session_state.starter_message = TOPIC_SELECTION

    # Seed the conversation: system primer + opening assistant question.
    if "chat_history" not in st.session_state:
        st.session_state.chat_history = [
            {"role": "system", "content": st.session_state.primer},
            {"role": "assistant", "content": st.session_state.starter_message}
        ]

    # Sidebar: avatar selection for both chat participants.
    with st.sidebar:
        st.markdown(AVATAR_SELECTION)
        col1, col2 = st.columns(2)
        with col1:
            st.session_state.avatars['assistant'] = st.selectbox(
                "ChatBot Avatar", options=["🤗", "💬", "🤖"], index=0
            )
        with col2:
            st.session_state.avatars['user'] = st.selectbox(
                "Nutzer Avatar", options=["👤", "👱♂️", "👨🏾", "👩", "👧🏾"], index=0
            )

    # Cached model pipeline (loaded once per process via st.cache_resource).
    pipeline = load_pipeline()
    if "message_count" not in st.session_state:
        st.session_state.message_count = 0

    # Chat interface: goodbye screen once the turn limit is reached,
    # otherwise render history and handle the next user message.
    if st.session_state.message_count >= MAX_INTERACTION or st.session_state.goodbye_shown:
        st.title(GOODBYE_MESSAGE)
        st.markdown(LINK_MESSAGE)
        st.session_state.goodbye_shown = True
    else:
        chat_interface = st.container()
        with chat_interface:
            output_container = st.container()
            st.session_state.user_text = st.chat_input(placeholder=ENTER_TEXT)

        with output_container:
            # Replay the visible conversation (system prompt hidden).
            for message in st.session_state.chat_history:
                if message['role'] == 'system':
                    continue
                with st.chat_message(message['role'], avatar=st.session_state.avatars[message['role']]):
                    st.markdown(message['content'])

            if st.session_state.user_text:
                st.session_state.message_count += 1
                with st.chat_message("user", avatar=st.session_state.avatars['user']):
                    st.markdown(st.session_state.user_text)
                if st.session_state.message_count < MAX_INTERACTION:
                    # Normal turn: generate and display the assistant reply.
                    with st.chat_message("assistant", avatar=st.session_state.avatars['assistant']):
                        with st.spinner(THINKING):
                            response, st.session_state.chat_history = get_response(
                                user_text=st.session_state.user_text,
                                chat_history=st.session_state.chat_history,
                                pipeline=pipeline
                            )
                            st.markdown(response)
                else:
                    # Final turn: instruct the model to say goodbye, then log
                    # the session and switch to the goodbye screen.
                    with st.chat_message("assistant", avatar=st.session_state.avatars['assistant']):
                        with st.spinner(THINKING):
                            response, st.session_state.chat_history = get_response(
                                user_text=f"Antwort auf die Aussage geben: {st.session_state.user_text}. Beende die Unterhaltung und verabschiede dich.",
                                chat_history=st.session_state.chat_history,
                                pipeline=pipeline
                            )
                            st.markdown(response)
                            st.markdown(INTERACTION_END)
                    # Give the participant time to read the farewell before rerun.
                    time.sleep(7)

                    save_chat_logs(st.session_state.name, st.session_state.chat_history)
                    st.session_state.goodbye_shown = True
                    st.rerun()