psychologists committed on
Commit
16e6da6
β€’
1 Parent(s): 9351154

Upload 4 files

Browse files
Files changed (4) hide show
  1. Dockerfile +14 -0
  2. app.py +72 -0
  3. docker-compose.yml +13 -0
  4. requirements.txt +2 -0
Dockerfile ADDED
@@ -0,0 +1,14 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
# app/Dockerfile
#
# Image for the Streamlit chatbot UI (app.py). The build context is the
# app directory; the server listens on port 8501.

FROM python:3.10-slim

WORKDIR /app

# Copy the application source and the dependency manifest.
COPY ./ ./

# --no-cache-dir keeps pip's download cache out of the image layer.
RUN pip3 install --no-cache-dir -r requirements.txt

# Streamlit's default server port.
EXPOSE 8501

ENTRYPOINT ["streamlit", "run", "app.py", "--server.port=8501", "--server.address=0.0.0.0"]
app.py ADDED
@@ -0,0 +1,72 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
"""Streamlit chat UI for a Llama-2-style psychology chatbot (ctransformers backend)."""
import streamlit as st
import os  # NOTE(review): appears unused in this file — confirm before removing
from ctransformers import AutoModelForCausalLM

# Browser tab title for the app.
st.set_page_config(page_title="πŸ¦™πŸ’¬ Llama 2 Chatbot")
7
+
8
@st.cache_resource()
def ChatModel(temperature, top_p):
    """Load the causal LM once per (temperature, top_p) pair.

    Streamlit's resource cache keeps the loaded model alive across reruns,
    so the download/load cost is paid only when the sampling knobs change.
    """
    model_repo = 'Israr-dawar/psychology_chatbot'
    return AutoModelForCausalLM.from_pretrained(
        model_repo,
        temperature=temperature,
        top_p=top_p,
    )
16
+
17
# Sidebar: branding plus the sampling controls that parameterize the model.
with st.sidebar:
    st.title('πŸ¦™πŸ’¬ Llama 2 Chatbot')

    # Refactored from <https://github.com/a16z-infra/llama2-chatbot>
    st.subheader('Models and parameters')

    temperature = st.sidebar.slider('temperature', min_value=0.01, max_value=2.0, value=0.1, step=0.01)
    top_p = st.sidebar.slider('top_p', min_value=0.01, max_value=1.0, value=0.9, step=0.01)
    # A max_length slider from the upstream tutorial is currently disabled.
    chat_model = ChatModel(temperature, top_p)
# Seed the transcript with a greeting the first time this session runs.
if "messages" not in st.session_state.keys():
    st.session_state.messages = [{"role": "assistant", "content": "How may I assist you today?"}]

# Replay the stored transcript into the chat area on every rerun.
for entry in st.session_state.messages:
    with st.chat_message(entry["role"]):
        st.write(entry["content"])
38
+
39
def clear_chat_history():
    """Reset the transcript back to the initial assistant greeting."""
    st.session_state.messages = [{"role": "assistant", "content": "How may I assist you today?"}]

st.sidebar.button('Clear Chat History', on_click=clear_chat_history)
42
+
43
# Function for generating the LLaMA2 response
def generate_llama2_response(prompt_input):
    """Render the session history into a single prompt and run the model.

    Args:
        prompt_input: the latest user message. The caller appends it to
            st.session_state.messages before calling, so it is also the last
            "User:" turn in the rendered history.

    Returns:
        Whatever chat_model returns (an iterable of text fragments that the
        caller streams into the UI).
    """
    string_dialogue = "You are a helpful assistant. You do not respond as 'User' or pretend to be 'User'. You only respond once as 'Assistant'."
    for dict_message in st.session_state.messages:
        # Fixed: the original appended the literal two-character sequence
        # backslash+n ("\\n\\n") instead of real blank-line separators.
        if dict_message["role"] == "user":
            string_dialogue += "User: " + dict_message["content"] + "\n\n"
        else:
            string_dialogue += "Assistant: " + dict_message["content"] + "\n\n"
    # Fixed: dropped the stray literal "prompt " prefix the original embedded
    # in the f-string. NOTE(review): prompt_input already appears in the
    # rendered history above, so it occurs twice in the final prompt; this
    # mirrors the upstream tutorial this app derives from — confirm intent.
    output = chat_model(f"{string_dialogue} {prompt_input} Assistant: ")
    return output
53
+
54
# Accept new user input; the walrus binds only when something was typed.
if prompt := st.chat_input():
    st.session_state.messages.append({"role": "user", "content": prompt})
    with st.chat_message("user"):
        st.write(prompt)

# Only answer when the most recent turn came from the user.
if st.session_state.messages[-1]["role"] != "assistant":
    with st.chat_message("assistant"):
        with st.spinner("Thinking..."):
            response = generate_llama2_response(prompt)
            placeholder = st.empty()
            full_response = ''
            # Stream the reply fragment-by-fragment into one placeholder.
            for chunk in response:
                full_response += chunk
                placeholder.markdown(full_response)
            placeholder.markdown(full_response)
        message = {"role": "assistant", "content": full_response}
        st.session_state.messages.append(message)
docker-compose.yml ADDED
@@ -0,0 +1,13 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
# Compose definition for the Streamlit chatbot container.
version: '3'
services:
  streamlit_app:
    build:
      context: .
    container_name: streamlit_llama
    ports:
      - "8501:8501"  # host:container — Streamlit's default port

    # Cap container memory so model loading cannot exhaust the host.
    deploy:
      resources:
        limits:
          memory: 4G
requirements.txt ADDED
@@ -0,0 +1,2 @@
 
 
 
1
ctransformers
streamlit