Megatron17 commited on
Commit
b7b44ee
1 Parent(s): c6d4d6b

Adding app file, Docker image, and requirements

Browse files
Files changed (3) hide show
  1. Dockerfile +11 -0
  2. app.py +75 -0
  3. requirements.txt +3 -0
Dockerfile ADDED
@@ -0,0 +1,11 @@
 
 
 
 
 
 
 
 
 
 
 
 
1
# Container image for the Chainlit chat app (non-root, HF Spaces-compatible).
FROM python:3.10

# Run as an unprivileged user (uid 1000 is required by Hugging Face Spaces).
RUN useradd -m -u 1000 user
USER user

# Put pip --user installs (~/.local/bin) on PATH so `chainlit` is found.
ENV HOME=/home/user \
    PATH=/home/user/.local/bin:$PATH

WORKDIR $HOME/app

# Install dependencies before copying the app so Docker layer caching
# skips the pip install when only application code changes.
# Fix: the original used `~/app/...` — Dockerfile COPY does not expand `~`,
# so it created a literal "~" directory instead of the home directory.
COPY --chown=user ./requirements.txt $HOME/app/requirements.txt
RUN pip install --no-cache-dir -r requirements.txt

# Copy the application code with correct ownership (the original had an
# extra redundant `COPY . .` after this, dropped here).
COPY --chown=user . $HOME/app

CMD ["chainlit", "run", "app.py", "--port", "7860"]
app.py ADDED
@@ -0,0 +1,75 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
# Importing libraries
import os
import openai
import chainlit as cl
from chainlit.prompt import Prompt, PromptMessage
from chainlit.playground.providers import ChatOpenAI
from dotenv import load_dotenv

# Load variables from a local .env file, then configure the OpenAI client.
# Raises KeyError at import time if OPENAI_API_KEY is not set.
load_dotenv()
openai.api_key = os.environ["OPENAI_API_KEY"]

# Templates
# System message: fixed persona applied to every conversation.
system_template = """
You are a helpful assistant who always speaks in a pleasant tone!
"""

# User message: wraps the raw user input ({input}) and nudges the model
# toward step-by-step reasoning.
user_template = """{input}
Think through your response step by step.
"""
21
+
22
# Runs once at the start of each user session.
@cl.on_chat_start
async def start_chat():
    """Seed the per-session OpenAI call settings in the Chainlit session.

    Every subsequent message handler reads these settings back, so the
    whole conversation uses one consistent model configuration.
    """
    model_settings = dict(
        model="gpt-3.5-turbo",   # chat model used for completions
        temperature=0,           # 0 => deterministic (no sampling randomness)
        max_tokens=500,          # cap on tokens generated per reply
        top_p=1,                 # nucleus sampling disabled (full distribution)
        frequency_penalty=0,
        presence_penalty=0,
    )

    cl.user_session.set("settings", model_settings)
35
+
36
# Runs each time the bot receives a user message.
@cl.on_message
async def main(message: str):
    """Build the chat prompt and stream the OpenAI completion to the user.

    Args:
        message: Raw text of the incoming user message.

    Side effects:
        Streams tokens into a Chainlit message and sends it when complete.
    """
    settings = cl.user_session.get("settings")

    # Assemble a Chainlit Prompt object so the playground can reproduce
    # the exact call (templates, formatted values, and settings).
    prompt = Prompt(
        provider=ChatOpenAI.id,
        messages=[
            PromptMessage(
                role="system",
                template=system_template,
                formatted=system_template,
            ),
            PromptMessage(
                role="user",
                template=user_template,
                formatted=user_template.format(input=message),
            ),
        ],
        inputs={"input": message},
        settings=settings,
    )

    # Fix: original used cl.Messages(...) — the Chainlit class is
    # cl.Message, so the handler raised AttributeError on every message.
    # (A leftover debug print of the message payload was also removed.)
    msg = cl.Message(content="")

    # Call OpenAI (legacy 0.x SDK) and stream tokens into the UI as they
    # arrive instead of waiting for the full completion.
    async for stream_resp in await openai.ChatCompletion.acreate(
        messages=[m.to_openai() for m in prompt.messages],
        stream=True,
        **settings,
    ):
        token = stream_resp.choices[0]["delta"].get("content", "")
        await msg.stream_token(token)

    # Record the generated answer on the prompt, and attach the prompt to
    # the message so the playground shows the full context.
    prompt.completion = msg.content
    msg.prompt = prompt

    await msg.send()
requirements.txt ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
# Chat UI framework (pre-1.0 API: chainlit.prompt, cl.Message streaming)
chainlit==0.7.0
# Legacy OpenAI SDK (0.x API: openai.ChatCompletion.acreate)
openai==0.28.0
# Loads OPENAI_API_KEY from a local .env file
python-dotenv==1.0.0