omitakahiro committed
Commit 476e05d • 1 Parent(s): bce6ab5

Update app.py

Files changed (1):
  1. app.py (+64 -19)
app.py CHANGED
@@ -1,35 +1,44 @@
 import json
 import os
 
+import openai
 import requests
 import streamlit as st
 
 CHATBOT_ENDPOINT = os.environ["CHATBOT_ENDPOINT"]
 TOKEN = os.environ["TOKEN"]
+MAINTENANCE = os.environ.get("MAINTENANCE", 0)
+
+client = openai.OpenAI(
+    base_url=CHATBOT_ENDPOINT,
+    api_key=TOKEN,
+    timeout=60
+)
 
 def generate(prompt):
 
     try:
-        r = requests.post(
-            CHATBOT_ENDPOINT,
-            data=json.dumps({"instruction": prompt, "token": TOKEN}),
-            headers = {"content-type": "application/json"},
-            timeout = 20,
-            stream = True,
+        completion = client.chat.completions.create(
+            model="stockmark/stockmark-100b-instruct-merged-v0.1",
+            messages=[{"role": "user", "content": prompt}],
+            extra_body={"repetition_penalty": 1.05},
+            max_tokens=256,
+            temperature=0.5,
+            top_p=0.95,
+            stream=True
         )
-        new_str = b""
-        for s in r.iter_content():
-            new_str += s
-            try:
-                output_str = new_str.decode("utf-8")
-                yield output_str.replace("\n", " \n")
-                new_str = b""
-            except:
-                pass
+
+        for s in completion:
+            s = s.choices[0].delta.content
+            if s:
+                if s == "\n":
+                    s = " \n"
+                yield s
     except:
-        yield "<<Some errors occured>>"
+        yield "<<予期せぬエラーが発生しております。しばらくしてからアクセスください。>>"
+
 
-intro = """This is a demo site for Stockmark-LLM-100b. This service is running on AWS Inferentia2.
+intro = """This is a demo site for Stockmark-LLM-100b. This service is running on AWS Inferentia2. Currently, the response is sometimes slow due to many requests to the server.
 - Pretrained model: [stockmark/stockmark-100b](https://huggingface.co/stockmark/stockmark-100b)
 - Instruction tuned model: [stockmark/stockmark-100b-instruct-v0.1](https://huggingface.co/stockmark/stockmark-100b-instruct-v0.1)
 """
@@ -39,6 +48,42 @@ disclaimer = """
 - We may use users chat data in this demo to improve our LLM.
 """
 
-st.title("Stockmark-LLM-100b")
-st.markdown("申し訳ありませんが、ただいまサーバの復旧をおこなっております。しばらくしてからアクセスしてください。")
+if MAINTENANCE:
+    st.title("Stockmark-LLM-100b")
+    st.markdown("ただいまメンテナンス中です。申し訳ありませんが、しばらくしてからアクセスしてください。")
+    st.stop()
+
+tab1, tab2 = st.tabs(["Demo", "Disclaimer"])
+
+with tab1:
+    st.title("Stockmark-LLM-100b")
+    st.markdown(intro)
+
+    prompt = st.session_state.get("prompt", "")
+    response = st.session_state.get("response", "")
+
+    if prompt == "" or response:
+        print("new_session")
+        prompt_new = st.text_area("Prompt:")
+        if prompt_new:
+            st.session_state["prompt"] = prompt_new
+            st.session_state["response"] = ""
+            st.rerun()
+    else:
+        prompt = st.text_area("Prompt:", value=prompt, disabled=True)
+
+    if prompt:
+
+        if response:
+            with st.chat_message("assistant"):
+                st.write(response)
+        else:
+            with st.chat_message("assistant"):
+                response = st.write_stream(generate(prompt))
+
+            st.session_state["response"] = response
+            st.rerun()
 
+with tab2:
+    st.title("Stockmark-LLM-100b: Disclaimer")
+    st.markdown(disclaimer)
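
The core of the change is a switch from a hand-rolled `requests` streaming loop to an OpenAI-compatible chat-completions stream rendered with `st.write_stream`. The sketch below isolates that pattern only; it is a minimal sketch under stated assumptions, not the full app. It assumes the server behind `CHATBOT_ENDPOINT` exposes an OpenAI-compatible chat API and accepts `TOKEN` as its API key (the model name and sampling values are copied from the diff), and it omits the maintenance flag, tabs, and session-state bookkeeping that the commit also adds.

```python
# Minimal sketch of the streaming path this commit adopts (not the full app).
# Assumptions: CHATBOT_ENDPOINT points at an OpenAI-compatible chat endpoint
# and TOKEN is accepted as its API key.
import os

import openai
import streamlit as st

client = openai.OpenAI(
    base_url=os.environ["CHATBOT_ENDPOINT"],
    api_key=os.environ["TOKEN"],
    timeout=60,
)

def generate(prompt: str):
    """Yield response fragments as they arrive from the server."""
    completion = client.chat.completions.create(
        model="stockmark/stockmark-100b-instruct-merged-v0.1",
        messages=[{"role": "user", "content": prompt}],
        extra_body={"repetition_penalty": 1.05},  # passed through to the backend sampler
        max_tokens=256,
        temperature=0.5,
        top_p=0.95,
        stream=True,
    )
    for chunk in completion:
        delta = chunk.choices[0].delta.content
        if delta:
            # Mirror the commit: pad bare newline chunks as " \n" so the
            # markdown renderer keeps line breaks in the streamed output.
            yield " \n" if delta == "\n" else delta

prompt = st.text_area("Prompt:")
if prompt:
    with st.chat_message("assistant"):
        # st.write_stream consumes the generator and returns the concatenated text.
        full_response = st.write_stream(generate(prompt))
```

In the actual app, the string returned by `st.write_stream` is stored in `st.session_state["response"]` and the script is rerun, so the finished answer is redisplayed on subsequent runs without querying the server again.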