dh-mc committed
Commit 208786a
1 parent: 8282222

added command-line chat

Files changed (3):
  1. Makefile +1 -1
  2. test.py +37 -1
  3. test.sh +0 -79
Makefile CHANGED
@@ -10,7 +10,7 @@ else
 endif
 
 test:
-	PYTORCH_MPS_HIGH_WATERMARK_RATIO=0.0 python test.py $(TEST)
+	python test.py $(TEST)
 
 chat:
 	python test.py chat
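Only the test recipe changes: the hard-coded PYTORCH_MPS_HIGH_WATERMARK_RATIO=0.0 (which lifts the MPS allocator's memory cap on Apple-Silicon Macs) is dropped, and anyone who still needs it can prefix the invocation manually, e.g. PYTORCH_MPS_HIGH_WATERMARK_RATIO=0.0 make test. The chat target already existed; it only becomes functional with the test.py change below, since previously unittest.main() would have tried to interpret "chat" as a test name.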
test.py CHANGED
@@ -1,6 +1,7 @@
 # project/test.py
 
 import os
+import sys
 import unittest
 from timeit import default_timer as timer
 
@@ -134,5 +135,40 @@ class TestQAChain(unittest.TestCase):
         self.run_test_case("huggingface", self.question)
 
 
+def chat():
+    start = timer()
+    llm_loader = app_init()[0]
+    end = timer()
+    print(f"Model loaded in {end - start:.3f}s")
+
+    chat_chain = ChatChain(llm_loader)
+    chat_history = []
+
+    chat_start = timer()
+
+    while True:
+        query = input("Please enter your question: ")
+        query = query.strip()
+        if query.lower() == "exit":
+            break
+
+        print("\nQuestion: " + query)
+
+        start = timer()
+        result = chat_chain.call_chain(
+            {"question": query, "chat_history": chat_history}, None
+        )
+        end = timer()
+        print(f"Completed in {end - start:.3f}s")
+
+        chat_history.append((query, result["text"]))
+
+    chat_end = timer()
+    print(f"Total time used: {chat_end - chat_start:.3f}s")
+
+
 if __name__ == "__main__":
-    unittest.main()
+    if len(sys.argv) > 1 and sys.argv[1] == "chat":
+        chat()
+    else:
+        unittest.main()
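The new chat() depends on app_init() and ChatChain, which are defined elsewhere in the repository (their imports sit above this hunk, outside the context shown). The loop itself is simple: load the model once, then read questions until the user types "exit", timing each call and feeding the accumulated (question, answer) pairs back in as chat_history. For reference, a minimal self-contained sketch of the same argv-dispatch-plus-REPL pattern; EchoChain is a hypothetical stand-in for the real chain so the script runs anywhere, and everything else mirrors the diff:

# sketch of the pattern above; EchoChain is hypothetical, not the repo's ChatChain
import sys
import unittest
from timeit import default_timer as timer


class EchoChain:
    """Stand-in chain: echoes the question and the history length."""

    def call_chain(self, inputs, callbacks=None):
        question = inputs["question"]
        turns = len(inputs["chat_history"])
        return {"text": f"(echo) {question} [{turns} prior turns]"}


def chat():
    chat_chain = EchoChain()
    chat_history = []

    while True:
        query = input("Please enter your question: ").strip()
        if query.lower() == "exit":
            break

        start = timer()
        result = chat_chain.call_chain(
            {"question": query, "chat_history": chat_history}, None
        )
        print(f"{result['text']}  (completed in {timer() - start:.3f}s)")

        # (question, answer) tuples -- the history format LangChain-style
        # conversational chains typically consume
        chat_history.append((query, result["text"]))


if __name__ == "__main__":
    if len(sys.argv) > 1 and sys.argv[1] == "chat":
        chat()
    else:
        unittest.main()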
test.sh DELETED
@@ -1,79 +0,0 @@
-#!/bin/sh
-
-EXT="$1"
-
-if [ "$EXT" = "" ]; then
-    echo usage: $0 log_ext
-    exit
-fi
-
-echo Using extension: $EXT
-
-[ ! -f .env ] || export $(grep -v '^#' .env | xargs)
-
-export LLM_MODEL_TYPE=openai
-export OPENAI_MODEL_NAME="gpt-3.5-turbo"
-echo Testing openai-${OPENAI_MODEL_NAME}
-python test.py 2>&1 | tee ./data/logs/openai-${OPENAI_MODEL_NAME}_${EXT}.log
-
-export OPENAI_MODEL_NAME="gpt-4"
-echo Testing openai-${OPENAI_MODEL_NAME}
-python test.py 2>&1 | tee ./data/logs/openai-${OPENAI_MODEL_NAME}_${EXT}.log
-
-export LLM_MODEL_TYPE=huggingface
-
-export HUGGINGFACE_MODEL_NAME_OR_PATH="lmsys/fastchat-t5-3b-v1.0"
-echo Testing $HUGGINGFACE_MODEL_NAME_OR_PATH
-python test.py 2>&1 | tee ./data/logs/fastchat-t5-3b-v1.0_${EXT}.log
-
-
-export HUGGINGFACE_MODEL_NAME_OR_PATH="TheBloke/wizardLM-7B-HF"
-echo Testing $HUGGINGFACE_MODEL_NAME_OR_PATH
-python test.py 2>&1 | tee ./data/logs/wizardLM-7B-HF_${EXT}.log
-
-
-export HUGGINGFACE_MODEL_NAME_OR_PATH="TheBloke/vicuna-7B-1.1-HF"
-echo Testing $HUGGINGFACE_MODEL_NAME_OR_PATH
-python test.py 2>&1 | tee ./data/logs/vicuna-7B-1.1-HF_${EXT}.log
-
-
-export HUGGINGFACE_MODEL_NAME_OR_PATH="nomic-ai/gpt4all-j"
-echo Testing $HUGGINGFACE_MODEL_NAME_OR_PATH
-python test.py 2>&1 | tee ./data/logs/gpt4all-j_${EXT}.log
-
-
-# export HUGGINGFACE_MODEL_NAME_OR_PATH="nomic-ai/gpt4all-falcon"
-# echo Testing $HUGGINGFACE_MODEL_NAME_OR_PATH
-# python test.py 2>&1 | tee ./data/logs/gpt4all-falcon_${EXT}.log
-
-export LLM_MODEL_TYPE=stablelm
-
-# export STABLELM_MODEL_NAME_OR_PATH="stabilityai/stablelm-tuned-alpha-7b"
-# echo Testing $STABLELM_MODEL_NAME_OR_PATH
-# python test.py 2>&1 | tee ./data/logs/stablelm-tuned-alpha-7b_${EXT}.log
-
-
-export STABLELM_MODEL_NAME_OR_PATH="OpenAssistant/stablelm-7b-sft-v7-epoch-3"
-echo Testing $STABLELM_MODEL_NAME_OR_PATH
-python test.py 2>&1 | tee ./data/logs/stablelm-7b-sft-v7-epoch-3_${EXT}.log
-
-
-export LLM_MODEL_TYPE=mosaicml
-export MOSAICML_MODEL_NAME_OR_PATH="mosaicml/mpt-7b-instruct"
-echo Testing $MOSAICML_MODEL_NAME_OR_PATH
-python test.py 2>&1 | tee ./data/logs/mpt-7b-instruct_${EXT}.log
-
-
-# export MOSAICML_MODEL_NAME_OR_PATH="mosaicml/mpt-30b-instruct"
-# echo Testing $MOSAICML_MODEL_NAME_OR_PATH
-# LOAD_QUANTIZED_MODEL=4bit python test.py 2>&1 | tee ./data/logs/mpt-30b-instruct_${EXT}.log
-
-export LLM_MODEL_TYPE=huggingface
-export HUGGINGFACE_MODEL_NAME_OR_PATH="HuggingFaceH4/starchat-beta"
-echo Testing $HUGGINGFACE_MODEL_NAME_OR_PATH
-LOAD_QUANTIZED_MODEL=8bit python test.py 2>&1 | tee ./data/logs/starchat-beta_${EXT}.log
-
-
-# export HUGGINGFACE_MODEL_NAME_OR_PATH="../../models/starcoder"
-# echo Testing $HUGGINGFACE_MODEL_NAME_OR_PATH
-# LOAD_QUANTIZED_MODEL=8bit python test.py 2>&1 | tee ./data/logs/starcoder_${EXT}.log
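With test.sh gone, the per-model regression sweep it drove has no replacement in this commit. If an equivalent is still wanted, the same sweep can be scripted against the surviving test.py; the sketch below is hypothetical (sweep.py and its RUNS table are not part of the commit) and assumes test.py still reads the environment variables the old script exported:

# sweep.py -- hypothetical driver reproducing the deleted test.sh sweep.
# The RUNS table copies a few entries from the old script; test.py is
# assumed to still read the same LLM_MODEL_TYPE / *_MODEL_NAME_OR_PATH vars.
import os
import subprocess
import sys

RUNS = [
    ({"LLM_MODEL_TYPE": "openai",
      "OPENAI_MODEL_NAME": "gpt-3.5-turbo"}, "openai-gpt-3.5-turbo"),
    ({"LLM_MODEL_TYPE": "openai",
      "OPENAI_MODEL_NAME": "gpt-4"}, "openai-gpt-4"),
    ({"LLM_MODEL_TYPE": "huggingface",
      "HUGGINGFACE_MODEL_NAME_OR_PATH": "lmsys/fastchat-t5-3b-v1.0"},
     "fastchat-t5-3b-v1.0"),
    # ...remaining models from the deleted script go here
]


def sweep(ext: str) -> None:
    os.makedirs("./data/logs", exist_ok=True)
    for overrides, label in RUNS:
        print(f"Testing {label}")
        env = {**os.environ, **overrides}
        # mirrors `python test.py 2>&1 | tee <log>`, minus the live echo
        with open(f"./data/logs/{label}_{ext}.log", "wb") as log:
            subprocess.run([sys.executable, "test.py"], env=env,
                           stdout=log, stderr=subprocess.STDOUT)


if __name__ == "__main__":
    if len(sys.argv) < 2:
        sys.exit(f"usage: {sys.argv[0]} log_ext")
    sweep(sys.argv[1])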