He Bo committed on commit 3e8c841 (1 parent: bfdddd4): update

app.py CHANGED
@@ -3,25 +3,33 @@ import json
 import gradio as gr
 from datetime import datetime
 
-
-
-
-# chinese_index = "smart_search_qa_test_0614_wuyue_2"
-# chinese_index = "smart_search_qa_demo_0618_cn_3"
-chinese_index = "chinese_bge_test_0916"
-english_index = "smart_search_qa_demo_0618_en_2"
+#Fill in your correct configuration
+invoke_url = 'https://8og7denk4i.execute-api.us-west-2.amazonaws.com/prod'
+bedrock_url = 'https://bx2kc13ys3.execute-api.us-east-1.amazonaws.com/prod/bedrock?'
 
+# chinese_index = "chinese_bge_test_0916"
+# chinese_index = "bge_test_cn_1004_5"
+chinese_index = "digitimes_test_1005_title"
+english_index = "chinese_bge_test_0916"
 
-
+cn_embedding_endpoint = 'huggingface-inference-eb-zh'
+cn_llm_endpoint = 'pytorch-inference-chatglm2-g5-4x'
 
+en_embedding_endpoint = 'pytorch-inference-all-minilm-l6-v2'
+# en_embedding_endpoint = 'huggingface-inference-eb-bge-en'
+# en_llm_endpoint = 'meta-textgeneration-llama-2-7b-f-2023-07-19-06-07-05-430'
+en_llm_endpoint = 'pytorch-inference-chatglm2-g5-4x'
 
-chinese_prompt = """给定一个长文档和一个问题的以下提取部分,如果你不知道答案,就说你不知道。不要试图编造答案。用中文回答。
-
-
-
-
-
+#Modify the default prompt as needed
+chinese_prompt = """基于以下已知信息,简洁和专业的来回答用户的问题,并告知是依据哪些信息来进行回答的。
+如果无法从中得到答案,请说 "根据已知信息无法回答该问题" 或 "没有提供足够的相关信息",不允许在答案中添加编造成分,答案请使用中文。
+
+问题: {question}
+=========
+{context}
+=========
+答案:"""
 
 english_prompt = """Use the following pieces of context to answer the question at the end. If you don't know the answer, just say that you don't know, don't try to make up an answer.
 {context}
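Reviewer note: the new configuration block feeds these values into a hand-built query string later in the file (`api = invoke_url + '/langchain_processor_qa?query='`). Below is a minimal sketch, not part of the commit, of the same composition with percent-encoding; `build_qa_url` is a hypothetical helper. The app itself concatenates raw prompt and question text into the URL, so any '&', '=', or newline in those values would corrupt the query string.

```python
from urllib.parse import quote

invoke_url = 'https://8og7denk4i.execute-api.us-west-2.amazonaws.com/prod'
cn_embedding_endpoint = 'huggingface-inference-eb-zh'

def build_qa_url(question, **params):
    # hypothetical helper, not in the commit: percent-encode each value
    url = invoke_url + '/langchain_processor_qa?query=' + quote(question)
    for key, value in params.items():
        url += '&' + key + '=' + quote(str(value))
    return url

print(build_qa_url('什么是EC2?', language='chinese',
                   embedding_endpoint_name=cn_embedding_endpoint))
```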
@@ -30,43 +38,68 @@ Question: {question}
 Answer:"""
 
 
-
-如下三个反括号中是aws的产品文档片段
-```
+chinses_summarize_prompt="""请根据访客与客服的通话记录,写一段访客提出问题的摘要,突出显示与亚马逊云服务相关的要点, 摘要不需要有客服的相关内容:
 {text}
-```
-请基于这些文档片段自动生成尽可能多的问题以及对应答案, 尽可能详细全面, 并且遵循如下规则:
-1. "aws"需要一直被包含在Question中
-2. 答案部分的内容必须为上述aws的产品文档片段的内容摘要
-3. 问题部分需要以"Question:"开始
-4. 答案部分需要以"Answer:"开始
-"""
 
-
-
-
+摘要是:"""
+
+english_summarize_prompt="""Based on the call records between the visitor and the customer service, write a summary of the visitor's questions, highlighting the key points related to Amazon Web Services, and the summary does not need to have customer service-related content:
 {text}
-```
-Please automatically generate FAQs based on these document fragments, with answers that should not exceed 50 words as much as possible, and follow the following rules:
-1. 'aws' needs to be included in the question
-2. The content of the answer section must be a summary of the content of the above document fragments
 
-The
+The summary is:"""
+
+claude_chat_prompt_cn="""
+Human: 请根据 {history},回答:{human_input}
+
+Assistant:
 """
 
-
-
-
-
-```
-Please automatically generate as many questions as possible based on this manual document, and follow these rules:
-1. "aws" should be contained in every question
-2. questions start with "Question:"
-3. answers begin with "Answer:"
+claude_chat_prompt_cn_tc="""
+Human: 請根據 {history},使用繁體中文回答:{human_input}
+
+Assistant:
 """
 
+claude_chat_prompt_english="""
+Human: Based on {history}, answer the question:{human_input}
+
+Assistant:
+"""
+
+
+claude_rag_prompt_cn = """
+Human: 基于以下已知信息,简洁和专业的来回答用户的问题,如果无法从中得到答案,请说 "根据已知信息无法回答该问题" 或 "没有提供足够的相关信息",不允许在答案中添加编造成分,答案请使用中文。
+
+问题: {question}
+=========
+{context}
+=========
+Assistant:
+"""
+
+claude_rag_prompt_cn_tc = """
+Human: 基於以下已知信息,簡潔和專業的來回答用戶的問題,如果無法從中得到答案,請說 "根據已知信息無法回答該問題" 或 "沒有提供足夠的相關信息",不允許在答案中添加編造成分,答案請使用繁體中文回答
+
+問題: {question}
+=========
+{context}
+=========
+Assistant:
+"""
+
+claude_rag_prompt_english = """
+Human: Use the following pieces of context to answer the question at the end. If you don't know the answer, just say that you don't know, don't try to make up an answer.
+{context}
+
+Question: {question}
+Assistant:
+"""
+
 
-def get_answer(task_type,question,session_id,language,prompt,search_engine,index,top_k,score_type_checklist):
+api = invoke_url + '/langchain_processor_qa?query='
+
+def get_answer(task_type,question,session_id,language,model_type,prompt,search_engine,index,top_k,temperature,score_type_checklist):
 
     question=question.replace('AWS','亚马逊云科技').replace('aws','亚马逊云科技').replace('Aws','亚马逊云科技')
     print('question:',question)
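The new templates keep `{history}`/`{human_input}` (chat) and `{question}`/`{context}` (RAG) placeholders. The backend behind `/langchain_processor_qa` is not part of this commit, so how it fills them is an assumption; if it uses `str.format`-style substitution, instantiation would look like this sketch:

```python
# Sketch only: hypothetical filling of one of the templates above.
claude_chat_prompt_english = """
Human: Based on {history}, answer the question:{human_input}

Assistant:
"""

print(claude_chat_prompt_english.format(
    history="Q: What is S3? A: An object storage service.",
    human_input="Does it replicate across Availability Zones?"))
```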
@@ -75,7 +108,7 @@ def get_answer(task_type,question,session_id,language,prompt,search_engine,index
         url = api + question
     else:
         url = api + "hello"
-
+
     #task type: qa,chat
     if task_type == "Knowledge base Q&A":
         task = 'qa'
@@ -85,26 +118,49 @@ def get_answer(task_type,question,session_id,language,prompt,search_engine,index
 
     if language == "english":
         url += '&language=english'
-        url += ('&embedding_endpoint_name=
-        url += ('&llm_embedding_name=
+        url += ('&embedding_endpoint_name='+en_embedding_endpoint)
+        url += ('&llm_embedding_name='+en_llm_endpoint)
     elif language == "chinese":
         url += '&language=chinese'
-        url += ('&embedding_endpoint_name=
-
-        url += ('&llm_embedding_name=pytorch-inference-chatglm2-g5-4x')
+        url += ('&embedding_endpoint_name='+cn_embedding_endpoint)
+        url += ('&llm_embedding_name='+cn_llm_endpoint)
 
     elif language == "chinese-tc":
         url += '&language=chinese-tc'
-        url += ('&embedding_endpoint_name=
-
-        url += ('&llm_embedding_name=pytorch-inference-chatglm2-g5-4x')
+        url += ('&embedding_endpoint_name='+cn_embedding_endpoint)
+        url += ('&llm_embedding_name='+cn_llm_endpoint)
 
     if len(session_id) > 0:
         url += ('&session_id='+session_id)
 
-
+    if model_type == "claude2":
+        url += ('&model_type=bedrock')
+        url += ('&bedrock_api_url='+bedrock_url)
+        url += ('&bedrock_model_id=anthropic.claude-v2')
+    elif model_type == "titan(english)":
+        url += ('&model_type=bedrock')
+        url += ('&bedrock_api_url='+bedrock_url)
+        url += ('&bedrock_model_id=amazon.titan-tg1-large')
+    elif model_type == "llama2(english)":
+        url += ('&model_type=llama2')
+
     if len(prompt) > 0:
         url += ('&prompt='+prompt)
+    elif model_type == "claude2":
+        if task_type == "Knowledge base Q&A":
+            if language == "english":
+                url += ('&prompt='+claude_rag_prompt_english)
+            elif language == "chinese":
+                url += ('&prompt='+claude_rag_prompt_cn)
+            elif language == "chinese-tc":
+                url += ('&prompt='+claude_rag_prompt_cn_tc)
+        else:
+            if language == "english":
+                url += ('&prompt='+claude_chat_prompt_english)
+            elif language == "chinese":
+                url += ('&prompt='+claude_chat_prompt_cn)
+            elif language == "chinese-tc":
+                url += ('&prompt='+claude_chat_prompt_cn_tc)
 
     if search_engine == "OpenSearch":
         url += ('&search_engine=opensearch')
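For a concrete picture of what this branching produces: with `model_type="claude2"`, `language="chinese"`, the "Knowledge base Q&A" task and an empty prompt box, the code above extends the URL roughly as in this standalone trace. Values are copied from the config block at the top of the file; the prompt stand-in is abbreviated.

```python
# Trace for illustration, not part of the commit.
api = 'https://8og7denk4i.execute-api.us-west-2.amazonaws.com/prod/langchain_processor_qa?query='
bedrock_url = 'https://bx2kc13ys3.execute-api.us-east-1.amazonaws.com/prod/bedrock?'
cn_embedding_endpoint = 'huggingface-inference-eb-zh'
cn_llm_endpoint = 'pytorch-inference-chatglm2-g5-4x'
claude_rag_prompt_cn = '...'  # stands in for the multi-line template above

url = api + '亚马逊云科技是什么?'
url += '&language=chinese'
url += '&embedding_endpoint_name=' + cn_embedding_endpoint
url += '&llm_embedding_name=' + cn_llm_endpoint
url += '&model_type=bedrock'
url += '&bedrock_api_url=' + bedrock_url
url += '&bedrock_model_id=anthropic.claude-v2'
url += '&prompt=' + claude_rag_prompt_cn  # sent without URL-encoding
print(url)
```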
@@ -115,6 +171,7 @@ def get_answer(task_type,question,session_id,language,prompt,search_engine,index
                 url += ('&index='+chinese_index)
             elif language == "english" and len(english_index) >0:
                 url += ('&index='+english_index)
+
     elif search_engine == "Kendra":
         url += ('&search_engine=kendra')
         if len(index) > 0:
@@ -123,6 +180,9 @@ def get_answer(task_type,question,session_id,language,prompt,search_engine,index
     if int(top_k) > 0:
         url += ('&top_k='+str(top_k))
 
+    if float(temperature) > 0.01:
+        url += ('&temperature='+str(temperature))
+
     for score_type in score_type_checklist:
         url += ('&cal_' + score_type +'=true')
 
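Note that the new guard only fires strictly above the slider's default: with `qa_temperature_slider` defaulting to 0.01, no `&temperature=` parameter is ever sent unless the user moves the slider. A two-line check of that reading:

```python
temperature = 0.01  # slider default defined further down in this commit
assert not (float(temperature) > 0.01)  # guard is False: parameter omitted
```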
@@ -151,7 +211,15 @@ def get_answer(task_type,question,session_id,language,prompt,search_engine,index
         item = source_list[i]
         print('item:',item)
         _id = "num:" + str(item['id'])
-
+        try:
+            source = ''
+            if 'source' in item.keys():
+                source = "source:" + item['source']
+            elif 'title' in item.keys():
+                source = "source:" + item['title']
+        except KeyError:
+            source ="source:unknown"
+            print("KeyError:source file not found")
         score = "score:" + str(item['score'])
         sentence = "sentence:" + item['sentence']
         paragraph = "paragraph:" + item['paragraph']
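The `except KeyError` branch added here looks unreachable as written: membership tests like `'source' in item.keys()` return False rather than raise. An equivalent sketch of the same fallback chain using `dict.get` (the sample `item` is hypothetical):

```python
# Sketch only: same source/title/unknown fallback without the dead except.
item = {'id': 3, 'title': 'EC2 FAQ', 'score': 0.83}  # hypothetical hit
source = "source:" + str(item.get('source', item.get('title', 'unknown')))
print(source)  # -> source:EC2 FAQ
```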
@@ -195,28 +263,27 @@ def get_summarize(texts,language,model_type,prompt):
 
     if language == "english":
         url += '&language=english'
-        url += ('&embedding_endpoint_name=
-        url += ('&llm_embedding_name=
-        # url += ('&prompt='+en_prompt_template)
+        url += ('&embedding_endpoint_name='+en_embedding_endpoint)
+        url += ('&llm_embedding_name='+en_llm_endpoint)
 
     elif language == "chinese":
         url += '&language=chinese'
-        url += ('&embedding_endpoint_name=
-
-        url += ('&llm_embedding_name=pytorch-inference-chatglm2-g5-2x')
-        # if llm_instance == '2x':
-        #     url += ('&llm_embedding_name=pytorch-inference-chatglm-v1')
-        # elif llm_instance == '8x':
-        #     url += ('&llm_embedding_name=pytorch-inference-chatglm-v1-8x')
-
+        url += ('&embedding_endpoint_name='+cn_embedding_endpoint)
+        url += ('&llm_embedding_name='+cn_llm_endpoint)
 
     if model_type == "claude2":
         url += ('&model_type=bedrock')
         url += ('&bedrock_api_url='+bedrock_url)
         url += ('&bedrock_model_id=anthropic.claude-v2')
 
+
     if len(prompt) > 0:
         url += ('&prompt='+prompt)
+    else:
+        if language == "english":
+            url += ('&prompt='+english_summarize_prompt)
+        elif language == "chinese":
+            url += ('&prompt='+chinses_summarize_prompt)
 
     print('url:',url)
     response = requests.get(url)
@@ -226,12 +293,12 @@ def get_summarize(texts,language,model_type,prompt):
 
     answer = result['summarize']
 
-    if language == 'english' and answer.find('The Question and Answer are:') > 0:
-        answer=answer.split('The Question and Answer are:')[-1].strip()
+    # if language == 'english' and answer.find('The Question and Answer are:') > 0:
+    #     answer=answer.split('The Question and Answer are:')[-1].strip()
 
     return answer
 
-demo = gr.Blocks(title="
+demo = gr.Blocks(title="AWS Intelligent Q&A Solution Guide")
 with demo:
     gr.Markdown(
         "# <center>AWS Intelligent Q&A Solution Guide"
@@ -248,18 +315,16 @@ with demo:
             qa_button = gr.Button("Summit")
 
             qa_language_radio = gr.Radio(["chinese","chinese-tc", "english"],value="chinese",label="Language")
-
+            qa_model_type_radio = gr.Radio(["claude2","chatGLM2"],value="chatGLM2",label="Model type")
             qa_prompt_textbox = gr.Textbox(label="Prompt( must include {context} and {question} )",placeholder=chinese_prompt,lines=2)
             qa_search_engine_radio = gr.Radio(["OpenSearch","Kendra"],value="OpenSearch",label="Search engine")
             qa_index_textbox = gr.Textbox(label="OpenSearch index OR Kendra index id")
-
+            # qa_em_ep_textbox = gr.Textbox(label="Embedding Endpoint")
 
-
-            score_type_checklist = gr.CheckboxGroup(["query_answer_score", "answer_docs_score","docs_list_overlap_score"],value=[],label="Confidence score type")
-
-            #language_radio.change(fn=change_prompt, inputs=language_radio, outputs=prompt_textbox)
-
+            qa_top_k_slider = gr.Slider(label="Top_k of source text to LLM",value=1, minimum=1, maximum=20, step=1)
+            qa_temperature_slider = gr.Slider(label="Temperature parameter of LLM",value=0.01, minimum=0.01, maximum=1, step=0.01)
+            score_type_checklist = gr.CheckboxGroup(["query_answer_score", "answer_docs_score","docs_list_overlap_score"],value=["query_answer_score"],label="Confidence score type")
 
         with gr.Column():
            qa_output = [gr.outputs.Textbox(label="Answer"), gr.outputs.Textbox(label="Confidence"), gr.outputs.Textbox(label="Source"), gr.outputs.Textbox(label="Url"), gr.outputs.Textbox(label="Request time")]
@@ -270,14 +335,12 @@ with demo:
             text_input = gr.Textbox(label="Input texts",lines=4)
             summarize_button = gr.Button("Summit")
             sm_language_radio = gr.Radio(["chinese", "english"],value="chinese",label="Language")
-            sm_model_type_radio = gr.Radio(["claude2","
-
-            sm_prompt_textbox = gr.Textbox(label="Prompt",lines=4, placeholder=EN_SUMMARIZE_PROMPT_TEMPLATE)
+            sm_model_type_radio = gr.Radio(["claude2","chatGLM2"],value="chatGLM2",label="Model type")
+            sm_prompt_textbox = gr.Textbox(label="Prompt",lines=4, placeholder=chinses_summarize_prompt)
         with gr.Column():
             text_output = gr.Textbox()
 
-
-    qa_button.click(get_answer, inputs=[qa_task_radio,query_textbox,session_id_textbox,qa_language_radio,qa_prompt_textbox,qa_search_engine_radio,qa_index_textbox,qa_top_k_slider,score_type_checklist], outputs=qa_output)
+    qa_button.click(get_answer, inputs=[qa_task_radio,query_textbox,session_id_textbox,qa_language_radio,qa_model_type_radio,qa_prompt_textbox,qa_search_engine_radio,qa_index_textbox,qa_top_k_slider,qa_temperature_slider,score_type_checklist], outputs=qa_output)
     summarize_button.click(get_summarize, inputs=[text_input,sm_language_radio,sm_model_type_radio,sm_prompt_textbox], outputs=text_output)
 
 demo.launch()
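A final wiring note: Gradio passes the `inputs=` list to the callback positionally, so the two new components (`qa_model_type_radio`, `qa_temperature_slider`) must sit at the same positions as `model_type` and `temperature` in `get_answer`'s signature, as they do in the updated click call. A quick way to double-check the expected order (signature copied from this commit; the body is elided):

```python
import inspect

def get_answer(task_type, question, session_id, language, model_type, prompt,
               search_engine, index, top_k, temperature, score_type_checklist):
    ...

# The inputs= list in qa_button.click(...) must follow this exact order:
print(list(inspect.signature(get_answer).parameters))
```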