He Bo committed • Commit cd2ec5a • 1 Parent(s): 8aa0a12 • updata
app.py
CHANGED
@@ -82,12 +82,12 @@ def get_answer(question,session_id,language,prompt,search_engine,index,top_k,tem
         url += '&language=english'
         url += ('&embedding_endpoint_name=pytorch-inference-all-minilm-l6-v2')
         url += ('&llm_embedding_name=pytorch-inference-vicuna-g5-2x')
-    elif language == "chinese-
+    elif language == "chinese-llm-v1":
         url += '&language=chinese'
         url += ('&embedding_endpoint_name=huggingface-inference-text2vec-base-chinese-v1')
         url += ('&llm_embedding_name=pytorch-inference-chatglm-v1')
 
-    elif language == "chinese-
+    elif language == "chinese-llm-v2":
         url += '&language=chinese'
         url += ('&embedding_endpoint_name=huggingface-inference-text2vec-base-chinese-v1')
         url += ('&llm_embedding_name=pytorch-inference-chatglm2-g5-2x')
@@ -164,7 +164,7 @@ def get_answer(question,session_id,language,prompt,search_engine,index,top_k,tem
     if 'query_doc_scores' in result.keys():
         query_doc_scores = list(result['query_doc_scores'])
         if len(query_doc_scores) > 0:
-            confidence += ("query_doc_scores:" + str(
+            confidence += ("query_doc_scores:" + str(query_doc_scores) + '\n')
 
     qa_relate_score = 0
     if 'qa_relate_score' in result.keys():
@@ -176,7 +176,7 @@ def get_answer(question,session_id,language,prompt,search_engine,index,top_k,tem
     if 'answer_relate_scores' in result.keys():
         answer_relate_scores = list(result['answer_relate_scores'])
         if len(answer_relate_scores) > 0:
-            confidence += ("answer_relate_scores:" + str(
+            confidence += ("answer_relate_scores:" + str(answer_relate_scores) + '\n')
 
     list_overlap_score = 0
     if 'list_overlap_score' in result.keys():
@@ -240,7 +240,7 @@ with demo:
         session_id_textbox = gr.Textbox(label="Session ID")
         qa_button = gr.Button("Summit")
 
-        qa_language_radio = gr.Radio(["chinese-
+        qa_language_radio = gr.Radio(["chinese-llm-v1","chinese-llm-v2", "english"],value="chinese-llm-v1",label="Language")
         # qa_llm_radio = gr.Radio(["p3-8x", "g4dn-8x"],value="p3-8x",label="Chinese llm instance")
         qa_prompt_textbox = gr.Textbox(label="Prompt( must include {context} and {question} )",placeholder=chinese_prompt,lines=2)
         qa_search_engine_radio = gr.Radio(["OpenSearch","Kendra"],value="OpenSearch",label="Search engine")
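The core of this commit is the mapping from the language option chosen in the Gradio radio to the embedding and LLM endpoint query parameters. Below is a minimal sketch of that mapping in isolation; the function name language_params is hypothetical and the base URL and the other get_answer arguments are not shown here, only the language/endpoint pairs visible in the diff are used.

    # Minimal sketch (not the full app.py): how the value selected in
    # qa_language_radio maps to the query parameters appended in this commit.
    # language_params is a hypothetical helper; only the endpoint names below
    # come from the diff itself.
    def language_params(language: str) -> str:
        if language == "english":
            return ('&language=english'
                    '&embedding_endpoint_name=pytorch-inference-all-minilm-l6-v2'
                    '&llm_embedding_name=pytorch-inference-vicuna-g5-2x')
        elif language == "chinese-llm-v1":
            return ('&language=chinese'
                    '&embedding_endpoint_name=huggingface-inference-text2vec-base-chinese-v1'
                    '&llm_embedding_name=pytorch-inference-chatglm-v1')
        elif language == "chinese-llm-v2":
            return ('&language=chinese'
                    '&embedding_endpoint_name=huggingface-inference-text2vec-base-chinese-v1'
                    '&llm_embedding_name=pytorch-inference-chatglm2-g5-2x')
        raise ValueError(f"unsupported language option: {language}")

    # Example: the renamed "chinese-llm-v2" option now routes to the chatglm2 endpoint.
    print(language_params("chinese-llm-v2"))

The remaining hunks are completions of the same change set: the confidence string now appends the full query_doc_scores and answer_relate_scores values followed by a newline, and the radio defaults to "chinese-llm-v1".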