maerkeov commited on
Commit
94f4415
1 Parent(s): 15b2177

Create app.py

Browse files
Files changed (1) hide show
  1. app.py +154 -0
app.py ADDED
@@ -0,0 +1,154 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
import json
import os

import gradio as gr
import requests
from lagent.schema import AgentStatusCode

# Launch the MindSearch backend API server in the background at import time;
# the Gradio handlers below talk to it over HTTP (see the hard-coded
# http://localhost:8002/solve URL in `predict`).
# NOTE(review): `os.system(... &)` gives no handle to the child process and
# no failure detection — consider `subprocess.Popen` so startup errors surface.
os.system("python -m mindsearch.app --lang cn --model_format internlm_silicon &")

# Cross-request conversation state. PLANNER_HISTORY holds the planner-agent
# message dicts posted to the backend; SEARCHER_HISTORY is declared for
# symmetry but is not read or written anywhere else in this file.
PLANNER_HISTORY = []
SEARCHER_HISTORY = []
12
+
13
+
14
def rst_mem(history_planner: list, history_searcher: list):
    """Reset all chatbot memory.

    Clears both module-level history buffers and returns fresh, empty chat
    histories for the planner and searcher widgets.

    Args:
        history_planner: current planner chat history (discarded).
        history_searcher: current searcher chat history (discarded).

    Returns:
        tuple[list, list]: two empty lists — the new planner and searcher
        histories.
    """
    # `.clear()` on an empty list is a no-op, so no truthiness guard needed.
    PLANNER_HISTORY.clear()
    # Bug fix: the original cleared only PLANNER_HISTORY, leaving the
    # searcher buffer stale after a "Clear History" click.
    SEARCHER_HISTORY.clear()
    return [], []
23
+
24
+
25
def format_response(gr_history, agent_return):
    """Fold one streamed agent status update into the chat history, in place.

    `gr_history` is a list of [user, assistant] message pairs; depending on
    `agent_return['state']` the last pair's assistant cell is rewritten or
    new pairs are appended. Returns None.
    """
    state = agent_return['state']
    if state in (AgentStatusCode.STREAM_ING, AgentStatusCode.ANSWER_ING):
        # Token streaming: overwrite the assistant cell with the full text.
        gr_history[-1][1] = agent_return['response']
    elif state == AgentStatusCode.PLUGIN_START:
        # Keep the free-text thought preceding the first code fence, then
        # append the opening of the tool-call block.
        thought = gr_history[-1][1].split('```')[0]
        if agent_return['response'].startswith('```'):
            gr_history[-1][1] = thought + '\n' + agent_return['response']
    elif state == AgentStatusCode.PLUGIN_END:
        thought = gr_history[-1][1].split('```')[0]
        if isinstance(agent_return['response'], dict):
            dumped = json.dumps(agent_return['response'],
                                ensure_ascii=False,
                                indent=4)
            gr_history[-1][1] = thought + '\n' + f'```json\n{dumped}\n```'
    elif state == AgentStatusCode.PLUGIN_RETURN:
        last_step = agent_return['inner_steps'][-1]
        assert last_step['role'] == 'environment'
        dumped = json.dumps(last_step['content'],
                            ensure_ascii=False,
                            indent=4)
        # Show the tool observation as its own message, then open a fresh
        # empty assistant cell for the next streamed chunk.
        gr_history.append([None, f'```json\n{dumped}\n```'])
        gr_history.append([None, ''])
    return
48
+
49
+
50
def predict(history_planner, history_searcher):
    """Stream one planner/searcher turn from the MindSearch backend.

    Generator used as a Gradio event handler: posts the accumulated
    PLANNER_HISTORY to the local backend and yields updated
    (history_planner, history_searcher) pairs as server-sent events arrive,
    so both chatbots re-render incrementally.
    """

    def streaming(raw_response):
        # Parse the server-sent-event stream one line at a time.
        for chunk in raw_response.iter_lines(chunk_size=8192,
                                             decode_unicode=False,
                                             delimiter=b'\n'):
            if chunk:
                decoded = chunk.decode('utf-8')
                if decoded == '\r':
                    continue
                # SSE payload lines carry a 'data: ' prefix; strip it.
                if decoded[:6] == 'data: ':
                    decoded = decoded[6:]
                elif decoded.startswith(': ping - '):
                    # SSE keep-alive comment line; nothing to parse.
                    continue
                response = json.loads(decoded)
                yield (response['response'], response['current_node'])

    global PLANNER_HISTORY
    # The newest user message is the first cell of the last chat row
    # (appended by the `user` callback before this handler runs).
    PLANNER_HISTORY.append(dict(role='user', content=history_planner[-1][0]))
    new_search_turn = True

    # Backend started at import time via os.system above.
    url = 'http://localhost:8002/solve'
    headers = {'Content-Type': 'application/json'}
    data = {'inputs': PLANNER_HISTORY}
    # stream=True keeps the response open so `streaming` can consume the
    # event stream lazily; timeout=20 bounds connect/read waits, not the
    # total stream duration.
    raw_response = requests.post(url,
                                 headers=headers,
                                 data=json.dumps(data),
                                 timeout=20,
                                 stream=True)

    for resp in streaming(raw_response):
        agent_return, node_name = resp
        if node_name:
            # Event belongs to a named node in the agent graph.
            if node_name in ['root', 'response']:
                continue
            agent_return = agent_return['nodes'][node_name]['detail']
            if new_search_turn:
                # Open a new searcher chat row with the sub-query text.
                history_searcher.append([agent_return['content'], ''])
                new_search_turn = False
            format_response(history_searcher, agent_return)
            if agent_return['state'] == AgentStatusCode.END:
                new_search_turn = True
            yield history_planner, history_searcher
        else:
            # Planner-level event (no node name).
            new_search_turn = True
            format_response(history_planner, agent_return)
            if agent_return['state'] == AgentStatusCode.END:
                # Adopt the backend's canonical message log as the new
                # planner history for the next turn.
                PLANNER_HISTORY = agent_return['inner_steps']
            yield history_planner, history_searcher
    return history_planner, history_searcher
100
+
101
+
102
with gr.Blocks() as demo:
    # --- Page header: title, blurb, and project links ---
    gr.HTML("""<h1 align="center">MindSearch Gradio Demo</h1>""")
    gr.HTML("""<p style="text-align: center; font-family: Arial, sans-serif;">MindSearch is an open-source AI Search Engine Framework with Perplexity.ai Pro performance. You can deploy your own Perplexity.ai-style search engine using either closed-source LLMs (GPT, Claude) or open-source LLMs (InternLM2.5-7b-chat).</p>""")
    gr.HTML("""
    <div style="text-align: center; font-size: 16px;">
        <a href="https://github.com/InternLM/MindSearch" style="margin-right: 15px; text-decoration: none; color: #4A90E2;">🔗 GitHub</a>
        <a href="https://arxiv.org/abs/2407.20183" style="margin-right: 15px; text-decoration: none; color: #4A90E2;">📄 Arxiv</a>
        <a href="https://huggingface.co/papers/2407.20183" style="margin-right: 15px; text-decoration: none; color: #4A90E2;">📚 Hugging Face Papers</a>
        <a href="https://huggingface.co/spaces/internlm/MindSearch" style="text-decoration: none; color: #4A90E2;">🤗 Hugging Face Demo</a>
    </div>
    """)
    # --- Two side-by-side chatbots: planner (left) and searcher (right) ---
    with gr.Row():
        with gr.Column(scale=10):
            with gr.Row():
                with gr.Column():
                    planner = gr.Chatbot(label='planner',
                                         height=700,
                                         show_label=True,
                                         show_copy_button=True,
                                         bubble_full_width=False,
                                         render_markdown=True)
                with gr.Column():
                    searcher = gr.Chatbot(label='searcher',
                                          height=700,
                                          show_label=True,
                                          show_copy_button=True,
                                          bubble_full_width=False,
                                          render_markdown=True)
            # Query input (placeholder is an example query in Chinese).
            with gr.Row():
                user_input = gr.Textbox(show_label=False,
                                        placeholder='帮我搜索一下 InternLM 开源体系',
                                        lines=5,
                                        container=False)
            with gr.Row():
                with gr.Column(scale=2):
                    submitBtn = gr.Button('Submit')
                with gr.Column(scale=1, min_width=20):
                    emptyBtn = gr.Button('Clear History')

    def user(query, history):
        # Append the textbox content to the planner history as a new
        # [user, assistant] row with an empty assistant cell, and clear
        # the textbox (first return value).
        return '', history + [[query, '']]

    # Submit: first record the user message (queue=False for immediate UI
    # feedback), then stream model output into both chatbots via `predict`.
    submitBtn.click(user, [user_input, planner], [user_input, planner],
                    queue=False).then(predict, [planner, searcher],
                                      [planner, searcher])
    # Clear History: reset both chatbots and the module-level buffers.
    emptyBtn.click(rst_mem, [planner, searcher], [planner, searcher],
                   queue=False)
149
+
150
# Enable the request queue (required for the generator handler `predict` to
# stream partial updates), then start the web server on all interfaces.
# NOTE(review): share=True requests a public gradio.live tunnel and
# inbrowser=True tries to open a local browser — both are usually unwanted
# no-ops on a headless/hosted deployment; confirm intent.
demo.queue()
demo.launch(server_name='0.0.0.0',
            server_port=7860,
            inbrowser=True,
            share=True)