vansin committed on
Commit
ee163cc
1 Parent(s): 65681d8

feat: update

Browse files
Files changed (2) hide show
  1. app.py +145 -145
  2. frontend/React/vite.config.ts +1 -1
app.py CHANGED
@@ -5,7 +5,7 @@ import sys
5
 
6
  # # os.system("python -m mindsearch.app --lang en --model_format internlm_server")
7
 
8
-
9
 
10
 
11
  # from flask import Flask, send_from_directory
@@ -28,151 +28,151 @@ import sys
28
  # app.run(debug=False, port=7860, host="0.0.0.0")
29
 
30
 
31
- import json
32
-
33
- import gradio as gr
34
- import requests
35
- from lagent.schema import AgentStatusCode
36
-
37
- PLANNER_HISTORY = []
38
- SEARCHER_HISTORY = []
39
-
40
-
41
- def rst_mem(history_planner: list, history_searcher: list):
42
- '''
43
- Reset the chatbot memory.
44
- '''
45
- history_planner = []
46
- history_searcher = []
47
- if PLANNER_HISTORY:
48
- PLANNER_HISTORY.clear()
49
- return history_planner, history_searcher
50
-
51
-
52
- def format_response(gr_history, agent_return):
53
- if agent_return['state'] in [
54
- AgentStatusCode.STREAM_ING, AgentStatusCode.ANSWER_ING
55
- ]:
56
- gr_history[-1][1] = agent_return['response']
57
- elif agent_return['state'] == AgentStatusCode.PLUGIN_START:
58
- thought = gr_history[-1][1].split('```')[0]
59
- if agent_return['response'].startswith('```'):
60
- gr_history[-1][1] = thought + '\n' + agent_return['response']
61
- elif agent_return['state'] == AgentStatusCode.PLUGIN_END:
62
- thought = gr_history[-1][1].split('```')[0]
63
- if isinstance(agent_return['response'], dict):
64
- gr_history[-1][
65
- 1] = thought + '\n' + f'```json\n{json.dumps(agent_return["response"], ensure_ascii=False, indent=4)}\n```' # noqa: E501
66
- elif agent_return['state'] == AgentStatusCode.PLUGIN_RETURN:
67
- assert agent_return['inner_steps'][-1]['role'] == 'environment'
68
- item = agent_return['inner_steps'][-1]
69
- gr_history.append([
70
- None,
71
- f"```json\n{json.dumps(item['content'], ensure_ascii=False, indent=4)}\n```"
72
- ])
73
- gr_history.append([None, ''])
74
- return
75
-
76
-
77
- def predict(history_planner, history_searcher):
78
-
79
- def streaming(raw_response):
80
- for chunk in raw_response.iter_lines(chunk_size=8192,
81
- decode_unicode=False,
82
- delimiter=b'\n'):
83
- if chunk:
84
- decoded = chunk.decode('utf-8')
85
- if decoded == '\r':
86
- continue
87
- if decoded[:6] == 'data: ':
88
- decoded = decoded[6:]
89
- elif decoded.startswith(': ping - '):
90
- continue
91
- response = json.loads(decoded)
92
- yield (response['response'], response['current_node'])
93
-
94
- global PLANNER_HISTORY
95
- PLANNER_HISTORY.append(dict(role='user', content=history_planner[-1][0]))
96
- new_search_turn = True
97
-
98
- url = 'http://localhost:8002/solve'
99
- headers = {'Content-Type': 'application/json'}
100
- data = {'inputs': PLANNER_HISTORY}
101
- raw_response = requests.post(url,
102
- headers=headers,
103
- data=json.dumps(data),
104
- timeout=20,
105
- stream=True)
106
-
107
- for resp in streaming(raw_response):
108
- agent_return, node_name = resp
109
- if node_name:
110
- if node_name in ['root', 'response']:
111
- continue
112
- agent_return = agent_return['nodes'][node_name]['detail']
113
- if new_search_turn:
114
- history_searcher.append([agent_return['content'], ''])
115
- new_search_turn = False
116
- format_response(history_searcher, agent_return)
117
- if agent_return['state'] == AgentStatusCode.END:
118
- new_search_turn = True
119
- yield history_planner, history_searcher
120
- else:
121
- new_search_turn = True
122
- format_response(history_planner, agent_return)
123
- if agent_return['state'] == AgentStatusCode.END:
124
- PLANNER_HISTORY = agent_return['inner_steps']
125
- yield history_planner, history_searcher
126
- return history_planner, history_searcher
127
-
128
-
129
- with gr.Blocks() as demo:
130
- gr.HTML("""<h1 align="center">WebAgent Gradio Simple Demo</h1>""")
131
- with gr.Row():
132
- with gr.Column(scale=10):
133
- with gr.Row():
134
- with gr.Column():
135
- planner = gr.Chatbot(label='planner',
136
- height=700,
137
- show_label=True,
138
- show_copy_button=True,
139
- bubble_full_width=False,
140
- render_markdown=True)
141
- with gr.Column():
142
- searcher = gr.Chatbot(label='searcher',
143
- height=700,
144
- show_label=True,
145
- show_copy_button=True,
146
- bubble_full_width=False,
147
- render_markdown=True)
148
- with gr.Row():
149
- user_input = gr.Textbox(show_label=False,
150
- placeholder='inputs...',
151
- lines=5,
152
- container=False)
153
- with gr.Row():
154
- with gr.Column(scale=2):
155
- submitBtn = gr.Button('Submit')
156
- with gr.Column(scale=1, min_width=20):
157
- emptyBtn = gr.Button('Clear History')
158
-
159
- def user(query, history):
160
- return '', history + [[query, '']]
161
-
162
- submitBtn.click(user, [user_input, planner], [user_input, planner],
163
- queue=False).then(predict, [planner, searcher],
164
- [planner, searcher])
165
- emptyBtn.click(rst_mem, [planner, searcher], [planner, searcher],
166
- queue=False)
167
-
168
- # subprocess.Popen(["python", "-m", "mindsearch.app", "--lang", "en", "--model_format", "internlm_server"], shell=True, stdout=sys.stdout, stderr=sys.stderr)
169
 
170
  os.system("python -m mindsearch.app --lang en --model_format internlm_server &")
171
 
172
- demo.queue()
173
- demo.launch(server_name='0.0.0.0',
174
- server_port=7860,
175
- inbrowser=True,
176
- share=True)
177
 
178
- pass
 
5
 
6
  # # os.system("python -m mindsearch.app --lang en --model_format internlm_server")
7
 
8
+ os.system("apt install nodejs npm && cd frontend/React && npm install && npm run start")
9
 
10
 
11
  # from flask import Flask, send_from_directory
 
28
  # app.run(debug=False, port=7860, host="0.0.0.0")
29
 
30
 
31
+ # import json
32
+
33
+ # import gradio as gr
34
+ # import requests
35
+ # from lagent.schema import AgentStatusCode
36
+
37
+ # PLANNER_HISTORY = []
38
+ # SEARCHER_HISTORY = []
39
+
40
+
41
+ # def rst_mem(history_planner: list, history_searcher: list):
42
+ # '''
43
+ # Reset the chatbot memory.
44
+ # '''
45
+ # history_planner = []
46
+ # history_searcher = []
47
+ # if PLANNER_HISTORY:
48
+ # PLANNER_HISTORY.clear()
49
+ # return history_planner, history_searcher
50
+
51
+
52
+ # def format_response(gr_history, agent_return):
53
+ # if agent_return['state'] in [
54
+ # AgentStatusCode.STREAM_ING, AgentStatusCode.ANSWER_ING
55
+ # ]:
56
+ # gr_history[-1][1] = agent_return['response']
57
+ # elif agent_return['state'] == AgentStatusCode.PLUGIN_START:
58
+ # thought = gr_history[-1][1].split('```')[0]
59
+ # if agent_return['response'].startswith('```'):
60
+ # gr_history[-1][1] = thought + '\n' + agent_return['response']
61
+ # elif agent_return['state'] == AgentStatusCode.PLUGIN_END:
62
+ # thought = gr_history[-1][1].split('```')[0]
63
+ # if isinstance(agent_return['response'], dict):
64
+ # gr_history[-1][
65
+ # 1] = thought + '\n' + f'```json\n{json.dumps(agent_return["response"], ensure_ascii=False, indent=4)}\n```' # noqa: E501
66
+ # elif agent_return['state'] == AgentStatusCode.PLUGIN_RETURN:
67
+ # assert agent_return['inner_steps'][-1]['role'] == 'environment'
68
+ # item = agent_return['inner_steps'][-1]
69
+ # gr_history.append([
70
+ # None,
71
+ # f"```json\n{json.dumps(item['content'], ensure_ascii=False, indent=4)}\n```"
72
+ # ])
73
+ # gr_history.append([None, ''])
74
+ # return
75
+
76
+
77
+ # def predict(history_planner, history_searcher):
78
+
79
+ # def streaming(raw_response):
80
+ # for chunk in raw_response.iter_lines(chunk_size=8192,
81
+ # decode_unicode=False,
82
+ # delimiter=b'\n'):
83
+ # if chunk:
84
+ # decoded = chunk.decode('utf-8')
85
+ # if decoded == '\r':
86
+ # continue
87
+ # if decoded[:6] == 'data: ':
88
+ # decoded = decoded[6:]
89
+ # elif decoded.startswith(': ping - '):
90
+ # continue
91
+ # response = json.loads(decoded)
92
+ # yield (response['response'], response['current_node'])
93
+
94
+ # global PLANNER_HISTORY
95
+ # PLANNER_HISTORY.append(dict(role='user', content=history_planner[-1][0]))
96
+ # new_search_turn = True
97
+
98
+ # url = 'http://localhost:8002/solve'
99
+ # headers = {'Content-Type': 'application/json'}
100
+ # data = {'inputs': PLANNER_HISTORY}
101
+ # raw_response = requests.post(url,
102
+ # headers=headers,
103
+ # data=json.dumps(data),
104
+ # timeout=20,
105
+ # stream=True)
106
+
107
+ # for resp in streaming(raw_response):
108
+ # agent_return, node_name = resp
109
+ # if node_name:
110
+ # if node_name in ['root', 'response']:
111
+ # continue
112
+ # agent_return = agent_return['nodes'][node_name]['detail']
113
+ # if new_search_turn:
114
+ # history_searcher.append([agent_return['content'], ''])
115
+ # new_search_turn = False
116
+ # format_response(history_searcher, agent_return)
117
+ # if agent_return['state'] == AgentStatusCode.END:
118
+ # new_search_turn = True
119
+ # yield history_planner, history_searcher
120
+ # else:
121
+ # new_search_turn = True
122
+ # format_response(history_planner, agent_return)
123
+ # if agent_return['state'] == AgentStatusCode.END:
124
+ # PLANNER_HISTORY = agent_return['inner_steps']
125
+ # yield history_planner, history_searcher
126
+ # return history_planner, history_searcher
127
+
128
+
129
+ # with gr.Blocks() as demo:
130
+ # gr.HTML("""<h1 align="center">WebAgent Gradio Simple Demo</h1>""")
131
+ # with gr.Row():
132
+ # with gr.Column(scale=10):
133
+ # with gr.Row():
134
+ # with gr.Column():
135
+ # planner = gr.Chatbot(label='planner',
136
+ # height=700,
137
+ # show_label=True,
138
+ # show_copy_button=True,
139
+ # bubble_full_width=False,
140
+ # render_markdown=True)
141
+ # with gr.Column():
142
+ # searcher = gr.Chatbot(label='searcher',
143
+ # height=700,
144
+ # show_label=True,
145
+ # show_copy_button=True,
146
+ # bubble_full_width=False,
147
+ # render_markdown=True)
148
+ # with gr.Row():
149
+ # user_input = gr.Textbox(show_label=False,
150
+ # placeholder='inputs...',
151
+ # lines=5,
152
+ # container=False)
153
+ # with gr.Row():
154
+ # with gr.Column(scale=2):
155
+ # submitBtn = gr.Button('Submit')
156
+ # with gr.Column(scale=1, min_width=20):
157
+ # emptyBtn = gr.Button('Clear History')
158
+
159
+ # def user(query, history):
160
+ # return '', history + [[query, '']]
161
+
162
+ # submitBtn.click(user, [user_input, planner], [user_input, planner],
163
+ # queue=False).then(predict, [planner, searcher],
164
+ # [planner, searcher])
165
+ # emptyBtn.click(rst_mem, [planner, searcher], [planner, searcher],
166
+ # queue=False)
167
+
168
+ # # subprocess.Popen(["python", "-m", "mindsearch.app", "--lang", "en", "--model_format", "internlm_server"], shell=True, stdout=sys.stdout, stderr=sys.stderr)
169
 
170
  os.system("python -m mindsearch.app --lang en --model_format internlm_server &")
171
 
172
+ # demo.queue()
173
+ # demo.launch(server_name='0.0.0.0',
174
+ # server_port=7860,
175
+ # inbrowser=True,
176
+ # share=True)
177
 
178
+ # pass
frontend/React/vite.config.ts CHANGED
@@ -51,7 +51,7 @@ export default defineConfig({
51
  },
52
  },
53
  server: {
54
- port: 8080,
55
  proxy: {
56
  // "/solve": {
57
  // target: "...",
 
51
  },
52
  },
53
  server: {
54
+ port: 7860,
55
  proxy: {
56
  // "/solve": {
57
  // target: "...",