yitianlian committed
Commit d50286d
1 Parent(s): a1215f4
revise the default value of the demo
- .history/app_20240222100556.py +223 -0
- .history/app_20240222100644.py +224 -0
- .history/app_20240222100645.py +223 -0
- .history/app_20240222100647.py +222 -0
- .history/app_20240222100651.py +222 -0
- .history/app_20240222100652.py +222 -0
- app.py +4 -3
.history/app_20240222100556.py
ADDED
@@ -0,0 +1,223 @@
import copy
import json
import os
import sys

import gradio as gr
import openai
from camel.agents import ChatAgent
from camel.configs import ChatGPTConfig, OpenSourceConfig
from camel.messages import BaseMessage
from camel.types import ModelType, RoleType

from exp_model_class import ExtendedModelType

open_model_path_dict = {
    ModelType.VICUNA: "lmsys/vicuna-7b-v1.3",
    ModelType.LLAMA_2: "meta-llama/Llama-2-7b-chat-hf",
}
front = "you are a person not an ai model."


def str_mes(content):
    return BaseMessage(
        role_name="player",
        role_type=RoleType.USER,
        meta_dict={},
        content=content,
    )


def gpt3_res(prompt, model_name="text-davinci-003", temperature=1):
    response = openai.completions.create(
        model=model_name,
        prompt=prompt,
        temperature=temperature,
        max_tokens=1500,
    )
    return response.choices[0].text.strip()


def get_res_for_visible(
    role,
    first_message,
    game_type,
    api_key,
    model_type=ExtendedModelType.GPT_4,
    extra_prompt="",
    temperature=1.0,
    player_demographic=None,
):
    content = ""
    if api_key is not None or api_key != "":
        openai.api_key = api_key
    else:
        openai.api_key = os.getenv("OPENAI_API_KEY")
    extra_prompt += "Your answer needs to include the content about your BELIEF, DESIRE and INTENTION."
    if "game" in game_type.lower():
        extra_prompt += "You must end with 'Finally, I will give ___ dollars ' (numbers are required in the spaces)."
    else:
        extra_prompt += "You must end with 'Finally, I will choose ___' ('Trust' or 'not Trust' are required in the spaces)."
    extra_prompt += front

    role = str_mes(role + extra_prompt)
    if player_demographic is not None:
        first_message = first_message.replace(
            "player", player_demographic+" player")
    first_message = str_mes(first_message)
    if model_type in [
        ExtendedModelType.INSTRUCT_GPT,
        ExtendedModelType.GPT_3_5_TURBO_INSTRUCT,
    ]:
        message = role.content + first_message.content + extra_prompt
        final_res = str_mes(gpt3_res(message, model_type.value, temperature))
    else:
        role = str_mes(role.content + extra_prompt)
        model_config = ChatGPTConfig(temperature=temperature)
        if model_type in [
            ModelType.VICUNA,
            ModelType.LLAMA_2,
        ]:
            open_source_config = dict(
                model_type=model_type,
                model_config=OpenSourceConfig(
                    model_path=open_model_path_dict[model_type],
                    server_url="http://localhost:8000/v1",
                    api_params=ChatGPTConfig(temperature=temperature),
                ),
            )
            agent = ChatAgent(
                role, output_language="English", **(open_source_config or {})
            )
        else:
            agent = ChatAgent(
                role,
                model_type=model_type,
                output_language="English",
                model_config=model_config,
            )
        final_all_res = agent.step(first_message)
        final_res = final_all_res.msg
    content += final_res.content

    return content


sys.path.append("../..")

file_path_character_info = 'prompt/character_2.json'
file_path_game_prompts = 'prompt/person_all_game_prompt.json'

with open(file_path_character_info, 'r') as file:
    character_info = json.load(file)

# Load game prompts
with open(file_path_game_prompts, 'r') as file:
    game_prompts = json.load(file)

# Extract character names and information
characters = [f'Trustor Persona {i}' for i in range(
    1, len(character_info) + 1)]
character_info = {f'Trustor Persona {i}': info for i, info in enumerate(
    character_info.values(), start=1)}

# Extract game names and prompts
game_prompts = {
    prompt[0]: prompt[-1] for i, prompt in enumerate(game_prompts.values(), start=1)}
games = list(game_prompts.keys())
print(games)

model_dict = {
    'gpt-3.5-turbo-0613': ExtendedModelType.GPT_3_5_TURBO_0613,
    'gpt-3.5-turbo-16k-0613': ExtendedModelType.GPT_3_5_TURBO_16K_0613,
    'gpt-4': ExtendedModelType.GPT_4,
    'text-davinci-003': ExtendedModelType.INSTRUCT_GPT,
    'gpt-3.5-turbo-instruct': ExtendedModelType.GPT_3_5_TURBO_INSTRUCT,
    # 'vicuna': ModelType.VICUNA,
    # 'llama-2': ModelType.LLAMA_2,
}
game_tree_images = {
    "Dictator_Game": "game_tree/dictator_game_game_tree.png",
    "Trust_Game": "game_tree/Trust_game_game_tree.png",
    "map_risky_dictator_problem": "game_tree/risky_dictator_game_game_tree.png",
    "map_trust_problem": "game_tree/map_trust_game_game_tree.png",
    "lottery_problem_people": "game_tree/lottery_people_game_tree.png",
    "lottery_problem_gamble": "game_tree/lottery_gamble_game_tree.png"
}

models = list(model_dict.keys())


def update_char_info(char):
    return character_info.get(char, "No information available.")


def update_game_prompt(game):
    return game_prompts.get(game, "No prompt available.")


def process_submission(character, game, api_key=None, model="gpt-3.5-turbo-0613", extra_prompt="", temperature=1.0, player_demographic=None,):
    if api_key is None or api_key == "":
        api_key = os.environ.get("OPENAI_API_KEY")
    else:
        os.environ["OPENAI_API_KEY"] = api_key
    return get_res_for_visible(character_info.get(character, ""), game_prompts.get(game, "No prompt available."), game, api_key, model_dict[model], extra_prompt, temperature, player_demographic)


def update_game_image(game_name):

    image_path = game_tree_images.get(game_name, None)

    return image_path


with gr.Blocks() as app:
    game_introduction = gr.Textbox(
        label="Instruction", value="""1. You should select the persona for the trustor and the type of game.\n
2. You need to fill in your OpenAI API Key.

2. If you fill in 'Extra Prompt for Trustor', this prompt will be the additional system prompt to the trustor.\n
3. You can fill in the trustee player's demographics, such as race or gender.\n
4. If you want reset the conversation, please refresh this page.""")
    with gr.Row():
        char_dropdown = gr.Dropdown(
            choices=characters, label="Select Trustor Persona", value=characters[0])
        game_dropdown = gr.Dropdown(
            choices=games, label="Select Game")
        char_info_display = gr.Textbox(
            label="Trustor Persona Info", value=character_info[characters[0]])
    with gr.Row():
        game_prompt_display = gr.Textbox(
            label="Game Prompt", value=game_prompts["Trust_Game"])
        game_image_display = gr.Image(
            label="Game Image")

    api_key_input = gr.Textbox(
        label="OpenAI API Key", placeholder="Enter your OpenAI API Key here")
    model_dropdown = gr.Dropdown(
        choices=models, label="Select Model", value=models[0])
    extra_prompt_input = gr.Textbox(
        label="Extra Prompt for Trustor", placeholder="Enter any additional prompt here (Optional)")
    temperature_slider = gr.Slider(
        minimum=0.0, maximum=1.0, step=0.01, label="Temperature", value=1.0)
    player_demographic_input = gr.Textbox(
        label="Trustee Player Demographic", placeholder="Enter trustee player demographic info here (Optional)")
    submit_button = gr.Button("Submit")
    result_display = gr.Textbox(label="Result")

    # Update the displayed information
    char_dropdown.change(
        update_char_info, inputs=char_dropdown, outputs=char_info_display)
    game_dropdown.change(update_game_prompt,
                         inputs=game_dropdown, outputs=game_prompt_display)
    game_dropdown.change(
        update_game_image, inputs=game_dropdown, outputs=game_image_display)

    submit_button.click(
        process_submission,
        inputs=[char_dropdown, game_dropdown, api_key_input, model_dropdown,
                extra_prompt_input, temperature_slider, player_demographic_input],
        outputs=result_display
    )

app.launch()
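For context on the file above: the demo wraps the trustor persona and the game prompt in CAMEL BaseMessage objects, hands them to a ChatAgent, and reads the reply from step(...).msg.content; process_submission simply routes the Gradio selections into that flow. The sketch below isolates this call pattern. It is a minimal illustration only, not part of the commit: it assumes camel-ai and this repo's exp_model_class are importable and that OPENAI_API_KEY is set, and the two prompt strings are made-up placeholders rather than entries from prompt/*.json.

# Minimal sketch of the ChatAgent flow used by get_res_for_visible above.
# Assumptions (not from the diff): camel-ai and exp_model_class are importable,
# OPENAI_API_KEY is set in the environment, and the prompts are placeholders.
from camel.agents import ChatAgent
from camel.configs import ChatGPTConfig
from camel.messages import BaseMessage
from camel.types import RoleType

from exp_model_class import ExtendedModelType


def make_message(content):
    # Same fields that str_mes() fills in the file above.
    return BaseMessage(
        role_name="player",
        role_type=RoleType.USER,
        meta_dict={},
        content=content,
    )


system_msg = make_message("You are the trustor in a trust game.")      # placeholder persona
user_msg = make_message("The trustee asks you to invest 10 dollars.")  # placeholder game prompt

agent = ChatAgent(
    system_msg,
    model_type=ExtendedModelType.GPT_3_5_TURBO_0613,
    output_language="English",
    model_config=ChatGPTConfig(temperature=1.0),
)
response = agent.step(user_msg)
print(response.msg.content)  # the trustor's reply, shown in the demo's Result box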
.history/app_20240222100644.py
ADDED
@@ -0,0 +1,224 @@
.history/app_20240222100645.py
ADDED
@@ -0,0 +1,223 @@
.history/app_20240222100647.py
ADDED
@@ -0,0 +1,222 @@
.history/app_20240222100651.py
ADDED
@@ -0,0 +1,222 @@
.history/app_20240222100652.py
ADDED
@@ -0,0 +1,222 @@
app.py
CHANGED
@@ -174,9 +174,10 @@ def update_game_image(game_name):
     with gr.Blocks() as app:
         game_introduction = gr.Textbox(
             label="Instruction", value="""1. You should select the persona for the trustor and the type of game.\n
-2.
-3.
-4.
+2. You need to fill in your OpenAI API Key.\n
+3. If you fill in 'Extra Prompt for Trustor', this prompt will be the additional system prompt to the trustor.\n
+4. You can fill in the trustee player's demographics, such as race or gender.\n
+5. If you want reset the conversation, please refresh this page.""")
     with gr.Row():
         char_dropdown = gr.Dropdown(
             choices=characters, label="Select Trustor Persona", value=characters[0])