Spaces:
Sleeping
Sleeping
Tuchuanhuhuhu
committed on
Commit
•
c87ba04
1
Parent(s):
883ed13
改进索引逻辑
Browse files
- llama_func.py +3 -4
llama_func.py
CHANGED
@@ -123,8 +123,8 @@ def chat_ai(
|
|
123 |
response = response
|
124 |
|
125 |
context.append({"role": "user", "content": question})
|
126 |
-
context.append({"role": "assistant", "content":
|
127 |
-
chatbot.append((question,
|
128 |
|
129 |
os.environ["OPENAI_API_KEY"] = ""
|
130 |
return context, chatbot, status_text
|
@@ -167,14 +167,13 @@ def ask_ai(
|
|
167 |
if response is not None:
|
168 |
logging.info(f"Response: {response}")
|
169 |
ret_text = response.response
|
170 |
-
ret_text += "\n----------\n"
|
171 |
nodes = []
|
172 |
for index, node in enumerate(response.source_nodes):
|
173 |
brief = node.source_text[:25].replace("\n", "")
|
174 |
nodes.append(
|
175 |
f"<details><summary>[{index+1}]\t{brief}...</summary><p>{node.source_text}</p></details>"
|
176 |
)
|
177 |
-
new_response = ret_text + "\n\n".join(nodes)
|
178 |
logging.info(
|
179 |
f"Response: {colorama.Fore.BLUE}{ret_text}{colorama.Style.RESET_ALL}"
|
180 |
)
|
|
|
123 |
response = response
|
124 |
|
125 |
context.append({"role": "user", "content": question})
|
126 |
+
context.append({"role": "assistant", "content": response})
|
127 |
+
chatbot.append((question, chatbot_display))
|
128 |
|
129 |
os.environ["OPENAI_API_KEY"] = ""
|
130 |
return context, chatbot, status_text
|
|
|
167 |
if response is not None:
|
168 |
logging.info(f"Response: {response}")
|
169 |
ret_text = response.response
|
|
|
170 |
nodes = []
|
171 |
for index, node in enumerate(response.source_nodes):
|
172 |
brief = node.source_text[:25].replace("\n", "")
|
173 |
nodes.append(
|
174 |
f"<details><summary>[{index+1}]\t{brief}...</summary><p>{node.source_text}</p></details>"
|
175 |
)
|
176 |
+
new_response = ret_text + "\n----------\n" + "\n\n".join(nodes)
|
177 |
logging.info(
|
178 |
f"Response: {colorama.Fore.BLUE}{ret_text}{colorama.Style.RESET_ALL}"
|
179 |
)
|