giannantonio15 committed
Commit 724e446
Parent(s): 5db5929
Update app.py
app.py CHANGED
@@ -112,24 +112,24 @@ css = """
 
 """
 
-user_message = ""
-current_chat_mode = ""
-current_response_mode = "compact"
-current_collection = ""
-file_path = ""
-num_responses = 0
-current_chat_mode = "STANDARD"
-retriever = None
-token_count_bandi = 0
-token_count_bandi_sistema_puglia = 0
-chat_engine_bandi = None
-chat_engine_bandi_sistema_puglia = None
-memory_bandi = None
-memory_bandi_sistema_puglia = None
-stream_response = None
-divDocumenti = None
-
 def main():
+    user_message = ""
+    current_chat_mode = ""
+    current_response_mode = "compact"
+    current_collection = ""
+    file_path = ""
+    num_responses = 0
+    current_chat_mode = "STANDARD"
+    retriever = None
+    token_count_bandi = 0
+    token_count_bandi_sistema_puglia = 0
+    chat_engine_bandi = None
+    chat_engine_bandi_sistema_puglia = None
+    memory_bandi = None
+    memory_bandi_sistema_puglia = None
+    stream_response = None
+    divDocumenti = None
+
     setGPU()
     llm = setLLM()
     Settings.llm = llm
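The substantive change in this first hunk is scoping: sixteen module-level globals become locals of main(). Two things follow. First, current_chat_mode is assigned twice in the block ("" and then "STANDARD"), so the first assignment is dead. Second, since the Gradio callbacks are defined inside main() and wired to these names at the bottom of the file, a callback that only reads a name can rely on the closure, but a callback that reassigns one (stream_response, retriever, the token counters) now needs a nonlocal declaration where a module global needed global. A minimal sketch of that consequence, with simplified hypothetical names rather than the app's real code:

```python
# Minimal sketch of the scoping change; names are simplified and the
# callback body is a placeholder, not the app's real implementation.
def main():
    stream_response = None  # local to main() after this commit

    def bot(message):
        nonlocal stream_response  # a module-level global would have needed `global`
        stream_response = f"echo: {message}"
        return stream_response

    print(bot("ciao"))

main()
```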
@@ -419,6 +419,7 @@ def main():
     print(chat_engine_bandi.chat_history)
     print(memory_bandi)
     stream_response = None
+    print(user_message)
     stream_response = chat_engine_bandi.stream_chat(user_message)
     print("risposta con chat engine")
     responseHTML = ""
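For context on the streaming call in this hunk: a LlamaIndex chat engine's stream_chat() returns a streaming response whose response_gen yields tokens as they are generated. A self-contained sketch under assumed setup; the data directory, chat mode, and question are placeholders, not the app's configuration:

```python
# Hedged sketch of LlamaIndex chat-engine streaming; the index built here
# stands in for the app's chat_engine_bandi.
from llama_index.core import SimpleDirectoryReader, VectorStoreIndex

documents = SimpleDirectoryReader("data").load_data()
index = VectorStoreIndex.from_documents(documents)
chat_engine = index.as_chat_engine(chat_mode="condense_plus_context")

stream_response = chat_engine.stream_chat("Which funding calls are open?")
for token in stream_response.response_gen:  # tokens arrive incrementally
    print(token, end="", flush=True)
```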
@@ -444,6 +445,7 @@ def main():
     print(chat_engine_bandi_sistema_puglia.chat_history)
     print(memory_bandi_sistema_puglia)
     stream_response = None
+    print(user_message)
     stream_response = chat_engine_bandi_sistema_puglia.stream_chat(user_message)
     print("risposta con chat engine")
     responseHTML = ""
@@ -469,6 +471,7 @@ def main():
     query_engine = None
     query_engine = RetrieverQueryEngine(retriever=retriever, response_synthesizer=response_synthesizer)
     stream_response = None
+    print(user_message)
     stream_response = query_engine.query(user_message)
     print("risposta con query engine")
     responseHTML = ""
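The query-engine branch builds a RetrieverQueryEngine directly from a retriever and a response synthesizer. For query() to return a streamable response as the surrounding code expects, the synthesizer must be created with streaming=True. A self-contained sketch; the data directory, top-k, and question are assumptions, while response_mode="compact" mirrors current_response_mode in app.py:

```python
# Hedged sketch of the RetrieverQueryEngine path; everything except
# response_mode="compact" is placeholder setup, not the app's own.
from llama_index.core import (
    SimpleDirectoryReader,
    VectorStoreIndex,
    get_response_synthesizer,
)
from llama_index.core.query_engine import RetrieverQueryEngine

index = VectorStoreIndex.from_documents(SimpleDirectoryReader("data").load_data())
retriever = index.as_retriever(similarity_top_k=3)

response_synthesizer = get_response_synthesizer(response_mode="compact", streaming=True)
query_engine = RetrieverQueryEngine(retriever=retriever, response_synthesizer=response_synthesizer)

stream_response = query_engine.query("Which funding calls are open?")
for token in stream_response.response_gen:
    print(token, end="", flush=True)
```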
@@ -486,6 +489,7 @@ def main():
     query_engine = None
     query_engine = RetrieverQueryEngine(retriever=retriever, response_synthesizer=response_synthesizer)
     stream_response = None
+    print(user_message)
     stream_response = query_engine.query(user_message)
     print("risposta con query engine")
     responseHTML = ""
@@ -504,7 +508,8 @@ def main():
     torch.cuda.empty_cache()
     torch.cuda.reset_max_memory_allocated()
     torch.cuda.reset_max_memory_cached()
-
+
+
 
     msg.submit(user, [msg, chatbot], [msg, chatbot], queue=False).then(
         bot, chatbot, [chatbot, divDocumenti]
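Two notes on this last hunk. First, on current PyTorch both reset_max_memory_allocated() and reset_max_memory_cached() are deprecated aliases of reset_peak_memory_stats(), so the cleanup can be written as:

```python
# GPU cleanup equivalent to the hunk above on current PyTorch.
import torch

if torch.cuda.is_available():
    torch.cuda.empty_cache()              # release unused cached memory back to the device
    torch.cuda.reset_peak_memory_stats()  # replaces both deprecated reset_max_memory_* calls
```

Second, the msg.submit(...).then(...) chain is the standard Gradio pattern for a two-step chat update: user() echoes the message into the history without queueing, then bot() fills in the answer. A runnable sketch with placeholder callback bodies; the real user and bot in app.py stream tokens and render the documents panel:

```python
# Hedged sketch of the Gradio wiring shown above; callback bodies are
# placeholders, not the app's real implementations.
import gradio as gr

def user(message, history):
    # Append the user turn immediately and clear the textbox.
    return "", history + [[message, None]]

def bot(history):
    # The real app streams tokens here and builds an HTML list of sources.
    history[-1][1] = "risposta..."
    return history, "<div>documenti</div>"

with gr.Blocks() as demo:
    chatbot = gr.Chatbot()
    divDocumenti = gr.HTML()
    msg = gr.Textbox()
    msg.submit(user, [msg, chatbot], [msg, chatbot], queue=False).then(
        bot, chatbot, [chatbot, divDocumenti]
    )

demo.launch()
```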