Update app.py
app.py
CHANGED
@@ -11,8 +11,11 @@ from sklearn.metrics import precision_score, recall_score, f1_score, accuracy_score
 import requests
 from datasets import load_dataset
 import os
-from logging
-
+from logging import FileHandler
+from __future__ import annotations
+from typing import Iterable
+from gradio.themes.base import Base
+from gradio.themes.utils import colors, fonts, sizes
 
 
 # Ensure the log files exist
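A note on the new import block: Python only accepts __future__ imports before any other statement in a module (only a docstring or comments may come first), so from __future__ import annotations placed after import os raises a SyntaxError as soon as the file is loaded. A minimal reordering sketch, assuming the module has no docstring above these lines:

from __future__ import annotations  # must precede every other import

import os

import requests
from datasets import load_dataset
from logging import FileHandler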
@@ -20,10 +23,10 @@ log_file_path = 'chat_log.log'
 debug_log_file_path = 'debug.log'
 if not os.path.exists(log_file_path):
     with open(log_file_path, 'w') as f:
-        f.write("")
+        f.write(" ")
 if not os.path.exists(debug_log_file_path):
     with open(debug_log_file_path, 'w') as f:
-        f.write("")
+        f.write(" ")
 
 
 # Create logger instance
@@ -35,13 +38,11 @@ formatter = logging.Formatter(
     '%(asctime)s - %(name)s - %(levelname)s - %(message)s', datefmt='%d-%b-%y %H:%M:%S')
 
 # Create handlers
-info_handler = RotatingFileHandler(
-    filename=log_file_path, mode='w', maxBytes=5*1024*1024, backupCount=2)
+info_handler = FileHandler(filename=log_file_path, mode='w+')
 info_handler.setLevel(logging.INFO)
 info_handler.setFormatter(formatter)
 
-debug_handler = RotatingFileHandler(
-    filename=debug_log_file_path, mode='w', maxBytes=5*1024*1024, backupCount=2)
+debug_handler = FileHandler(filename=debug_log_file_path, mode='w+')
 debug_handler.setLevel(logging.DEBUG)
 debug_handler.setFormatter(formatter)
 
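The old handlers rotated the two log files once they grew past about 5 MB; the new plain FileHandler objects (mode='w+') truncate their files at start-up and then grow without bound. If rotation is still wanted, the standard library's logging.handlers.RotatingFileHandler keeps the old behaviour. A minimal sketch reusing the names from this file, with the size limit and backup count taken from the removed lines:

import logging
from logging.handlers import RotatingFileHandler

log_file_path = 'chat_log.log'
formatter = logging.Formatter(
    '%(asctime)s - %(name)s - %(levelname)s - %(message)s', datefmt='%d-%b-%y %H:%M:%S')

# Rolls chat_log.log over at ~5 MB, keeping chat_log.log.1 and chat_log.log.2
info_handler = RotatingFileHandler(
    filename=log_file_path, maxBytes=5 * 1024 * 1024, backupCount=2)
info_handler.setLevel(logging.INFO)
info_handler.setFormatter(formatter)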
@@ -185,8 +186,8 @@ def stress_test(num_requests, message, delay):
                 "data": [message],
                 "fn_index": 0 # This might need to be updated based on your Gradio app's function index
             })
-            logger.debug(f"Request payload: {message}")
-            logger.debug(f"Response: {response.json()}")
+            logger.debug(f"Request payload: {message}", exc_info=True)
+            logger.debug(f"Response: {response.json()}", exc_info=True)
         except Exception as e:
             logger.debug(f"Error during stress test request: {e}", exc_info=True)
 
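The payload with "data" and "fn_index" follows the Gradio 3.x REST convention, where each event handler of a Blocks app is reachable through POST /api/predict and returns its outputs under "data". A minimal standalone sketch of one stress-test request; the URL, port and function index are assumptions that depend on how the app is launched:

import requests

response = requests.post(
    "http://127.0.0.1:7860/api/predict",   # assumed local Gradio endpoint
    json={
        "data": ["An example sentence."],  # inputs, in component order
        "fn_index": 0,                     # position of the handler in the app
    },
    timeout=30,
)
print(response.json().get("data"))         # outputs come back under "data"

Note that exc_info=True on the two new logger.debug calls only attaches a traceback while an exception is being handled; on the lines above the except block it merely appends "NoneType: None" to each record.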
@@ -200,14 +201,10 @@ def stress_test(num_requests, message, delay):
     for t in threads:
         t.join()
 
+
 # --- Gradio Interface with Background Image and Three Windows ---
-with gr.Blocks(
-
-    background-image: url("stag.jpeg");
-    background-size: cover;
-    background-repeat: no-repeat;
-    }
-    """, title="PLOD Filtered with Monitoring") as demo: # Load CSS for background image
+with gr.Blocks(title="PLOD Filtered with Monitoring") as demo: # Load CSS for background image
+
     with gr.Tab("Sentence input"):
         gr.Markdown("## Chat with the Bot")
         index_input = gr.Textbox(label="Enter A sentence:", lines=1)
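The removed lines appear to have passed a CSS string to gr.Blocks, which is the supported way to style the whole page; gr.Blocks accepts css alongside title. A minimal sketch of that approach, with the caveat that stag.jpeg must be reachable by the browser (how local files are exposed depends on the Gradio version and its static-file settings):

import gradio as gr

css = """
body {
    background-image: url("stag.jpeg");
    background-size: cover;
    background-repeat: no-repeat;
}
"""

with gr.Blocks(css=css, title="PLOD Filtered with Monitoring") as demo:
    with gr.Tab("Sentence input"):
        gr.Markdown("## Chat with the Bot")
        index_input = gr.Textbox(label="Enter A sentence:", lines=1)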
@@ -250,7 +247,9 @@ body {
             stress_test_status.value = f"Stress test failed: {e}"
 
         stress_test_button.click(run_stress_test, [num_requests_input, index_input_stress, delay_input], stress_test_status)
-
+        img = gr.Image(
+            "stag.jpeg", label="Image"
+        )
     # --- Update Functions ---
     def update_metrics(request_count_display, avg_latency_display):
         while True:
@@ -274,15 +273,10 @@ body {
     def update_logs(logs_display):
         while True:
             info_log_vector = []
-            # with open('debug.log', "r") as log_file_handler:
-            #     for line in log_file_handler: # Skip empty lines
-            #         info_log_vector.append(line)
-            # debugger.debug(info_log_vector)
-            # logs_display.value = info_log_vector # Display last 10 lines
             logs = []
             while not logs_queue.empty():
                 logs.append(logs_queue.get())
-            logs_display.value = "\n".join(logs)
+            logs_display.value = "\n".join(logs[-10:])
             time.sleep(1) # Update every 1 second
 
     def display_model_params(model_params_display):
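update_logs drains a logs_queue that is filled elsewhere in the file. A common way to feed such a queue is a small logging.Handler that pushes every formatted record onto it (the standard library also ships logging.handlers.QueueHandler for the same purpose). A minimal sketch of the pattern; the handler class is hypothetical and the real app may populate logs_queue differently:

import logging
import queue

logs_queue = queue.Queue()

class QueueLogHandler(logging.Handler):
    # Hypothetical helper: formats each record and hands it to the queue
    # that update_logs() empties once per second.
    def emit(self, record):
        logs_queue.put(self.format(record))

logger = logging.getLogger(__name__)
logger.addHandler(QueueLogHandler())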
@@ -301,7 +295,7 @@ body {
     threading.Thread(target=start_http_server, args=(8000,), daemon=True).start()
     threading.Thread(target=update_metrics, args=(request_count_display, avg_latency_display), daemon=True).start()
     threading.Thread(target=update_usage, args=(cpu_usage_display, mem_usage_display), daemon=True).start()
-    threading.Thread(target=update_logs, args=(logs_display
+    threading.Thread(target=update_logs, args=(logs_display), daemon=True).start()
     threading.Thread(target=display_model_params, args=(model_params_display,), daemon=True).start()
     threading.Thread(target=update_queue_length, daemon=True).start()
 
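One detail in the restored update_logs thread: threading.Thread unpacks args with *, so it must be a sequence, and (logs_display) is just a parenthesised name rather than a tuple. Unless logs_display happens to be iterable, the thread raises a TypeError the moment it starts. The one-element tuple form used by the neighbouring calls avoids that:

import threading

# The trailing comma makes (logs_display,) a one-element tuple,
# matching the display_model_params call on the next line of the file.
threading.Thread(target=update_logs, args=(logs_display,), daemon=True).start()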