Puyush committed on
Commit 7fb148b
1 Parent(s): 9d093be

Upload app.py

Files changed (1)
  1. app.py +100 -0
app.py ADDED
@@ -0,0 +1,100 @@
+ import gradio as gr
+ from ultralytics import YOLO
+ import cv2
+
+ # Load the trained weights once at startup so the model is not re-created for every request or frame.
+ model = YOLO("best.pt")
+
+ # Class indices in the trained model that correspond to accident types.
+ ACCIDENT_CLASSES = {
+     1: "Bike Bike Accident Detected",
+     2: "Bike Object Accident Detected",
+     3: "Bike Person Accident Detected",
+     5: "Car Bike Accident Detected",
+     6: "Car Car Accident Detected",
+     7: "Car Object Accident Detected",
+     8: "Car Person Accident Detected",
+ }
+
+ def check_acc(box):
+     """Return the label of the first accident class found among the detections."""
+     for index in box.cls.tolist():
+         if index in ACCIDENT_CLASSES:
+             return ACCIDENT_CLASSES[index]
+     return ""
+
+ def image_predict(image):
+     """Run detection on a single image and return a (label, annotated image) pair."""
+     results = model.predict(image, conf=0.6, iou=0.3, imgsz=512)
+     box = results[0].boxes
+     res = check_acc(box)
+     annotated_frame = results[0].plot()
+     if len(res) > 0:
+         return (res, annotated_frame)
+
+     return ("No Class Detected", None)
+
+ def extract_frames(video):
+     """Sample frames from the video and return as soon as an accident is detected."""
+     vidcap = cv2.VideoCapture(video)
+     fps = vidcap.get(cv2.CAP_PROP_FPS)
+     nof = 4  # number of frames to analyse per second of video
+     frame_interval = max(int(fps / nof), 1)  # avoid division by zero when fps cannot be read
+     frame_no = 0
+     while vidcap.isOpened():
+         success, image = vidcap.read()
+         if not success:
+             break
+
+         # Check if it's time to process the frame based on the desired interval
+         if frame_no % frame_interval == 0:
+             results = model.predict(image, conf=0.6, iou=0.3, imgsz=512)
+             box = results[0].boxes
+             res = check_acc(box)
+             annotated_frame = results[0].plot()
+
+             if len(res) > 0:
+                 vidcap.release()
+                 return (res, annotated_frame)
+
+         frame_no += 1  # Increment frame number
+
+     vidcap.release()
+     return ("No Class Detected", None)
+
+
+ def take_input(image, video):
+     """Route the request to the video pipeline if a video was supplied, otherwise to the image pipeline."""
+     if video is not None:
+         return extract_frames(video)
+     return image_predict(image)
+
+
+ with gr.Blocks(title="YOLOv8 Accident Detection", css=".gradio-container {background:lightyellow;}") as demo:
+     gr.HTML("<h1>Accident Detection Using YOLOv8</h1>")
+     gr.HTML("<br>")
+     with gr.Row():
+         input_image = gr.Image(label="Input image")
+         input_video = gr.Video(label="Input video")
+         output_label = gr.Text(label="Output label")
+         output_image = gr.Image(label="Output image")
+     gr.HTML("<br>")
+     send_btn = gr.Button("Detect")
+     gr.HTML("<br>")
+
+     send_btn.click(fn=take_input, inputs=[input_image, input_video], outputs=[output_label, output_image])
+
+ demo.launch(debug=True)