dnth committed
Commit 606cf89 · 1 Parent(s): 024418c
Files changed (1)
  1. app.py +53 -53
app.py CHANGED
@@ -1,74 +1,74 @@
  import cv2
- # from icevision.all import *
- # import icedata
- # import PIL, requests
- # import torch
- # from torchvision import transforms
- # import gradio as gr

- # # Download the dataset
- # url = "https://cvbp-secondary.z19.web.core.windows.net/datasets/object_detection/odFridgeObjects.zip"
- # dest_dir = "fridge"
- # data_dir = icedata.load_data(url, dest_dir)

- # # Create the parser
- # parser = parsers.VOCBBoxParser(annotations_dir=data_dir / "odFridgeObjects/annotations", images_dir=data_dir / "odFridgeObjects/images")

- # # Parse annotations to create records
- # train_records, valid_records = parser.parse()

- # class_map = parser.class_map

- # extra_args = {}
- # model_type = models.torchvision.retinanet
- # backbone = model_type.backbones.resnet50_fpn
- # # Instantiate the model
- # model = model_type.model(backbone=backbone(pretrained=True), num_classes=len(parser.class_map), **extra_args)

- # # Transforms
- # # size is set to 384 because EfficientDet requires its inputs to be divisible by 128
- # image_size = 384
- # train_tfms = tfms.A.Adapter([*tfms.A.aug_tfms(size=image_size, presize=512), tfms.A.Normalize()])
- # valid_tfms = tfms.A.Adapter([*tfms.A.resize_and_pad(image_size), tfms.A.Normalize()])
- # # Datasets
- # train_ds = Dataset(train_records, train_tfms)
- # valid_ds = Dataset(valid_records, valid_tfms)
- # # Data Loaders
- # train_dl = model_type.train_dl(train_ds, batch_size=8, num_workers=4, shuffle=True)
- # valid_dl = model_type.valid_dl(valid_ds, batch_size=8, num_workers=4, shuffle=False)
- # metrics = [COCOMetric(metric_type=COCOMetricType.bbox)]
- # learn = model_type.fastai.learner(dls=[train_dl, valid_dl], model=model, metrics=metrics)

- # learn = learn.load('model')

- # def show_preds(input_image, display_label, display_bbox, detection_threshold):

- # if detection_threshold==0: detection_threshold=0.5

- # img = PIL.Image.fromarray(input_image, 'RGB')

- # pred_dict = model_type.end2end_detect(img, valid_tfms, model, class_map=class_map, detection_threshold=detection_threshold,
- # display_label=display_label, display_bbox=display_bbox, return_img=True,
- # font_size=16, label_color="#FF59D6")

- # return pred_dict['img']

- # # display_chkbox = gr.inputs.CheckboxGroup(["Label", "BBox"], label="Display", default=True)
- # display_chkbox_label = gr.inputs.Checkbox(label="Label", default=True)
- # display_chkbox_box = gr.inputs.Checkbox(label="Box", default=True)

- # detection_threshold_slider = gr.inputs.Slider(minimum=0, maximum=1, step=0.1, default=0.5, label="Detection Threshold")

- # outputs = gr.outputs.Image(type="pil")

- # # Option 1: Get an image from local drive
- # gr_interface = gr.Interface(fn=show_preds, inputs=["image", display_chkbox_label, display_chkbox_box, detection_threshold_slider], outputs=outputs, title='IceApp - Fridge Object')

- # # # Option 2: Grab an image from a webcam
- # # gr_interface = gr.Interface(fn=show_preds, inputs=["webcam", display_chkbox_label, display_chkbox_box, detection_threshold_slider], outputs=outputs, title='IceApp - COCO', live=False)

- # # # Option 3: Continuous image stream from the webcam
- # # gr_interface = gr.Interface(fn=show_preds, inputs=["webcam", display_chkbox_label, display_chkbox_box, detection_threshold_slider], outputs=outputs, title='IceApp - COCO', live=True)


- # gr_interface.launch(inline=False, share=True, debug=True)
 
  import cv2
+ from icevision.all import *
+ import icedata
+ import PIL, requests
+ import torch
+ from torchvision import transforms
+ import gradio as gr

+ # Download the dataset
+ url = "https://cvbp-secondary.z19.web.core.windows.net/datasets/object_detection/odFridgeObjects.zip"
+ dest_dir = "fridge"
+ data_dir = icedata.load_data(url, dest_dir)

+ # Create the parser
+ parser = parsers.VOCBBoxParser(annotations_dir=data_dir / "odFridgeObjects/annotations", images_dir=data_dir / "odFridgeObjects/images")

+ # Parse annotations to create records
+ train_records, valid_records = parser.parse()

+ class_map = parser.class_map

+ extra_args = {}
+ model_type = models.torchvision.retinanet
+ backbone = model_type.backbones.resnet50_fpn
+ # Instantiate the model
+ model = model_type.model(backbone=backbone(pretrained=True), num_classes=len(parser.class_map), **extra_args)

+ # Transforms
+ # size is set to 384 because EfficientDet requires its inputs to be divisible by 128
+ image_size = 384
+ train_tfms = tfms.A.Adapter([*tfms.A.aug_tfms(size=image_size, presize=512), tfms.A.Normalize()])
+ valid_tfms = tfms.A.Adapter([*tfms.A.resize_and_pad(image_size), tfms.A.Normalize()])
+ # Datasets
+ train_ds = Dataset(train_records, train_tfms)
+ valid_ds = Dataset(valid_records, valid_tfms)
+ # Data Loaders
+ train_dl = model_type.train_dl(train_ds, batch_size=8, num_workers=4, shuffle=True)
+ valid_dl = model_type.valid_dl(valid_ds, batch_size=8, num_workers=4, shuffle=False)
+ metrics = [COCOMetric(metric_type=COCOMetricType.bbox)]
+ learn = model_type.fastai.learner(dls=[train_dl, valid_dl], model=model, metrics=metrics)

+ learn = learn.load('model')

+ def show_preds(input_image, display_label, display_bbox, detection_threshold):

+     if detection_threshold==0: detection_threshold=0.5

+     img = PIL.Image.fromarray(input_image, 'RGB')

+     pred_dict = model_type.end2end_detect(img, valid_tfms, model, class_map=class_map, detection_threshold=detection_threshold,
+                                           display_label=display_label, display_bbox=display_bbox, return_img=True,
+                                           font_size=16, label_color="#FF59D6")

+     return pred_dict['img']

+ # display_chkbox = gr.inputs.CheckboxGroup(["Label", "BBox"], label="Display", default=True)
+ display_chkbox_label = gr.inputs.Checkbox(label="Label", default=True)
+ display_chkbox_box = gr.inputs.Checkbox(label="Box", default=True)

+ detection_threshold_slider = gr.inputs.Slider(minimum=0, maximum=1, step=0.1, default=0.5, label="Detection Threshold")

+ outputs = gr.outputs.Image(type="pil")

+ # Option 1: Get an image from local drive
+ gr_interface = gr.Interface(fn=show_preds, inputs=["image", display_chkbox_label, display_chkbox_box, detection_threshold_slider], outputs=outputs, title='IceApp - Fridge Object')

+ # # Option 2: Grab an image from a webcam
+ # gr_interface = gr.Interface(fn=show_preds, inputs=["webcam", display_chkbox_label, display_chkbox_box, detection_threshold_slider], outputs=outputs, title='IceApp - COCO', live=False)

+ # # Option 3: Continuous image stream from the webcam
+ # gr_interface = gr.Interface(fn=show_preds, inputs=["webcam", display_chkbox_label, display_chkbox_box, detection_threshold_slider], outputs=outputs, title='IceApp - COCO', live=True)


+ gr_interface.launch(inline=False, share=True, debug=True)