Thiago Hersan committed
Commit ae3ffab
1 Parent(s): c7df5d8

reorg app. add description

Files changed (1): app.py (+17 -15)
app.py CHANGED
@@ -12,12 +12,18 @@ example_images = sorted(glob.glob('examples/map*.jpg'))
 ade_mean=[0.485, 0.456, 0.406]
 ade_std=[0.229, 0.224, 0.225]
 
+test_transform = T.Compose([
+    T.ToTensor(),
+    T.Normalize(mean=ade_mean, std=ade_std)
+])
+
 palette = [
     [120, 120, 120], [4, 200, 4], [4, 4, 250], [6, 230, 230],
     [80, 50, 50], [120, 120, 80], [140, 140, 140], [204, 5, 255]
 ]
 
 model_id = f"thiagohersan/maskformer-satellite-trees"
+vegetation_labels = ["vegetation"]
 
 # preprocessor = MaskFormerImageProcessor.from_pretrained(model_id)
 preprocessor = MaskFormerImageProcessor(
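
The normalization transform now lives with the other module-level constants instead of between the model setup and the helpers. As a quick, self-contained sketch of what it does to an input image (the filename below is hypothetical; the app globs examples/map*.jpg):

import numpy as np
import torchvision.transforms as T
from PIL import Image

ade_mean = [0.485, 0.456, 0.406]
ade_std = [0.229, 0.224, 0.225]

test_transform = T.Compose([
    T.ToTensor(),                            # HWC uint8 array -> CHW float32 in [0, 1]
    T.Normalize(mean=ade_mean, std=ade_std)  # per-channel ADE20K statistics
])

img = np.array(Image.open("examples/map-sample.jpg"))  # hypothetical example file
tensor = test_transform(img)                 # torch.Tensor of shape (3, H, W)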
@@ -31,16 +37,11 @@ preprocessor = MaskFormerImageProcessor(
 hf_token = environ.get('HFTOKEN')
 model = MaskFormerForInstanceSegmentation.from_pretrained(model_id, use_auth_token=hf_token)
 
-test_transform = T.Compose([
-    T.ToTensor(),
-    T.Normalize(mean=ade_mean, std=ade_std)
-])
 
-def visualize_instance_seg_mask(img_in, mask, id2label):
+def visualize_instance_seg_mask(img_in, mask, id2label, included_labels):
     img_out = np.zeros((mask.shape[0], mask.shape[1], 3))
     image_total_pixels = mask.shape[0] * mask.shape[1]
     label_ids = np.unique(mask)
-    vegetation_labels = ["vegetation"]
 
     id2color = {id: palette[id] for id in label_ids}
     id2count = {id: 0 for id in label_ids}
@@ -56,7 +57,7 @@ def visualize_instance_seg_mask(img_in, mask, id2label):
         f"{id2label[id]}",
         f"{(100 * id2count[id] / image_total_pixels):.2f} %",
         f"{np.sqrt(id2count[id] / image_total_pixels):.2f} m"
-    ] for id in label_ids if id2label[id] in vegetation_labels]
+    ] for id in label_ids if id2label[id] in included_labels]
 
     if len(dataframe) < 1:
         dataframe = [[
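
Hunks 2 and 3 together replace the helper's hard-coded vegetation filter with an included_labels parameter. The pixel-counting loop in the middle of the function is outside this diff, so the reconstruction below is only a sketch: the loop body, the fallback row, and the handling of img_in are assumptions, while the signature, the setup lines, and the filtered list comprehension come from the hunks (palette is the module-level list from the first hunk).

import numpy as np

def visualize_instance_seg_mask(img_in, mask, id2label, included_labels):
    # img_in mirrors the real signature; the diff does not show how it is used
    img_out = np.zeros((mask.shape[0], mask.shape[1], 3))
    image_total_pixels = mask.shape[0] * mask.shape[1]
    label_ids = np.unique(mask)

    id2color = {id: palette[id] for id in label_ids}
    id2count = {id: 0 for id in label_ids}

    # Assumed body: paint each pixel with its label's color and tally counts
    for i in range(mask.shape[0]):
        for j in range(mask.shape[1]):
            img_out[i, j] = id2color[mask[i, j]]
            id2count[mask[i, j]] += 1

    # Only labels named in included_labels (e.g. ["vegetation"]) are reported
    dataframe = [[
        f"{id2label[id]}",
        f"{(100 * id2count[id] / image_total_pixels):.2f} %",
        f"{np.sqrt(id2count[id] / image_total_pixels):.2f} m"
    ] for id in label_ids if id2label[id] in included_labels]

    if len(dataframe) < 1:
        dataframe = [["n/a", "0.00 %", "0.00 m"]]  # assumed fallback row

    return img_out, dataframe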
@@ -72,27 +73,28 @@ def query_image(image_path):
     img = np.array(Image.open(image_path))
     img_size = (img.shape[0], img.shape[1])
     inputs = preprocessor(images=test_transform(img), return_tensors="pt")
-
     outputs = model(**inputs)
-
     results = preprocessor.post_process_semantic_segmentation(outputs=outputs, target_sizes=[img_size])[0]
-    mask_img, dataframe = visualize_instance_seg_mask(img, results.numpy(), model.config.id2label)
+    mask_img, dataframe = visualize_instance_seg_mask(img, results.numpy(), model.config.id2label, vegetation_labels)
     return mask_img, dataframe
 
 
 demo = gr.Interface(
+    title="Maskformer Satellite+Trees",
+    description="Using a finetuned version of the [facebook/maskformer-swin-base-ade](https://huggingface.co/facebook/maskformer-swin-base-ade) model (created specifically to work with satellite images) to calculate percentage of pixels in an image that belong to vegetation.",
+
     fn=query_image,
     inputs=[gr.Image(type="filepath", label="Input Image")],
     outputs=[
         gr.Image(label="Vegetation"),
         gr.DataFrame(label="Info", headers=["Object Label", "Pixel Percent", "Square Length"])
     ],
+
     examples=example_images,
-    cache_examples=True
+    cache_examples=True,
+
+    allow_flagging="never",
+    analytics_enabled=None
 )
 
-demo.queue(concurrency_count=4, max_size=1)
 demo.launch(show_api=False)
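
Assembled from the hunks, the inference path after this commit reads straight through (every line below appears in the diff; it relies on the module-level preprocessor, model, test_transform, and vegetation_labels):

def query_image(image_path):
    img = np.array(Image.open(image_path))
    img_size = (img.shape[0], img.shape[1])
    inputs = preprocessor(images=test_transform(img), return_tensors="pt")
    outputs = model(**inputs)
    results = preprocessor.post_process_semantic_segmentation(outputs=outputs, target_sizes=[img_size])[0]
    mask_img, dataframe = visualize_instance_seg_mask(img, results.numpy(), model.config.id2label, vegetation_labels)
    return mask_img, dataframe

The demo.queue(concurrency_count=4, max_size=1) call is removed outright rather than reconfigured, so the interface now launches without an explicit queue.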
 