# try:
#     import detectron2
# except:
import os
os.system('pip install git+https://github.com/SysCV/transfiner.git')

from matplotlib.pyplot import axis
import gradio as gr
import requests
import numpy as np
from torch import nn
import torch

from detectron2 import model_zoo
from detectron2.engine import DefaultPredictor
from detectron2.config import get_cfg
from detectron2.utils.visualizer import Visualizer
from detectron2.data import MetadataCatalog

'''
url1 = 'https://cdn.pixabay.com/photo/2014/09/07/21/52/city-438393_1280.jpg'
r = requests.get(url1, allow_redirects=True)
open("city1.jpg", 'wb').write(r.content)

url2 = 'https://cdn.pixabay.com/photo/2016/02/19/11/36/canal-1209808_1280.jpg'
r = requests.get(url2, allow_redirects=True)
open("city2.jpg", 'wb').write(r.content)
'''

model_name = './configs/COCO-InstanceSegmentation/mask_rcnn_X_101_32x8d_FPN_3x.yaml'
# model = model_zoo.get(model_name, trained=True)

cfg = get_cfg()
# Add project-specific config (e.g., TensorMask) here if you're not running a model in detectron2's core library.
cfg.merge_from_file(model_name)
cfg.MODEL.ROI_HEADS.SCORE_THRESH_TEST = 0.5  # set score threshold for this model
# Find a model from detectron2's model zoo. You can use the https://dl.fbaipublicfiles... url as well.
cfg.MODEL.WEIGHTS = './output_3x_transfiner_r50.pth'

# Fall back to CPU inference when no GPU is available.
if not torch.cuda.is_available():
    cfg.MODEL.DEVICE = 'cpu'

predictor = DefaultPredictor(cfg)


def inference(image):
    # Resize the PIL input to 1024x1024 and convert it to a numpy array for the predictor.
    img = np.array(image.resize((1024, 1024)))
    outputs = predictor(img)
    # Draw the predicted instances on the input image using the training set's metadata.
    v = Visualizer(img, MetadataCatalog.get(cfg.DATASETS.TRAIN[0]), scale=1.2)
    out = v.draw_instance_predictions(outputs["instances"].to("cpu"))
    return out.get_image()


title = "Detectron2-MaskRCNN X101"
description = "Demo for Detectron2. To use it, simply upload your image, or click one of the examples to load them. Read more at the links below. Model: COCO-InstanceSegmentation/mask_rcnn_X_101_32x8d_FPN_3x.yaml"
article = "Simple Copy-Paste is a Strong Data Augmentation Method for Instance Segmentation | Detectron model ZOO"
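
# Optional local sanity check (a minimal sketch, not part of the original demo). It assumes the
# bundled example image demo/sample_imgs/000000224200.jpg exists and that Pillow is installed;
# it calls inference() once and writes the visualization to disk. Left as a quoted block, like
# the download snippet above, so it does not run on startup.
'''
from PIL import Image

test_img = Image.open("demo/sample_imgs/000000224200.jpg").convert("RGB")
result = inference(test_img)  # numpy array with the drawn instance predictions
Image.fromarray(result).save("smoke_test_output.jpg")
'''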
" gr.Interface( inference, [gr.inputs.Image(type="pil", label="Input")], gr.outputs.Image(type="numpy", label="Output"), title=title, description=description, article=article, examples=[ ["demo/sample_imgs/000000224200.jpg"], ["demo/sample_imgs/000000344909.jpg"] ]).launch()