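# BEiT semantic segmentation demo (Gradio app for Hugging Face Spaces).
# Runs a BEiT model fine-tuned on ADE20K and renders the predicted
# segmentation map with the ADE20K color palette.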
import numpy as np
import cv2
import gradio as gr
import torch
from ade20k_colors import colors
from transformers import BeitFeatureExtractor, BeitForSemanticSegmentation
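
# Both ADE20K-fine-tuned checkpoints (base and large) and their feature
# extractors are loaded once at startup, so each request only runs inference.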
beit_models = ['microsoft/beit-base-finetuned-ade-640-640',
               'microsoft/beit-large-finetuned-ade-640-640']
models = [BeitForSemanticSegmentation.from_pretrained(m) for m in beit_models]
extractors = [BeitFeatureExtractor.from_pretrained(m) for m in beit_models]

def apply_colors(img):
    # Map each pixel's highest-scoring class to its ADE20K palette color
    # (vectorized: argmax over the class axis, then index into the palette).
    palette = np.array(colors, dtype=np.uint8)
    return palette[np.argmax(img, axis=-1)]
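
# e.g. apply_colors(np.random.rand(64, 64, 150)) returns a (64, 64, 3)
# uint8 RGB array (ADE20K has 150 classes).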

def inference(image, chosen_model):
    feature_extractor = extractors[chosen_model]
    model = models[chosen_model]
    # Preprocess (resize + normalize) and run a forward pass without gradients.
    inputs = feature_extractor(images=image, return_tensors='pt')
    with torch.no_grad():
        outputs = model(**inputs)
    # logits: (1, num_classes, h, w) at a lower resolution than the input.
    # apply_colors takes the argmax over raw logits; sigmoid is monotonic,
    # so applying it first would not change the result.
    output = outputs.logits[0].numpy()
    output = np.transpose(output, (1, 2, 0))
    output = apply_colors(output)
    # Upsample back to the input size; nearest-neighbor interpolation keeps
    # class colors from blending at region boundaries.
    return cv2.resize(output, image.shape[1::-1], interpolation=cv2.INTER_NEAREST)
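
# Gradio UI: an input image plus a base/large radio button (passed to
# inference as an index) in, the colorized segmentation map out.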
inputs = [gr.Image(label='Input Image'),
          gr.Radio(['Base', 'Large'], label='BEiT Model', type='index')]

gr.Interface(
    inference,
    inputs,
    gr.Image(label='Output'),
    title='BEiT - Semantic Segmentation',
    description='BEiT: BERT Pre-Training of Image Transformers',
    examples=[['images/armchair.jpg', 'Base'],
              ['images/cat.jpg', 'Base'],
              ['images/plant.jpg', 'Large']]
).launch()