from fastai.vision.all import *
import gradio as gr
import torchvision.transforms as transforms
import torch

# Augmentation classes must be importable here so the pickled learner
# ('export.pkl') can be unpickled by load_learner at inference time.
from albumentations import (
    Compose, OneOf, ElasticTransform, GridDistortion, OpticalDistortion,
    HorizontalFlip, Rotate, Transpose, CLAHE, ShiftScaleRotate
)


class SegmentationAlbumentationsTransform(ItemTransform):
    """Apply an albumentations augmentation to an (image, mask) pair.

    Needed at inference time only so that the exported learner unpickles;
    split_idx = 0 restricts it to the training split.
    """
    split_idx = 0  # apply on the training set only

    def __init__(self, aug):
        self.aug = aug

    def encodes(self, x):
        img, mask = x
        augmented = self.aug(image=np.array(img), mask=np.array(mask))
        return PILImage.create(augmented["image"]), PILMask.create(augmented["mask"])


class TargetMaskConvertTransform(ItemTransform):
    """Remap the raw grayscale mask values to contiguous class ids 0-4."""

    def __init__(self):
        pass

    def encodes(self, x):
        img, mask = x
        mask = np.array(mask)
        # Raw grayscale values in the dataset masks:
        # background = 0, grape = 255, leaves = 150, pole = 74 or 76, wood = 25 or 29
        mask[mask == 255] = 1  # grape
        mask[mask == 150] = 2  # leaves
        mask[mask == 76] = 3   # pole
        mask[mask == 74] = 3
        mask[mask == 29] = 4   # wood
        mask[mask == 25] = 4
        mask[mask >= 5] = 0    # anything else: background
        return img, PILMask.create(mask)


def transform_image(image, device):
    """Convert a PIL image to a normalized (1, C, H, W) float tensor on `device`.

    Uses the standard ImageNet mean/std, matching the model's training-time
    normalization.
    """
    preprocess = transforms.Compose([
        transforms.ToTensor(),
        transforms.Normalize([0.485, 0.456, 0.406],
                             [0.229, 0.224, 0.225]),
    ])
    return preprocess(image).unsqueeze(0).to(device)


def mask_to_img(mask):
    """Map class ids back to display grayscale values and reshape to (480, 640).

    Safe to apply in this order: the replacement values (255, 150, 74, 25)
    never collide with the remaining class ids (2, 3, 4). Mutates `mask`
    in place and also returns it.
    """
    mask[mask == 1] = 255  # grape
    mask[mask == 2] = 150  # leaves
    mask[mask == 3] = 74   # pole
    mask[mask == 4] = 25   # wood
    mask = np.reshape(mask, (480, 640))
    return mask


def predict(img):
    """Run segmentation inference on a PIL image; return the mask as a PIL image.

    Loads the exported fastai learner, resizes the input to the model's
    expected 480x640, runs a forward pass, and converts the per-pixel
    argmax back to a grayscale image.
    """
    learn = load_learner('export.pkl')
    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
    # Learner delegates unknown attributes to its model, so learn.cpu()
    # returns the underlying nn.Module on CPU.
    model = learn.cpu()
    model.eval()
    image = transforms.Resize((480, 640))(img)
    tensor = transform_image(image, device)
    model.to(device)
    with torch.no_grad():
        outputs = model(tensor)
        outputs = torch.argmax(outputs, 1)
    mask = np.array(outputs.cpu())
    mask = mask_to_img(mask)
    # BUG FIX: in the original the return statement was severed from its
    # expression, yielding `return None` plus an orphaned statement.
    return Image.fromarray(mask.astype('uint8'))


# Build and launch the Gradio interface.
gr.Interface(fn=predict,
             inputs=gr.inputs.Image(shape=(128, 128)),
             outputs=gr.outputs.Image(),
             examples=['color_154.jpg', 'color_155.jpg']).launch(share=False)