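# Gradio demo: semantic segmentation of vineyard images with a TorchScript model.
# Predicted per-pixel classes (1 = grape, 2 = leaves, 3 = pole, 4 = wood) are
# rendered as grayscale values; all other pixels are left black.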
from fastai.vision.all import *
import gradio as gr
import torchvision.transforms as transforms
import torch
import numpy as np       # used for the mask post-processing
from PIL import Image    # used to resize inputs and build the output image

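# Convert a PIL image to a normalized float tensor batch (ImageNet mean/std) on `device`.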
def transform_image(device, image):
    my_transforms = transforms.Compose([
        transforms.ToTensor(),
        transforms.Normalize([0.485, 0.456, 0.406],
                             [0.229, 0.224, 0.225]),
    ])
    return my_transforms(image).unsqueeze(0).to(device)

# Define the function that runs a prediction on an input image.
def predict(img):
    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
    # Load the TorchScript model straight onto the target device and set eval mode.
    model = torch.jit.load("model.pth", map_location=device)
    model.eval()
    
    # Resize to the resolution the model expects: height 480, width 640.
    image = transforms.Resize((480, 640))(Image.fromarray(img))
    tensor = transform_image(device, image)

    with torch.no_grad():
        outputs = model(tensor)
        
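    # Argmax over the channel dimension gives a per-pixel class index map.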
    outputs = torch.argmax(outputs, 1)
        
    mask = np.array(outputs.cpu())
    mask[mask == 1] = 255 # grape
    mask[mask == 2] = 150 # leaves
    mask[mask == 3] = 76 # pole
    mask[mask == 4] = 29 # wood
    
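    # Drop the batch dimension so the mask matches the 480x640 image grid.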
    mask = np.reshape(mask, (480, 640))
    
    return Image.fromarray(mask.astype('uint8'))
    
# Build the Gradio interface and launch it.
gr.Interface(fn=predict,
             inputs=gr.inputs.Image(shape=(128, 128)),
             outputs=gr.outputs.Image(),
             examples=['color_154.jpg', 'color_155.jpg']).launch(share=False)
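
# Optional local sanity check (a sketch, left commented out because launch()
# blocks; it assumes one of the bundled example images sits next to this script):
#   mask = predict(np.array(Image.open("color_154.jpg")))
#   mask.save("mask_154.png")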