Rubén Escobedo committed
Commit f5e4284
1 Parent(s): 6a6fb4c

Update app.py

Files changed (1)
  1. app.py +36 -6
app.py CHANGED
@@ -1,19 +1,49 @@
 from fastai.vision.all import *
 import gradio as gr
-
+import torchvision.transforms as transforms
 
 # Load the learner
-learn = load_learner('export.pkl')
+learn = load_learner('best_model.pkl')
 
 # Define our model's labels
 labels = learn.dls.vocab
 
+def transform_image(image, device):
+    my_transforms = transforms.Compose([transforms.ToTensor(),
+                                        transforms.Normalize(
+                                            [0.485, 0.456, 0.406],   # ImageNet mean
+                                            [0.229, 0.224, 0.225])]) # ImageNet std
+    image_aux = image
+    return my_transforms(image_aux).unsqueeze(0).to(device)
+
+def mask_to_img(mask):
+    mask[mask == 1] = 255  # grape
+    mask[mask == 2] = 150  # leaves
+    mask[mask == 3] = 74   # pole
+    mask[mask == 4] = 25   # wood
+    mask = np.reshape(mask, (480, 640))
+
+    return mask
 
 # Define a function that carries out the predictions
 def predict(img):
-    img = PILImage.create(img)
-    pred, pred_idx, probs = learn.predict(img)
-    return {labels[i]: float(probs[i]) for i in range(len(labels))}
+    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
+    model = learn.cpu()
+    model.eval()
+
+    image = transforms.Resize((480, 640))(img)
+    tensor = transform_image(image, device)
+
+    model.to(device)
+    with torch.no_grad():
+        outputs = model(tensor)
+
+    outputs = torch.argmax(outputs, 1)  # per-pixel class index
+
+    mask = np.array(outputs.cpu())
+    mask = mask_to_img(mask)
+
+    return Image.fromarray(mask.astype('uint8'))
 
 # Create the interface and launch it.
-gr.Interface(fn=predict, inputs=gr.inputs.Image(shape=(128, 128)), outputs=gr.outputs.Label(num_top_classes=3), examples=['1002_5866_6582.jpg', '1038_31199_2068.jpg']).launch(share=False)
+gr.Interface(fn=predict, inputs=gr.inputs.Image(shape=(128, 128)), outputs=gr.outputs.Image(shape=(128, 128)), examples=['1002_5866_6582.jpg', '1038_31199_2068.jpg']).launch(share=False)
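
Note: gr.inputs.Image and gr.outputs.Image belong to Gradio's legacy (pre-3.x) component API. Below is a minimal sketch of the same interface using the current gr.Image component; the type="pil" argument is an assumption (the committed code relies on the component's default format) and hands predict a PIL image, which the torchvision Resize/ToTensor transforms used here accept.

# Hypothetical sketch: equivalent interface with the modern Gradio component API
# (assumes gradio >= 3.x, where gr.inputs / gr.outputs are deprecated).
gr.Interface(
    fn=predict,
    inputs=gr.Image(type="pil"),   # predict() resizes to 480x640 internally
    outputs=gr.Image(type="pil"),
    examples=['1002_5866_6582.jpg', '1038_31199_2068.jpg'],
).launch(share=False)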