allispaul committed
Commit 3898f71
1 Parent(s): 482c11f

initial commit

app.py ADDED
@@ -0,0 +1,91 @@
+ from timeit import default_timer as timer
+ from typing import Tuple
+ from pathlib import Path
+ from PIL import Image
+
+ import gradio as gr
+ import torch
+
+ from model import create_effnetb3_model
+
+ class_names = ['apple_pie', 'baby_back_ribs', 'baklava', 'beef_carpaccio', 'beef_tartare',
+                'beet_salad', 'beignets', 'bibimbap', 'bread_pudding', 'breakfast_burrito',
+                'bruschetta', 'caesar_salad', 'cannoli', 'caprese_salad', 'carrot_cake',
+                'ceviche', 'cheese_plate', 'cheesecake', 'chicken_curry', 'chicken_quesadilla',
+                'chicken_wings', 'chocolate_cake', 'chocolate_mousse', 'churros', 'clam_chowder',
+                'club_sandwich', 'crab_cakes', 'creme_brulee', 'croque_madame', 'cup_cakes',
+                'deviled_eggs', 'donuts', 'dumplings', 'edamame', 'eggs_benedict',
+                'escargots', 'falafel', 'filet_mignon', 'fish_and_chips', 'foie_gras',
+                'french_fries', 'french_onion_soup', 'french_toast', 'fried_calamari', 'fried_rice',
+                'frozen_yogurt', 'garlic_bread', 'gnocchi', 'greek_salad', 'grilled_cheese_sandwich',
+                'grilled_salmon', 'guacamole', 'gyoza', 'hamburger', 'hot_and_sour_soup',
+                'hot_dog', 'huevos_rancheros', 'hummus', 'ice_cream', 'lasagna',
+                'lobster_bisque', 'lobster_roll_sandwich', 'macaroni_and_cheese', 'macarons', 'miso_soup',
+                'mussels', 'nachos', 'omelette', 'onion_rings', 'oysters',
+                'pad_thai', 'paella', 'pancakes', 'panna_cotta', 'peking_duck',
+                'pho', 'pizza', 'pork_chop', 'poutine', 'prime_rib',
+                'pulled_pork_sandwich', 'ramen', 'ravioli', 'red_velvet_cake', 'risotto',
+                'samosa', 'sashimi', 'scallops', 'seaweed_salad', 'shrimp_and_grits',
+                'spaghetti_bolognese', 'spaghetti_carbonara', 'spring_rolls', 'steak', 'strawberry_shortcake',
+                'sushi', 'tacos', 'takoyaki', 'tiramisu', 'tuna_tartare', 'waffles']
+
+ device = "cpu"
+
+ # Create model
+ effnetb3, effnetb3_transforms = create_effnetb3_model(num_classes=len(class_names))
+
+ # Load saved weights. The checkpoint was saved from a model whose classifier head
+ # was a bare nn.Linear, so its keys are renamed to match the nn.Sequential head
+ # built by create_effnetb3_model (where the Linear layer sits at index 1).
+ effnetb3_state_dict = torch.load("effnetb3_full_food101.pth",
+                                  map_location=torch.device(device))
+ effnetb3_state_dict['classifier.1.weight'] = effnetb3_state_dict.pop('classifier.weight')
+ effnetb3_state_dict['classifier.1.bias'] = effnetb3_state_dict.pop('classifier.bias')
+ effnetb3.load_state_dict(effnetb3_state_dict)
+ effnetb3.to(device)
+
+ # Define predict function
+ def predict(img: Image.Image) -> Tuple[dict, float]:
+     """Uses the EffNetB3 model to transform and predict on img. Returns prediction
+     probabilities and time taken.
+
+     Args:
+         img (PIL.Image.Image): Image to predict on.
+
+     Returns:
+         A tuple (pred_labels_and_probs, pred_time), where pred_labels_and_probs
+         is a dict mapping each class name to the probability the model assigns to
+         it, and pred_time is the time taken to predict (in seconds).
+     """
+     start_time = timer()
+     # Apply the model's transforms and add a batch dimension
+     img = effnetb3_transforms(img).unsqueeze(0)
+     effnetb3.eval()
+     with torch.inference_mode():
+         pred_probs = torch.softmax(effnetb3(img), dim=1)
+     pred_labels_and_probs = {class_names[i]: float(pred_probs[0][i])
+                              for i in range(len(class_names))}
+     pred_time = round(timer() - start_time, 4)
+     return pred_labels_and_probs, pred_time
+
+ # Initialize Gradio app
+ title = "FoodVision"
+ description = "An EfficientNetB3 feature extractor that classifies images of food. Upload an image or click on one of the examples to try it out!"
+ article = """
+ From the [Zero to Mastery PyTorch tutorial](https://www.learnpytorch.io/09_pytorch_model_deployment/), using the
+ [Food-101 dataset](https://data.vision.ee.ethz.ch/cvl/datasets_extra/food-101/).
+ """
+ examples = [[example] for example in Path("examples").glob("*.jpg")]
+
+ demo = gr.Interface(
+     fn=predict,
+     inputs=gr.Image(type="pil"),
+     outputs=[gr.Label(num_top_classes=3, label="Predictions"),
+              gr.Number(label="Prediction time (s)")],
+     examples=examples,
+     title=title,
+     description=description,
+     article=article,
+ )
+
+ demo.launch()
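
A quick way to sanity-check the app locally (not part of the commit): with demo.launch() commented out or guarded, predict can be called directly on one of the bundled example images. A minimal sketch, assuming app.py's module-level objects are importable:

    from PIL import Image
    from app import predict  # assumes demo.launch() is commented out or guarded

    img = Image.open("examples/3301718.jpg")
    labels_and_probs, seconds = predict(img)
    top = max(labels_and_probs, key=labels_and_probs.get)
    print(f"Top prediction: {top} ({labels_and_probs[top]:.3f}) in {seconds}s")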
effnetb3_full_food101.pth ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:221dfc2c8bcb2664081e0c57fffcb04001e77b523613538aa29f1ed2870c5c79
+ size 43989701
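
Note that this file is a Git LFS pointer, not the weights themselves: the ~44 MB checkpoint lives in LFS storage, and a plain clone fetches only this three-line stub. With git-lfs installed, running `git lfs install` followed by `git lfs pull` inside the repository retrieves the actual file that torch.load in app.py expects.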
examples/.ipynb_checkpoints/3301718-checkpoint.jpg ADDED
examples/2522597.jpg ADDED
examples/3301718.jpg ADDED
examples/368383.jpg ADDED
examples/3890499.jpg ADDED
examples/999399.jpg ADDED
model.py ADDED
@@ -0,0 +1,32 @@
+ from typing import Tuple
+
+ import torch
+ from torch import nn
+ import torchvision
+
+ def create_effnetb3_model(num_classes: int = 101,
+                           seed: int = 4,
+                           ) -> Tuple[nn.Module, torchvision.transforms.Compose]:
+     """Create an EfficientNetB3 feature extractor model and transforms.
+
+     Args:
+         num_classes: Number of classes to use for classification (default 101).
+         seed: Random seed for reproducibility (default 4).
+
+     Returns:
+         A tuple (model, transforms) of the model and its image transforms.
+     """
+     weights = torchvision.models.EfficientNet_B3_Weights.DEFAULT
+     transforms = weights.transforms()
+     model = torchvision.models.efficientnet_b3(weights=weights)
+
+     # Freeze the pretrained feature-extractor parameters
+     for param in model.parameters():
+         param.requires_grad = False
+     # Replace the classifier head with one of appropriate size for the problem
+     torch.manual_seed(seed)
+     model.classifier = nn.Sequential(
+         nn.Dropout(p=0.3, inplace=True),
+         nn.Linear(in_features=1536, out_features=num_classes)
+     )
+     return model, transforms
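
A minimal usage sketch (not part of the commit) to verify the factory wires up as intended — the replaced head should emit one logit per class:

    import torch
    from model import create_effnetb3_model

    model, transforms = create_effnetb3_model(num_classes=101)
    model.eval()
    with torch.inference_mode():
        # 300x300 matches the crop size of torchvision's default B3 transforms
        logits = model(torch.randn(1, 3, 300, 300))
    assert logits.shape == (1, 101)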
requirements.txt ADDED
@@ -0,0 +1,3 @@
+ gradio==3.37.0
+ torch==2.0.1
+ torchvision==0.15.2
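
With these pins installed (pip install -r requirements.txt) and the LFS weights fetched, `python app.py` starts the demo locally; a Gradio-based Hugging Face Space builds and serves the app from these same files, with app.py as the entrypoint.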