Upload 13 files
- custom_resnet.py +87 -0
- examples/test_0.jpg +0 -0
- examples/test_1.jpg +0 -0
- examples/test_2.jpg +0 -0
- examples/test_3.jpg +0 -0
- examples/test_4.jpg +0 -0
- examples/test_5.jpg +0 -0
- examples/test_6.jpg +0 -0
- examples/test_7.jpg +0 -0
- examples/test_8.jpg +0 -0
- examples/test_9.jpg +0 -0
- results/custom_resnet_trained.pth +3 -0
- utils.py +53 -0
custom_resnet.py
ADDED
@@ -0,0 +1,87 @@
# model.py file

import torch
import torch.nn as nn
import torch.nn.functional as F


class BasicBlock(nn.Module):
    # Two conv-BN-ReLU layers; the skip connection is added externally in ResNet.forward.

    def __init__(self, in_channels, out_channels, stride=1):
        super(BasicBlock, self).__init__()

        self.conv1 = nn.Conv2d(in_channels, out_channels, kernel_size=3,
                               stride=stride, padding=1, bias=False)
        self.bn1 = nn.BatchNorm2d(out_channels)

        self.conv2 = nn.Conv2d(out_channels, out_channels, kernel_size=3,
                               stride=1, padding=1, bias=False)
        self.bn2 = nn.BatchNorm2d(out_channels)

        self.relu = nn.ReLU()

    def forward(self, x):
        x = self.relu(self.bn1(self.conv1(x)))
        x = self.relu(self.bn2(self.conv2(x)))
        return x


class ResNet(nn.Module):
    def __init__(self, block, num_classes=10):
        super(ResNet, self).__init__()

        # Prep layer: 3 -> 64 channels, spatial size unchanged (32x32 for CIFAR-10)
        self.preparation = nn.Sequential(
            nn.Conv2d(3, 64, kernel_size=3, stride=1, padding=1, bias=False),
            nn.BatchNorm2d(64),
            nn.ReLU()
        )

        # Layer 1: 64 -> 128 channels, MaxPool halves the spatial size (32 -> 16)
        self.layer1 = nn.Sequential(
            nn.Conv2d(64, 128, kernel_size=3, stride=1, padding=1, bias=False),
            nn.MaxPool2d(2, 2),
            nn.BatchNorm2d(128),
            nn.ReLU()
        )

        self.residual1 = block(128, 128, 1)

        # Layer 2: 128 -> 256 channels (16 -> 8)
        self.layer2 = nn.Sequential(
            nn.Conv2d(128, 256, kernel_size=3, stride=1, padding=1, bias=False),
            nn.MaxPool2d(2, 2),
            nn.BatchNorm2d(256),
            nn.ReLU()
        )

        # Layer 3: 256 -> 512 channels (8 -> 4)
        self.layer3 = nn.Sequential(
            nn.Conv2d(256, 512, kernel_size=3, stride=1, padding=1, bias=False),
            nn.MaxPool2d(2, 2),
            nn.BatchNorm2d(512),
            nn.ReLU()
        )

        self.residual3 = block(512, 512, 1)

        self.maxpool2d = nn.MaxPool2d(4, 4)   # 4x4 -> 1x1
        self.fc = nn.Linear(512, num_classes)

    def forward(self, x):

        x = self.preparation(x)

        x = self.layer1(x)
        res1 = self.residual1(x)
        x = x + res1

        x = self.layer2(x)

        x = self.layer3(x)
        res3 = self.residual3(x)
        x = x + res3

        x = self.maxpool2d(x)
        x = x.view(x.size(0), -1)   # flatten to (batch, 512)

        x = self.fc(x)

        return x


def Custom_ResNet():
    return ResNet(BasicBlock, num_classes=10)
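For quick verification, a minimal sketch (not part of this commit) that instantiates the model and checks the output shape on a CIFAR-10-sized input; the spatial sizes follow from the three MaxPool2d(2, 2) stages and the final MaxPool2d(4, 4).

# Sketch: sanity-check the architecture on a 32x32 RGB input.
import torch
from custom_resnet import Custom_ResNet

model = Custom_ResNet()
dummy = torch.randn(1, 3, 32, 32)   # CIFAR-10-sized batch of one
logits = model(dummy)               # 32 -> 16 -> 8 -> 4 spatially, then 4x4 pool -> 1x1
print(logits.shape)                 # torch.Size([1, 10])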
examples/test_0.jpg
ADDED
examples/test_1.jpg
ADDED
examples/test_2.jpg
ADDED
examples/test_3.jpg
ADDED
examples/test_4.jpg
ADDED
examples/test_5.jpg
ADDED
examples/test_6.jpg
ADDED
examples/test_7.jpg
ADDED
examples/test_8.jpg
ADDED
examples/test_9.jpg
ADDED
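A hedged end-to-end sketch of how the model, the trained checkpoint, and one of these example images could be combined for a single prediction. It assumes the checkpoint stores a plain state_dict, that the JPEGs are CIFAR-10-style RGB crops (resized to 32x32 here), and it reuses the test-time transform from utils.py further down in this commit; the class names follow the standard CIFAR-10 label order.

# Sketch: classify one example image (assumptions noted above).
import numpy as np
import torch
from PIL import Image

from custom_resnet import Custom_ResNet
from utils import augmentation_custom_resnet

CIFAR10_CLASSES = ('airplane', 'automobile', 'bird', 'cat', 'deer',
                   'dog', 'frog', 'horse', 'ship', 'truck')

model = Custom_ResNet()
model.load_state_dict(torch.load("results/custom_resnet_trained.pth", map_location="cpu"))
model.eval()

img = np.array(Image.open("examples/test_0.jpg").convert("RGB").resize((32, 32)))
tensor = augmentation_custom_resnet('Test')(image=img)["image"].unsqueeze(0)
with torch.no_grad():
    pred = model(tensor).argmax(dim=1).item()
print(CIFAR10_CLASSES[pred])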
results/custom_resnet_trained.pth
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:d0950f4cb97b78183165e325857b6c057eddd54bc82df71831879611de4f3b42
size 26326759
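This is a Git LFS pointer, so the tracked file in the repository tree is only the three lines above; the actual ~26 MB checkpoint is fetched when the LFS object is pulled. A small sketch to confirm the resolved file matches the pointer's sha256 oid:

# Sketch: check that the downloaded checkpoint matches the LFS pointer above.
import hashlib

expected = "d0950f4cb97b78183165e325857b6c057eddd54bc82df71831879611de4f3b42"
with open("results/custom_resnet_trained.pth", "rb") as f:
    digest = hashlib.sha256(f.read()).hexdigest()
print(digest == expected)   # True once the LFS object has been pulled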
utils.py
ADDED
@@ -0,0 +1,53 @@
# utils file

import matplotlib.pyplot as plt
import torch
from torchsummary import summary
from torchvision import transforms
import torchvision
import numpy as np
import cv2

import albumentations as A
from albumentations.pytorch.transforms import ToTensorV2

# Keep OpenCV from contending with DataLoader workers over threads / OpenCL
cv2.setNumThreads(0)
cv2.ocl.setUseOpenCL(False)


class Cifar10SearchDataset(torchvision.datasets.CIFAR10):
    # CIFAR-10 subclass that applies albumentations transforms (keyword-argument API)

    def __init__(self, root="./data", train=True, download=True, transform=None):
        super().__init__(root=root, train=train, download=download, transform=transform)

    def __getitem__(self, index):
        image, label = self.data[index], self.targets[index]

        if self.transform is not None:
            transformed = self.transform(image=image)
            image = transformed["image"]

        return image, label


def augmentation_custom_resnet(data, mu=(0.49139968, 0.48215827, 0.44653124),
                               sigma=(0.24703233, 0.24348505, 0.26158768), pad=4):
    # Train: pad, random 32x32 crop, horizontal flip, cutout, then normalize.
    # Anything else (e.g. 'Test'): normalize only.
    if data == 'Train':
        transform = A.Compose([A.PadIfNeeded(min_height=32+pad,
                                             min_width=32+pad,
                                             border_mode=cv2.BORDER_CONSTANT,
                                             value=np.mean(mu)),
                               A.RandomCrop(32, 32),
                               A.HorizontalFlip(p=0.5),
                               A.Cutout(max_h_size=8, max_w_size=8),
                               A.Normalize(mean=mu, std=sigma),
                               ToTensorV2()])
    else:
        transform = A.Compose([A.Normalize(mean=mu, std=sigma),
                               ToTensorV2()])

    return transform
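For context, a minimal sketch of how Cifar10SearchDataset and augmentation_custom_resnet are meant to be wired together with PyTorch DataLoaders; the batch size and worker count here are illustrative, not taken from this commit.

# Sketch: build train/test loaders from the helpers above (batch size chosen arbitrarily).
from torch.utils.data import DataLoader
from utils import Cifar10SearchDataset, augmentation_custom_resnet

train_ds = Cifar10SearchDataset(root="./data", train=True, download=True,
                                transform=augmentation_custom_resnet('Train'))
test_ds = Cifar10SearchDataset(root="./data", train=False, download=True,
                               transform=augmentation_custom_resnet('Test'))

train_loader = DataLoader(train_ds, batch_size=512, shuffle=True, num_workers=2)
test_loader = DataLoader(test_ds, batch_size=512, shuffle=False, num_workers=2)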