import torch
import torch.nn as nn
import torch.optim as optim


class UNet(nn.Module):
    def __init__(self):
        super(UNet, self).__init__()
        # Encoder: five stride-2 convolutions, each halving the spatial size
        self.encoder = nn.Sequential(
            nn.Conv2d(3, 64, kernel_size=4, stride=2, padding=1),      # 256 -> 128
            nn.ReLU(inplace=True),
            nn.Conv2d(64, 128, kernel_size=4, stride=2, padding=1),    # 128 -> 64
            nn.ReLU(inplace=True),
            nn.Conv2d(128, 256, kernel_size=4, stride=2, padding=1),   # 64 -> 32
            nn.ReLU(inplace=True),
            nn.Conv2d(256, 512, kernel_size=4, stride=2, padding=1),   # 32 -> 16
            nn.ReLU(inplace=True),
            nn.Conv2d(512, 1024, kernel_size=4, stride=2, padding=1),  # 16 -> 8
            nn.ReLU(inplace=True)
        )
        # Decoder: five stride-2 transposed convolutions mirroring the encoder
        self.decoder = nn.Sequential(
            nn.ConvTranspose2d(1024, 512, kernel_size=4, stride=2, padding=1),  # 8 -> 16
            nn.ReLU(inplace=True),
            nn.ConvTranspose2d(512, 256, kernel_size=4, stride=2, padding=1),   # 16 -> 32
            nn.ReLU(inplace=True),
            nn.ConvTranspose2d(256, 128, kernel_size=4, stride=2, padding=1),   # 32 -> 64
            nn.ReLU(inplace=True),
            nn.ConvTranspose2d(128, 64, kernel_size=4, stride=2, padding=1),    # 64 -> 128
            nn.ReLU(inplace=True),
            nn.ConvTranspose2d(64, 3, kernel_size=4, stride=2, padding=1),      # 128 -> 256
            nn.Tanh()  # Output range [-1, 1]
        )

    def forward(self, x):
        # Encode to an 8x8 bottleneck, then decode back to full resolution
        # (note: no skip connections between encoder and decoder stages).
        enc = self.encoder(x)
        dec = self.decoder(enc)
        return dec
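

# Minimal usage sketch: a forward-pass shape check, assuming 256x256 RGB
# inputs as indicated by the layer comments above. The decoder mirrors the
# encoder, so the output should have the same shape as the input.
if __name__ == "__main__":
    model = UNet()
    x = torch.randn(1, 3, 256, 256)  # one RGB image, 256x256
    with torch.no_grad():
        out = model(x)
    print(out.shape)  # expected: torch.Size([1, 3, 256, 256])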