from torch import nn
import torch.utils.model_zoo as model_zoo
def _make_divisible(v, divisor, min_value=None):
    """
    This function is taken from the original tf repo. It ensures that all
    layers have a channel number that is divisible by `divisor` (8 in this file):
    https://github.com/tensorflow/models/blob/master/research/slim/nets/mobilenet/mobilenet.py

    :param v: raw channel count (possibly fractional after width scaling)
    :param divisor: the value the result must be divisible by
    :param min_value: lower bound on the result (defaults to `divisor`)
    :return: the rounded channel count
    """
    if min_value is None:
        min_value = divisor
    new_v = max(min_value, int(v + divisor / 2) // divisor * divisor)
    # Make sure that rounding down does not go down by more than 10%.
    if new_v < 0.9 * v:
        new_v += divisor
    return new_v
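
# A few illustrative sanity checks (added for clarity, not part of the original
# file), using the divisor of 8 that this file passes everywhere:
assert _make_divisible(32 * 0.75, 8) == 24  # 24 is already a multiple of 8
assert _make_divisible(16 * 0.35, 8) == 8   # 5.6 is clamped up to min_value == divisor
assert _make_divisible(11, 8) == 16         # rounding down to 8 would drop >10% of 11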
class ConvBNReLU(nn.Sequential):
    def __init__(self, in_planes, out_planes, kernel_size=3, stride=1, groups=1):
        padding = (kernel_size - 1) // 2
        super(ConvBNReLU, self).__init__(
            nn.Conv2d(in_planes, out_planes, kernel_size, stride, padding, groups=groups, bias=False),
            nn.BatchNorm2d(out_planes),
            nn.ReLU6(inplace=True)
        )
class InvertedResidual(nn.Module):
    def __init__(self, inp, oup, stride, expand_ratio):
        super(InvertedResidual, self).__init__()
        self.stride = stride
        assert stride in [1, 2]

        hidden_dim = int(round(inp * expand_ratio))
        self.use_res_connect = self.stride == 1 and inp == oup

        layers = []
        if expand_ratio != 1:
            # pw
            layers.append(ConvBNReLU(inp, hidden_dim, kernel_size=1))
        layers.extend([
            # dw
            ConvBNReLU(hidden_dim, hidden_dim, stride=stride, groups=hidden_dim),
            # pw-linear
            nn.Conv2d(hidden_dim, oup, 1, 1, 0, bias=False),
            nn.BatchNorm2d(oup),
        ])
        self.conv = nn.Sequential(*layers)

    def forward(self, x):
        if self.use_res_connect:
            return x + self.conv(x)
        else:
            return self.conv(x)
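
# Quick shape check for the block above (an illustrative sketch, not part of the
# original file): with stride 1 and inp == oup the residual connection is active
# and the output shape matches the input.
def _demo_inverted_residual():
    import torch
    block = InvertedResidual(inp=32, oup=32, stride=1, expand_ratio=6)
    x = torch.randn(1, 32, 56, 56)
    y = block(x)
    assert block.use_res_connect and y.shape == x.shape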
class MobileNetV2(nn.Module):
    def __init__(self, pretrained=True):
        """
        MobileNet V2 backbone with an FPN head on top.

        Args:
            pretrained (bool): If True, load ImageNet weights for the backbone;
                the FPN-specific layers keep their random initialization.
        """
        super(MobileNetV2, self).__init__()
        block = InvertedResidual
        input_channel = 32
        last_channel = 1280
        width_mult = 1.0   # width multiplier - scales the channel count of every layer
        round_nearest = 8  # round channel counts to a multiple of this value
        inverted_residual_setting = [
            # t, c, n, s
            [1, 16, 1, 1],
            [6, 24, 2, 2],
            [6, 32, 3, 2],
            [6, 64, 4, 2],
            [6, 96, 3, 1],
            [6, 160, 3, 2],
            [6, 320, 1, 1],
        ]

        # Only check the first element, assuming the user knows t, c, n, s are required.
        if len(inverted_residual_setting) == 0 or len(inverted_residual_setting[0]) != 4:
            raise ValueError("inverted_residual_setting should be non-empty "
                             "and each row should have 4 elements, got {}".format(inverted_residual_setting))

        # building first layer
        input_channel = _make_divisible(input_channel * width_mult, round_nearest)
        self.last_channel = _make_divisible(last_channel * max(1.0, width_mult), round_nearest)
        features = [ConvBNReLU(3, input_channel, stride=2)]
        # building inverted residual blocks
        for t, c, n, s in inverted_residual_setting:
            output_channel = _make_divisible(c * width_mult, round_nearest)
            for i in range(n):
                stride = s if i == 0 else 1
                features.append(block(input_channel, output_channel, stride, expand_ratio=t))
                input_channel = output_channel
        # building last several layers
        features.append(ConvBNReLU(input_channel, self.last_channel, kernel_size=1))
        # make it nn.Sequential
        self.features = nn.Sequential(*features)

        # The original classification head is not used in this FPN variant:
        # self.classifier = nn.Sequential(
        #     nn.Dropout(0.2),
        #     nn.Linear(self.last_channel, num_classes),
        # )

        # FPN head: 1x1 lateral convolutions project the tapped feature maps
        # (160, 64, 32 and 24 channels) to a common 32-channel width, and 3x3
        # convolutions smooth the merged top-down maps.
        self.toplayer = nn.Conv2d(160, 32, kernel_size=1, stride=1, padding=0)
        self.latlayer1 = nn.Conv2d(64, 32, kernel_size=1, stride=1, padding=0)
        self.latlayer2 = nn.Conv2d(32, 32, kernel_size=1, stride=1, padding=0)
        self.latlayer3 = nn.Conv2d(24, 32, kernel_size=1, stride=1, padding=0)
        self.smooth1 = nn.Conv2d(32, 32, kernel_size=3, stride=1, padding=1)
        self.smooth2 = nn.Conv2d(32, 32, kernel_size=3, stride=1, padding=1)
        self.smooth3 = nn.Conv2d(32, 32, kernel_size=3, stride=1, padding=1)
        # Indices of the feature maps tapped for the pyramid (strides 4, 8, 16, 32).
        self.fpn_selected = [2, 5, 9, 15]

        # weight initialization
        for m in self.modules():
            if isinstance(m, nn.Conv2d):
                nn.init.kaiming_normal_(m.weight, mode='fan_out')
                if m.bias is not None:
                    nn.init.zeros_(m.bias)
            elif isinstance(m, nn.BatchNorm2d):
                nn.init.ones_(m.weight)
                nn.init.zeros_(m.bias)
            elif isinstance(m, nn.Linear):
                nn.init.normal_(m.weight, 0, 0.01)
                nn.init.zeros_(m.bias)

        if pretrained:
            self._load_pretrained_model()
    def _forward_impl(self, x):
        # This exists since TorchScript doesn't support inheritance, so the superclass
        # method (this one) needs a name other than `forward` that subclasses can call.
        fpn_features = []
        for i, f in enumerate(self.features):
            x = f(x)
            if i in self.fpn_selected:
                fpn_features.append(x)
        c2, c3, c4, c5 = fpn_features

        # Top-down pathway: upsample each coarser map to the next finer map's size
        # and add the lateral projection. (nn.functional.upsample is deprecated;
        # nn.functional.interpolate is the drop-in replacement.)
        p5 = self.toplayer(c5)
        p4 = nn.functional.interpolate(p5, size=c4.size()[2:], mode='bilinear', align_corners=True) + self.latlayer1(c4)
        p3 = nn.functional.interpolate(p4, size=c3.size()[2:], mode='bilinear', align_corners=True) + self.latlayer2(c3)
        p2 = nn.functional.interpolate(p3, size=c2.size()[2:], mode='bilinear', align_corners=True) + self.latlayer3(c2)

        p4 = self.smooth1(p4)
        p3 = self.smooth2(p3)
        p2 = self.smooth3(p2)

        return p2, p3, p4, p5

        # The original classification forward pass, unused in this FPN variant:
        # x = self.features(x)
        # Cannot use "squeeze" as batch-size can be 1 => must use reshape with x.shape[0]
        # x = nn.functional.adaptive_avg_pool2d(x, 1).reshape(x.shape[0], -1)
        # x = self.classifier(x)
        # return x

    def forward(self, x):
        return self._forward_impl(x)
    def _load_pretrained_model(self):
        pretrain_dict = model_zoo.load_url('https://download.pytorch.org/models/mobilenet_v2-b0353104.pth')
        model_dict = {}
        state_dict = self.state_dict()
        # Copy only the weights whose names exist in this model; the FPN layers
        # are absent from the ImageNet checkpoint and keep their initialization.
        for k, v in pretrain_dict.items():
            if k in state_dict:
                model_dict[k] = v
        state_dict.update(model_dict)
        self.load_state_dict(state_dict)
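
# Illustrative sketch (not part of the original file): list which parameters the
# ImageNet checkpoint does not cover. Only the FPN layers (toplayer, latlayer1-3,
# smooth1-3) should appear, since the backbone names match torchvision's MobileNetV2.
def _demo_partial_load():
    ckpt = model_zoo.load_url('https://download.pytorch.org/models/mobilenet_v2-b0353104.pth')
    model = MobileNetV2(pretrained=False)
    missing = sorted(k for k in model.state_dict() if k not in ckpt)
    print(missing)  # e.g. ['latlayer1.bias', 'latlayer1.weight', ...]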
def MobileNet_FPN(output_stride=None, BatchNorm=nn.BatchNorm2d, pretrained=True):
    """Constructs a MobileNetV2-FPN model.

    Args:
        pretrained (bool): If True, returns a model pre-trained on ImageNet.

    Note: `output_stride` and `BatchNorm` are accepted for interface
    compatibility with other backbones but are not used here.
    """
    model = MobileNetV2(pretrained=pretrained)
    return model
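
# Example usage (an illustrative sketch; the spatial sizes below assume a
# 224x224 input, giving pyramid levels at strides 4, 8, 16 and 32, each with
# 32 channels):
if __name__ == '__main__':
    import torch
    net = MobileNet_FPN(pretrained=False)
    net.eval()  # avoid updating BatchNorm running stats on the dummy input
    p2, p3, p4, p5 = net(torch.randn(1, 3, 224, 224))
    print(p2.shape)  # torch.Size([1, 32, 56, 56])
    print(p3.shape)  # torch.Size([1, 32, 28, 28])
    print(p4.shape)  # torch.Size([1, 32, 14, 14])
    print(p5.shape)  # torch.Size([1, 32, 7, 7])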