Module earthvision.models.resisc45.regnet

Source code
# Modified from
# https://github.com/facebookresearch/ClassyVision/blob/main/classy_vision/models/anynet.py
# https://github.com/facebookresearch/ClassyVision/blob/main/classy_vision/models/regnet.py
from functools import partial
from typing import Any

import torch
from torch import nn
from torchvision.models.regnet import BlockParams
from torchvision.models import RegNet

from .utils import load_state_dict_from_url


__all__ = ["RegNet", "regnet_y_400mf"]


model_urls = {
    "regnet_y_400mf": (
        "https://drive.google.com/uc?id=1gtoXOxQwt8_J64qFsYsXFh2iQPeln0bq",
        "resisc45_regnet_y_400mf.pth",
    )
}


class RegNet45Class(RegNet):
    def __init__(self, block_params, norm_layer):
        super().__init__(block_params, norm_layer=norm_layer, num_classes=45)


def _regnet(
    arch: str, block_params: BlockParams, pretrained: bool, progress: bool, **kwargs: Any
) -> RegNet45Class:
    norm_layer = kwargs.pop("norm_layer", partial(nn.BatchNorm2d, eps=1e-05, momentum=0.1))
    model = RegNet45Class(block_params, norm_layer=norm_layer, **kwargs)
    if pretrained:
        if arch not in model_urls:
            raise ValueError(f"No checkpoint is available for model type {arch}")
        device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
        state_dict = load_state_dict_from_url(model_urls[arch], map_location=device)
        model.load_state_dict(state_dict)
    return model


def regnet_y_400mf(pretrained: bool = False, progress: bool = True, **kwargs: Any) -> RegNet:
    """
    Constructs a RegNetY_400MF architecture from
    `"Designing Network Design Spaces" <https://arxiv.org/abs/2003.13678>`_.
    Args:
        pretrained (bool): If True, returns a model pre-trained on RESISC45
        progress (bool): If True, displays a progress bar of the download to stderr
    """
    params = BlockParams.from_init_params(
        depth=16, w_0=48, w_a=27.89, w_m=2.09, group_width=8, se_ratio=0.25, **kwargs
    )
    return _regnet("regnet_y_400mf", params, pretrained, progress, **kwargs)
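A minimal usage sketch (assuming the earthvision package is importable and the Google Drive checkpoint referenced in model_urls is reachable; the 256x256 input size mirrors RESISC45 imagery and is illustrative, not required):

import torch
from earthvision.models.resisc45.regnet import regnet_y_400mf

model = regnet_y_400mf(pretrained=True)   # 45-way classifier head
model.eval()

x = torch.randn(1, 3, 256, 256)           # dummy RGB image
with torch.no_grad():
    logits = model(x)                     # shape (1, 45)
    pred = logits.argmax(dim=1)           # predicted class index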

Functions

def regnet_y_400mf(pretrained: bool = False, progress: bool = True, **kwargs: Any) -> torchvision.models.regnet.RegNet

Constructs a RegNetY_400MF architecture from "Designing Network Design Spaces" (https://arxiv.org/abs/2003.13678).

Args

pretrained : bool
If True, returns a model pre-trained on RESISC45
progress : bool
If True, displays a progress bar of the download to stderr
Source code
def regnet_y_400mf(pretrained: bool = False, progress: bool = True, **kwargs: Any) -> RegNet:
    """
    Constructs a RegNetY_400MF architecture from
    `"Designing Network Design Spaces" <https://arxiv.org/abs/2003.13678>`_.
    Args:
        pretrained (bool): If True, returns a model pre-trained on RESISC45
        progress (bool): If True, displays a progress bar of the download to stderr
    """
    params = BlockParams.from_init_params(
        depth=16, w_0=48, w_a=27.89, w_m=2.09, group_width=8, se_ratio=0.25, **kwargs
    )
    return _regnet("regnet_y_400mf", params, pretrained, progress, **kwargs)
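Extra keyword arguments flow through BlockParams.from_init_params (which, in the torchvision versions this module targets, accepts and ignores unknown kwargs) and on to _regnet, where norm_layer is popped before the model is built. A hedged sketch of overriding the normalization settings this way (the eps and momentum values are illustrative assumptions, not recommended settings):

from functools import partial
from torch import nn
from earthvision.models.resisc45.regnet import regnet_y_400mf

bn = partial(nn.BatchNorm2d, eps=1e-3, momentum=0.05)  # passed through kwargs
model = regnet_y_400mf(pretrained=False, norm_layer=bn)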

Classes

class RegNet (block_params: torchvision.models.regnet.BlockParams, num_classes: int = 1000, stem_width: int = 32, stem_type: Optional[Callable[..., torch.nn.modules.module.Module]] = None, block_type: Optional[Callable[..., torch.nn.modules.module.Module]] = None, norm_layer: Optional[Callable[..., torch.nn.modules.module.Module]] = None, activation: Optional[Callable[..., torch.nn.modules.module.Module]] = None)

Base class for all neural network modules.

Your models should also subclass this class.

Modules can also contain other Modules, allowing them to be nested in a tree structure. You can assign the submodules as regular attributes:

import torch.nn as nn
import torch.nn.functional as F

class Model(nn.Module):
    def __init__(self):
        super(Model, self).__init__()
        self.conv1 = nn.Conv2d(1, 20, 5)
        self.conv2 = nn.Conv2d(20, 20, 5)

    def forward(self, x):
        x = F.relu(self.conv1(x))
        return F.relu(self.conv2(x))

Submodules assigned in this way will be registered, and will have their parameters converted too when you call to(), etc.

training (bool): whether this module is in training or evaluation mode.

Initializes internal Module state, shared by both nn.Module and ScriptModule.
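A small, generic illustration of the two points above (standard nn.Module behaviour, not specific to this package): attribute-assigned submodules are registered, so to() converts their parameters as well, and train()/eval() flips the training flag across the whole tree.

import torch
from torch import nn

m = nn.Sequential(nn.Conv2d(3, 8, 3), nn.BatchNorm2d(8))
m.to(torch.float64)               # converts parameters of every registered submodule
m.eval()                          # sets training=False recursively
print(m.training, m[1].training)  # False False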

Source code
class RegNet(nn.Module):
    def __init__(
        self,
        block_params: BlockParams,
        num_classes: int = 1000,
        stem_width: int = 32,
        stem_type: Optional[Callable[..., nn.Module]] = None,
        block_type: Optional[Callable[..., nn.Module]] = None,
        norm_layer: Optional[Callable[..., nn.Module]] = None,
        activation: Optional[Callable[..., nn.Module]] = None,
    ) -> None:
        super().__init__()

        if stem_type is None:
            stem_type = SimpleStemIN
        if norm_layer is None:
            norm_layer = nn.BatchNorm2d
        if block_type is None:
            block_type = ResBottleneckBlock
        if activation is None:
            activation = nn.ReLU

        # Ad hoc stem
        self.stem = stem_type(
            3,  # width_in
            stem_width,
            norm_layer,
            activation,
        )

        current_width = stem_width

        blocks = []
        for i, (
            width_out,
            stride,
            depth,
            group_width,
            bottleneck_multiplier,
        ) in enumerate(block_params._get_expanded_params()):
            blocks.append(
                (
                    f"block{i+1}",
                    AnyStage(
                        current_width,
                        width_out,
                        stride,
                        depth,
                        block_type,
                        norm_layer,
                        activation,
                        group_width,
                        bottleneck_multiplier,
                        block_params.se_ratio,
                        stage_index=i + 1,
                    ),
                )
            )

            current_width = width_out

        self.trunk_output = nn.Sequential(OrderedDict(blocks))

        self.avgpool = nn.AdaptiveAvgPool2d((1, 1))
        self.fc = nn.Linear(in_features=current_width, out_features=num_classes)

        # Init weights and good to go
        self._reset_parameters()

    def forward(self, x: Tensor) -> Tensor:
        x = self.stem(x)
        x = self.trunk_output(x)

        x = self.avgpool(x)
        x = x.flatten(start_dim=1)
        x = self.fc(x)

        return x

    def _reset_parameters(self) -> None:
        # Performs ResNet-style weight initialization
        for m in self.modules():
            if isinstance(m, nn.Conv2d):
                # Note that there is no bias due to BN
                fan_out = m.kernel_size[0] * m.kernel_size[1] * m.out_channels
                nn.init.normal_(m.weight, mean=0.0, std=math.sqrt(2.0 / fan_out))
            elif isinstance(m, nn.BatchNorm2d):
                nn.init.ones_(m.weight)
                nn.init.zeros_(m.bias)
            elif isinstance(m, nn.Linear):
                nn.init.normal_(m.weight, mean=0.0, std=0.01)
                nn.init.zeros_(m.bias)

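For reference, a hedged sketch of constructing this class directly with the same RegNetY-400MF block parameters used by the factory above and a 45-class head (this simply retraces the path taken by _regnet and yields untrained weights):

from functools import partial
from torch import nn
from torchvision.models import RegNet
from torchvision.models.regnet import BlockParams

params = BlockParams.from_init_params(
    depth=16, w_0=48, w_a=27.89, w_m=2.09, group_width=8, se_ratio=0.25
)
net = RegNet(params, num_classes=45,
             norm_layer=partial(nn.BatchNorm2d, eps=1e-05, momentum=0.1))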
Ancestors

  • torch.nn.modules.module.Module

Subclasses

  • earthvision.models.resisc45.regnet.RegNet45Class

Class variables

var dump_patches : bool
var training : bool

Methods

def forward(self, x: torch.Tensor) -> torch.Tensor

Defines the computation performed at every call.

Should be overridden by all subclasses.

Note

Although the recipe for the forward pass needs to be defined within this function, one should call the Module instance afterwards instead of this, since the former takes care of running the registered hooks while the latter silently ignores them.

Source code
def forward(self, x: Tensor) -> Tensor:
    x = self.stem(x)
    x = self.trunk_output(x)

    x = self.avgpool(x)
    x = x.flatten(start_dim=1)
    x = self.fc(x)

    return x
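A minimal sketch of the note above (generic PyTorch behaviour): hooks registered on the module only run when the instance is called; invoking forward() directly skips them.

import torch
from earthvision.models.resisc45.regnet import regnet_y_400mf

model = regnet_y_400mf(pretrained=False).eval()
model.register_forward_hook(lambda mod, inp, out: print("hook fired:", out.shape))

with torch.no_grad():
    model(torch.randn(1, 3, 256, 256))          # prints "hook fired: torch.Size([1, 45])"
    model.forward(torch.randn(1, 3, 256, 256))  # same output, but the hook is skipped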