Source code for torch.nn.modules.normalization
import torch
import numbers
from torch.nn.parameter import Parameter
from .module import Module
from ._functions import CrossMapLRN2d as _cross_map_lrn2d
from .. import functional as F
from .. import init
from torch import Tensor, Size
from typing import Union, List, Tuple
__all__ = ['LocalResponseNorm', 'CrossMapLRN2d', 'LayerNorm', 'GroupNorm']
class LocalResponseNorm(Module):
    r"""Applies local response normalization over an input signal.

    The input signal is composed of several input planes, where channels
    occupy the second dimension. Normalization is applied across channels.

    .. math::
        b_{c} = a_{c}\left(k + \frac{\alpha}{n}
        \sum_{c'=\max(0, c-n/2)}^{\min(N-1,c+n/2)}a_{c'}^2\right)^{-\beta}

    Args:
        size: amount of neighbouring channels used for normalization
        alpha: multiplicative factor. Default: 0.0001
        beta: exponent. Default: 0.75
        k: additive factor. Default: 1

    Shape:
        - Input: :math:`(N, C, *)`
        - Output: :math:`(N, C, *)` (same shape as input)

    Examples::

        >>> lrn = nn.LocalResponseNorm(2)
        >>> signal_2d = torch.randn(32, 5, 24, 24)
        >>> signal_4d = torch.randn(16, 5, 7, 7, 7, 7)
        >>> output_2d = lrn(signal_2d)
        >>> output_4d = lrn(signal_4d)
    """

    __constants__ = ['size', 'alpha', 'beta', 'k']
    size: int
    alpha: float
    beta: float
    k: float

    def __init__(self, size: int, alpha: float = 1e-4, beta: float = 0.75, k: float = 1.) -> None:
        super().__init__()
        # Hyper-parameters are plain attributes (no learnable state).
        self.size = size
        self.alpha = alpha
        self.beta = beta
        self.k = k

    def forward(self, input: Tensor) -> Tensor:
        # Delegate the actual normalization to the functional implementation.
        return F.local_response_norm(
            input, self.size, self.alpha, self.beta, self.k
        )

    def extra_repr(self):
        return '{size}, alpha={alpha}, beta={beta}, k={k}'.format(**self.__dict__)
class CrossMapLRN2d(Module):
    # Local response normalization across channel maps of a 4d input,
    # implemented via the CrossMapLRN2d autograd function rather than the
    # functional API used by LocalResponseNorm.

    size: int
    alpha: float
    beta: float
    k: float

    def __init__(self, size: int, alpha: float = 1e-4, beta: float = 0.75, k: float = 1) -> None:
        super().__init__()
        # Store hyper-parameters; this module holds no learnable state.
        self.size = size
        self.alpha = alpha
        self.beta = beta
        self.k = k

    def forward(self, input: Tensor) -> Tensor:
        # Apply the custom autograd function directly.
        return _cross_map_lrn2d.apply(
            input, self.size, self.alpha, self.beta, self.k
        )

    def extra_repr(self) -> str:
        return '{size}, alpha={alpha}, beta={beta}, k={k}'.format(**self.__dict__)
# Accepted forms of a "normalized shape" argument: a single dimension size,
# a list of sizes, or a torch.Size.
_shape_t = Union[int, List[int], Size]
[docs]class LayerNorm(Module):
r"""对小批量输入应用层归一化。
该层实现了`Layer Normalization `__论文中描述的操作
.. math::
y = \frac{x - \mathrm{E}[x]}{ \sqrt{\mathrm{Var}[x] + \epsilon}} * \gamma + \beta
均值和标准差是在最后`D`个维度上计算的,其中`D`是:attr:`normalized_shape`的维度。例如,如果:attr:`normalized_shape`
是