Transformer - Layer Normalization
flyfish
y = \frac{x - \mathrm{E}[x]}{ \sqrt{\mathrm{Var}[x] + \epsilon}} * \gamma + \beta
Paper: Layer Normalization
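A minimal NumPy sketch of the formula above, with γ = 1 and β = 0 (the function name and sample values here are illustrative, not part of any library):
import numpy as np

def layer_norm_np(x, eps=1e-5, gamma=1.0, beta=0.0):
    # normalize over the last axis: subtract the mean, divide by sqrt(var + eps), then scale and shift
    mean = x.mean(axis=-1, keepdims=True)
    var = x.var(axis=-1, keepdims=True)  # biased variance, as LayerNorm uses
    return (x - mean) / np.sqrt(var + eps) * gamma + beta

v = np.array([-1.5256, -0.7502, -0.6540])  # first row of x in the code below
print(layer_norm_np(v))  # ≈ [-1.4070, 0.5802, 0.8268]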
import numpy as np
import torch
import torch.nn.functional as F
x = torch.Tensor([
[[-1.5256, -0.7502, -0.6540],
[-1.6095, -0.1002, -0.6092],
[-0.9798, -1.6091, -0.7121],
[ 0.3037, -0.7773, -0.2515],
[-0.2223, 1.6871, -0.3206]],
[[-0.2993, 1.8793, -0.0721],
[ 0.1578, -0.7735, 0.1991],
[ 0.0457, -1.3924, 2.6891],
[-0.1110, 0.2927, -0.1578],
[-0.0288, 2.3571, -1.0373]]])
batch, sentence_length, embedding_dim = 2, 5, 3
# A simple way to think of it: there are 2 sentences, each sentence has 5 word vectors, and each word vector has dimension 3
print("x=", x)
# This is PyTorch's layer-normalization operation, used to normalize a layer's activations. It standardizes the input features to zero mean and unit variance over the specified dimensions, then scales and shifts them with learnable parameters, which helps keep training stable.
# x: input feature tensor of shape [batch_size, sentence_length, embedding_dim].
# normalized_shape: the trailing dimensions to normalize over; here [sentence_length, embedding_dim].
# weight and bias: optional scale and shift parameters applied after normalization; if None (the default), no scaling or shifting is applied.
# eps: a small value added to the variance in the denominator to avoid division by zero.
nn_layer_norm = torch.nn.LayerNorm(normalized_shape=[sentence_length, embedding_dim], eps=1e-5, elementwise_affine=True)
print("torch.nn.LayerNorm=", nn_layer_norm(x))
layer_norm = F.layer_norm(x, normalized_shape=[sentence_length, embedding_dim], weight=None, bias=None, eps=1e-5)
print("F.layer_norm=", layer_norm)
# Here dim lists the indices of the last two dimensions (sizes 5 and 3)
mean = torch.mean(x, dim=[1,2], keepdim=True)
print("mean:",mean)
var = torch.mean((x - mean) ** 2, dim=[1,2], keepdim=True)+ 1e-5
print("var:",var)
print("Numpy LayerNorm=", (x - mean.data) / torch.sqrt(var.data))
# Now normalize only over the last dimension (embedding_dim), which is the usual Transformer setting
layer_norm = torch.nn.LayerNorm(normalized_shape=embedding_dim, elementwise_affine=False)
print(layer_norm(x))
mean = torch.mean(x, axis=-1,keepdim=True)
print("mean:",mean)
# mean: tensor([[[-0.9766],
# [-0.7730],
# [-1.1003],
# [-0.2417],
# [ 0.3814]],
# [[ 0.5026],
# [-0.1389],
# [ 0.4475],
# [ 0.0080],
# [ 0.4303]]])
# torch.var computes the variance of x along the given axis. Specifically:
# var: the resulting variance tensor.
# x: the input tensor.
# axis (alias for dim): the dimension along which to compute the variance; here the last dimension, -1.
# unbiased: whether to use the unbiased (Bessel-corrected) estimator; LayerNorm uses the biased estimator, so we pass False (the default is True).
# keepdim: whether the reduced dimension is kept with size 1 so the result broadcasts against x; we pass True (the default is False).
var = torch.var(x, axis=-1,unbiased=False,keepdim=True)
print("var:",var)
# var: tensor([[[0.1522],
# [0.3931],
# [0.1414],
# [0.1948],
# [0.8540]],
# [[0.9562],
# [0.2017],
# [2.8571],
# [0.0409],
# [2.0257]]])
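The unbiased flag matters: for the first token the biased estimator divides by n = 3, while the unbiased one divides by n - 1 = 2 (a small illustration added here):
row = x[0, 0]  # tensor([-1.5256, -0.7502, -0.6540])
print(torch.var(row, unbiased=False))  # ≈ 0.1522, matches var above
print(torch.var(row, unbiased=True))   # ≈ 0.2284, Bessel-corrected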
div = torch.sqrt(var + 1e-05)  # the denominator: standard deviation with eps for numerical stability
print("div:",div)
# div: tensor([[[0.3902],
# [0.6270],
# [0.3760],
# [0.4414],
# [0.9241]],
# [[0.9779],
# [0.4491],
# [1.6903],
# [0.2023],
# [1.4233]]])
out = (x-mean)/div
print(out)
# tensor([[[-1.4070, 0.5802, 0.8268],
# [-1.3343, 1.0731, 0.2612],
# [ 0.3206, -1.3531, 1.0325],
# [ 1.2357, -1.2135, -0.0222],
# [-0.6533, 1.4129, -0.7596]],
# [[-0.8201, 1.4078, -0.5877],
# [ 0.6606, -1.4132, 0.7526],
# [-0.2377, -1.0885, 1.3262],
# [-0.5882, 1.4077, -0.8195],
# [-0.3226, 1.3537, -1.0312]]])
print("mean keepdim=False", torch.mean(x, axis=-1,keepdim=False) )
# tensor([[-0.9766, -0.7730, -1.1003, -0.2417, 0.3814],
# [ 0.5026, -0.1389, 0.4475, 0.0080, 0.4303]])
print((1.6871 - 0.3814) / 0.9241) #1.4129423222594957
# Verify the means by hand
print((-1.5256 + -0.7502 + -0.6540)/3)#-0.9766
print((-0.2993 + 1.8793 + -0.0721)/3)#0.5026333333333334
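Putting it together, the manual per-token result `out` should match the `layer_norm` module defined above (a check added here):
print(torch.allclose(out, layer_norm(x), atol=1e-6))  # expected: True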
If we follow the setup in the figure below, it becomes:
batch, sentence_length, embedding_dim = 2, 1, 256
hidden_size = 256  # int, units of embeddings and encoders
norm_layer = torch.nn.LayerNorm(hidden_size)
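A short usage sketch under that configuration (the input tensor is random and purely illustrative):
tokens = torch.randn(batch, sentence_length, embedding_dim)  # hypothetical input of shape [2, 1, 256]
normed = norm_layer(tokens)
print(normed.shape)  # torch.Size([2, 1, 256])
print(normed.mean(dim=-1))  # ≈ 0 for each token
print(normed.std(dim=-1, unbiased=False))  # ≈ 1 for each token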
If used for images:
N, C, H, W = 20, 5, 10, 10
input = torch.randn(N, C, H, W)
# Normalize over the last three dimensions (i.e. the channel and spatial dimensions)
layer_norm = torch.nn.LayerNorm([C, H, W])
output = layer_norm(input)
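Each sample is normalized independently over its C × H × W elements, which we can confirm from the per-sample statistics (a check added here):
per_sample = output.reshape(N, -1)
print(per_sample.mean(dim=1))  # ≈ 0 for every sample
print(per_sample.var(dim=1, unbiased=False))  # ≈ 1 for every sample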