Table of Contents
- 1. Theory
- 2. Python
1. Theory
$$
A=\begin{bmatrix} 1 & 2 & 0 & 0 \\ 2 & 3 & 4 & 0 \end{bmatrix}
\to
B=\begin{bmatrix} 1 & 1 & 0 & 0 \\ 1 & 1 & 1 & 0 \end{bmatrix}
\to
C=\begin{bmatrix} 0.225 & 0.610 & 0 & 0 \\ 0.089 & 0.242 & 0.657 & 0 \end{bmatrix}
$$
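In words: $A$ is the raw score matrix whose trailing zeros are padding, $B$ marks the non-zero positions of $A$ with ones, and $C$ is the row-wise softmax of $A$ multiplied elementwise by $B$, so the padded columns contribute nothing. As a quick hand-computed check of the first row (not from the original post, just arithmetic):

$$
\operatorname{softmax}([1,\ 2,\ 0,\ 0]) = \left[\tfrac{e^{1}}{S},\ \tfrac{e^{2}}{S},\ \tfrac{e^{0}}{S},\ \tfrac{e^{0}}{S}\right] \approx [0.225,\ 0.610,\ 0.083,\ 0.083],
\qquad S = e^{1}+e^{2}+e^{0}+e^{0}
$$

$$
[0.225,\ 0.610,\ 0.083,\ 0.083] \odot [1,\ 1,\ 0,\ 0] = [0.225,\ 0.610,\ 0,\ 0]
$$

which matches the first row of $C$ above.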
2. Python
```python
import torch
from torch import nn
from torch.nn import functional as F

torch.set_printoptions(precision=3, sci_mode=False)


class MatrixNoneZero2OnesLike(object):
    """Builds a ones-like mask: 1 where the input is non-zero, 0 elsewhere (matrix B)."""

    def __init__(self, in_matrix):
        self.in_matrix = in_matrix
        self._result = torch.zeros_like(self.in_matrix)

    @property
    def result(self):
        # Non-zero entries become True, and those positions are filled with 1.
        my_result = torch.zeros_like(self.in_matrix)
        my_result_bool = self.in_matrix.to(torch.bool)
        self._result = my_result.masked_fill(my_result_bool, 1)
        return self._result


class LossMaskedMatrix(object):
    """Row-wise softmax of the source matrix, with the padded (zero) positions zeroed out (matrix C)."""

    def __init__(self, src_matrix):
        self.src_matrix = src_matrix.to(torch.float)
        self.nonzero = MatrixNoneZero2OnesLike(self.src_matrix)
        self.nonzero_matrix = self.nonzero.result
        self._loss_matrix = torch.zeros_like(self.src_matrix)

    @property
    def loss_matrix(self):
        # Softmax over the last dimension, then mask out the padding columns.
        my_soft_matrix = F.softmax(self.src_matrix, dim=-1)
        my_loss_matrix = my_soft_matrix * self.nonzero_matrix
        print(f"*" * 50)
        print(f"src_matrix=\n{self.src_matrix}")
        print(f"nonzero_matrix=\n{self.nonzero_matrix}")
        print(f"loss_matrix=\n{my_loss_matrix}")
        print(f"*" * 50)
        self._loss_matrix = my_loss_matrix
        return self._loss_matrix


if __name__ == "__main__":
    in_matrix = torch.tensor([[1, 2, 0, 0], [2, 3, 4, 0]]).to(torch.float)
    test_loss_matrix = LossMaskedMatrix(in_matrix)
    result = test_loss_matrix.loss_matrix
```
- Result:
```
**************************************************
src_matrix=
tensor([[1., 2., 0., 0.],
        [2., 3., 4., 0.]])
nonzero_matrix=
tensor([[1., 1., 0., 0.],
        [1., 1., 1., 0.]])
loss_matrix=
tensor([[0.225, 0.610, 0.000, 0.000],
        [0.089, 0.242, 0.657, 0.000]])
**************************************************
```
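The two helper classes can also be collapsed into a couple of tensor operations. The snippet below is a minimal sketch of the same idea, not part of the original post; the function name `masked_softmax` is a made-up helper, and the mask is derived directly with `!=` instead of `masked_fill`:

```python
import torch
from torch.nn import functional as F

torch.set_printoptions(precision=3, sci_mode=False)


def masked_softmax(x: torch.Tensor) -> torch.Tensor:
    """Row-wise softmax with the originally-zero (padding) positions zeroed out."""
    mask = (x != 0).to(x.dtype)           # plays the role of MatrixNoneZero2OnesLike
    return F.softmax(x, dim=-1) * mask    # plays the role of LossMaskedMatrix.loss_matrix


x = torch.tensor([[1., 2., 0., 0.], [2., 3., 4., 0.]])
print(masked_softmax(x))
# tensor([[0.225, 0.610, 0.000, 0.000],
#         [0.089, 0.242, 0.657, 0.000]])
```

Note that, as in the original code, the mask is applied after the softmax, so the surviving entries in each row no longer sum to 1 (e.g. 0.225 + 0.610 = 0.835 in the first row); masking before the softmax would be a different design choice.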