文章目录
1. 卷积 2. python 代码 3. notes
1. 卷积
输入A张量为:

$$A=\begin{bmatrix} 0&1&2&3\\ 4&5&6&7\\ 8&9&10&11\\ 12&13&14&15 \end{bmatrix}$$
卷积核矩阵与偏置为:

$$weight=\begin{bmatrix} 0&1&2\\ 3&4&5\\ 6&7&8 \end{bmatrix},\quad bias = 10$$
2. python 代码
import torch
import torch.nn as nn
import torch.nn.functional as F

# Print tensors with 3 decimal places and without scientific notation.
torch.set_printoptions(precision=3, sci_mode=False)

if __name__ == "__main__":
    run_code = 0
    batch_size = 1
    in_channels = 1
    out_channels = 1
    kernel_size = 4
    input_h = 4
    input_w = 4
    input_total = input_h * input_w

    # 1x1x4x4 input holding 0..15 row-major — matches the matrix A above.
    input_matrix = torch.arange(input_total).reshape(batch_size, in_channels, input_h, input_w).to(torch.float)

    # NOTE: the layer is built with a 4x4 kernel, but its weight/bias are
    # replaced below by a 3x3 kernel, so the *effective* kernel is 3x3.
    # That is why the repr prints kernel_size=(4, 4) while the output is 2x2.
    my_conv2d = nn.Conv2d(in_channels=in_channels, out_channels=out_channels, kernel_size=kernel_size, bias=True,
                          stride=1)
    print(f"my_conv2d={my_conv2d}")

    # Deterministic parameters matching the formulas above: weight = 0..8 as 3x3, bias = 10.
    my_conv2d_weight = torch.arange(9).reshape((1, 1, 3, 3)).to(torch.float)
    my_conv2d_bias = torch.tensor([10.0])
    my_conv2d.weight = nn.Parameter(my_conv2d_weight)
    my_conv2d.bias = nn.Parameter(my_conv2d_bias)
    print(f"my_conv2d_weight=\n{my_conv2d_weight}")
    print(f"my_conv2d_bias=\n{my_conv2d_bias}")

    # The module call and the functional API must produce the same result.
    output_matrix = my_conv2d(input_matrix)
    print(f"input_matrix=\n{input_matrix}")
    print(f"output_matrix=\n{output_matrix}")
    output_matrix_F = F.conv2d(input=input_matrix, weight=my_conv2d_weight, bias=my_conv2d_bias)
    print(f"output_matrix_F=\n{output_matrix_F}")
my_conv2d=Conv2d(1, 1, kernel_size=(4, 4), stride=(1, 1))
my_conv2d_weight=
tensor([[[[0., 1., 2.],
          [3., 4., 5.],
          [6., 7., 8.]]]])
my_conv2d_bias=
tensor([10.])
input_matrix=
tensor([[[[ 0.,  1.,  2.,  3.],
          [ 4.,  5.,  6.,  7.],
          [ 8.,  9., 10., 11.],
          [12., 13., 14., 15.]]]])
output_matrix=
tensor([[[[268., 304.],
          [412., 448.]]]], grad_fn=<ConvolutionBackward0>)
output_matrix_F=
tensor([[[[268., 304.],
          [412., 448.]]]])
3. notes