Table of Contents
- Convolutional Neural Networks
- Differences Between Fully Connected and Convolutional Neural Networks
- Conceptual Knowledge
- MNIST Dataset (Convolutional Neural Network)
- GoogLeNet
- Inception
- Residual Network (ResNet)
- Residual Block Structure
Convolutional Neural Networks
Differences Between Fully Connected and Convolutional Neural Networks
A fully connected neural network is the most basic kind of feed-forward network: every neuron is connected to all of the neurons in the previous layer, which lets the network learn global features of its input.
- Full connectivity: each neuron in a fully connected layer is connected to every neuron in the previous layer, which means each neuron is influenced by all of the previous layer's outputs.
- Independent weights: the weight parameters of a fully connected network are not shared; every connection has its own weight, so a large number of parameters must be updated, which can also lead to overfitting.
- One-dimensional output: the output of a fully connected layer is typically a one-dimensional vector.
A fully connected network therefore has many parameters to update. Moreover, when the input is an image, as in the MNIST example, the image has shape (1 channel, 28 pixels high, 28 pixels wide), but the fully connected layer flattens it into a 784-dimensional vector (x=x.view(-1,784)). This flattening can discard important spatial information, such as the fact that neighboring pixels tend to be similar.
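A quick illustration of what that flattening does (a minimal sketch; the shapes match the MNIST pipeline used below):

import torch

x=torch.randn(64,1,28,28)      # a batch of 64 single-channel 28x28 images
print(x.view(-1,784).shape)    # torch.Size([64, 784]): the 2-D pixel grid is gone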
Convolutional neural networks (CNNs) are better suited to tasks such as image recognition and speech recognition.
The defining feature of a CNN is its convolutional layers, which operate on local regions of the input while sharing weights and bias terms:
- Local connectivity: a CNN uses local connections; each neuron is connected only to a local region of the input.
- Weight sharing: every neuron in a convolutional layer shares the same weight parameters across the whole input. This greatly reduces the number of model parameters (see the sketch after this list), and it means the layer can learn a particular feature regardless of where it appears in the input.
- Feature maps: the output of a convolutional layer is a two-dimensional array (a feature map).
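To make the parameter savings concrete, here is a minimal comparison (a sketch in PyTorch, which the examples below also use): a fully connected layer mapping a flattened 28×28 image to 10 outputs versus a convolutional layer with ten 5×5 kernels.

import torch

fc=torch.nn.Linear(28*28,10)
conv=torch.nn.Conv2d(1,10,kernel_size=5)
print(sum(p.numel() for p in fc.parameters()))    # 7850 = 784*10 weights + 10 biases
print(sum(p.numel() for p in conv.parameters()))  # 260 = 10*(1*5*5) weights + 10 biases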
Conceptual Knowledge
- The number of channels of a convolution kernel equals the number of input channels.
- The number of convolution kernels equals the number of output channels.
Computation with a single convolution kernel
Computation with m convolution kernels
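Both rules can be checked directly in PyTorch (a minimal sketch; the channel counts here are arbitrary):

import torch

conv=torch.nn.Conv2d(in_channels=3,out_channels=8,kernel_size=5)
x=torch.randn(1,3,28,28)    # batch of 1, 3 channels, 28x28
print(conv.weight.shape)    # torch.Size([8, 3, 5, 5]): 8 kernels, each with 3 channels
print(conv(x).shape)        # torch.Size([1, 8, 24, 24]): 8 output channels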
MNIST Dataset (Convolutional Neural Network)
# CNN version of the MNIST classifier
import torch
from torchvision import transforms
from torchvision import datasets
from torch.utils.data import DataLoader
import torch.nn.functional as F
import torch.optim as optim
batch_size=64
# Preprocess the images: scale pixel values from [0,255] to [0,1], reorder to C x H x W, then normalize with the MNIST mean (0.1307) and std (0.3081)
transform=transforms.Compose([transforms.ToTensor(),transforms.Normalize((0.1307,),(0.3081,))])
train_dataset=datasets.MNIST(root='./dataset/mnist/',train=True,download=True,transform=transform)
train_loader=DataLoader(train_dataset,shuffle=True,batch_size=batch_size)
test_dataset=datasets.MNIST(root='./dataset/mnist/',train=False,download=True,transform=transform)
test_loader=DataLoader(test_dataset,shuffle=False,batch_size=batch_size)
class Model(torch.nn.Module):
    def __init__(self):
        super(Model,self).__init__()
        self.conv1=torch.nn.Conv2d(1,10,kernel_size=5)
        self.pooling=torch.nn.MaxPool2d(2)
        self.conv2=torch.nn.Conv2d(10,20,kernel_size=5)
        self.fc=torch.nn.Linear(320,10)
    def forward(self,x):
        # Re-read the batch size from the input: the last batch may be smaller
        # than batch_size if the dataset size does not divide evenly
        batch_size=x.size(0)
        x=F.relu(self.pooling(self.conv1(x)))
        x=F.relu(self.pooling(self.conv2(x)))
        x=x.view(batch_size,-1)
        x=self.fc(x)
        return x
model=Model()
criterion=torch.nn.CrossEntropyLoss()
# SGD with momentum 0.5
optimizer=optim.SGD(model.parameters(),lr=0.01,momentum=0.5)
# device=torch.device('cuda:0' if torch.cuda.is_available() else 'cpu')
# model.to(device)  # move the model to the GPU
def train(epoch):
    running_loss=0.0
    for batch_idx,data in enumerate(train_loader,0):
        inputs,targets=data
        # inputs,targets=inputs.to(device),targets.to(device)
        # forward
        y_pred=model(inputs)
        loss=criterion(y_pred,targets)
        # backward
        optimizer.zero_grad()
        loss.backward()
        # update
        optimizer.step()
        running_loss+=loss.item()
        if batch_idx % 300 ==299:
            print('[%d,%5d] loss:%.3f'%(epoch+1,batch_idx+1,running_loss/300))
            running_loss=0.0
def test():
    correct=0
    total=0
    # no gradient computation needed during evaluation
    with torch.no_grad():
        for data in test_loader:
            images,labels=data
            # images,labels=images.to(device),labels.to(device)
            outputs=model(images)
            # take the index of the max logit; dim=1 is the class dimension
            _,predicted=torch.max(outputs.data,dim=1)
            total+=labels.size(0)
            correct+=(predicted==labels).sum().item()
    print('Accuracy on test set:%d %%'%(100*correct/total))
if __name__=='__main__':
    for epoch in range(10):
        train(epoch)
        test()
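The in_features=320 of the linear layer comes from tracing the shapes: a 28×28 input becomes 24×24 after conv1 (5×5 kernel), 12×12 after pooling, 8×8 after conv2, and 4×4 after the second pooling, with 20 channels, so 20*4*4=320. A quick check (a sketch; it assumes the Model class above has been defined):

m=Model()
feat=m.pooling(m.conv2(m.pooling(m.conv1(torch.randn(1,1,28,28)))))
print(feat.shape)    # torch.Size([1, 20, 4, 4]) -> 20*4*4 = 320 features for fc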
GoogLeNet
Inception
# GoogLeNet
import torch.nn as nn
import torch
from torchvision import transforms
from torchvision import datasets
from torch.utils.data import DataLoader
import torch.nn.functional as F
import torch.optim as optim
batch_size=64
transform=transforms.Compose([transforms.ToTensor(),transforms.Normalize((0.1307,),(0.3081,))])
train_dataset=datasets.MNIST(root='./dataset/mnist/',train=True,download=True,transform=transform)
train_loader=DataLoader(train_dataset,shuffle=True,batch_size=batch_size)
test_dataset=datasets.MNIST(root='./dataset/mnist/',train=False,download=True,transform=transform)
test_loader=DataLoader(test_dataset,shuffle=False,batch_size=batch_size)
class InceptionA(nn.Module):
    def __init__(self,in_channels):
        super(InceptionA,self).__init__()
        self.branch1x1=nn.Conv2d(in_channels,16,kernel_size=1)
        self.branch5x5_1=nn.Conv2d(in_channels,16,kernel_size=1)
        self.branch5x5_2=nn.Conv2d(16,24,kernel_size=5,padding=2)
        self.branch3x3_1=nn.Conv2d(in_channels,16,kernel_size=1)
        self.branch3x3_2=nn.Conv2d(16,24,kernel_size=3,padding=1)
        self.branch3x3_3=nn.Conv2d(24,24,kernel_size=3,padding=1)
        self.branch_pool=nn.Conv2d(in_channels,24,kernel_size=1)
    def forward(self,x):
        branch1x1=self.branch1x1(x)
        branch5x5=self.branch5x5_1(x)
        branch5x5=self.branch5x5_2(branch5x5)
        branch3x3=self.branch3x3_1(x)
        branch3x3=self.branch3x3_2(branch3x3)
        branch3x3=self.branch3x3_3(branch3x3)
        branch_pool=F.avg_pool2d(x,kernel_size=3,stride=1,padding=1)
        branch_pool=self.branch_pool(branch_pool)
        outputs=[branch1x1,branch5x5,branch3x3,branch_pool]
        return torch.cat(outputs,dim=1)
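# Each InceptionA output concatenates four branches along dim=1:
# 16 + 24 + 24 + 24 = 88 channels. Every branch preserves the spatial size
# (padding=2 for the 5x5 conv, padding=1 for the 3x3 convs, and a stride-1
# average pool with padding=1), so only the channel count changes; this is
# why conv2 below expects 88 input channels.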
class Net(nn.Module):
    def __init__(self):
        super(Net,self).__init__()
        self.conv1=nn.Conv2d(1,10,kernel_size=5)
        self.conv2=nn.Conv2d(88,20,kernel_size=5)
        self.incep1=InceptionA(in_channels=10)
        self.incep2=InceptionA(in_channels=20)
        self.mp=nn.MaxPool2d(2)
        self.fc=nn.Linear(1408,10)
    def forward(self,x):
        in_size=x.size(0)
        x=F.relu(self.mp(self.conv1(x)))
        x=self.incep1(x)
        x=F.relu(self.mp(self.conv2(x)))
        x=self.incep2(x)
        x=x.view(in_size,-1)
        x=self.fc(x)
        return x
model=Net()
criterion=nn.CrossEntropyLoss()
optimizer=optim.SGD(model.parameters(),lr=0.01,momentum=0.5)
def train(epoch):
    running_loss=0.0
    for batch_idx,data in enumerate(train_loader,0):
        inputs,targets=data
        y_pred=model(inputs)
        loss=criterion(y_pred,targets)
        optimizer.zero_grad()
        loss.backward()
        optimizer.step()
        running_loss+=loss.item()
        if batch_idx % 300 ==299:
            print('[%d,%5d] loss:%.3f'%(epoch+1,batch_idx+1,running_loss/300))
            running_loss=0.0
def test():
    correct=0
    total=0
    # no gradient computation needed during evaluation
    with torch.no_grad():
        for data in test_loader:
            images,labels=data
            # images,labels=images.to(device),labels.to(device)
            outputs=model(images)
            # take the index of the max logit; dim=1 is the class dimension
            _,predicted=torch.max(outputs.data,dim=1)
            total+=labels.size(0)
            correct+=(predicted==labels).sum().item()
    print('Accuracy on test set:%d %%'%(100*correct/total))
if __name__=='__main__':
    for epoch in range(10):
        train(epoch)
        test()
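Here fc takes 1408 input features: the 28×28 image becomes 24×24 after conv1 and 12×12 after pooling, the first InceptionA keeps that size but outputs 88 channels, conv2 reduces 12×12 to 8×8 and pooling to 4×4, and the second InceptionA again yields 88 channels, so 88*4*4=1408. A quick check (a sketch; it assumes the Net class above is defined):

net=Net()
x=torch.randn(1,1,28,28)
x=F.relu(net.mp(net.conv1(x)))    # 10 x 12 x 12
x=net.incep1(x)                   # 88 x 12 x 12
x=F.relu(net.mp(net.conv2(x)))    # 20 x 4 x 4
x=net.incep2(x)                   # 88 x 4 x 4
print(x.view(1,-1).shape)         # torch.Size([1, 1408])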
Residual Network (ResNet)
As a network gets deeper, vanishing gradients can make it hard to train. A residual network adds a shortcut (skip) connection that carries the input x directly to a later layer, so the stacked layers only need to learn the residual F(x), and the block outputs F(x)+x.
Residual Block Structure
# ResNet-style network
import torch.nn as nn
import torch
from torchvision import transforms
from torchvision import datasets
from torch.utils.data import DataLoader
import torch.nn.functional as F
import torch.optim as optim
# MNIST data pipeline (same as in the previous examples); train_loader and
# test_loader are used by train() and test() below
batch_size=64
transform=transforms.Compose([transforms.ToTensor(),transforms.Normalize((0.1307,),(0.3081,))])
train_dataset=datasets.MNIST(root='./dataset/mnist/',train=True,download=True,transform=transform)
train_loader=DataLoader(train_dataset,shuffle=True,batch_size=batch_size)
test_dataset=datasets.MNIST(root='./dataset/mnist/',train=False,download=True,transform=transform)
test_loader=DataLoader(test_dataset,shuffle=False,batch_size=batch_size)
class ResidualBlock(nn.Module):
    def __init__(self,channels):
        super(ResidualBlock,self).__init__()
        self.channels=channels
        # input and output channel counts are the same
        self.conv1=nn.Conv2d(channels,channels,kernel_size=3,padding=1)
        self.conv2=nn.Conv2d(channels,channels,kernel_size=3,padding=1)
    def forward(self,x):
        y=F.relu(self.conv1(x))
        y=self.conv2(y)
        # skip connection: add the input to the learned residual
        return F.relu(x+y)
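# The element-wise sum x + y requires x and y to have identical shapes,
# which is why both convolutions keep the channel count unchanged and use
# 3x3 kernels with padding=1 to preserve the height and width.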
class Net(nn.Module):
    def __init__(self):
        super(Net,self).__init__()
        self.conv1=nn.Conv2d(1,16,kernel_size=5)
        self.conv2=nn.Conv2d(16,32,kernel_size=5)
        self.mp=nn.MaxPool2d(2)
        self.rblock1=ResidualBlock(16)
        self.rblock2=ResidualBlock(32)
        self.fc=nn.Linear(512,10)
    def forward(self,x):
        in_size=x.size(0)
        x=self.mp(F.relu(self.conv1(x)))
        x=self.rblock1(x)
        x=self.mp(F.relu(self.conv2(x)))
        x=self.rblock2(x)
        x=x.view(in_size,-1)
        x=self.fc(x)
        return x
model=Net()
criterion=nn.CrossEntropyLoss()
optimizer=optim.SGD(model.parameters(),lr=0.01,momentum=0.5)
def train(epoch):
    running_loss=0.0
    for batch_idx,data in enumerate(train_loader,0):
        inputs,targets=data
        y_pred=model(inputs)
        loss=criterion(y_pred,targets)
        optimizer.zero_grad()
        loss.backward()
        optimizer.step()
        running_loss+=loss.item()
        if batch_idx % 300 ==299:
            print('[%d,%5d] loss:%.3f'%(epoch+1,batch_idx+1,running_loss/300))
            running_loss=0.0
def test():
    correct=0
    total=0
    # no gradient computation needed during evaluation
    with torch.no_grad():
        for data in test_loader:
            images,labels=data
            # images,labels=images.to(device),labels.to(device)
            outputs=model(images)
            # take the index of the max logit; dim=1 is the class dimension
            _,predicted=torch.max(outputs.data,dim=1)
            total+=labels.size(0)
            correct+=(predicted==labels).sum().item()
    print('Accuracy on test set:%d %%'%(100*correct/total))
if __name__=='__main__':
    for epoch in range(10):
        train(epoch)
        test()
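Once more the fc input size follows from the shapes: 28×28 → 24×24 (conv1) → 12×12 (pooling) with 16 channels, then 8×8 (conv2) → 4×4 (pooling) with 32 channels; the residual blocks change neither the spatial size nor the channel count, so fc sees 32*4*4=512 features. A quick check (a sketch; it assumes the Net class above):

net=Net()
x=torch.randn(1,1,28,28)
x=net.rblock1(net.mp(F.relu(net.conv1(x))))    # 16 x 12 x 12
x=net.rblock2(net.mp(F.relu(net.conv2(x))))    # 32 x 4 x 4
print(x.view(1,-1).shape)                      # torch.Size([1, 512])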