Kaggle dataset and submission links
Feature selection (the main modification)
On top of the sample code, the main change is to select_feat, the feature-selection function.
First, since the first column of the dataset is the id, drop it from raw_x_train, raw_x_valid, and raw_x_test. Then use SelectKBest to keep the 10 features most strongly correlated with the target.
def select_feat(train_data, valid_data, test_data, select_all=True):
    # labels
    y_train = train_data[:, -1]
    y_valid = valid_data[:, -1]
    # features
    # the first column is the id, so skip it
    raw_x_train = train_data[:, 1:-1]
    raw_x_valid = valid_data[:, 1:-1]
    raw_x_test = test_data[:, 1:]
    if select_all:
        feat_idx = list(range(raw_x_train.shape[1]))
        # later, modify the branch below to pick a better feature subset
    else:
        # use SelectKBest to keep the k features most correlated with the target
        # (f_regression for regression problems, f_classif for classification)
        selector = SelectKBest(f_regression, k=10)
        selector.fit(raw_x_train, y_train)
        feat_idx = selector.get_support(indices=True)  # indices of the selected features
    return raw_x_train[:, feat_idx], raw_x_valid[:, feat_idx], raw_x_test[:, feat_idx], y_train, y_valid
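As a quick sanity check, you can also print the F-scores of the features SelectKBest keeps. This is a minimal sketch rather than part of the submitted script; it assumes raw_x_train and y_train are the arrays built inside select_feat, with f_regression as the score function as above.

selector = SelectKBest(f_regression, k=10)
selector.fit(raw_x_train, y_train)
for idx in selector.get_support(indices=True):
    # scores_ holds the per-feature F-statistic computed by f_regression
    print(f'feature {idx}: F = {selector.scores_[idx]:.2f}')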
Blue is the original run with all features; red is the run with the 10 features selected by the code above. The loss drops substantially.
The scores of the two submissions are shown below; the improvement is significant.
By the assignment's grading criteria, this reaches the strong baseline.
Complete code
The complete code is as follows:
import math
import numpy as np
import pandas as pd
import os
import csv
# progress bar
from tqdm import tqdm
# PyTorch
import torch
import torch.nn as nn
from torch.utils.data import DataLoader, Dataset, random_split
# TensorBoard
from torch.utils.tensorboard import SummaryWriter
# SelectKBest for feature selection
from sklearn.feature_selection import SelectKBest, f_regression
# Fix the random seeds to make experiments reproducible
def same_seed(seed):
    # make cuDNN deterministic so repeated runs give identical results
    torch.backends.cudnn.deterministic = True
    # disable cuDNN auto-tuning, another source of nondeterminism
    torch.backends.cudnn.benchmark = False
    np.random.seed(seed)
    torch.manual_seed(seed)
    if torch.cuda.is_available():
        torch.cuda.manual_seed(seed)
        torch.cuda.manual_seed_all(seed)
# Split the data
# The original data only has a training set and a test set, so carve a validation set out of the training set
def train_valid_split(data_set, valid_ratio, seed):
    valid_data_size = int(len(data_set) * valid_ratio)
    train_data_size = len(data_set) - valid_data_size
    train_data, valid_data = random_split(data_set, [train_data_size, valid_data_size], generator=torch.Generator().manual_seed(seed))
    return np.array(train_data), np.array(valid_data)
# Select features; by default all (117) features are used for training
# (pick a better subset here later to improve the model)
def select_feat(train_data, valid_data, test_data, select_all=True):
    # labels
    y_train = train_data[:, -1]
    y_valid = valid_data[:, -1]
    # features
    # the first column is the id, so skip it
    raw_x_train = train_data[:, 1:-1]
    raw_x_valid = valid_data[:, 1:-1]
    raw_x_test = test_data[:, 1:]
    if select_all:
        feat_idx = list(range(raw_x_train.shape[1]))
        # later, modify the branch below to pick a better feature subset
    else:
        # use SelectKBest to keep the k features most correlated with the target
        # (f_regression for regression problems, f_classif for classification)
        selector = SelectKBest(f_regression, k=10)
        selector.fit(raw_x_train, y_train)
        feat_idx = selector.get_support(indices=True)  # indices of the selected features
    return raw_x_train[:, feat_idx], raw_x_valid[:, feat_idx], raw_x_test[:, feat_idx], y_train, y_valid
# Dataset class
class COVID19Dataset(Dataset):
    def __init__(self, features, targets=None):
        # prediction: no labels, features only
        if targets is None:
            self.targets = targets  # None
        # training: labels available
        else:
            self.targets = torch.FloatTensor(targets)
        self.features = torch.FloatTensor(features)

    def __getitem__(self, idx):
        if self.targets is None:
            return self.features[idx]
        else:
            return self.features[idx], self.targets[idx]

    def __len__(self):
        return len(self.features)
# Neural network model
class My_Model(nn.Module):
    def __init__(self, input_dim):
        super(My_Model, self).__init__()
        self.layers = nn.Sequential(
            nn.Linear(input_dim, 16),
            nn.ReLU(),
            nn.Linear(16, 8),
            nn.ReLU(),
            nn.Linear(8, 1)
        )

    def forward(self, x):
        x = self.layers(x)
        x = x.squeeze(1)  # (B, 1) -> (B)
        return x
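# Optional shape sanity check (a sketch, safe to delete): a (B, input_dim)
# batch should come out as a 1-D tensor of B predictions after the squeeze:
#   m = My_Model(input_dim=10)
#   print(m(torch.randn(4, 10)).shape)  # torch.Size([4])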
# Hyperparameter settings
device = 'cuda' if torch.cuda.is_available() else 'cpu'
config = {
    'seed': 5201314,
    'select_all': True,
    'valid_ratio': 0.2,
    'n_epochs': 3000,
    'batch_size': 256,
    'learning_rate': 1e-5,
    'early_stop': 400,  # stop early if the validation loss has not improved for 400 consecutive epochs
    'save_path': './models/model.ckpt'
}
# Training loop
def trainer(train_loader, valid_loader, model, config, device):
    criterion = nn.MSELoss(reduction='mean')  # 'mean' (the default) averages the loss over all elements; note the argument is reduction, not reduce
    # momentum helps the optimizer move faster across steep loss surfaces:
    # with momentum=0.9, roughly 90% of each update comes from the previous
    # update direction and 10% from the current gradient, which makes the
    # optimization smoother and faster
    optimizer = torch.optim.SGD(model.parameters(), lr=config['learning_rate'], momentum=0.9)
    writer = SummaryWriter()
    if not os.path.isdir('./models'):
        os.makedirs('./models')
    n_epochs = config['n_epochs']
    best_loss = math.inf  # start from infinity
    step = 0
    early_stop_count = 0
    for epoch in range(n_epochs):
        model.train()
        loss_record = []
        # wrap train_loader to visualize progress; position=0 keeps the bar on top,
        # leave=True keeps it on screen after the epoch finishes
        train_pbar = tqdm(train_loader, position=0, leave=True)
        # training loop
        for x, y in train_pbar:
            optimizer.zero_grad()  # reset gradients
            x, y = x.to(device), y.to(device)
            pred = model(x)  # forward pass
            loss = criterion(pred, y)  # compute the loss
            loss.backward()  # backward pass
            optimizer.step()  # update parameters
            step += 1
            loss_record.append(loss.item())
            # show training progress
            train_pbar.set_description(f'Epoch {epoch + 1}/{n_epochs}')
            train_pbar.set_postfix({'loss': loss.item()})
        mean_train_loss = sum(loss_record) / len(loss_record)
        writer.add_scalar('Loss/train', mean_train_loss, step)
        # validation loop
        model.eval()
        loss_record = []
        for x, y in valid_loader:
            x, y = x.to(device), y.to(device)
            # no gradients needed for validation
            with torch.no_grad():
                pred = model(x)
                loss = criterion(pred, y)
            loss_record.append(loss.item())
        mean_valid_loss = sum(loss_record) / len(loss_record)
        print(f'Epoch {epoch + 1}/{n_epochs}, Train loss: {mean_train_loss:.4f}, Valid loss: {mean_valid_loss:.4f}')
        writer.add_scalar('Loss/valid', mean_valid_loss, step)
        # keep the best model according to the validation loss
        if mean_valid_loss < best_loss:
            best_loss = mean_valid_loss
            torch.save(model.state_dict(), config['save_path'])  # save your best model
            print('Saving model with loss {:.3f}...'.format(best_loss))
            early_stop_count = 0
        else:
            early_stop_count += 1
        if early_stop_count >= config['early_stop']:
            print('\nModel is not improving, so we halt the training process.')
            return
# Preparation
# set the random seed
same_seed(config['seed'])
# load the data
train_data = pd.read_csv('./covid.train.csv').values
test_data = pd.read_csv('./covid.test.csv').values
# split off a validation set
train_data, valid_data = train_valid_split(train_data, config['valid_ratio'], config['seed'])
print(f'train data size: {len(train_data)}, valid data size: {len(valid_data)}, test data size: {len(test_data)}')
# select features
x_train, x_valid, x_test, y_train, y_valid = select_feat(train_data, valid_data, test_data, config['select_all'])
print(f'The number of features: {x_train.shape[1]}')
# build the datasets
train_dataset = COVID19Dataset(x_train, y_train)
valid_dataset = COVID19Dataset(x_valid, y_valid)
test_dataset = COVID19Dataset(x_test)
# dataloaders
train_loader = DataLoader(train_dataset, batch_size=config['batch_size'], shuffle=True, pin_memory=True)
valid_loader = DataLoader(valid_dataset, batch_size=config['batch_size'], shuffle=True, pin_memory=True)
test_loader = DataLoader(test_dataset, batch_size=config['batch_size'], shuffle=False, pin_memory=True)
# start training
model = My_Model(input_dim=x_train.shape[1]).to(device)
trainer(train_loader, valid_loader, model, config, device)
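# The Loss/train and Loss/valid curves logged by SummaryWriter can be viewed
# with TensorBoard; SummaryWriter writes to ./runs by default, so standard
# TensorBoard usage (not specific to this script) is:
#   tensorboard --logdir=./runs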
# Prediction
def predict(test_loader, model, device):
    model.eval()
    preds = []
    for x in tqdm(test_loader):
        x = x.to(device)
        with torch.no_grad():
            pred = model(x)
            preds.append(pred.detach().cpu())
    preds = torch.cat(preds, dim=0).numpy()
    return preds

def save_pred(preds, file):
    with open(file, 'w') as fp:
        writer = csv.writer(fp)
        writer.writerow(['id', 'tested_positive'])
        for i, p in enumerate(preds):
            writer.writerow([i, p])
# Predict and save the results
model = My_Model(input_dim=x_test.shape[1]).to(device)
model.load_state_dict(torch.load(config['save_path']))
preds = predict(test_loader, model, device)
save_pred(preds, './pred.csv')
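To see which columns the k=10 run actually kept, one option is to map the selected indices back to the CSV header. A minimal sketch under one assumption: select_feat would need to be modified to also return feat_idx, since the .values call above discards the column names.

# feat_idx: assumed to be an extra return value of a modified select_feat
feature_names = pd.read_csv('./covid.train.csv').columns[1:-1]  # drop id and the label column
print(list(feature_names[feat_idx]))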