Python check-in: Day 35

import torch
import torch.nn as nn
import torch.optim as optim
from sklearn.datasets import load_iris
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import MinMaxScaler
import time
import matplotlib.pyplot as plt
from tqdm import tqdm

# Set up the device (GPU if available, otherwise CPU)
device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
print(f"Using device: {device}")

# Load the Iris dataset
iris = load_iris()
X = iris.data  # feature matrix (150 samples, 4 features)
y = iris.target  # class labels (3 classes)

# Split into training and test sets (80% / 20%)
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2, random_state=42)

# Scale features to [0, 1]; fit the scaler on the training set only
scaler = MinMaxScaler()
X_train = scaler.fit_transform(X_train)
X_test = scaler.transform(X_test)

# Convert to PyTorch tensors and move them to the selected device
X_train = torch.FloatTensor(X_train).to(device)
y_train = torch.LongTensor(y_train).to(device)
X_test = torch.FloatTensor(X_test).to(device)
y_test = torch.LongTensor(y_test).to(device)

# Model configurations with different hyperparameters
configs = [
    {"hidden_size": 10, "lr": 0.01, "epochs": 20000},  # baseline configuration
    {"hidden_size": 20, "lr": 0.01, "epochs": 20000},  # more hidden units
    {"hidden_size": 10, "lr": 0.05, "epochs": 20000},  # higher learning rate
    {"hidden_size": 10, "lr": 0.01, "epochs": 10000},  # fewer training epochs
]

for config in configs:
    print(f"\n=== Config: hidden_size={config['hidden_size']}, lr={config['lr']}, epochs={config['epochs']} ===")
    
    class MLP(nn.Module):
        def __init__(self):
            super(MLP, self).__init__()
            self.fc1 = nn.Linear(4, config['hidden_size'])  # 4 input features -> configured hidden size
            self.relu = nn.ReLU()
            self.fc2 = nn.Linear(config['hidden_size'], 3)  # hidden layer -> 3 output classes

        def forward(self, x):
            out = self.fc1(x)
            out = self.relu(out)
            out = self.fc2(out)
            return out

    model = MLP().to(device)
    criterion = nn.CrossEntropyLoss()
    optimizer = optim.SGD(model.parameters(), lr=config['lr'])
    
    # Training loop (full-batch gradient descent on the training set)
    losses = []
    with tqdm(total=config['epochs'], desc="Training", unit="epoch") as pbar:
        for epoch in range(config['epochs']):
            # Forward pass and loss
            outputs = model(X_train)
            loss = criterion(outputs, y_train)

            # Backward pass and parameter update
            optimizer.zero_grad()
            loss.backward()
            optimizer.step()

            # Record the loss every 200 epochs
            if (epoch + 1) % 200 == 0:
                losses.append(loss.item())
                pbar.set_postfix({'Loss': f'{loss.item():.4f}'})

            # Advance the progress bar in steps of 1000 epochs
            if (epoch + 1) % 1000 == 0:
                pbar.update(1000)
    
    # Evaluate on the test set
    model.eval()
    with torch.no_grad():
        outputs = model(X_test)
        _, predicted = torch.max(outputs, 1)
        correct = (predicted == y_test).sum().item()
        accuracy = correct / y_test.size(0)
        print(f'Test accuracy: {accuracy * 100:.2f}%')

Output:

Using device: cpu

=== Config: hidden_size=10, lr=0.01, epochs=20000 ===
Training: 100%|██████████| 20000/20000 [00:31<00:00, 633.99epoch/s, Loss=0.0607]
Test accuracy: 96.67%

=== Config: hidden_size=20, lr=0.01, epochs=20000 ===
Training: 100%|██████████| 20000/20000 [00:35<00:00, 565.66epoch/s, Loss=0.0607]
Test accuracy: 96.67%

=== Config: hidden_size=10, lr=0.05, epochs=20000 ===
Training: 100%|██████████| 20000/20000 [00:27<00:00, 724.62epoch/s, Loss=0.0476]
Test accuracy: 100.00%

=== Config: hidden_size=10, lr=0.01, epochs=10000 ===
Training: 100%|██████████| 10000/10000 [00:16<00:00, 607.61epoch/s, Loss=0.0866]
Test accuracy: 96.67%
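
matplotlib is imported above, but the losses recorded during training are never plotted. Below is a minimal sketch of how the loss curves could be visualized, assuming each configuration's losses are additionally kept in a dictionary; the name all_losses and the helper plot_losses are hypothetical additions, not part of the original script.

# Hypothetical addition: keep each configuration's losses so they can be plotted.
# Inside the loop above, after training, one could store:
#     all_losses[f"hidden={config['hidden_size']}, lr={config['lr']}, epochs={config['epochs']}"] = losses
all_losses = {}

def plot_losses(all_losses, interval=200):
    # Loss was recorded every 200 epochs, so rebuild the epoch axis from that interval
    plt.figure(figsize=(8, 5))
    for label, recorded in all_losses.items():
        epochs_axis = [interval * (i + 1) for i in range(len(recorded))]
        plt.plot(epochs_axis, recorded, label=label)
    plt.xlabel('Epoch')
    plt.ylabel('Training loss')
    plt.title('Loss curves per configuration')
    plt.legend()
    plt.show()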

@浙大疏锦行

