Deep Learning Notes 39: Getting Started with Text Classification in PyTorch

Published: 2025-04-20

1. My Environment

1. Language: Python 3.8

2. IDE: PyCharm

3. Deep learning environment:

  • torch==1.12.1+cu113
  • torchvision==0.13.1+cu113

2. Importing the Data

import torch
import torch.nn as nn
import warnings
 
warnings.filterwarnings("ignore")             # suppress warning messages
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
 
from torchtext.datasets import AG_NEWS
train_iter = AG_NEWS(split='train')      # load the AG NEWS training split
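To get a feel for the data: each item is a (label, text) pair, and the labels are the integers 1–4 (World, Sports, Business, Sci/Tech). A quick peek at one sample (using a fresh iterator so train_iter itself is left untouched; the exact article text depends on the downloaded dataset):

# Inspect the first (label, text) pair; AG_NEWS labels are integers 1-4
first_label, first_text = next(iter(AG_NEWS(split='train')))
print(first_label)        # an integer label, e.g. 3
print(first_text[:80])    # first 80 characters of the news article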

3. Building the Vocabulary

from torchtext.data.utils import get_tokenizer
from torchtext.vocab import build_vocab_from_iterator
 
tokenizer  = get_tokenizer('basic_english') # returns a tokenizer function
 
def yield_tokens(data_iter):
    for _, text in data_iter:
        yield tokenizer(text)
 
vocab = build_vocab_from_iterator(yield_tokens(train_iter), specials=["<unk>"])
vocab.set_default_index(vocab["<unk>"]) # set the default index: unknown words map to "<unk>"
print(vocab(['here', 'is', 'an', 'example']))

Output: [475, 21, 30, 5297]
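Because of set_default_index, any token that was not seen when building the vocabulary falls back to the index of "<unk>" instead of raising an error; since specials are added first by default, "<unk>" gets index 0. A quick sanity check (the unseen token below is made up purely for illustration):

# An out-of-vocabulary token maps to the "<unk>" index (0 here)
print(vocab(['here', 'is', 'some_totally_unseen_token_xyz']))
# expected: [475, 21, 0]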

text_pipeline  = lambda x: vocab(tokenizer(x))
label_pipeline = lambda x: int(x) - 1
print(text_pipeline('here is the an example'))
Output: [475, 21, 2, 30, 5297]
print(label_pipeline('10'))
Output: 9

(label_pipeline shifts the 1-based AG NEWS labels to the 0-based class indices expected by the loss function, so '10' maps to 9.)

4. Generating Data Batches and Iterators

from torch.utils.data import DataLoader
 
def collate_batch(batch):
    label_list, text_list, offsets = [], [], [0]
    
    for (_label, _text) in batch:
        # collect the label
        label_list.append(label_pipeline(_label))
        
        # collect the tokenized text as a 1-D tensor of token indices
        processed_text = torch.tensor(text_pipeline(_text), dtype=torch.int64)
        text_list.append(processed_text)
        
        # record the length (number of tokens) of this text, used to compute offsets
        offsets.append(processed_text.size(0))
        
    label_list = torch.tensor(label_list, dtype=torch.int64)
    text_list  = torch.cat(text_list)
    offsets    = torch.tensor(offsets[:-1]).cumsum(dim=0) # cumulative sum of lengths -> start position of each text
    
    return label_list.to(device), text_list.to(device), offsets.to(device)
 
# data loader
dataloader = DataLoader(train_iter,
                        batch_size=8,
                        shuffle   =False,
                        collate_fn=collate_batch)
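To see what collate_batch actually produces, you can pull one batch from the dataloader. The shapes below follow from batch_size=8; the length of the concatenated text tensor varies with the articles in the batch:

# Inspect one batch: labels, the concatenated token ids, and the per-text offsets
labels, texts, offsets = next(iter(dataloader))
print(labels.shape)   # torch.Size([8])  -> one label per sample
print(texts.shape)    # 1-D tensor holding all 8 texts concatenated
print(offsets)        # 8 starting positions; the first is always 0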

5. Defining the Model

from torch import nn
 
class TextClassificationModel(nn.Module):
 
    def __init__(self, vocab_size, embed_dim, num_class):
        super(TextClassificationModel, self).__init__()
        
        self.embedding = nn.EmbeddingBag(vocab_size,   # size of the vocabulary
                                         embed_dim,    # embedding dimension
                                         sparse=False) # use dense gradients for the embedding weights
        
        self.fc = nn.Linear(embed_dim, num_class)
        self.init_weights()
 
    def init_weights(self):
        initrange = 0.5
        self.embedding.weight.data.uniform_(-initrange, initrange)
        self.fc.weight.data.uniform_(-initrange, initrange)
        self.fc.bias.data.zero_()
 
    def forward(self, text, offsets):
        embedded = self.embedding(text, offsets)
        return self.fc(embedded)
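nn.EmbeddingBag looks up the embedding of every token and then (with the default mode='mean') averages them per text, using offsets to mark where each text starts in the flat index tensor. A small sketch of that equivalence, using hypothetical toy sizes rather than the real vocabulary:

# Toy check: EmbeddingBag with offsets == per-text mean of ordinary embeddings
bag = nn.EmbeddingBag(10, 4)                   # vocab of 10, embedding dim 4, mode='mean' by default
emb = nn.Embedding(10, 4)
emb.weight.data.copy_(bag.weight.data)         # share the same weights for comparison
 
text    = torch.tensor([1, 2, 3, 4, 5])        # two texts concatenated: [1,2,3] and [4,5]
offsets = torch.tensor([0, 3])                 # text 0 starts at index 0, text 1 at index 3
 
out_bag  = bag(text, offsets)                  # shape (2, 4): one averaged vector per text
out_mean = torch.stack([emb(text[0:3]).mean(0), emb(text[3:5]).mean(0)])
print(torch.allclose(out_bag, out_mean))       # True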

6. Instantiating the Model

num_class  = len(set([label for (label, text) in train_iter]))  # 4 classes for AG NEWS
vocab_size = len(vocab)
em_size    = 64
model      = TextClassificationModel(vocab_size, em_size, num_class).to(device)
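To confirm the architecture, you can print the model and count its trainable parameters. The exact count depends on len(vocab) at the time the vocabulary was built, so your number may differ:

print(model)
num_params = sum(p.numel() for p in model.parameters() if p.requires_grad)
print(f"trainable parameters: {num_params}")   # roughly vocab_size * 64 + 64 * 4 + 4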

7. Defining the Training and Evaluation Functions

import time
 
def train(dataloader):
    model.train()  # switch to training mode
    total_acc, train_loss, total_count = 0, 0, 0
    log_interval = 500
    start_time   = time.time()
 
    for idx, (label, text, offsets) in enumerate(dataloader):
        
        predicted_label = model(text, offsets)
        
        optimizer.zero_grad()                    # reset gradients
        loss = criterion(predicted_label, label) # loss between predictions and the true labels
        loss.backward()                          # backpropagation
        optimizer.step()                         # update the parameters
        
        # accumulate accuracy and loss
        total_acc   += (predicted_label.argmax(1) == label).sum().item()
        train_loss  += loss.item()
        total_count += label.size(0)
        
        if idx % log_interval == 0 and idx > 0:
            elapsed = time.time() - start_time
            print('| epoch {:1d} | {:4d}/{:4d} batches '
                  '| train_acc {:4.3f} train_loss {:4.5f}'.format(epoch, idx, len(dataloader),
                                              total_acc/total_count, train_loss/total_count))
            total_acc, train_loss, total_count = 0, 0, 0
            start_time = time.time()
 
def evaluate(dataloader):
    model.eval()  # switch to evaluation mode
    total_acc, total_loss, total_count = 0, 0, 0
 
    with torch.no_grad():
        for idx, (label, text, offsets) in enumerate(dataloader):
            predicted_label = model(text, offsets)
            
            loss = criterion(predicted_label, label)  # compute the loss
            # accumulate evaluation statistics
            total_acc   += (predicted_label.argmax(1) == label).sum().item()
            total_loss  += loss.item()
            total_count += label.size(0)
            
    return total_acc/total_count, total_loss/total_count
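The log below comes from a driver loop that the post does not show. A minimal sketch of how it is typically wired together, assuming the usual AG_NEWS setup (CrossEntropyLoss, SGD with a StepLR scheduler, a 95/5 train/validation split, batch_size=64); the names train_dataloader, valid_dataloader and test_dataloader are introduced here:

from torch.utils.data.dataset import random_split
from torchtext.data.functional import to_map_style_dataset
 
EPOCHS     = 10
LR         = 5
BATCH_SIZE = 64
 
criterion = torch.nn.CrossEntropyLoss()
optimizer = torch.optim.SGD(model.parameters(), lr=LR)
scheduler = torch.optim.lr_scheduler.StepLR(optimizer, 1.0, gamma=0.1)
 
# Convert the iterable datasets to map-style datasets so they can be split and shuffled
train_iter, test_iter = AG_NEWS()
train_dataset = to_map_style_dataset(train_iter)
test_dataset  = to_map_style_dataset(test_iter)
num_train     = int(len(train_dataset) * 0.95)
split_train_, split_valid_ = random_split(train_dataset,
                                          [num_train, len(train_dataset) - num_train])
 
train_dataloader = DataLoader(split_train_, batch_size=BATCH_SIZE,
                              shuffle=True, collate_fn=collate_batch)
valid_dataloader = DataLoader(split_valid_, batch_size=BATCH_SIZE,
                              shuffle=True, collate_fn=collate_batch)
test_dataloader  = DataLoader(test_dataset, batch_size=BATCH_SIZE,
                              shuffle=True, collate_fn=collate_batch)
 
for epoch in range(1, EPOCHS + 1):
    epoch_start_time = time.time()
    train(train_dataloader)
    val_acc, val_loss = evaluate(valid_dataloader)
    scheduler.step()
    print('-' * 69)
    print('| epoch {:1d} | time:{:4.2f}s |'
          ' valid_acc {:4.3f} valid_loss {:4.3f}'.format(epoch,
                                                         time.time() - epoch_start_time,
                                                         val_acc, val_loss))
    print('-' * 69)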

Training output:

| epoch 1 | 500/1782 batches| train_acc 0.901 train_loss 0.00458
| epoch 1 | 1000/1782 batches| train_acc 0.905 train_loss 0.00438
| epoch 1 | 1500/1782 batches| train_acc 0.908 train_loss 0.00437
---------------------------------------------------------------------
| epoch 1 | time:6.30s |valid_acc 0.907 | valid_loss 0.004
---------------------------------------------------------------------
| epoch 2 | 500/1782 batches| train_acc 0.917 train_loss 0.00381
| epoch 2 | 1000/1782 batches| train_acc 0.917 train_loss 0.00383
| epoch 2 | 1500/1782 batches| train_acc 0.917 train_loss 0.00386
---------------------------------------------------------------------
| epoch 2 | time:6.26s |valid_acc 0.911 | valid_loss 0.004
---------------------------------------------------------------------
| epoch 3 | 500/1782 batches| train_acc 0.929 train_loss 0.00330
| epoch 3 | 1000/1782 batches| train_acc 0.927 train_loss 0.00340
| epoch 3 | 1500/1782 batches| train_acc 0.923 train_loss 0.00354
---------------------------------------------------------------------
| epoch 3 | time:6.21s |valid_acc 0.935 | valid_loss 0.003
---------------------------------------------------------------------
| epoch 4 | 500/1782 batches| train_acc 0.933 train_loss 0.00306
| epoch 4 | 1000/1782 batches| train_acc 0.932 train_loss 0.00311
| epoch 4 | 1500/1782 batches| train_acc 0.929 train_loss 0.00318
---------------------------------------------------------------------
| epoch 4 | time:6.22s |valid_acc 0.916 | valid_loss 0.003
---------------------------------------------------------------------
| epoch 5 | 500/1782 batches| train_acc 0.948 train_loss 0.00253
| epoch 5 | 1000/1782 batches| train_acc 0.949 train_loss 0.00242
| epoch 5 | 1500/1782 batches| train_acc 0.951 train_loss 0.00238
---------------------------------------------------------------------
| epoch 5 | time:6.23s |valid_acc 0.954 | valid_loss 0.002
---------------------------------------------------------------------
| epoch 6 | 500/1782 batches| train_acc 0.951 train_loss 0.00241
| epoch 6 | 1000/1782 batches| train_acc 0.952 train_loss 0.00236
| epoch 6 | 1500/1782 batches| train_acc 0.952 train_loss 0.00235
---------------------------------------------------------------------
| epoch 6 | time:6.26s |valid_acc 0.954 | valid_loss 0.002
---------------------------------------------------------------------
| epoch 7 | 500/1782 batches| train_acc 0.954 train_loss 0.00228
| epoch 7 | 1000/1782 batches| train_acc 0.951 train_loss 0.00238
| epoch 7 | 1500/1782 batches| train_acc 0.954 train_loss 0.00228
---------------------------------------------------------------------
| epoch 7 | time:6.26s |valid_acc 0.954 | valid_loss 0.002
---------------------------------------------------------------------
| epoch 8 | 500/1782 batches| train_acc 0.953 train_loss 0.00227
| epoch 8 | 1000/1782 batches| train_acc 0.955 train_loss 0.00224
| epoch 8 | 1500/1782 batches| train_acc 0.954 train_loss 0.00224
---------------------------------------------------------------------
| epoch 8 | time:6.32s |valid_acc 0.954 | valid_loss 0.002
---------------------------------------------------------------------
| epoch 9 | 500/1782 batches| train_acc 0.955 train_loss 0.00218
| epoch 9 | 1000/1782 batches| train_acc 0.953 train_loss 0.00227
| epoch 9 | 1500/1782 batches| train_acc 0.955 train_loss 0.00227
---------------------------------------------------------------------
| epoch 9 | time:6.24s |valid_acc 0.954 | valid_loss 0.002
---------------------------------------------------------------------
| epoch 10 | 500/1782 batches| train_acc 0.952 train_loss 0.00229
| epoch 10 | 1000/1782 batches| train_acc 0.955 train_loss 0.00220
| epoch 10 | 1500/1782 batches| train_acc 0.956 train_loss 0.00220
---------------------------------------------------------------------
| epoch 10 | time:6.29s |valid_acc 0.954 | valid_loss 0.002
---------------------------------------------------------------------

8. Evaluating the Model on the Test Set

print('Checking the results of test dataset.')
test_acc, test_loss = evaluate(test_dataloader)
print('test accuracy {:8.3f}'.format(test_acc))

Output:

Checking the results of test dataset.
test accuracy    0.905
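As a final check, the trained model can classify a new piece of text by pushing it through the same text_pipeline. The sketch below is illustrative: the predict helper, the sample sentence and the ag_news_label mapping are not from the post, although the 1–4 label meanings are standard for AG NEWS:

ag_news_label = {1: "World", 2: "Sports", 3: "Business", 4: "Sci/Tech"}
 
def predict(text, text_pipeline):
    with torch.no_grad():
        text    = torch.tensor(text_pipeline(text), dtype=torch.int64).to(device)
        offsets = torch.tensor([0]).to(device)      # a single text starting at position 0
        output  = model(text, offsets)
        return output.argmax(1).item() + 1          # shift back from 0-based class index to 1-based label
 
example_text = "The stock market rallied today as tech shares posted strong gains."
print("Predicted category: %s" % ag_news_label[predict(example_text, text_pipeline)])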

Summary:

  1. Pre-trained word vectors: using pre-trained embeddings such as GloVe or FastText can significantly improve performance.

  2. Regularization: apply dropout, weight decay and similar techniques judiciously to prevent overfitting (a sketch follows this list).

  3. Hyperparameter tuning: the learning rate, batch size, hidden dimension and similar settings have a large impact on model performance.

  4. Transfer learning: for small datasets, consider fine-tuning a pre-trained model such as BERT.
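A minimal sketch of point 2, assuming we simply add a dropout layer to the model above and pass weight_decay to the optimizer; the 0.5 dropout rate and 1e-4 decay are illustrative values, not tuned for this task:

class TextClassificationModelWithDropout(nn.Module):
    def __init__(self, vocab_size, embed_dim, num_class, dropout=0.5):
        super().__init__()
        self.embedding = nn.EmbeddingBag(vocab_size, embed_dim, sparse=False)
        self.dropout   = nn.Dropout(dropout)            # randomly zero activations during training
        self.fc        = nn.Linear(embed_dim, num_class)
 
    def forward(self, text, offsets):
        embedded = self.dropout(self.embedding(text, offsets))
        return self.fc(embedded)
 
model     = TextClassificationModelWithDropout(vocab_size, em_size, num_class).to(device)
optimizer = torch.optim.SGD(model.parameters(), lr=5, weight_decay=1e-4)  # L2 regularization via weight decay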

