Install torch 1.13 + CUDA 11.6 and Run a Demo [Transformer]

Published: 2023-09-14

01. Overview

This post installs PyTorch 1.13 with CUDA 11.6 and runs a Transformer demo: sentiment analysis of hotel reviews with the 3-layer RoBERTa model rbt3.

02. GPU Driver Version

C:\Users\Administrator>nvidia-smi -l 10
Wed Sep 13 23:35:08 2023
+-----------------------------------------------------------------------------+
| NVIDIA-SMI 512.89       Driver Version: 512.89       CUDA Version: 11.6     |
|-------------------------------+----------------------+----------------------+
| GPU  Name            TCC/WDDM | Bus-Id        Disp.A | Volatile Uncorr. ECC |
| Fan  Temp  Perf  Pwr:Usage/Cap|         Memory-Usage | GPU-Util  Compute M. |
|                               |                      |               MIG M. |
|===============================+======================+======================|
|   0  NVIDIA GeForce ...  WDDM | 00000000:01:00.0  On |                  N/A |
| N/A   73C    P0    47W /  N/A |   2210MiB /  4096MiB |     99%      Default |
|                               |                      |                  N/A |
+-------------------------------+----------------------+----------------------+

03. Create the Environment and Install Required Packages

Create a virtual environment named gpy38torch under the path D:/AworkStation/Anaconda3/envs:
conda create -p D:/AworkStation/Anaconda3/envs/gpy38torch python=3.8  (for some reason, even when logged in as a Windows administrator account, the prompt still had to be run as administrator)
pip install pandas transformers scipy ipykernel
pip install torch==1.13.0+cu116 --extra-index-url https://download.pytorch.org/whl/cu116
python -m ipykernel install --user --name gpy38torch
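Note that nvidia-smi reports the highest CUDA runtime the driver supports (11.6 above), which is why the cu116 wheel is chosen. As a minimal sanity check that the install matches (for this setup it should print 1.13.0+cu116 11.6 True):
python -c "import torch; print(torch.__version__, torch.version.cuda, torch.cuda.is_available())"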

04. Run the Reference Code:

# -*- coding: utf-8 -*-

'''
@Author   :   Corley Tang
@contact  :   cutercorleytd@gmail.com
@Github   :   https://github.com/corleytd
@Time     :   2023-08-14 22:22
@Project  :   Hands-on NLP with HuggingFace Transformers-sentiment_analysis_with_rbt3
Sentiment analysis of reviews using a 3-layer RoBERTa model
'''

# Import the required libraries
import pandas as pd
import torch
from torch import optim
from torch.utils.data import Dataset, DataLoader, random_split
from transformers import AutoTokenizer, AutoModelForSequenceClassification
from transformers import pipeline
from transformers import set_seed



# Hyperparameters
device = 'cuda' if torch.cuda.is_available() else 'cpu'
seed = 20230814
batch_size = 8
max_length = 256
lr = 2e-5
num_epochs = 2
log_interval = 100
train_ratio = 0.8
model_path = 'hfl/rbt3'  # Hugging Face Hub model id
model_path = r'D:\Auser\YZH\Pytorch深度学习入门与实战\Models\rbt3'  # local copy overrides the line above; remove it to download from the Hub

# Set the random seed to make results reproducible
set_seed(seed)

# 1. Build the data
## (1) Inspect the data

# Read the hotel review dataset: https://github.com/SophonPlus/ChineseNlpCorpus
path = 'ChnSentiCorp_htl_all.csv'  # in the current working directory
data = pd.read_csv(path)
data.head()  # bare expressions like this only display output in a notebook
# Check for missing values
data.info()  # the review column has 1 missing value
# Drop the missing value
data.dropna(inplace=True)
data.info()  # no missing values remain
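# Optional sanity check (not in the original script): inspect the class balance;
# the label column is 0/1, mapped to negative/positive later in this script
data['label'].value_counts()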

## (2) Build the dataset
# Define the dataset class
class ReviewDataset(Dataset):
    def __init__(self, path):
        super().__init__()
        self.data = pd.read_csv(path)
        self.data.dropna(inplace=True)

    def __len__(self):
        return self.data.shape[0]

    def __getitem__(self, index):
        item = self.data.iloc[index]
        return item['review'], item['label']
# Instantiate the dataset
dataset = ReviewDataset(path)

for i in range(5):
    print(dataset[i])
# Split the dataset
sample_length = len(dataset)
train_length = int(train_ratio * sample_length)
train_set, valid_set = random_split(dataset, lengths=[train_length,
                                                      sample_length - train_length])  # PyTorch 1.13+ also accepts fractional lengths
len(train_set), len(valid_set)
# Inspect the training set
for i in range(5):
    print(train_set[i])
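# An equivalent split using fractional lengths (PyTorch >= 1.13 only), shown as a
# commented-out sketch; the explicit generator keeps the split reproducible:
# train_set, valid_set = random_split(dataset, lengths=[train_ratio, 1 - train_ratio],
#                                     generator=torch.Generator().manual_seed(seed))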

## (3) Create the DataLoader
# Create the tokenizer
tokenizer = AutoTokenizer.from_pretrained(model_path)


def text_collate(batch):
    '''
    Collate a list of individual samples into one batch, typically with some extra processing.
    :param batch: a list holding one batch of data, where each element is one sample (input, label, etc.)
    :return: one batch of data, which can be a list, tuple, or dict
    '''
    texts, labels = [], []
    for item in batch:
        texts.append(item[0])
        labels.append(item[1])

    # Collect the whole batch first and tokenize it in one call, which is more efficient
    inputs = tokenizer(texts, max_length=max_length, padding='max_length', truncation=True, return_tensors='pt')
    inputs['labels'] = torch.tensor(labels)

    return inputs
# Build the DataLoaders
train_loader = DataLoader(train_set, batch_size=batch_size, shuffle=True, collate_fn=text_collate)  # custom collation
valid_loader = DataLoader(valid_set, batch_size=batch_size * 2, collate_fn=text_collate)
# Inspect one validation batch
next(iter(valid_loader))  # returned as a dict
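# For a BERT-style model such as rbt3, this batch dict typically holds the
# input_ids, token_type_ids and attention_mask tensors from the tokenizer,
# plus the labels tensor added in text_collate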

# 2. Build the model
## (1) Create the model
model = AutoModelForSequenceClassification.from_pretrained(model_path)  # pick a model with a sequence classification head
model.to(device)
model

## (2) Define the optimizer
optimizer = optim.AdamW(model.parameters(), lr=lr)
optimizer
# 3. Training and prediction
# Evaluation
def evaluate():
    total_correct = 0  # counter
    model.eval()
    with torch.inference_mode():  # inference mode cuts memory use and compute for faster inference (forward pass only; no backprop or gradient tracking)
        for batch in valid_loader:
            batch = {k: v.to(device) for k, v in batch.items()}
            output = model(**batch)
            preds = output.logits.argmax(-1)
            total_correct += (preds == batch['labels']).sum().item()

    return total_correct / len(valid_set)


# Training
def train():
    global_step = 0  # step counter
    for epoch in range(num_epochs):
        model.train()
        for batch in train_loader:
            batch = {k: v.to(device) for k, v in batch.items()}
            output = model(**batch)
            output.loss.backward()
            optimizer.step()
            optimizer.zero_grad()

            if global_step % log_interval == 0:
                print(f'Epoch: {epoch}, Step: {global_step:4d}, Loss: {output.loss.item():.6f}')
            global_step += 1
        acc = evaluate()
        print(f'Epoch: {epoch}, Acc: {acc:.2%}')
# Start training
train()
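# Persist the fine-tuned model (an optional step, not in the original script);
# the output directory name here is just an example:
model.save_pretrained('rbt3_sentiment')
tokenizer.save_pretrained('rbt3_sentiment')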
# Manual prediction
review = '总体来说还是不错,不足之处可以谅解,毕竟价格放在这里,要求不能太高。'  # "Overall it's decent; the flaws are forgivable given the price, so expectations can't be too high."
id2label = {0: '差评', 1: '好评'}  # 0: negative review, 1: positive review
model.eval()
with torch.inference_mode():
    inputs = tokenizer(review, return_tensors='pt')
    inputs = {k: v.to(device) for k, v in inputs.items()}
    logits = model(**inputs).logits
    pred = logits.argmax(-1).item()
    print(f'Review: {review}\nPrediction: {id2label.get(pred)}')
# Using a pipeline instead
model.config.id2label = id2label
pipe = pipeline('text-classification', model=model, tokenizer=tokenizer, device=device)
# Classify the review
pipe(review)
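# The pipeline returns a list with one dict per input, e.g.
# [{'label': '好评', 'score': ...}]; the score depends on the trained
# weights, so it is not reproduced here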
