I. Concepts
This article follows the HuggingFace PEFT tutorial on prompt-based parameter-efficient fine-tuning and explores how to quickly fine-tune our own model on top of prompts. We will learn how to train a causal language model with a soft prompt method and apply it to a classification task. A prompt can describe a task or provide examples of the task we want the model to learn. Instead of crafting these prompts by hand, however, soft prompt methods add learnable parameters to the input embeddings; these parameters are optimized for the specific task while the pretrained model's own parameters stay frozen. This makes fine-tuning a large language model (LLM) for a new downstream task both faster and easier.
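To make the idea concrete, here is a minimal, purely illustrative sketch (not the PEFT implementation; the dimensions are made up): a soft prompt is just a small trainable matrix of virtual-token embeddings that is prepended to the token embeddings of every input, and only this matrix receives gradient updates.
import torch
import torch.nn as nn

hidden_size = 1024          # hypothetical hidden size of the base model
num_virtual_tokens = 20     # length of the soft prompt

# The only trainable parameters; the base model stays frozen.
soft_prompt = nn.Parameter(torch.randn(num_virtual_tokens, hidden_size))

# token_embeds stands in for the embeddings of a tokenized batch: (batch, seq_len, hidden)
token_embeds = torch.randn(2, 16, hidden_size)
prompted_embeds = torch.cat(
    [soft_prompt.unsqueeze(0).expand(token_embeds.size(0), -1, -1), token_embeds],
    dim=1,
)  # (batch, num_virtual_tokens + seq_len, hidden) is what the frozen model would consume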
II. Python Implementation
1. Data Preparation
Here we use the twitter_complaints subset of the RAFT dataset, which contains tweets labeled as complaint or no complaint (a binary classification task). We load it with the load_dataset function and create a new text_label column so that the meaning of the Label values 1 and 2 is easier to read.
from datasets import load_dataset
ds = load_dataset("ought/raft", "twitter_complaints")
classes = [k.replace("_", " ") for k in ds["train"].features["Label"].names]
ds = ds.map(
    lambda x: {"text_label": [classes[label] for label in x["Label"]]},
    batched=True,
    num_proc=1,
)
print(ds["train"][0])
Load a tokenizer, define the padding token to use, and determine the maximum tokenized length of the class labels.
from transformers import AutoTokenizer
tokenizer = AutoTokenizer.from_pretrained("bigscience/bloomz-560m")
if tokenizer.pad_token_id is None:
    tokenizer.pad_token_id = tokenizer.eos_token_id
target_max_length = max([len(tokenizer(class_label)["input_ids"]) for class_label in classes])
print(target_max_length)
Create a preprocessing function that tokenizes the tweet text and labels, pads the inputs and labels in each batch, builds an attention mask, and truncates the sequences to max_length.
import torch
max_length = 64
def preprocess_function(examples, text_column="Tweet text", label_column="text_label"):
    batch_size = len(examples[text_column])
    # Build prompts of the form "Tweet text : <tweet> Label : " and use the class name as the target.
    inputs = [f"{text_column} : {x} Label : " for x in examples[text_column]]
    targets = [str(x) for x in examples[label_column]]
    model_inputs = tokenizer(inputs)
    labels = tokenizer(targets)
    classes = [k.replace("_", " ") for k in ds["train"].features["Label"].names]
    for i in range(batch_size):
        sample_input_ids = model_inputs["input_ids"][i]
        label_input_ids = labels["input_ids"][i]
        # Left-pad the inputs with the pad token and the labels with -100 (ignored by the loss),
        # then truncate everything to max_length and convert to tensors.
        model_inputs["input_ids"][i] = [tokenizer.pad_token_id] * (
            max_length - len(sample_input_ids)
        ) + sample_input_ids
        model_inputs["attention_mask"][i] = [0] * (max_length - len(sample_input_ids)) + model_inputs[
            "attention_mask"
        ][i]
        labels["input_ids"][i] = [-100] * (max_length - len(label_input_ids)) + label_input_ids
        model_inputs["input_ids"][i] = torch.tensor(model_inputs["input_ids"][i][:max_length])
        model_inputs["attention_mask"][i] = torch.tensor(model_inputs["attention_mask"][i][:max_length])
        labels["input_ids"][i] = torch.tensor(labels["input_ids"][i][:max_length])
    model_inputs["labels"] = labels["input_ids"]
    return model_inputs
Apply the preprocessing function to the entire dataset with map, and remove the original columns, since the model does not need them.
processed_ds = ds.map(
    preprocess_function,
    batched=True,
    num_proc=1,
    remove_columns=ds["train"].column_names,
    load_from_cache_file=False,
    desc="Running tokenizer on dataset",
)
Finally, create the training and evaluation DataLoaders.
from torch.utils.data import DataLoader
from transformers import default_data_collator
train_ds = processed_ds["train"]
eval_ds = processed_ds["test"]
batch_size = 16
train_dataloader = DataLoader(train_ds, shuffle=True, collate_fn=default_data_collator, batch_size=batch_size, pin_memory=True)
eval_dataloader = DataLoader(eval_ds, collate_fn=default_data_collator, batch_size=batch_size, pin_memory=True)
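As an optional sanity check (not part of the original tutorial), you can pull one batch and confirm that every field has the expected shape of (batch_size, max_length), i.e. (16, 64) here:
# Each of input_ids, attention_mask and labels should be a (16, 64) tensor.
batch = next(iter(train_dataloader))
print({k: v.shape for k, v in batch.items()})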
2. Model Preparation
Here we load a pretrained model to serve as the base model for the soft prompt method: bigscience/bloomz-560m. Of course, any causal language model can be used instead.
from transformers import AutoModelForCausalLM
model = AutoModelForCausalLM.from_pretrained("bigscience/bloomz-560m")
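For reference (an optional check, not in the original tutorial), you can count the base model's parameters before attaching the prompt encoder; with bloomz-560m this is on the order of 560M, so the soft-prompt parameters added in the next step are a tiny fraction of the total.
# Number of (soon to be frozen) base-model parameters, for comparison with the PEFT summary below.
print(sum(p.numel() for p in model.parameters()))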
3. P-tuning Configuration
P-tuning adds a trainable embedding tensor, and the prompt tokens can be inserted anywhere in the input sequence. Next, we create a prompt encoder configuration (PromptEncoderConfig), set its parameters, and wrap the base model with get_peft_model.
from peft import PromptEncoderConfig, get_peft_model
peft_config = PromptEncoderConfig(task_type="CAUSAL_LM", num_virtual_tokens=20, encoder_hidden_size=128)
model = get_peft_model(model, peft_config)
model.print_trainable_parameters()
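Conceptually (this is an illustrative sketch, not PEFT's internal code), the prompt encoder re-parameterizes the num_virtual_tokens virtual-token embeddings through a small MLP whose hidden width is encoder_hidden_size; its output is what gets prepended to the input embeddings, and only these parameters are trained. The token dimension of 1024 below is assumed to match bloomz-560m's hidden size.
import torch
import torch.nn as nn

num_virtual_tokens, token_dim, encoder_hidden_size = 20, 1024, 128

embedding = nn.Embedding(num_virtual_tokens, token_dim)      # raw virtual-token embeddings
prompt_encoder = nn.Sequential(                              # MLP re-parameterization
    nn.Linear(token_dim, encoder_hidden_size), nn.ReLU(),
    nn.Linear(encoder_hidden_size, encoder_hidden_size), nn.ReLU(),
    nn.Linear(encoder_hidden_size, token_dim),
)
virtual_token_embeds = prompt_encoder(embedding(torch.arange(num_virtual_tokens)))  # (20, 1024)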
4. Model Training
Set up an optimizer and a linear learning-rate schedule, move the model to the GPU, and train for 50 epochs, evaluating on the test split after each epoch.
from transformers import get_linear_schedule_with_warmup
from tqdm import tqdm
lr = 3e-2
num_epochs = 50
optimizer = torch.optim.AdamW(model.parameters(), lr=lr)
lr_scheduler = get_linear_schedule_with_warmup(
    optimizer=optimizer,
    num_warmup_steps=0,
    num_training_steps=(len(train_dataloader) * num_epochs),
)
device = "cuda"
model = model.to(device)
for epoch in range(num_epochs):
    # Training
    model.train()
    total_loss = 0
    for step, batch in enumerate(tqdm(train_dataloader)):
        batch = {k: v.to(device) for k, v in batch.items()}
        outputs = model(**batch)
        loss = outputs.loss
        total_loss += loss.detach().float()
        loss.backward()
        optimizer.step()
        lr_scheduler.step()
        optimizer.zero_grad()

    # Evaluation
    model.eval()
    eval_loss = 0
    eval_preds = []
    for step, batch in enumerate(tqdm(eval_dataloader)):
        batch = {k: v.to(device) for k, v in batch.items()}
        with torch.no_grad():
            outputs = model(**batch)
        loss = outputs.loss
        eval_loss += loss.detach().float()
        eval_preds.extend(
            tokenizer.batch_decode(torch.argmax(outputs.logits, -1).detach().cpu().numpy(), skip_special_tokens=True)
        )

    # Report per-epoch loss and perplexity
    eval_epoch_loss = eval_loss / len(eval_dataloader)
    eval_ppl = torch.exp(eval_epoch_loss)
    train_epoch_loss = total_loss / len(train_dataloader)
    train_ppl = torch.exp(train_epoch_loss)
    print(f"{epoch=}: {train_ppl=} {train_epoch_loss=} {eval_ppl=} {eval_epoch_loss=}")
5. Model Application
Load the fine-tuned model with AutoPeftModelForCausalLM (here "peft_model_id" stands for the directory or Hub id where the adapter was saved), then build a prompt for a test tweet in the same format used during preprocessing and generate the label.
from peft import AutoPeftModelForCausalLM
# "peft_model_id" is the directory (or Hub id) where the trained adapter was saved above.
model = AutoPeftModelForCausalLM.from_pretrained("peft_model_id").to("cuda")
tokenizer = AutoTokenizer.from_pretrained("bigscience/bloomz-560m")
text_column = "Tweet text"  # same prompt format as in preprocessing
i = 15
inputs = tokenizer(f'{text_column} : {ds["test"][i]["Tweet text"]} Label : ', return_tensors="pt")
print(ds["test"][i]["Tweet text"])
with torch.no_grad():
    inputs = {k: v.to(device) for k, v in inputs.items()}
    outputs = model.generate(input_ids=inputs["input_ids"], max_new_tokens=10)
print(tokenizer.batch_decode(outputs.detach().cpu().numpy(), skip_special_tokens=True))
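The decoded output contains the full prompt followed by the generated label, so a little post-processing (an illustrative addition, not part of the tutorial) extracts just the predicted class:
# Keep only the text after the final "Label : " marker of the prompt.
decoded = tokenizer.batch_decode(outputs.detach().cpu().numpy(), skip_special_tokens=True)[0]
predicted_label = decoded.split("Label : ")[-1].strip()
print(predicted_label)  # ideally "complaint" or "no complaint"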