Parameter-Efficient Fine-Tuning of Large Models: Prompt Tuning
Fine-Tuning Background
Large-model fine-tuning comes in several forms. The first is full fine-tuning, which demands serious hardware: updating all of a model's parameters on consumer-grade devices is practically impossible. Researchers therefore proposed parameter-efficient transfer learning methods, which freeze most of the model's parameters and train only a small fraction of them, approaching the quality of full fine-tuning. These methods fall into two broad groups: tuning a small subset of the model's own parameters, and adding extra components (so that only a small number of new or existing parameters are trained). Fine-tuning of this kind fits on much smaller GPUs, and this post walks through the second approach: fine-tuning by adding an extra component.
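To make that concrete, here is a minimal sketch of the freeze-most, train-a-little idea in plain PyTorch, with a made-up extra embedding table standing in for the added component; the rest of this post does this properly through the PEFT library instead.

import torch
from transformers import AutoModelForCausalLM

base_model = AutoModelForCausalLM.from_pretrained("bigscience/bloomz-560m")

# Freeze every parameter of the base model.
for param in base_model.parameters():
    param.requires_grad = False

# A small added component (here, a hypothetical extra embedding table) is the
# only thing the optimizer updates; the base model stays untouched.
extra_component = torch.nn.Embedding(8, base_model.config.hidden_size)
optimizer = torch.optim.AdamW(extra_component.parameters(), lr=3e-2)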
Prompt-Tuning Theory
Prompt tuning defines a task-specific prompt (a short piece of prompt text) that is prepended to every training example, and it does not need an extra MLP reparameterization (as prefix tuning does) to work around training instability. (Paper: The Power of Scale for Parameter-Efficient Prompt Tuning)
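Conceptually, the learned prompt is just a small matrix of "virtual token" embeddings that is concatenated in front of the embedded input, and it is the only tensor that receives gradients. A rough sketch of that idea (the SoftPrompt class below is illustrative, not PEFT's actual implementation):

import torch
import torch.nn as nn

class SoftPrompt(nn.Module):
    def __init__(self, num_virtual_tokens: int, hidden_size: int):
        super().__init__()
        # The only trainable tensor: one embedding vector per virtual token.
        self.prompt = nn.Parameter(torch.randn(num_virtual_tokens, hidden_size))

    def forward(self, input_embeds: torch.Tensor) -> torch.Tensor:
        # input_embeds: (batch, seq_len, hidden) -> (batch, n_virtual + seq_len, hidden)
        batch = input_embeds.size(0)
        prompt = self.prompt.unsqueeze(0).expand(batch, -1, -1)
        return torch.cat([prompt, input_embeds], dim=1)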
Experiment Code
1. Imports
import os

import torch
from torch.utils.data import DataLoader
from datasets import load_dataset
from transformers import (
    AutoModelForCausalLM,
    AutoTokenizer,
    default_data_collator,
    get_linear_schedule_with_warmup,
)
from peft import get_peft_config, get_peft_model, PromptTuningInit, PromptTuningConfig, TaskType, PeftType
from tqdm import tqdm
2. Fine-Tuning Configuration
device = "cuda"
model_name_or_path = "bigscience/bloomz-560m" # 模型
tokenizer_name_or_path = "bigscience/bloomz-560m"
peft_config = PromptTuningConfig(
task_type=TaskType.CAUSAL_LM,
prompt_tuning_init=PromptTuningInit.TEXT,
num_virtual_tokens=8, # 添加的额外参数
prompt_tuning_init_text="Classify if the sencente is a negative or positive:", # prompt微调添加在每段数据前的提示词,表明这是一个什么任务
tokenizer_name_or_path=model_name_or_path, # 大模型配套的tokenize
)
dataset_name = "twitter_complaints"
text_column = "sentence"
label_column = "label"
max_length = 64
lr = 3e-2
num_epochs = 10
batch_size = 8
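With prompt_tuning_init=PromptTuningInit.TEXT, the virtual-token embeddings are initialized from the word embeddings of the tokenized init text (tiled or truncated to num_virtual_tokens) rather than from random vectors. A rough, self-contained sketch of that initialization, using the variables defined above; PEFT performs the equivalent step internally:

# Rough sketch of TEXT initialization (illustrative; PEFT does this internally).
_tok = AutoTokenizer.from_pretrained(model_name_or_path)
_base = AutoModelForCausalLM.from_pretrained(model_name_or_path)

init_ids = _tok("Classify if the sentence is a negative or positive:")["input_ids"]
n = 8  # num_virtual_tokens
# Tile or truncate the init token ids to exactly n entries.
init_ids = (init_ids * (n // len(init_ids) + 1))[:n]
# The soft prompt starts from the word embeddings of those tokens.
init_prompt = _base.get_input_embeddings()(torch.tensor(init_ids)).detach().clone()
print(init_prompt.shape)  # torch.Size([8, 1024]) for bloomz-560m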
3. Loading the Data
dataset = load_dataset("glue", "sst2")
text_column = "sentence"
label_column = "label"
classes = [k.replace("_", " ") for k in dataset["train"].features["label"].names]
print(classes)
# Replace integer labels with their class-name strings so the LM can generate them as text.
# (Note: GLUE SST-2's test split carries placeholder labels of -1; the test split is only
# used for generation below, so those labels are never relied on.)
dataset = dataset.map(
    lambda x: {"label": [classes[label] for label in x["label"]]},
    batched=True,
    num_proc=1,
)
print(dataset)
dataset["train"][0]
4. Data Preprocessing
# Data preprocessing
tokenizer = AutoTokenizer.from_pretrained(tokenizer_name_or_path)
if tokenizer.pad_token_id is None:
    tokenizer.pad_token_id = tokenizer.eos_token_id

# Longest tokenized label; determines how many target tokens each example needs.
target_max_length = max([len(tokenizer(class_label)["input_ids"]) for class_label in classes])
print("target_max_length:", target_max_length)
# Preprocessing: build "sentence : <text> Label : <label>" sequences,
# mask the prompt part of the labels with -100, and left-pad to max_length.
def preprocess_function(examples):
    batch_size = len(examples[text_column])
    print("batch_size:", batch_size)
    inputs = [f"{text_column} : {x} Label : " for x in examples[text_column]]
    targets = [str(x) for x in examples[label_column]]
    model_inputs = tokenizer(inputs)
    labels = tokenizer(targets)
    for i in range(batch_size):
        sample_input_ids = model_inputs["input_ids"][i]
        # Append the label tokens plus a pad token that marks the end of the answer.
        label_input_ids = labels["input_ids"][i] + [tokenizer.pad_token_id]
        if i == 0:
            print(i, sample_input_ids, label_input_ids)
        model_inputs["input_ids"][i] = sample_input_ids + label_input_ids
        # Only the label tokens contribute to the loss; prompt positions are set to -100.
        labels["input_ids"][i] = [-100] * len(sample_input_ids) + label_input_ids
        model_inputs["attention_mask"][i] = [1] * len(model_inputs["input_ids"][i])
    for i in range(batch_size):
        sample_input_ids = model_inputs["input_ids"][i]
        label_input_ids = labels["input_ids"][i]
        # Left-pad everything to max_length (decoder-only models generate from the right).
        model_inputs["input_ids"][i] = [tokenizer.pad_token_id] * (max_length - len(sample_input_ids)) + sample_input_ids
        model_inputs["attention_mask"][i] = [0] * (max_length - len(sample_input_ids)) + model_inputs["attention_mask"][i]
        labels["input_ids"][i] = [-100] * (max_length - len(sample_input_ids)) + label_input_ids
        model_inputs["input_ids"][i] = torch.tensor(model_inputs["input_ids"][i][:max_length])
        model_inputs["attention_mask"][i] = torch.tensor(model_inputs["attention_mask"][i][:max_length])
        labels["input_ids"][i] = torch.tensor(labels["input_ids"][i][:max_length])
        if i == 0:
            print("model_inputs input_ids:", model_inputs["input_ids"][i])
            print("model_inputs attention_mask:", model_inputs["attention_mask"][i])
            print("labels input_ids:", labels["input_ids"][i])
    model_inputs["labels"] = labels["input_ids"]
    return model_inputs
print("column_names:", dataset["train"].column_names)
# Preprocess every split with the same function; the processed train split is used
# for both training and evaluation below.
processed_datasets = dataset.map(
    preprocess_function,
    batched=True,
    num_proc=1,
    remove_columns=dataset["train"].column_names,
    load_from_cache_file=False,
    desc="Running tokenizer on dataset",
)
train_dataset = processed_datasets["train"]
eval_dataset = processed_datasets["train"]
# Training and evaluation use the same data here; only the training loader is shuffled.
train_dataloader = DataLoader(train_dataset, shuffle=True, collate_fn=default_data_collator, batch_size=batch_size, pin_memory=True)
eval_dataloader = DataLoader(eval_dataset, collate_fn=default_data_collator, batch_size=batch_size, pin_memory=True)
print(len(train_dataloader))
print(len(eval_dataloader))
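As an optional sanity check on the processed format: each example is left-padded to max_length, and only the answer tokens (plus the trailing pad token) remain in labels, so the loss ignores the prompt and the padding. The snippet below simply inspects the first processed example.

sample = train_dataset[0]
print(len(sample["input_ids"]))                    # 64, i.e. max_length
print(sample["attention_mask"][:8])                # leading zeros mark the left padding
print([t for t in sample["labels"] if t != -100])  # only the label tokens plus one pad id remain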
5. Test-Set Preprocessing
# Test-time preprocessing: same prompt format, but without labels (the model generates them).
def test_preprocess_function(examples):
    batch_size = len(examples[text_column])
    inputs = [f"{text_column} : {x} Label : " for x in examples[text_column]]
    model_inputs = tokenizer(inputs)
    for i in range(batch_size):
        sample_input_ids = model_inputs["input_ids"][i]
        model_inputs["input_ids"][i] = [tokenizer.pad_token_id] * (max_length - len(sample_input_ids)) + sample_input_ids
        model_inputs["attention_mask"][i] = [0] * (max_length - len(sample_input_ids)) + model_inputs["attention_mask"][i]
        model_inputs["input_ids"][i] = torch.tensor(model_inputs["input_ids"][i][:max_length])
        model_inputs["attention_mask"][i] = torch.tensor(model_inputs["attention_mask"][i][:max_length])
    return model_inputs
# Preprocess the raw test split; it is only used for generation below.
test_dataset = dataset["test"].map(
    test_preprocess_function,
    batched=True,
    num_proc=1,
    remove_columns=dataset["train"].column_names,
    load_from_cache_file=False,
    desc="Running tokenizer on dataset",
)
test_dataloader = DataLoader(test_dataset, collate_fn=default_data_collator, batch_size=batch_size, pin_memory=True)
next(iter(test_dataloader))
6. Wrapping the Model and Printing Trainable Parameters
model = AutoModelForCausalLM.from_pretrained(model_name_or_path)
model = get_peft_model(model, peft_config)
model.print_trainable_parameters()
- AutoModelForCausalLM uses the standard autoregressive language-modeling loss, CrossEntropyLoss, by default. Concretely, after outputs = model(**batch), outputs.loss is the CrossEntropyLoss computed between the labels and the model's logits, with every position whose label is -100 ignored, so the loss is only taken over the tokens we actually want the model to predict.
- With 8 virtual tokens, the trainable parameter count is 8,192, because each virtual token gets one embedding vector whose dimension equals the model's embedding size. For BLOOMZ-560m the embedding size is 1024, so: 8 virtual tokens × 1024 parameters each = 8,192 trainable parameters (a quick way to verify this is sketched right after this list).
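The count can be verified directly on the wrapped model; a small optional check:

# Count only the parameters that still require gradients.
trainable = sum(p.numel() for p in model.parameters() if p.requires_grad)
print(trainable)  # expected: 8192 = 8 virtual tokens x 1024 (bloomz-560m embedding size)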
7. Optimizer and Learning-Rate Scheduler
# Optimizer and linear learning-rate scheduler (no warmup).
optimizer = torch.optim.AdamW(model.parameters(), lr=lr)
lr_scheduler = get_linear_schedule_with_warmup(
    optimizer=optimizer,
    num_warmup_steps=0,
    num_training_steps=(len(train_dataloader) * num_epochs),
)
8. Training
# Training and evaluation loop
model = model.to(device)

for epoch in range(num_epochs):
    model.train()
    total_loss = 0
    for step, batch in enumerate(tqdm(train_dataloader)):
        batch = {k: v.to(device) for k, v in batch.items()}
        outputs = model(**batch)
        loss = outputs.loss
        total_loss += loss.detach().float()
        loss.backward()
        optimizer.step()
        lr_scheduler.step()
        optimizer.zero_grad()

    model.eval()
    eval_loss = 0
    eval_preds = []
    for step, batch in enumerate(tqdm(eval_dataloader)):
        batch = {k: v.to(device) for k, v in batch.items()}
        with torch.no_grad():
            outputs = model(**batch)
        loss = outputs.loss
        eval_loss += loss.detach().float()
        eval_preds.extend(
            tokenizer.batch_decode(torch.argmax(outputs.logits, -1).detach().cpu().numpy(), skip_special_tokens=True)
        )

    eval_epoch_loss = eval_loss / len(eval_dataloader)
    eval_ppl = torch.exp(eval_epoch_loss)
    train_epoch_loss = total_loss / len(train_dataloader)
    train_ppl = torch.exp(train_epoch_loss)
    print(f"{epoch=}: {train_ppl=} {train_epoch_loss=} {eval_ppl=} {eval_epoch_loss=}")
9. Model Evaluation
# Model evaluation: ask the tuned model to label a hand-written sentence,
# using the same "sentence : <text> Label : " format as in training.
model.eval()

inputs = tokenizer(f'{text_column} : very bad Label : ', return_tensors="pt")
print(inputs)

with torch.no_grad():
    inputs = {k: v.to(device) for k, v in inputs.items()}
    outputs = model.generate(
        input_ids=inputs["input_ids"], attention_mask=inputs["attention_mask"], max_new_tokens=10, eos_token_id=3  # 3 is BLOOM's pad token id, used here as the stop token
    )
    print(outputs)
    print(tokenizer.batch_decode(outputs.detach().cpu().numpy(), skip_special_tokens=True))
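To reuse the learned soft prompt later, it can be saved and reloaded with PEFT's standard save/load API. A minimal sketch; the directory name bloomz-560m-prompt-tuning is just an example:

# Save only the soft-prompt weights (a few KB) plus the adapter config.
peft_model_id = "bloomz-560m-prompt-tuning"  # example output directory
model.save_pretrained(peft_model_id)

# Reload later: load the frozen base model, then attach the trained prompt.
from peft import PeftModel, PeftConfig
config = PeftConfig.from_pretrained(peft_model_id)
base = AutoModelForCausalLM.from_pretrained(config.base_model_name_or_path)
loaded_model = PeftModel.from_pretrained(base, peft_model_id)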
Reference blog and code: https://zhuanlan.zhihu.com/p/646748939