The previous lessons built the LLM architecture; the next step is to pretrain the model. Here we implement the training loop, performance evaluation, and loading and saving of model weights.

1. Training the large language model

1. First, define the dictionary GPT_CONFIG_124M used to initialize the model. Since we are working on a laptop, the context length context_length is set to 128 to reduce the compute needed for training. We also introduce two helper functions, text_to_token_ids and token_ids_to_text, for converting between token IDs and text.

import torch
import torch.nn as nn
import tiktoken

GPT_CONFIG_124M = {
    "vocab_size": 50257,    #vocabulary size
    "context_length": 128,  #context length
    "emb_dim": 768,         #embedding dimension
    "num_heads": 12,        #number of attention heads
    "num_layers": 12,       #number of layers
    "drop_rate": 0.1,       #dropout rate
    "qkv_bias": False       #query-key-value bias
}
def text_to_token_ids(text, tokenizer):
    encoded = tokenizer.encode(text, allowed_special={'<|endoftext|>'})
    encoded_tensor = torch.tensor(encoded).unsqueeze(0) #add a batch dimension
    return encoded_tensor

def token_ids_to_text(token_ids, tokenizer):
    flat = token_ids.squeeze(0)  #remove the batch dimension
    return tokenizer.decode(flat.tolist())
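
A quick round-trip check of the two helpers (a usage sketch of my own, relying on the GPT-2 BPE tokenizer from tiktoken, the same tokenizer used later for training):

#quick round-trip check: text -> token IDs -> text
tokenizer = tiktoken.get_encoding("gpt2")
token_ids = text_to_token_ids("Every effort moves you", tokenizer)
print(token_ids)                                #tensor of shape (1, num_tokens)
print(token_ids_to_text(token_ids, tokenizer))  #prints the original text again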

2. Build the dataloaders with the data loader from Lesson 2, splitting the data into training and validation sets at a 9:1 ratio with a batch size of 2.

#load the dataset and split it at a 9:1 ratio
file_path = r"the-verdict.txt"
with open(file_path, "r", encoding="utf-8") as f:
    texts = f.read()
train_ratio = 0.90
split_idx = int(train_ratio * len(texts))
train_data = texts[:split_idx]
val_data = texts[split_idx:]

#use create_dataloder from Lesson 2 to build the data loaders
from BPE import create_dataloder
train_loader = create_dataloder(train_data,
                               batch_size=2,
                               max_length=GPT_CONFIG_124M["context_length"],
                               stride=GPT_CONFIG_124M["context_length"],
                               shuffle=True,
                               drop_last=True,
                               num_works=0)
val_loader = create_dataloder(val_data,
                               batch_size=2,
                               max_length=GPT_CONFIG_124M["context_length"],
                               stride=GPT_CONFIG_124M["context_length"],
                               shuffle=False,
                               drop_last=False,
                               num_works=0)
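
As an optional sanity check (my own addition, not required for training), one can pull a single batch from each loader and confirm the shapes:

#optional sanity check: each batch should have shape (batch_size, context_length)
for x, y in train_loader:
    print("Train batch:", x.shape, y.shape)  #expected: torch.Size([2, 128]) for inputs and targets
    break
for x, y in val_loader:
    print("Val batch:", x.shape, y.shape)
    break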

3. Define the loss function

To measure the gap between the model's output logits and the targets, we use the classic cross-entropy loss. Note that the logits tensor produced by the model has shape (batch_size, num_tokens, vocab_size), i.e. batch size, number of tokens, and vocabulary size, whereas the target tensor has shape (batch_size, num_tokens). PyTorch's cross-entropy function expects these tensors to be flattened by merging the batch and token dimensions, giving shapes (batch_size * num_tokens, vocab_size) and (batch_size * num_tokens,) respectively.
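
The following toy example (my own illustration; the values are random and only the shapes matter) shows this flattening step:

#illustration of the flattening step with toy shapes
import torch
import torch.nn as nn

logits = torch.randn(2, 3, 50257)          #(batch_size=2, num_tokens=3, vocab_size=50257)
targets = torch.randint(0, 50257, (2, 3))  #(batch_size=2, num_tokens=3)
loss = nn.functional.cross_entropy(
    logits.flatten(0, 1),   #shape (6, 50257) = (batch_size*num_tokens, vocab_size)
    targets.flatten()       #shape (6,)       = (batch_size*num_tokens,)
)
print(loss)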

The three functions below work together: calc_loss_batch computes the loss for a single batch, calc_loss_loader computes the loss over an entire training or validation loader, and evaluate_model uses it to report both the training and validation losses. Because evaluation runs in evaluation mode, gradient tracking is disabled during this step.

#compute the cross-entropy loss for a single batch
def calc_loss_batch(input_batch, target_batch, model, device):
    input_batch = input_batch.to(device)
    target_batch = target_batch.to(device)  #shape: (batch_size, num_tokens)
    logits = model(input_batch)             #shape: (batch_size, num_tokens, vocab_size)
    loss = nn.functional.cross_entropy(
        logits.flatten(0, 1), target_batch.flatten() #flatten batch and token dimensions: (batch_size*num_tokens, vocab_size) and (batch_size*num_tokens,)
    )

    return loss

#compute the average loss over a data loader (training or validation set)
def calc_loss_loader(data_loader, model, device, num_batches=None):
    total_loss = 0.
    if len(data_loader) == 0:
        return float("nan")
    elif num_batches is None:   #if num_batches is not specified, iterate over all batches
        num_batches = len(data_loader)
    else:                       #make sure num_batches does not exceed the total number of batches
        num_batches = min(num_batches, len(data_loader))

    for i, (input_batch, target_batch) in enumerate(data_loader):
        if i >= num_batches:
            break
        loss = calc_loss_batch(input_batch, target_batch, model, device)
        total_loss += loss.item() #sum of the per-batch losses; item() avoids keeping the autograd graph

    return total_loss / num_batches #average loss over the evaluated batches

#report training and validation losses for performance monitoring; in evaluation mode dropout is disabled and gradient tracking is turned off
def evaluate_model(model, train_loader, val_loader, device, eval_iter):
    model.eval()
    with torch.no_grad():
        train_loss = calc_loss_loader(train_loader, model, device, num_batches=eval_iter)
        val_loss = calc_loss_loader(val_loader, model, device, num_batches=eval_iter)

    model.train()
    return train_loss, val_loss
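
Before training starts, it can be useful to record the untrained model's loss as a baseline. A minimal sketch, assuming a GPTModel instance named model has already been created and moved to device as in step 4 below:

#optional baseline: loss of the untrained model (roughly ln(50257) ≈ 10.8 if every token were predicted with equal probability)
model.eval()
with torch.no_grad():
    print("Initial train loss:", calc_loss_loader(train_loader, model, device, num_batches=5))
    print("Initial val loss:", calc_loss_loader(val_loader, model, device, num_batches=5))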

In addition, we add a helper function that prints a generated text sample.

from gpt2 import generate_text_simple #the text generation function from Lesson 4

#generate and print a text sample to track whether the model improves during training; the start_context snippet is converted to token IDs and fed to the model
def generate_and_print_sample(model, tokenizer, device, start_context):
    model.eval()
    context_size = model.pos_emb.weight.shape[0]
    encoded = text_to_token_ids(start_context, tokenizer).to(device)
    with torch.no_grad():
        token_idx = generate_text_simple(model, encoded, max_new_tokens=50, context_size=context_size)
    decoded_text = token_ids_to_text(token_idx, tokenizer)
    print(decoded_text.replace("\n", " "))
    model.train()

4. Start training. We use the GPT-2 model introduced in the previous lesson; since this is only a demonstration, epochs is set to 10. The overall procedure: loop over the training epochs, loop over the batches within each epoch, reset the loss gradients from the previous batch iteration, compute the loss, backpropagate to obtain the gradients, update the model weights, print the training and validation losses, and generate a text sample for visual inspection.

from gpt2 import GPTModel #the GPT model built in Lesson 4

#main pretraining loop for the LLM
def train_model_simple(model, train_loader, val_loader, optimizer, device, epochs,
                       eval_freq, eval_iter, start_context, tokenizer):
    train_losses, val_losses, track_tokens_seen = [], [], []
    tokens_seen, global_step = 0, -1

    for epoch in range(epochs):
        model.train()
        for input_batch, target_batch in train_loader:
            optimizer.zero_grad()
            loss = calc_loss_batch(input_batch, target_batch, model, device)
            loss.backward()
            optimizer.step()
            tokens_seen += input_batch.numel()
            global_step += 1

            #optional evaluation step
            if global_step % eval_freq == 0:
                train_loss, val_loss = evaluate_model(model, train_loader, val_loader, device, eval_iter)
                train_losses.append(train_loss)
                val_losses.append(val_loss)
                track_tokens_seen.append(tokens_seen)
                print(f"Ep {epoch + 1} (Step {global_step:06d}): Train loss: {train_loss:.3f}, Val loss: {val_loss:.3f}")

        #print a text sample after each epoch
        generate_and_print_sample(model, tokenizer, device, start_context)

    return train_losses, val_losses, track_tokens_seen

#start training
#torch.manual_seed(123)
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
model = GPTModel(GPT_CONFIG_124M)
model.to(device)
optimizer = torch.optim.AdamW(
    model.parameters(),
    lr=0.004,
    weight_decay=0.1
)
epochs = 10
tokenizer = tiktoken.get_encoding("gpt2")
train_losses, val_losses, token_seen = train_model_simple(
    model, train_loader, val_loader, optimizer, device,
    epochs=epochs, eval_freq=5, eval_iter=5,
    start_context="Every effort moves you", tokenizer=tokenizer
)
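
To visualize the two loss curves, here is a small plotting sketch (my own addition; it assumes matplotlib is installed and is not part of the training code above):

#optional: plot the recorded training and validation losses against the number of tokens seen
import matplotlib.pyplot as plt

plt.plot(token_seen, train_losses, label="Train loss")
plt.plot(token_seen, val_losses, label="Val loss")
plt.xlabel("Tokens seen")
plt.ylabel("Loss")
plt.legend()
plt.show()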

2. Text generation strategies

Text generation strategies, also called decoding strategies, help produce more original text. The generate_text_simple function used so far picks the single highest-probability token from the vocabulary at every step, which means the same starting context always yields exactly the same output. Here we refine that function with two decoding techniques: temperature scaling and top-k sampling.

Temperature scaling adds a probabilistic selection step to next-token generation. Previously, torch.argmax picked the highest-probability token as the next token; to generate more diverse text, argmax can be replaced with a function that samples from the probability distribution (the probability scores the LLM assigns to every vocabulary entry at each generation step).

Replacing argmax with torch.multinomial implements this probabilistic sampling, so the next token the model generates is no longer fixed. Temperature scaling then gives further control over the distribution and the selection process: the logits are divided by a number greater than 0. A temperature greater than 1 makes the token probabilities more uniform, while a temperature below 1 produces a more confident (sharper, more peaked) distribution.
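
A small illustration of the effect (my own toy example; the logits are arbitrary values): dividing by a temperature above 1 flattens the distribution, below 1 sharpens it, and torch.multinomial samples from the result instead of taking the argmax.

#toy example: effect of temperature scaling on the sampling distribution
import torch

logits = torch.tensor([2.0, 1.0, 0.1])
for temperature in [0.5, 1.0, 2.0]:
    probs = torch.softmax(logits / temperature, dim=-1)
    next_id = torch.multinomial(probs, num_samples=1)  #probabilistic sampling instead of argmax
    print(f"T={temperature}: probs={probs}, sampled index={next_id.item()}")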

Combining this probabilistic approach with temperature scaling yields more diverse output, but it can occasionally produce grammatically incorrect or nonsensical text. Adding top-k sampling improves the generated text: sampling is restricted to the k most likely tokens, and all other tokens are excluded by masking their probability scores.
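
In isolation, the top-k masking step looks like the following sketch (my own example with k=3 and arbitrary logits): entries outside the top k are set to -inf, so softmax assigns them zero probability.

#toy example: keep only the top-3 logits and mask the rest with -inf
import torch

logits = torch.tensor([[4.5, 1.2, -0.3, 3.8, 0.7]])
top_logits, _ = torch.topk(logits, 3)        #the 3 largest logits, sorted in descending order
min_val = top_logits[:, -1]                  #smallest logit still inside the top 3
masked = torch.where(logits < min_val, torch.tensor(float("-inf")), logits)
print(masked)                                #tensor([[4.5000, 1.2000, -inf, 3.8000, -inf]])
print(torch.softmax(masked, dim=-1))         #masked entries receive probability 0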

Combining these two techniques, the text generation function is modified as follows:

#text generation function combining temperature scaling and top-k sampling
def generate(model, idx, max_new_tokens, context_size, temperature=0.0, top_k=None, eos_id=None):
    for _ in range(max_new_tokens):
        idx_cond = idx[:, -context_size:]
        with torch.no_grad():
            logits = model(idx_cond)
        logits = logits[:, -1, :]
        if top_k is not None:       #apply top-k sampling
            top_logits, _ = torch.topk(logits, top_k) #the top_k largest logits, sorted in descending order
            min_val = top_logits[:, -1]
            #replace everything else with -inf: tokens outside the top k get probability 0, the remaining probabilities sum to 1
            logits = torch.where(
                logits < min_val,
                torch.tensor(float('-inf')).to(logits.device),
                logits
            )
        if temperature > 0.0:       #apply temperature scaling
            logits = logits / temperature
            probs = torch.softmax(logits, dim=-1)
            idx_next = torch.multinomial(probs, num_samples=1)
        else:                       #temperature scaling disabled: greedy argmax decoding as before
            idx_next = torch.argmax(logits, dim=-1, keepdim=True)
        if eos_id is not None and idx_next == eos_id:  #stop early when the end-of-sequence token is generated
            break
        idx = torch.cat((idx, idx_next), dim=1)

    return idx
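
As a usage sketch for the model trained above (the seed and sampling parameters below are example values of my own choosing):

#example call to generate with temperature scaling and top-k sampling
torch.manual_seed(123)
token_ids = generate(
    model=model,
    idx=text_to_token_ids("Every effort moves you", tokenizer).to(device),
    max_new_tokens=15,
    context_size=GPT_CONFIG_124M["context_length"],
    top_k=25,
    temperature=1.4
)
print("Output text:\n", token_ids_to_text(token_ids, tokenizer))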

3. Saving and loading model weights with PyTorch

torch.save can store the model's weights and related information directly. Adaptive optimizers such as AdamW also keep cached state such as learning rates and momentum terms; if this state is not saved, the optimizer is reset when training resumes, and the model may not continue learning well.

#save the model weights
torch.save({
    "model_state_dict": model.state_dict(),          #dictionary of all learnable parameters (weights and biases)
    "optimizer_state_dict": optimizer.state_dict(),  #the optimizer's state dictionary
    },
    "model_and_optimizer.pth"
)

When loading, torch.load reads the saved data and load_state_dict restores the model and optimizer states.

#load the saved weights
checkpoint = torch.load("model_and_optimizer.pth", map_location=device)
model = GPTModel(GPT_CONFIG_124M)
model.load_state_dict(checkpoint["model_state_dict"])
optimizer = torch.optim.AdamW(model.parameters(), lr=5e-4, weight_decay=0.4)
optimizer.load_state_dict(checkpoint["optimizer_state_dict"])
model.train()

4. Loading pretrained weights from OpenAI

OpenAI released the GPT-2 weights as TensorFlow checkpoints, so TensorFlow (and the other libraries the download script depends on) must be installed in advance.

Here we simply use the download function from the gpt_download.py script to fetch the GPT-2 weights.

#first download the weight-download script itself
import os
import urllib.request

if not os.path.exists('gpt_download.py'):
    url = "https://raw.githubusercontent.com/rasbt/LLMs-from-scratch/main/ch05/" \
              "01_main-chapter-code/gpt_download.py"
    filename = url.split('/')[-1]
    urllib.request.urlretrieve(url, filename)

#download the weights
from gpt_download import download_and_load_gpt2
settings, params = download_and_load_gpt2(model_size='124M', models_dir="gpt2")
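
To verify the download, the two returned objects can be inspected (an optional check of my own): settings holds the GPT-2 hyperparameter settings and params holds the pretrained weight tensors.

#optional: inspect what the download returned
print("Settings:", settings)
print("Parameter keys:", params.keys())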

Before loading the weights, we set the model parameters and instantiate the model. The different GPT-2 variants share the same overall architecture and differ only in the number of modules such as transformer blocks. We then adjust a few of the earlier settings so that they match the GPT-2 architecture.

model_configs = {
    "gpt2-small (124M)": {"emb_dim": 768, "num_layers": 12, "num_heads": 12},
    "gpt2-medium (355M)": {"emb_dim": 1024, "num_layers": 24, "num_heads": 16},
    "gpt2-large (774M)": {"emb_dim": 1280, "num_layers": 36, "num_heads": 20},
    "gpt2-xl (1558M)": {"emb_dim": 1600, "num_layers": 48, "num_heads": 25}
}
model_name = "gpt2-small (124M)"
NEW_CONFIG = GPT_CONFIG_124M.copy()
NEW_CONFIG.update(model_configs[model_name])
NEW_CONFIG.update({"context_length": 1024})
#OpenAI used a bias in the multi-head attention layers (not strictly necessary; rarely used in current LLMs)
NEW_CONFIG.update({"qkv_bias": True})

#initialize the GPTModel
gpt = GPTModel(NEW_CONFIG)
gpt.eval()
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
tokenizer = tiktoken.get_encoding("gpt2")

We introduce a small weight-assignment utility, assign, which checks whether a weight tensor of our instantiated GPTModel and the corresponding tensor returned for OpenAI's GPT-2 have the same shape; if they do, it returns the right tensor (the loaded parameter from params) as a trainable parameter.

#utility function for assigning weights: check that the shapes match and return the right tensor as a trainable parameter
def assign(left, right):
    if left.shape != right.shape:
        raise ValueError( f"shape mismatch. left:{left.shape}, right:{right.shape}")
    return torch.nn.Parameter(torch.tensor(right))

Next, load the downloaded parameters into the model layer by layer.

import numpy as np

#load the downloaded weights into the GPTModel
def load_weight_into_gpt(gpt, params):
    #embedding layer weights
    gpt.pos_emb.weight = assign(gpt.pos_emb.weight, params["wpe"])
    gpt.token_emb.weight = assign(gpt.token_emb.weight, params["wte"])

    #load the transformer block weights layer by layer
    for b in range(len(params["blocks"])):
        #split the combined attention weight matrix into three equal parts for q, k and v
        q_w, k_w, v_w = np.split((params["blocks"][b]["attn"]["c_attn"])["w"], 3, axis=-1)
        gpt.transformer_blocks[b].atten.w_q.weight = assign(gpt.transformer_blocks[b].atten.w_q.weight, q_w.T)
        gpt.transformer_blocks[b].atten.w_k.weight = assign(gpt.transformer_blocks[b].atten.w_k.weight, k_w.T)
        gpt.transformer_blocks[b].atten.w_v.weight = assign(gpt.transformer_blocks[b].atten.w_v.weight, v_w.T)
        #attention layer biases
        q_b, k_b, v_b = np.split((params["blocks"][b]["attn"]["c_attn"])["b"], 3, axis=-1)
        gpt.transformer_blocks[b].atten.w_q.bias = assign(gpt.transformer_blocks[b].atten.w_q.bias, q_b)
        gpt.transformer_blocks[b].atten.w_k.bias = assign(gpt.transformer_blocks[b].atten.w_k.bias, k_b)
        gpt.transformer_blocks[b].atten.w_v.bias = assign(gpt.transformer_blocks[b].atten.w_v.bias, v_b)
        #attention output projection
        gpt.transformer_blocks[b].atten.out_proj.weight = assign(gpt.transformer_blocks[b].atten.out_proj.weight,
                                                       params["blocks"][b]["attn"]["c_proj"]["w"].T)
        gpt.transformer_blocks[b].atten.out_proj.bias = assign(gpt.transformer_blocks[b].atten.out_proj.bias,
                                                     params["blocks"][b]["attn"]["c_proj"]["b"])
        #feed-forward network
        gpt.transformer_blocks[b].feedforward.layers[0].weight = assign(gpt.transformer_blocks[b].feedforward.layers[0].weight,
                                                                params["blocks"][b]["mlp"]["c_fc"]["w"].T)
        gpt.transformer_blocks[b].feedforward.layers[0].bias = assign(gpt.transformer_blocks[b].feedforward.layers[0].bias,
                                                              params["blocks"][b]["mlp"]["c_fc"]["b"])
        gpt.transformer_blocks[b].feedforward.layers[2].weight = assign(gpt.transformer_blocks[b].feedforward.layers[2].weight,
                                                                params["blocks"][b]["mlp"]["c_proj"]["w"].T)
        gpt.transformer_blocks[b].feedforward.layers[2].bias = assign(gpt.transformer_blocks[b].feedforward.layers[2].bias,
                                                              params["blocks"][b]["mlp"]["c_proj"]["b"])
        #layer normalization layers
        gpt.transformer_blocks[b].layerNorm1.scale = assign(gpt.transformer_blocks[b].layerNorm1.scale,
                                               params["blocks"][b]["ln_1"]["g"])
        gpt.transformer_blocks[b].layerNorm1.shift = assign(gpt.transformer_blocks[b].layerNorm1.shift,
                                               params["blocks"][b]["ln_1"]["b"])
        gpt.transformer_blocks[b].layerNorm2.scale = assign(gpt.transformer_blocks[b].layerNorm2.scale,
                                               params["blocks"][b]["ln_2"]["g"])
        gpt.transformer_blocks[b].layerNorm2.shift = assign(gpt.transformer_blocks[b].layerNorm2.shift,
                                               params["blocks"][b]["ln_2"]["b"])

    #final layer norm and output head (the output head reuses the token embedding matrix wte)
    gpt.final_norm.scale = assign(gpt.final_norm.scale, params["g"])
    gpt.final_norm.shift = assign(gpt.final_norm.shift, params["b"])
    gpt.out_head.weight = assign(gpt.out_head.weight, params["wte"])

Finally, call the weight-loading function and generate a sample to test it.

#load the weights into the model
load_weight_into_gpt(gpt, params)
gpt.to(device)

#generate text with the pretrained weights
torch.manual_seed(123)
token_ids = generate(
    model=gpt,
    idx=text_to_token_ids("Every effort moves you", tokenizer).to(device),
    max_new_tokens=25,
    context_size=NEW_CONFIG["context_length"],
    top_k=50,
    temperature=1.5
)
print("output text:\n", token_ids_to_text(token_ids, tokenizer))
