GPT-2 inference with ggml
ggml/examples/gpt-2/main-backend.cpp :
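The annotated listing below walks through the backend-based GPT-2 example from the ggml repository: it loads a GGML-format GPT-2 checkpoint, builds the forward graph with ggml-alloc, and runs it on a CPU, CUDA, or Metal backend.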
#include "ggml/ggml.h"
#include "ggml/ggml-alloc.h"
#include "ggml/ggml-backend.h"
#ifdef GGML_USE_CUDA
#include "ggml-cuda.h"
#endif
#ifdef GGML_USE_METAL
#include "ggml-metal.h"
#endif
#include "common.h"
#include "common-ggml.h"
#include <cassert>
#include <cmath>
#include <cstdio>
#include <cstring>
#include <fstream>
#include <map>
#include <string>
#include <vector>
#if defined(_MSC_VER)
#pragma warning(disable : 4244 4267) // possible loss of data
#endif
#define GPT2_MAX_NODES 4096
static void ggml_log_callback_default(ggml_log_level level, const char *text, void *user_data)
{
(void)level;
(void)user_data;
fputs(text, stderr);
fflush(stderr);
}
// default hparams (GPT-2 117M)
struct gpt2_hparams
{
int32_t n_vocab = 50257;
int32_t n_ctx = 1024;
int32_t n_embd = 768;
int32_t n_head = 12;
int32_t n_layer = 12;
int32_t ftype = 1;
float eps = 1e-5f;
};
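// with these defaults the checkpoint holds roughly 124M parameters (the model is
// commonly called "117M" after OpenAI's original count):
//   wte: 50257 * 768 ≈ 38.6M, wpe: 1024 * 768 ≈ 0.8M
//   per layer ≈ 7.1M (attention ≈ 2.36M + MLP ≈ 4.72M + norms), 12 layers ≈ 85.1M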
struct gpt2_layer
{
// normalization
struct ggml_tensor *ln_1_g;
struct ggml_tensor *ln_1_b;
struct ggml_tensor *ln_2_g;
struct ggml_tensor *ln_2_b;
// attention
struct ggml_tensor *c_attn_attn_w;
struct ggml_tensor *c_attn_attn_b;
struct ggml_tensor *c_attn_proj_w;
struct ggml_tensor *c_attn_proj_b;
// mlp
struct ggml_tensor *c_mlp_fc_w;
struct ggml_tensor *c_mlp_fc_b;
struct ggml_tensor *c_mlp_proj_w;
struct ggml_tensor *c_mlp_proj_b;
};
struct gpt2_model
{
gpt2_hparams hparams;
// normalization
struct ggml_tensor *ln_f_g;
struct ggml_tensor *ln_f_b;
struct ggml_tensor *wte; // token embedding
struct ggml_tensor *wpe; // position embedding
struct ggml_tensor *lm_head; // language model head
std::vector<gpt2_layer> layers;
// key + value memory
struct ggml_tensor *memory_k;
struct ggml_tensor *memory_v;
//
struct ggml_context *ctx_w;
struct ggml_context *ctx_kv;
ggml_backend_t backend = NULL;
ggml_backend_buffer_t buffer_w;
ggml_backend_buffer_t buffer_kv;
std::map<std::string, struct ggml_tensor *> tensors;
};
// load the model's weights from a file and initialize the ggml backend
bool gpt2_model_load(const std::string &fname, gpt2_model &model, gpt_vocab &vocab, int n_ctx, int n_gpu_layers)
{
printf("%s: loading model from '%s'\n", __func__, fname.c_str());
auto fin = std::ifstream(fname, std::ios::binary);
if (!fin)
{
fprintf(stderr, "%s: failed to open '%s'\n", __func__, fname.c_str());
return false;
}
// verify the magic number in the ggml file header
{
uint32_t magic;
fin.read((char *)&magic, sizeof(magic));
if (magic != GGML_FILE_MAGIC)
{
fprintf(stderr, "%s: invalid model file '%s' (bad magic)\n", __func__, fname.c_str());
return false;
}
}
// load hparams
{
auto &hparams = model.hparams;
fin.read((char *)&hparams.n_vocab, sizeof(hparams.n_vocab));
fin.read((char *)&hparams.n_ctx, sizeof(hparams.n_ctx));
fin.read((char *)&hparams.n_embd, sizeof(hparams.n_embd));
fin.read((char *)&hparams.n_head, sizeof(hparams.n_head));
fin.read((char *)&hparams.n_layer, sizeof(hparams.n_layer));
fin.read((char *)&hparams.ftype, sizeof(hparams.ftype));
const int32_t qntvr = hparams.ftype / GGML_QNT_VERSION_FACTOR;
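// the ftype field packs the quantization version:
// ftype = qntvr * GGML_QNT_VERSION_FACTOR + file type,
// hence the division here and the modulo below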
printf("%s: n_vocab = %d\n", __func__, hparams.n_vocab);
printf("%s: n_ctx = %d\n", __func__, hparams.n_ctx);
printf("%s: n_embd = %d\n", __func__, hparams.n_embd);
printf("%s: n_head = %d\n", __func__, hparams.n_head);
printf("%s: n_layer = %d\n", __func__, hparams.n_layer);
printf("%s: ftype = %d\n", __func__, hparams.ftype);
printf("%s: qntvr = %d\n", __func__, qntvr);
hparams.ftype %= GGML_QNT_VERSION_FACTOR;
}
// load vocab
{
int32_t n_vocab = 0;
fin.read((char *)&n_vocab, sizeof(n_vocab));
if (n_vocab != model.hparams.n_vocab)
{
fprintf(stderr, "%s: invalid model file '%s' (bad vocab size %d != %d)\n",
__func__, fname.c_str(), n_vocab, model.hparams.n_vocab);
return false;
}
std::string word;
std::vector<char> buf(128);
for (int i = 0; i < n_vocab; i++)
{
uint32_t len;
fin.read((char *)&len, sizeof(len));
buf.resize(len);
fin.read((char *)buf.data(), len);
word.assign(buf.data(), len);
vocab.token_to_id[word] = i;
vocab.id_to_token[i] = word;
}
}
// for the big tensors, we have the option to store the data in 16-bit floats or quantized
// in order to save memory and also to speed up the computation
ggml_type wtype = ggml_ftype_to_ggml_type((ggml_ftype)(model.hparams.ftype));
if (wtype == GGML_TYPE_COUNT)
{
fprintf(stderr, "%s: invalid model file '%s' (bad ftype value %d)\n",
__func__, fname.c_str(), model.hparams.ftype);
return false;
}
auto &ctx = model.ctx_w;
// create the ggml context, sized by the number of model tensors (metadata only)
{
size_t n_tensors = 2 + 6 + 12 * model.hparams.n_layer;
struct ggml_init_params params = {
/*.mem_size =*/ggml_tensor_overhead() * n_tensors,
/*.mem_buffer =*/NULL,
/*.no_alloc =*/true,
};
ctx = ggml_init(params);
if (!ctx)
{
fprintf(stderr, "%s: ggml_init() failed\n", __func__);
return false;
}
}
// initialize the backend (CUDA, if enabled)
#ifdef GGML_USE_CUDA
if (n_gpu_layers > 0)
{
fprintf(stderr, "%s: using CUDA backend\n", __func__);
model.backend = ggml_backend_cuda_init(0);
if (!model.backend)
{
fprintf(stderr, "%s: ggml_backend_cuda_init() failed\n", __func__);
}
}
#endif
// initialize the Metal backend, if enabled
#ifdef GGML_USE_METAL
if (n_gpu_layers > 0)
{
fprintf(stderr, "%s: using Metal backend\n", __func__);
ggml_backend_metal_log_set_callback(ggml_log_callback_default, nullptr);
model.backend = ggml_backend_metal_init();
if (!model.backend)
{
fprintf(stderr, "%s: ggml_backend_metal_init() failed\n", __func__);
}
}
#endif
// initialize the CPU backend
if (!model.backend)
{
// fallback to CPU backend
fprintf(stderr, "%s: using CPU backend\n", __func__);
model.backend = ggml_backend_cpu_init();
}
if (!model.backend)
{
fprintf(stderr, "%s: ggml_backend_cpu_init() failed\n", __func__);
return false;
}
// create the tensors for the model
{
const auto &hparams = model.hparams;
const int n_embd = hparams.n_embd;
const int n_layer = hparams.n_layer;
const int n_ctx = hparams.n_ctx;
const int n_vocab = hparams.n_vocab;
model.layers.resize(n_layer);
model.ln_f_g = ggml_new_tensor_1d(ctx, GGML_TYPE_F32, n_embd);
model.ln_f_b = ggml_new_tensor_1d(ctx, GGML_TYPE_F32, n_embd);
model.wte = ggml_new_tensor_2d(ctx, wtype, n_embd, n_vocab);
model.wpe = ggml_new_tensor_2d(ctx, GGML_TYPE_F32, n_embd, n_ctx);
model.lm_head = ggml_new_tensor_2d(ctx, wtype, n_embd, n_vocab);
// map by name
model.tensors["model/ln_f/g"] = model.ln_f_g;
model.tensors["model/ln_f/b"] = model.ln_f_b;
model.tensors["model/wte"] = model.wte;
model.tensors["model/wpe"] = model.wpe;
model.tensors["model/lm_head"] = model.lm_head;
// create the per-layer tensors
for (int i = 0; i < n_layer; ++i)
{
auto &layer = model.layers[i];
layer.ln_1_g = ggml_new_tensor_1d(ctx, GGML_TYPE_F32, n_embd);
layer.ln_1_b = ggml_new_tensor_1d(ctx, GGML_TYPE_F32, n_embd);
layer.ln_2_g = ggml_new_tensor_1d(ctx, GGML_TYPE_F32, n_embd);
layer.ln_2_b = ggml_new_tensor_1d(ctx, GGML_TYPE_F32, n_embd);
layer.c_attn_attn_w = ggml_new_tensor_2d(ctx, wtype, n_embd, 3 * n_embd);
layer.c_attn_attn_b = ggml_new_tensor_1d(ctx, GGML_TYPE_F32, 3 * n_embd);
layer.c_attn_proj_w = ggml_new_tensor_2d(ctx, wtype, n_embd, n_embd);
layer.c_attn_proj_b = ggml_new_tensor_1d(ctx, GGML_TYPE_F32, n_embd);
layer.c_mlp_fc_w = ggml_new_tensor_2d(ctx, wtype, n_embd, 4 * n_embd);
layer.c_mlp_fc_b = ggml_new_tensor_1d(ctx, GGML_TYPE_F32, 4 * n_embd);
layer.c_mlp_proj_w = ggml_new_tensor_2d(ctx, wtype, 4 * n_embd, n_embd);
layer.c_mlp_proj_b = ggml_new_tensor_1d(ctx, GGML_TYPE_F32, n_embd);
// map by name
model.tensors["model/h" + std::to_string(i) + "/ln_1/g"] = layer.ln_1_g;
model.tensors["model/h" + std::to_string(i) + "/ln_1/b"] = layer.ln_1_b;
model.tensors["model/h" + std::to_string(i) + "/ln_2/g"] = layer.ln_2_g;
model.tensors["model/h" + std::to_string(i) + "/ln_2/b"] = layer.ln_2_b;
model.tensors["model/h" + std::to_string(i) + "/attn/c_attn/w"] = layer.c_attn_attn_w;
model.tensors["model/h" + std::to_string(i) + "/attn/c_attn/b"] = layer.c_attn_attn_b;
model.tensors["model/h" + std::to_string(i) + "/attn/c_proj/w"] = layer.c_attn_proj_w;
model.tensors["model/h" + std::to_string(i) + "/attn/c_proj/b"] = layer.c_attn_proj_b;
model.tensors["model/h" + std::to_string(i) + "/mlp/c_fc/w"] = layer.c_mlp_fc_w;
model.tensors["model/h" + std::to_string(i) + "/mlp/c_fc/b"] = layer.c_mlp_fc_b;
model.tensors["model/h" + std::to_string(i) + "/mlp/c_proj/w"] = layer.c_mlp_proj_w;
model.tensors["model/h" + std::to_string(i) + "/mlp/c_proj/b"] = layer.c_mlp_proj_b;
}
}
// allocate the model tensors in a backend buffer
model.buffer_w = ggml_backend_alloc_ctx_tensors(ctx, model.backend);
printf("%s: ggml tensor size = %d bytes\n", __func__, (int)sizeof(ggml_tensor));
printf("%s: backend buffer size = %6.2f MB\n", __func__, ggml_backend_buffer_get_size(model.buffer_w) / (1024.0 * 1024.0));
// override the default training context with the user-provided value
model.hparams.n_ctx = n_ctx;
// key + value memory: create the KV-cache context and tensors
{
auto &ctx = model.ctx_kv; // a reference, so the freshly created context is stored back into the model
// create the ggml context
{
size_t n_tensors = 2;
struct ggml_init_params params = {
/*.mem_size =*/ggml_tensor_overhead() * n_tensors,
/*.mem_buffer =*/NULL,
/*.no_alloc =*/true,
};
ctx = ggml_init(params);
if (!ctx)
{
fprintf(stderr, "%s: ggml_init() failed\n", __func__);
return false;
}
}
const auto &hparams = model.hparams;
const int n_embd = hparams.n_embd;
const int n_layer = hparams.n_layer;
const int n_ctx = hparams.n_ctx;
const int n_mem = n_layer * n_ctx;
const int n_elements = n_embd * n_mem;
model.memory_k = ggml_new_tensor_1d(ctx, GGML_TYPE_F32, n_elements);
model.memory_v = ggml_new_tensor_1d(ctx, GGML_TYPE_F32, n_elements);
// allocate the KV memory in a backend buffer
model.buffer_kv = ggml_backend_alloc_ctx_tensors(ctx, model.backend);
// report the size of the KV buffer
const size_t memory_size = ggml_backend_buffer_get_size(model.buffer_kv);
printf("%s: memory size = %8.2f MB, n_mem = %d\n", __func__, memory_size / 1024.0 / 1024.0, n_mem);
}
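// with the default hparams and n_ctx = 1024 this comes to:
//   n_mem = 12 * 1024 = 12288, n_elements = 768 * 12288 = 9437184
//   2 F32 tensors * 9437184 elements * 4 bytes = 72.00 MB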
// load weights
{
size_t total_size = 0;
bool has_lm_head = false;
std::vector<char> read_buf;
while (true)
{
int32_t n_dims;
int32_t length;
int32_t ttype;
// read the tensor's number of dimensions, name length, and type
fin.read(reinterpret_cast<char *>(&n_dims), sizeof(n_dims));
fin.read(reinterpret_cast<char *>(&length), sizeof(length));
fin.read(reinterpret_cast<char *>(&ttype), sizeof(ttype));
if (fin.eof())
{
break;
}
int32_t nelements = 1; // total number of elements in the tensor
int32_t ne[2] = {1, 1};
for (int i = 0; i < n_dims; ++i)
{ // read each dimension size
fin.read(reinterpret_cast<char *>(&ne[i]), sizeof(ne[i]));
nelements *= ne[i];
}
std::string name(length, 0);
fin.read(&name[0], length); // read the tensor name
// check that the tensor name exists in the model's tensor map
if (model.tensors.find(name) == model.tensors.end())
{
fprintf(stderr, "%s: unknown tensor '%s' in model file\n", __func__, name.c_str());
return false;
}
// look up the tensor
auto tensor = model.tensors[name];
ggml_set_name(tensor, name.c_str());
// verify the element count
if (ggml_nelements(tensor) != nelements)
{
fprintf(stderr, "%s: tensor '%s' has wrong size in model file\n", __func__, name.c_str());
return false;
}
// verify that the first two dimensions match
if (tensor->ne[0] != ne[0] || tensor->ne[1] != ne[1])
{
fprintf(stderr, "%s: tensor '%s' has wrong shape in model file: got [%d, %d], expected [%d, %d]\n",
__func__, name.c_str(), (int)tensor->ne[0], (int)tensor->ne[1], ne[0], ne[1]);
return false;
}
// for debugging
if (0)
{
printf("%24s - [%5d, %5d], type = %6s, %6.2f MB, %9zu bytes\n", name.c_str(), ne[0], ne[1], ggml_type_name(ggml_type(ttype)), ggml_nbytes(tensor) / 1024.0 / 1024.0, ggml_nbytes(tensor));
}
// verify the tensor's size in bytes
const size_t bpe = ggml_type_size(ggml_type(ttype));
if ((nelements * bpe) / ggml_blck_size(tensor->type) != ggml_nbytes(tensor))
{
fprintf(stderr, "%s: tensor '%s' has wrong size in model file: got %zu, expected %zu\n",
__func__, name.c_str(), ggml_nbytes(tensor), nelements * bpe);
return false;
}
// read the tensor data into backend memory
if (ggml_backend_buffer_is_host(model.buffer_w))
{
// for some backends such as CPU and Metal, the tensor data is in system memory and we can read directly into it
fin.read(reinterpret_cast<char *>(tensor->data), ggml_nbytes(tensor));
}
else
{
// read into a temporary buffer first, then copy to device memory
read_buf.resize(ggml_nbytes(tensor));
fin.read(read_buf.data(), ggml_nbytes(tensor));
ggml_backend_tensor_set(tensor, read_buf.data(), 0, ggml_nbytes(tensor));
}
// GPT-2 models share the WTE tensor as the LM head
if (name == "model/wte" && has_lm_head == false)
{
// ggml_backend_tensor_copy(tensor, model.lm_head);
model.lm_head = tensor;
}
if (name == "model/lm_head")
{
has_lm_head = true;
}
// accumulate the total size of the weights
total_size += ggml_nbytes(tensor);
}
printf("%s: model size = %8.2f MB\n", __func__, total_size / 1024.0 / 1024.0);
}
fin.close();
return true;
}
// build the computation graph
struct ggml_cgraph *gpt2_graph(
const gpt2_model &model,
const int n_past,
const int n_tokens)
{
const int N = n_tokens;
const auto &hparams = model.hparams;
const int n_embd = hparams.n_embd;
const int n_layer = hparams.n_layer;
const int n_ctx = hparams.n_ctx;
const int n_head = hparams.n_head;
// since we are using ggml-alloc, this buffer only needs enough space to hold the ggml_tensor and ggml_cgraph structs, but not the tensor data
static size_t buf_size = ggml_tensor_overhead() * GPT2_MAX_NODES + ggml_graph_overhead_custom(GPT2_MAX_NODES, false);
static std::vector<uint8_t> buf(buf_size);
struct ggml_init_params params = {
/*.mem_size =*/buf_size,
/*.mem_buffer =*/buf.data(),
/*.no_alloc =*/true, // the tensors will be allocated later by ggml_gallocr_alloc_graph()
};
struct ggml_context *ctx = ggml_init(params);
struct ggml_cgraph *gf = ggml_new_graph_custom(ctx, GPT2_MAX_NODES, false);
struct ggml_tensor *embd = ggml_new_tensor_1d(ctx, GGML_TYPE_I32, N);
// at this point, the tensor data is not allocated yet and cannot be set
// we will find the tensor after the graph is allocated by its name, and set the data then
ggml_set_name(embd, "embd");
// setting a tensor as an input will ensure that it is allocated at the beginning of the graph
// this is important to ensure that the input tensors are not overwritten before they are used
ggml_set_input(embd);
struct ggml_tensor *position = ggml_new_tensor_1d(ctx, GGML_TYPE_I32, N);
ggml_set_name(position, "position");
ggml_set_input(position);
// wte + wpe: token embedding plus position embedding
struct ggml_tensor *inpL =
ggml_add(ctx,
ggml_get_rows(ctx, model.wte, embd),
ggml_get_rows(ctx, model.wpe, position));
// forward pass through all layers
for (int il = 0; il < n_layer; ++il)
{
struct ggml_tensor *cur;
// norm
{
// [ 768, N]
cur = ggml_norm(ctx, inpL, hparams.eps);
// cur = ln_1_g*cur + ln_1_b
// [ 768, N]
cur = ggml_add(ctx,
ggml_mul(ctx,
cur,
model.layers[il].ln_1_g),
model.layers[il].ln_1_b);
}
// attn
// [2304, 768] - model.layers[il].c_attn_attn_w
// [2304, 1] - model.layers[il].c_attn_attn_b
// [ 768, N] - cur (in)
// [2304, N] - cur (out)
//
// cur = attn_w*cur + attn_b
// [2304, N]
{
cur = ggml_mul_mat(ctx,
model.layers[il].c_attn_attn_w,
cur);
cur = ggml_add(ctx,
cur,
model.layers[il].c_attn_attn_b);
}
// self-attention
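// in matrix form, the block below computes causal scaled dot-product attention:
//   Attention(Q, K, V) = softmax(Q * K^T / sqrt(d_head)) * V
// with d_head = n_embd / n_head = 64; the causal mask is applied before the softmax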
{
struct ggml_tensor *Qcur = ggml_view_2d(ctx, cur, n_embd, N, cur->nb[1], 0 * sizeof(float) * n_embd);
struct ggml_tensor *Kcur = ggml_view_2d(ctx, cur, n_embd, N, cur->nb[1], 1 * sizeof(float) * n_embd);
struct ggml_tensor *Vcur = ggml_view_2d(ctx, cur, n_embd, N, cur->nb[1], 2 * sizeof(float) * n_embd);
// store key and value to the KV cache in backend memory
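// each layer owns a contiguous slab of n_ctx * n_embd elements in the cache,
// so the element offset for the current step is (il * n_ctx + n_past) * n_embd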
if (N >= 1)
{
struct ggml_tensor *k = ggml_view_1d(ctx, model.memory_k, N * n_embd, (ggml_element_size(model.memory_k) * n_embd) * (il * n_ctx + n_past));
struct ggml_tensor *v = ggml_view_1d(ctx, model.memory_v, N * n_embd, (ggml_element_size(model.memory_v) * n_embd) * (il * n_ctx + n_past));
ggml_build_forward_expand(gf, ggml_cpy(ctx, Kcur, k));
ggml_build_forward_expand(gf, ggml_cpy(ctx, Vcur, v));
}
// Q = Qcur.contiguous().view(n_embd/n_head, n_head, N).permute(0, 2, 1, 3)
// [64, N, 12]
struct ggml_tensor *Q =
ggml_permute(ctx,
ggml_cont_3d(ctx, Qcur, n_embd / n_head, n_head, N),
0, 2, 1, 3);
// K = Kmem.view(n_embd/n_head, n_head, n_past + N).permute(0, 2, 1, 3)
// [64, n_past + N, 12]
struct ggml_tensor *K =
ggml_permute(ctx,
ggml_reshape_3d(ctx,
ggml_view_1d(ctx, model.memory_k, (n_past + N) * n_embd, il * n_ctx * ggml_element_size(model.memory_k) * n_embd),
n_embd / n_head, n_head, n_past + N),
0, 2, 1, 3);
// GG: flash attention
// struct ggml_tensor * V =
// ggml_cpy(ctx0,
// ggml_permute(ctx0,
// ggml_reshape_3d(ctx0,
// ggml_view_1d(ctx0, model.memory_v, (n_past + N)*n_embd, il*n_ctx*ggml_element_size(model.memory_v)*n_embd),
// n_embd/n_head, n_head, n_past + N),
// 1, 2, 0, 3),
// ggml_new_tensor_3d(ctx0, GGML_TYPE_F32, n_past + N, n_embd/n_head, n_head));
// struct ggml_tensor * KQV = ggml_flash_attn(ctx0, Q, K, V, true);
// K * Q
// [n_past + N, N, 12]
struct ggml_tensor *KQ = ggml_mul_mat(ctx, K, Q);
// KQ_scaled = KQ / sqrt(n_embd/n_head)
// [n_past + N, N, 12]
struct ggml_tensor *KQ_scaled =
ggml_scale(ctx,
KQ,
1.0f / sqrtf(float(n_embd) / n_head));
// KQ_masked = mask_past(KQ_scaled)
// [n_past + N, N, 12]
struct ggml_tensor *KQ_masked = ggml_diag_mask_inf(ctx, KQ_scaled, n_past);
// KQ = soft_max(KQ_masked)
// [n_past + N, N, 12]
struct ggml_tensor *KQ_soft_max = ggml_soft_max(ctx, KQ_masked);
// V_trans = Vmem.view(n_embd/n_head, n_head, n_past + N).permute(1, 2, 0, 3).contiguous()
// [n_past + N, 64, 12]
struct ggml_tensor *V_trans =
ggml_cont_3d(ctx,
ggml_permute(ctx,
ggml_reshape_3d(ctx,
ggml_view_1d(ctx, model.memory_v, (n_past + N) * n_embd, il * n_ctx * ggml_element_size(model.memory_v) * n_embd),
n_embd / n_head, n_head, n_past + N),
1, 2, 0, 3),
n_past + N, n_embd / n_head, n_head);
// KQV = transpose(V) * KQ_soft_max
// [64, N, 12]
struct ggml_tensor *KQV = ggml_mul_mat(ctx, V_trans, KQ_soft_max);
// KQV_merged = KQV.permute(0, 2, 1, 3)
// [64, 12, N]
struct ggml_tensor *KQV_merged = ggml_permute(ctx, KQV, 0, 2, 1, 3);
// cur = KQV_merged.contiguous().view(n_embd, N)
// [768, N]
cur = ggml_cont_2d(ctx, KQV_merged, n_embd, N);
}
// projection
// [ 768, 768] - model.layers[il].c_attn_proj_w
// [ 768, 1] - model.layers[il].c_attn_proj_b
// [ 768, N] - cur (in)
// [ 768, N] - cur (out)
//
// cur = proj_w*cur + proj_b
// [768, N]
{
cur = ggml_mul_mat(ctx,
model.layers[il].c_attn_proj_w,
cur);
cur = ggml_add(ctx,
cur,
model.layers[il].c_attn_proj_b);
}
// add the input (residual connection)
cur = ggml_add(ctx, cur, inpL);
struct ggml_tensor *inpFF = cur;
// feed-forward network
{
// norm
{
cur = ggml_norm(ctx, inpFF, hparams.eps);
// cur = ln_2_g*cur + ln_2_b
// [ 768, N]
cur = ggml_add(ctx,
ggml_mul(ctx,
cur,
model.layers[il].ln_2_g),
model.layers[il].ln_2_b);
}
// fully connected
// [3072, 768] - model.layers[il].c_mlp_fc_w
// [3072, 1] - model.layers[il].c_mlp_fc_b
// [ 768, N] - cur (in)
// [3072, N] - cur (out)
//
// cur = fc_w*cur + fc_b
// [3072, N]
cur = ggml_mul_mat(ctx,
model.layers[il].c_mlp_fc_w,
cur);
cur = ggml_add(ctx,
cur,
model.layers[il].c_mlp_fc_b);
// GELU activation
// [3072, N]
cur = ggml_gelu(ctx, cur);
// projection
// [ 768, 3072] - model.layers[il].c_mlp_proj_w
// [ 768, 1] - model.layers[il].c_mlp_proj_b
// [3072, N] - cur (in)
// [ 768, N] - cur (out)
//
// cur = proj_w*cur + proj_b
// [768, N]
cur = ggml_mul_mat(ctx,
model.layers[il].c_mlp_proj_w,
cur);
cur = ggml_add(ctx,
cur,
model.layers[il].c_mlp_proj_b);
}
// input for next layer
inpL = ggml_add(ctx, cur, inpFF);
}
// norm
{
// [ 768, N]
inpL = ggml_norm(ctx, inpL, hparams.eps);
// inpL = ln_f_g*inpL + ln_f_b
// [ 768, N]
inpL = ggml_add(ctx,
ggml_mul(ctx,
inpL,
model.ln_f_g),
model.ln_f_b);
}
// inpL = WTE * inpL
// [ 768, 50257] - model.lm_head
// [ 768, N] - inpL
inpL = ggml_mul_mat(ctx, model.lm_head, inpL);
ggml_set_name(inpL, "logits");
// setting a tensor as the output will ensure that it is not overwritten by subsequent operations
ggml_set_output(inpL);
// logits -> probs
// inpL = ggml_soft_max(ctx0, inpL);
ggml_build_forward_expand(gf, inpL);
// the context can now be freed: the graph and tensor metadata live in the static buf above
ggml_free(ctx);
// return the graph
return gf;
}
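// note: the graph is rebuilt on every evaluation because its shapes depend on
// n_past and n_tokens; only cheap tensor/graph metadata is re-created here, while
// the compute buffers are reused through the graph allocator (ggml_gallocr)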
// evaluate the transformer
//
// - model: the model
// - allocr: ggml_gallocr to use to allocate the compute buffer
// - n_threads: number of threads to use
// - n_past: the context size so far
// - embd_inp: the embeddings of the tokens in the context
// - embd_w: the predicted logits for the next token
//
bool gpt2_eval(
const gpt2_model &model,
ggml_gallocr_t allocr,
const int n_threads,
const int n_past,
const std::vector<gpt_vocab::id> &embd_inp,
std::vector<float> &embd_w)
{
// number of input tokens
const int N = embd_inp.size();
const auto &hparams = model.hparams;
// vocabulary size
const int n_vocab = hparams.n_vocab;
// build the computation graph
struct ggml_cgraph *gf = gpt2_graph(model, n_past, embd_inp.size());
// allocate the graph tensors in the backend
ggml_gallocr_alloc_graph(allocr, gf);
// set the graph inputs
struct ggml_tensor *embd = ggml_graph_get_tensor(gf, "embd");
ggml_backend_tensor_set(embd, embd_inp.data(), 0, N * ggml_element_size(embd));
// set the position tensor
struct ggml_tensor *position = ggml_graph_get_tensor(gf, "position");
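// positions are the absolute token indices n_past .. n_past + N - 1,
// used to select rows of the wpe position-embedding matrix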
for (int i = 0; i < N; ++i)
{
int32_t v = n_past + i;
ggml_backend_tensor_set(position, &v, i * sizeof(int32_t), sizeof(v));
}
// set backend options
if (ggml_backend_is_cpu(model.backend))
{
ggml_backend_cpu_set_n_threads(model.backend, n_threads);
}
#ifdef GGML_USE_METAL
if (ggml_backend_is_metal(model.backend))
{
ggml_backend_metal_set_n_cb(model.backend, n_threads);
}
#endif
// run the computation
ggml_backend_graph_compute(model.backend, gf);
// if (n_past%100 == 0) {
// ggml_graph_print (&gf);
// ggml_graph_dump_dot(&gf, NULL, "gpt-2.dot");
// }
// get the graph outputs
struct ggml_tensor *logits = ggml_graph_get_tensor(gf, "logits");
// embd_w.resize(n_vocab*N);
// ggml_backend_tensor_get(logits, embd_w.data(), 0, sizeof(float)*n_vocab*N);
// return the result just for the last token, stored in embd_w
embd_w.resize(n_vocab);
ggml_backend_tensor_get(logits, embd_w.data(), (n_vocab * (N - 1)) * sizeof(float), sizeof(float) * n_vocab);
return true;
}
int main(int argc, char **argv)
{
ggml_time_init();
const int64_t t_main_start_us = ggml_time_us();
gpt_params params;
params.model = "models/gpt-2-117M/ggml-model.bin";
// parse command-line arguments
if (gpt_params_parse(argc, argv, params) == false)
{
return 1;
}
// set the random seed
if (params.seed < 0)
{
params.seed = time(NULL);
}
printf("%s: seed = %d\n", __func__, params.seed);
// use a random prompt if none was provided
std::mt19937 rng(params.seed);
if (params.prompt.empty())
{
params.prompt = gpt_random_prompt(rng);
}
int64_t t_load_us = 0;
gpt_vocab vocab;
gpt2_model model;
// load the model
{
const int64_t t_start_us = ggml_time_us();
if (!gpt2_model_load(params.model, model, vocab, params.n_ctx, params.n_gpu_layers))
{
fprintf(stderr, "%s: failed to load model from '%s'\n", __func__, params.model.c_str());
return 1;
}
t_load_us = ggml_time_us() - t_start_us;
test_gpt_tokenizer(vocab, params.token_test);
}
ggml_gallocr_t allocr = NULL;
// allocate the compute buffer
{
// create a graph allocator with the backend's default buffer type
allocr = ggml_gallocr_new(ggml_backend_get_default_buffer_type(model.backend));
// create the worst case graph for memory usage estimation
int n_tokens = std::min(model.hparams.n_ctx, params.n_batch);
int n_past = model.hparams.n_ctx - n_tokens;
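// the worst case is a full batch of n_tokens with the KV cache already holding
// n_ctx - n_tokens past tokens; no later call can require a larger compute buffer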
struct ggml_cgraph *gf = gpt2_graph(model, n_past, n_tokens);
// pre-allocate the compute buffer for the worst case (optional)
ggml_gallocr_reserve(allocr, gf);
size_t mem_size = ggml_gallocr_get_buffer_size(allocr, 0);
fprintf(stderr, "%s: compute buffer size: %.2f MB\n", __func__, mem_size / 1024.0 / 1024.0);
}
int n_past = 0;
int64_t t_sample_us = 0;
int64_t t_predict_us = 0;
std::vector<float> logits;
// tokenize the prompt into embd_inp
std::vector<gpt_vocab::id> embd_inp = ::gpt_tokenize(vocab, params.prompt);
// cap the number of tokens to predict by the remaining context size
params.n_predict = std::min(params.n_predict, model.hparams.n_ctx - (int)embd_inp.size());
// print the prompt and its first 8 token ids
printf("%s: prompt: '%s'\n", __func__, params.prompt.c_str());
printf("%s: number of tokens in prompt = %zu, first 8 tokens: ", __func__, embd_inp.size());
for (int i = 0; i < std::min(8, (int)embd_inp.size()); i++)
{
printf("%d ", embd_inp[i]);
}
printf("\n\n");
// submit the input prompt token-by-token
// this reduces the memory usage during inference, at the cost of a bit of speed at the beginning
std::vector<gpt_vocab::id> embd;
// inference loop
for (size_t i = embd.size(); i < embd_inp.size() + params.n_predict; i++)
{
// predict
if (embd.size() > 0)
{
const int64_t t_start_us = ggml_time_us();
// evaluate via the computation graph
if (!gpt2_eval(model, allocr, params.n_threads, n_past, embd, logits))
{
printf("Failed to predict\n");
return 1;
}
t_predict_us += ggml_time_us() - t_start_us;
}
// advance n_past by the tokens just processed
n_past += embd.size();
embd.clear();
// once i is past the prompt, sample the next token
if (i >= embd_inp.size())
{
// sample next token
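// top_k keeps only the k most likely tokens, top_p keeps the smallest set whose
// cumulative probability exceeds p, and temp scales the logits before sampling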
const int top_k = params.top_k;
const float top_p = params.top_p;
const float temp = params.temp;
const int n_vocab = model.hparams.n_vocab;
gpt_vocab::id id = 0;
{
const int64_t t_start_sample_us = ggml_time_us();
id = gpt_sample_top_k_top_p(vocab, logits.data() + (logits.size() - n_vocab), top_k, top_p, temp, rng);
t_sample_us += ggml_time_us() - t_start_sample_us;
}
// add it to the context
embd.push_back(id);
}
else
{
// if here, it means we are still processing the input prompt
for (size_t k = i; k < embd_inp.size(); k++)
{
embd.push_back(embd_inp[k]);
if (int32_t(embd.size()) >= params.n_batch)
{
break;
}
}
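// together with the loop's own i++, the next line advances i by exactly the
// number of prompt tokens consumed in this batch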
i += embd.size() - 1;
}
// display text
for (auto id : embd)
{
printf("%s", vocab.id_to_token[id].c_str());
}
fflush(stdout);
// stop at the end-of-text token (id 50256, <|endoftext|> in the GPT-2 vocab)
if (!params.ignore_eos && embd.back() == 50256)
{
break;
}
}
// report timing
{
const int64_t t_main_end_us = ggml_time_us();
printf("\n\n");
printf("%s: load time = %8.2f ms\n", __func__, t_load_us / 1000.0f);
printf("%s: sample time = %8.2f ms\n", __func__, t_sample_us / 1000.0f);
printf("%s: predict time = %8.2f ms / %.2f ms per token\n", __func__, t_predict_us / 1000.0f, t_predict_us / 1000.0f / n_past);
printf("%s: total time = %8.2f ms\n", __func__, (t_main_end_us - t_main_start_us) / 1000.0f);
}
// free resources
ggml_free(model.ctx_w);
ggml_gallocr_free(allocr);
ggml_backend_buffer_free(model.buffer_w);
ggml_backend_buffer_free(model.buffer_kv);
ggml_backend_free(model.backend);
return 0;
}