Mirror of https://github.com/ollama/ollama.git
* TEMPORARY: Update the llama.cpp upstream to my fork's Granite Four branch
This will be redone once my branch is merged upstream in llama.cpp
* feat: Update all patches
There are a number that are no longer needed at all:
- 0003-embeddings: Embeddings entirely overhauled on master
- 0008-ensure-KV-cache-is-fully-defragmented: KV caching entirely
overhauled on master
- 0019-metal-add-mean-kernel-14267: Merged upstream
- 0020-CUDA-add-mean-operation-14313: Merged upstream
* feat: Sync llama.cpp and ggml
* fix: Update rsync-filter for all moved/new/removed files
* fix: Add files missing from sync
* fix: Update ggml rsync-filter for new ggml-cpu/arch subdirs
* fix: Add ggml files missing from sync
* fix: Narrow llama.cpp rsync-filter to not include mtmd main tool cpp files
* fix: Remove mtmd main cpp files
* fix: Add missing include in sampling_ext.cpp
* fix: Update llama.go to use mtmd instead of clip/llava
* fix: Add patch for mtmd_input_text
* chore: Ignore *.patched in the patch directory
* fix: Fix support for arch-specific ggml-cpu source files with new arrangement
In https://github.com/ggml-org/llama.cpp/pull/13892, all arch-specific
implementations were split out into a nested tree structure under
ggml-cpu/arch. This conflicts with standard CGO layout where all
arch-specific source files are expected to live in the same directory as
the parent Go module and use suffixes based on GOOS and GOARCH. As such,
there were really two options for getting this to work:
1. Add a patch on top of the GGML sync to rearrange the files to match the
Go layout convention
2. Use CGO directives to conditionally include the nested source files in
the compilation units
This commit does (2) in order to minimize the set of changes needed on top
of the upstream file layout. To get this to work, there are two key things
needed:
1. In cpu.go, #cgo directives are added to explicitly set __${GOARCH}__ in
the preprocessor directives
2. In arch-impls.c|cpp, use an #ifdef | #elif defined | #endif chain to
explicitly include the .c|.cpp files for the given architecture from the
nested directory
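As an illustration of (2) above — and assuming cpu.go passes a per-arch define such as -D__amd64__ or -D__arm64__ via #cgo CFLAGS as described in (1) — a minimal, hypothetical wrapper C file might look like the sketch below. The file name and the included paths are illustrative only, not the exact names used in the ollama/ggml tree:

    /* arch-impls.c (hypothetical wrapper, compiled once by cgo on every platform) */
    #if defined(__amd64__) || defined(__x86_64__)
        /* pull in the x86-specific implementations from the nested arch tree */
        #include "ggml-cpu/arch/x86/quants.c"
    #elif defined(__arm64__) || defined(__aarch64__)
        /* pull in the ARM-specific implementations from the nested arch tree */
        #include "ggml-cpu/arch/arm/quants.c"
    #endif
    /* architectures without a nested implementation fall back to the generic code */

This keeps the vendored file layout identical to upstream while still letting cgo compile only the sources that match the target architecture.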
* fix: Use mtmd_helper to correctly load the bitmap for the image
* fix: Apply patch for mtmd_text_input
* fix: Add missing stb to llama.cpp rsync-filter
* fix: Add sync'ed stb vendored header
* fix: Use c++17 and include vendor for go wrapper modules
* fix: Update patch 0015 for upstream implementation of uuid
* feat: Bump to the latest tip of the branch
* fix: Update patches for bump
* feat: Bump back to the central repo and point at the latest master
This includes Granite 4 and a number of other model architectures!
* fix: Revert changes to ggml export GPU UUID patch
* fix: Add patch for GGML_VERSION and GGML_COMMIT constants
* feat: Sync all patched code
* build: Include cmake/common.cmake in ggml sync
* build: Add top-level include for GNUInstallDirs in CMakeLists.txt
This is used to populate CMAKE_INSTALL_BINDIR
* fix: Add a patch to avoid power throttling API on non-msvc windows builds
* fix: Sync patch changes for ggml-cpu.c
* feat: Bump llama.cpp to 4a4f42
This picks up support for Kimi K2 and PLaMO-2
* feat: Sync llama.cpp
* fix: Handle multi-chunk image encodings from mtmd
* fix: Re-number patches after merge with `main`
* feat: Bump to 41e78c in the makefile
* fix: Fix Solar and argsort/copy patches after bump
* fix: Remove Gemma3n CUDA Graphs patch
It was implemented upstream:
https://github.com/ggml-org/llama.cpp/pull/14741
* feat: Sync llama.cpp / ggml after latest bump
* build: Remove unnecessary CFLAGS definitions in cpu.go
* fix: Remove unnecessary additions in the rsync-filter
* fix: Remove unused vendored code for chat template parsing
* Revert "fix: Remove Gemma3n CUDA Graphs patch"
This reverts commit d724caced3.
* fix: Update 0020 CUDA Graphs for gemma3n to keep both llama.cpp and ollama fixes
https://github.com/ollama/ollama/pull/11195#issuecomment-3137312394
* fix: Sync ggml-cuda.cu after keeping both style cuda graph fixes for gemma3n
* unwind mxfp4 patch
Prepare to bump ggml with their impl for mxfp4
* bump
* fix windows build error
* Convert tensors at load time
Repack the mxfp4 tensors as ggml's kernels expect them to be.
* convert mlp bf16 to f32
* buffer the conversion better
* reshape earlier
* openai swiglu
* add ids
* split qkv, gate_up
* fix nested alt tags
* fast attention
* remove debug messages
* fix lint
* remove redundant test
* remap values only if source/target are different
* add back i32->i32 copy
* refactor cpu quants
* clean up vendor
* update patch instructions
* clean up patches
* remove webgpu
* update mem
* also handle gpt-oss
* revert convert changes
---------
Signed-off-by: Gabe Goodhart <ghart@us.ibm.com>
Co-authored-by: Gabe Goodhart <ghart@us.ibm.com>
Co-authored-by: Daniel Hiltgen <daniel@ollama.com>
1914 lines · 62 KiB · C++ · Vendored
#include "llama-graph.h"
|
|
|
|
#include "llama-impl.h"
|
|
#include "llama-batch.h"
|
|
#include "llama-cparams.h"
|
|
|
|
#include "llama-kv-cache-unified.h"
|
|
#include "llama-kv-cache-unified-iswa.h"
|
|
#include "llama-memory-hybrid.h"
|
|
#include "llama-memory-recurrent.h"
|
|
|
|
#include <cassert>
|
|
#include <cmath>
|
|
#include <cstring>
|
|
|
|
void llm_graph_input_embd::set_input(const llama_ubatch * ubatch) {
    if (ubatch->token) {
        const int64_t n_tokens = ubatch->n_tokens;

        ggml_backend_tensor_set(tokens, ubatch->token, 0, n_tokens*ggml_element_size(tokens));
    }

    if (ubatch->embd) {
        const int64_t n_embd   = embd->ne[0];
        const int64_t n_tokens = ubatch->n_tokens;

        ggml_backend_tensor_set(embd, ubatch->embd, 0, n_tokens*n_embd*ggml_element_size(embd));
    }
}

bool llm_graph_input_embd::can_reuse(const llm_graph_params & params) {
    bool res = true;

    res &= (!tokens && !params.ubatch.token) || (tokens && tokens->ne[0] == params.ubatch.n_tokens);
    res &= (!embd   && !params.ubatch.embd)  || (embd   && embd->ne[0]   == params.ubatch.n_tokens);

    return res;
}

void llm_graph_input_pos::set_input(const llama_ubatch * ubatch) {
    if (ubatch->pos && pos) {
        const int64_t n_tokens = ubatch->n_tokens;

        if (ubatch->token && n_pos_per_embd == 4) {
            // in case we're using M-RoPE with text tokens, convert the 1D positions to 4D
            // the 3 first dims are the same, and 4th dim is all 0
            std::vector<llama_pos> pos_data(n_tokens*n_pos_per_embd);
            // copy the first dimension
            for (int i = 0; i < n_tokens; ++i) {
                pos_data[               i] = ubatch->pos[i];
                pos_data[    n_tokens + i] = ubatch->pos[i];
                pos_data[2 * n_tokens + i] = ubatch->pos[i];
                pos_data[3 * n_tokens + i] = 0; // 4th dim is 0
            }
            ggml_backend_tensor_set(pos, pos_data.data(), 0, pos_data.size()*ggml_element_size(pos));
        } else {
            ggml_backend_tensor_set(pos, ubatch->pos, 0, n_tokens*n_pos_per_embd*ggml_element_size(pos));
        }
    }
}

bool llm_graph_input_pos::can_reuse(const llm_graph_params & params) {
    bool res = true;

    res &= pos->ne[0] == params.ubatch.n_tokens;

    return res;
}

void llm_graph_input_attn_temp::set_input(const llama_ubatch * ubatch) {
|
|
if (ubatch->pos && attn_scale) {
|
|
const int64_t n_tokens = ubatch->n_tokens;
|
|
|
|
std::vector<float> attn_scale_data(n_tokens, 0.0f);
|
|
for (int i = 0; i < n_tokens; ++i) {
|
|
const float pos = ubatch->pos[i];
|
|
attn_scale_data[i] = std::log(
|
|
std::floor((pos + 1.0f) / n_attn_temp_floor_scale) + 1.0
|
|
) * f_attn_temp_scale + 1.0;
|
|
}
|
|
|
|
ggml_backend_tensor_set(attn_scale, attn_scale_data.data(), 0, n_tokens*ggml_element_size(attn_scale));
|
|
}
|
|
}
|
|
|
|
void llm_graph_input_pos_bucket::set_input(const llama_ubatch * ubatch) {
|
|
if (pos_bucket) {
|
|
const int64_t n_tokens = ubatch->n_tokens;
|
|
|
|
GGML_ASSERT(ggml_backend_buffer_is_host(pos_bucket->buffer));
|
|
GGML_ASSERT(!ubatch->equal_seqs()); // TODO: use ubatch->n_seqs instead of failing
|
|
|
|
int32_t * data = (int32_t *) pos_bucket->data;
|
|
|
|
for (int h = 0; h < 1; ++h) {
|
|
for (int j = 0; j < n_tokens; ++j) {
|
|
for (int i = 0; i < n_tokens; ++i) {
|
|
data[h*(n_tokens*n_tokens) + j*n_tokens + i] = llama_relative_position_bucket(ubatch->pos[i], ubatch->pos[j], hparams.n_rel_attn_bkts, true);
|
|
}
|
|
}
|
|
}
|
|
}
|
|
}
|
|
|
|
void llm_graph_input_pos_bucket_kv::set_input(const llama_ubatch * ubatch) {
|
|
if (pos_bucket) {
|
|
mctx->set_input_pos_bucket(pos_bucket, ubatch);
|
|
}
|
|
}
|
|
|
|
void llm_graph_input_out_ids::set_input(const llama_ubatch * ubatch) {
|
|
GGML_ASSERT(out_ids);
|
|
|
|
const int64_t n_tokens = ubatch->n_tokens;
|
|
|
|
GGML_ASSERT(ggml_backend_buffer_is_host(out_ids->buffer));
|
|
int32_t * data = (int32_t *) out_ids->data;
|
|
|
|
if (n_outputs == n_tokens) {
|
|
for (int i = 0; i < n_tokens; ++i) {
|
|
data[i] = i;
|
|
}
|
|
|
|
return;
|
|
}
|
|
|
|
GGML_ASSERT(ubatch->output);
|
|
|
|
int n_outputs = 0;
|
|
|
|
for (int i = 0; i < n_tokens; ++i) {
|
|
if (ubatch->output[i]) {
|
|
data[n_outputs++] = i;
|
|
}
|
|
}
|
|
}
|
|
|
|
bool llm_graph_input_out_ids::can_reuse(const llm_graph_params & params) {
|
|
bool res = true;
|
|
|
|
res &= n_outputs == params.n_outputs;
|
|
|
|
return res;
|
|
}
|
|
|
|
void llm_graph_input_mean::set_input(const llama_ubatch * ubatch) {
|
|
if (cparams.embeddings && cparams.pooling_type == LLAMA_POOLING_TYPE_MEAN) {
|
|
const int64_t n_tokens = ubatch->n_tokens;
|
|
const int64_t n_seq_tokens = ubatch->n_seq_tokens;
|
|
const int64_t n_seqs_unq = ubatch->n_seqs_unq;
|
|
|
|
GGML_ASSERT(mean);
|
|
GGML_ASSERT(ggml_backend_buffer_is_host(mean->buffer));
|
|
|
|
float * data = (float *) mean->data;
|
|
memset(mean->data, 0, n_tokens*n_seqs_unq*ggml_element_size(mean));
|
|
|
|
std::vector<uint64_t> sums(n_seqs_unq, 0);
|
|
for (int i = 0; i < n_tokens; i += n_seq_tokens) {
|
|
for (int s = 0; s < ubatch->n_seq_id[i]; ++s) {
|
|
const llama_seq_id seq_id = ubatch->seq_id[i][s];
|
|
const int32_t seq_idx = ubatch->seq_idx[seq_id];
|
|
|
|
sums[seq_idx] += ubatch->n_seq_tokens;
|
|
}
|
|
}
|
|
|
|
std::vector<float> div(n_seqs_unq, 0.0f);
|
|
for (int s = 0; s < n_seqs_unq; ++s) {
|
|
const uint64_t sum = sums[s];
|
|
if (sum > 0) {
|
|
div[s] = 1.0f/float(sum);
|
|
}
|
|
}
|
|
|
|
for (int i = 0; i < n_tokens; i += n_seq_tokens) {
|
|
for (int s = 0; s < ubatch->n_seq_id[i]; ++s) {
|
|
const llama_seq_id seq_id = ubatch->seq_id[i][s];
|
|
const int32_t seq_idx = ubatch->seq_idx[seq_id];
|
|
|
|
for (int j = 0; j < n_seq_tokens; ++j) {
|
|
data[seq_idx*n_tokens + i + j] = div[seq_idx];
|
|
}
|
|
}
|
|
}
|
|
}
|
|
}
|
|
|
|
void llm_graph_input_cls::set_input(const llama_ubatch * ubatch) {
|
|
const int64_t n_tokens = ubatch->n_tokens;
|
|
const int64_t n_seqs_unq = ubatch->n_seqs_unq;
|
|
|
|
if (cparams.embeddings && (
|
|
cparams.pooling_type == LLAMA_POOLING_TYPE_CLS ||
|
|
cparams.pooling_type == LLAMA_POOLING_TYPE_RANK ||
|
|
cparams.pooling_type == LLAMA_POOLING_TYPE_LAST
|
|
)) {
|
|
GGML_ASSERT(cls);
|
|
GGML_ASSERT(ggml_backend_buffer_is_host(cls->buffer));
|
|
|
|
uint32_t * data = (uint32_t *) cls->data;
|
|
memset(cls->data, 0, n_seqs_unq*ggml_element_size(cls));
|
|
|
|
std::vector<int> target_pos(n_seqs_unq, -1);
|
|
std::vector<int> target_row(n_seqs_unq, -1);
|
|
|
|
bool last = cparams.pooling_type == LLAMA_POOLING_TYPE_LAST;
|
|
|
|
for (int i = 0; i < n_tokens; ++i) {
|
|
const llama_pos pos = ubatch->pos[i];
|
|
|
|
for (int s = 0; s < ubatch->n_seq_id[i]; ++s) {
|
|
const llama_seq_id seq_id = ubatch->seq_id[i][s];
|
|
const int32_t seq_idx = ubatch->seq_idx[seq_id];
|
|
|
|
if (
|
|
(target_pos[seq_idx] == -1) ||
|
|
( last && pos >= target_pos[seq_idx]) ||
|
|
(!last && pos < target_pos[seq_idx])
|
|
) {
|
|
target_pos[seq_idx] = pos;
|
|
target_row[seq_idx] = i;
|
|
}
|
|
}
|
|
}
|
|
|
|
for (int s = 0; s < n_seqs_unq; ++s) {
|
|
if (target_row[s] >= 0) {
|
|
data[s] = target_row[s];
|
|
}
|
|
}
|
|
}
|
|
}
|
|
|
|
void llm_graph_input_rs::set_input(const llama_ubatch * ubatch) {
|
|
GGML_UNUSED(ubatch);
|
|
|
|
const int64_t n_rs = mctx->get_n_rs();
|
|
|
|
if (s_copy) {
|
|
GGML_ASSERT(ggml_backend_buffer_is_host(s_copy->buffer));
|
|
int32_t * data = (int32_t *) s_copy->data;
|
|
|
|
// assuming copy destinations ALWAYS happen ONLY on the cells between head and head+n
|
|
for (uint32_t i = 0; i < n_rs; ++i) {
|
|
data[i] = mctx->s_copy(i);
|
|
}
|
|
}
|
|
}
|
|
|
|
void llm_graph_input_cross_embd::set_input(const llama_ubatch * ubatch) {
|
|
GGML_UNUSED(ubatch);
|
|
|
|
if (cross_embd && !cross->v_embd.empty()) {
|
|
assert(cross_embd->type == GGML_TYPE_F32);
|
|
|
|
ggml_backend_tensor_set(cross_embd, cross->v_embd.data(), 0, ggml_nbytes(cross_embd));
|
|
}
|
|
}
|
|
|
|
void llm_graph_input_attn_no_cache::set_input(const llama_ubatch * ubatch) {
|
|
const int64_t n_kv = ubatch->n_tokens;
|
|
const int64_t n_tokens = ubatch->n_tokens;
|
|
|
|
GGML_ASSERT(kq_mask);
|
|
GGML_ASSERT(ggml_backend_buffer_is_host(kq_mask->buffer));
|
|
|
|
float * data = (float *) kq_mask->data;
|
|
|
|
for (int h = 0; h < 1; ++h) {
|
|
for (int i1 = 0; i1 < n_tokens; ++i1) {
|
|
const llama_seq_id s1 = ubatch->seq_id[i1][0];
|
|
|
|
for (int i0 = 0; i0 < n_tokens; ++i0) {
|
|
float f = -INFINITY;
|
|
|
|
for (int s = 0; s < ubatch->n_seq_id[i0]; ++s) {
|
|
const llama_seq_id s0 = ubatch->seq_id[i0][0];
|
|
|
|
// TODO: reimplement this like in llama_kv_cache_unified
|
|
if (s0 == s1 && (!cparams.causal_attn || ubatch->pos[i0] <= ubatch->pos[i1])) {
|
|
if (hparams.use_alibi) {
|
|
f = -std::abs(ubatch->pos[i0] - ubatch->pos[i1]);
|
|
} else {
|
|
f = 0.0f;
|
|
}
|
|
break;
|
|
}
|
|
}
|
|
|
|
data[h*(n_kv*n_tokens) + i1*n_kv + i0] = f;
|
|
}
|
|
}
|
|
}
|
|
}
|
|
|
|
void llm_graph_input_attn_kv_unified::set_input(const llama_ubatch * ubatch) {
|
|
mctx->set_input_k_idxs(self_k_idxs, ubatch);
|
|
mctx->set_input_v_idxs(self_v_idxs, ubatch);
|
|
|
|
mctx->set_input_kq_mask(self_kq_mask, ubatch, cparams.causal_attn);
|
|
}
|
|
|
|
bool llm_graph_input_attn_kv_unified::can_reuse(const llm_graph_params & params) {
|
|
const auto * mctx = static_cast<const llama_kv_cache_unified_context *>(params.mctx);
|
|
|
|
this->mctx = mctx;
|
|
|
|
bool res = true;
|
|
|
|
res &= self_k_idxs->ne[0] == params.ubatch.n_tokens;
|
|
//res &= self_v_idxs->ne[0] == params.ubatch.n_tokens; // TODO: need to move this to the unified cache and check there
|
|
|
|
res &= self_kq_mask->ne[0] == mctx->get_n_kv();
|
|
res &= self_kq_mask->ne[1] == GGML_PAD(params.ubatch.n_tokens, GGML_KQ_MASK_PAD);
|
|
|
|
res &= mctx->get_supports_set_rows(); // TODO: tmp
|
|
|
|
return res;
|
|
}
|
|
|
|
void llm_graph_input_attn_kv_unified_iswa::set_input(const llama_ubatch * ubatch) {
|
|
mctx->get_base()->set_input_k_idxs(self_k_idxs, ubatch);
|
|
mctx->get_base()->set_input_v_idxs(self_v_idxs, ubatch);
|
|
|
|
mctx->get_base()->set_input_kq_mask(self_kq_mask, ubatch, cparams.causal_attn);
|
|
|
|
mctx->get_swa()->set_input_k_idxs(self_k_idxs_swa, ubatch);
|
|
mctx->get_swa()->set_input_v_idxs(self_v_idxs_swa, ubatch);
|
|
|
|
mctx->get_swa()->set_input_kq_mask(self_kq_mask_swa, ubatch, cparams.causal_attn);
|
|
}
|
|
|
|
bool llm_graph_input_attn_kv_unified_iswa::can_reuse(const llm_graph_params & params) {
|
|
const auto * mctx = static_cast<const llama_kv_cache_unified_iswa_context *>(params.mctx);
|
|
|
|
this->mctx = mctx;
|
|
|
|
bool res = true;
|
|
|
|
res &= self_k_idxs->ne[0] == params.ubatch.n_tokens;
|
|
//res &= self_v_idxs->ne[0] == params.ubatch.n_tokens; // TODO: need to move this to the unified cache and check there
|
|
|
|
res &= self_k_idxs_swa->ne[0] == params.ubatch.n_tokens;
|
|
//res &= self_v_idxs_swa->ne[0] == params.ubatch.n_tokens; // TODO: need to move this to the unified cache and check there
|
|
|
|
res &= self_kq_mask->ne[0] == mctx->get_base()->get_n_kv();
|
|
res &= self_kq_mask->ne[1] == GGML_PAD(params.ubatch.n_tokens, GGML_KQ_MASK_PAD);
|
|
|
|
res &= self_kq_mask_swa->ne[0] == mctx->get_swa()->get_n_kv();
|
|
res &= self_kq_mask_swa->ne[1] == GGML_PAD(params.ubatch.n_tokens, GGML_KQ_MASK_PAD);
|
|
|
|
res &= mctx->get_base()->get_supports_set_rows(); // TODO: tmp
|
|
|
|
return res;
|
|
}
|
|
|
|
void llm_graph_input_attn_cross::set_input(const llama_ubatch * ubatch) {
|
|
GGML_ASSERT(cross_kq_mask);
|
|
|
|
const int64_t n_enc = cross_kq_mask->ne[0];
|
|
const int64_t n_tokens = ubatch->n_tokens;
|
|
|
|
GGML_ASSERT(ggml_backend_buffer_is_host(cross_kq_mask->buffer));
|
|
GGML_ASSERT(!ubatch->equal_seqs()); // TODO: use ubatch->n_seqs instead of failing
|
|
|
|
float * data = (float *) cross_kq_mask->data;
|
|
|
|
for (int h = 0; h < 1; ++h) {
|
|
for (int i = 0; i < n_tokens; ++i) {
|
|
for (int j = 0; j < n_enc; ++j) {
|
|
float f = -INFINITY;
|
|
|
|
for (int s = 0; s < ubatch->n_seq_id[i]; ++s) {
|
|
const llama_seq_id seq_id = ubatch->seq_id[i][s];
|
|
|
|
if (cross->seq_ids_enc[j].find(seq_id) != cross->seq_ids_enc[j].end()) {
|
|
f = 0.0f;
|
|
}
|
|
}
|
|
|
|
data[h*(n_enc*n_tokens) + i*n_enc + j] = f;
|
|
}
|
|
}
|
|
|
|
for (int i = n_tokens; i < GGML_PAD(n_tokens, GGML_KQ_MASK_PAD); ++i) {
|
|
for (int j = 0; j < n_enc; ++j) {
|
|
data[h*(n_enc*n_tokens) + i*n_enc + j] = -INFINITY;
|
|
}
|
|
}
|
|
}
|
|
}
|
|
|
|
void llm_graph_input_mem_hybrid::set_input(const llama_ubatch * ubatch) {
|
|
inp_attn->set_input(ubatch);
|
|
inp_rs->set_input(ubatch);
|
|
}
|
|
|
|
//
|
|
// llm_graph_result
|
|
//
|
|
|
|
llm_graph_result::llm_graph_result(int64_t max_nodes) : max_nodes(max_nodes) {
|
|
reset();
|
|
|
|
const char * LLAMA_GRAPH_RESULT_DEBUG = getenv("LLAMA_GRAPH_RESULT_DEBUG");
|
|
debug = LLAMA_GRAPH_RESULT_DEBUG ? atoi(LLAMA_GRAPH_RESULT_DEBUG) : 0;
|
|
}
|
|
|
|
int64_t llm_graph_result::get_max_nodes() const {
|
|
return max_nodes;
|
|
}
|
|
|
|
void llm_graph_result::reset() {
|
|
t_tokens = nullptr;
|
|
t_logits = nullptr;
|
|
t_embd = nullptr;
|
|
t_embd_pooled = nullptr;
|
|
|
|
params = {};
|
|
|
|
inputs.clear();
|
|
|
|
buf_compute_meta.resize(ggml_tensor_overhead()*max_nodes + ggml_graph_overhead_custom(max_nodes, false));
|
|
|
|
ggml_init_params params = {
|
|
/*.mem_size =*/ buf_compute_meta.size(),
|
|
/*.mem_buffer =*/ buf_compute_meta.data(),
|
|
/*.no_alloc =*/ true,
|
|
};
|
|
|
|
ctx_compute.reset(ggml_init(params));
|
|
|
|
gf = ggml_new_graph_custom(ctx_compute.get(), max_nodes, false);
|
|
}
|
|
|
|
void llm_graph_result::set_inputs(const llama_ubatch * ubatch) {
|
|
for (auto & input : inputs) {
|
|
input->set_input(ubatch);
|
|
}
|
|
}
|
|
|
|
bool llm_graph_result::can_reuse(const llm_graph_params & params) {
|
|
if (!this->params.allow_reuse(params)) {
|
|
if (debug > 1) {
|
|
LLAMA_LOG_DEBUG("%s: cannot reuse graph due to incompatible graph parameters\n", __func__);
|
|
}
|
|
|
|
return false;
|
|
}
|
|
|
|
if (debug > 1) {
|
|
LLAMA_LOG_DEBUG("%s: checking compatibility of %d inputs:\n", __func__, (int) inputs.size());
|
|
}
|
|
|
|
bool res = true;
|
|
|
|
for (auto & input : inputs) {
|
|
const bool cur = input->can_reuse(params);
|
|
|
|
if (debug > 1) {
|
|
LLAMA_LOG_DEBUG("%s: can_reuse = %d\n", "placeholder", cur);
|
|
}
|
|
|
|
res = res && cur;
|
|
}
|
|
|
|
if (debug > 0) {
|
|
LLAMA_LOG_DEBUG("%s: can reuse graph = %d\n", __func__, res);
|
|
}
|
|
|
|
return res;
|
|
}
|
|
|
|
llm_graph_input_i * llm_graph_result::add_input(llm_graph_input_ptr input) {
|
|
inputs.emplace_back(std::move(input));
|
|
return inputs.back().get();
|
|
}
|
|
|
|
void llm_graph_result::set_params(const llm_graph_params & params) {
|
|
this->params = params;
|
|
}
|
|
|
|
//
|
|
// llm_graph_context
|
|
//
|
|
|
|
llm_graph_context::llm_graph_context(const llm_graph_params & params) :
|
|
arch (params.arch),
|
|
hparams (params.hparams),
|
|
cparams (params.cparams),
|
|
ubatch (params.ubatch),
|
|
n_embd (hparams.n_embd),
|
|
n_layer (hparams.n_layer),
|
|
n_rot (hparams.n_rot),
|
|
n_ctx (cparams.n_ctx),
|
|
n_head (hparams.n_head()),
|
|
n_head_kv (hparams.n_head_kv()),
|
|
n_embd_head_k (hparams.n_embd_head_k),
|
|
n_embd_k_gqa (hparams.n_embd_k_gqa()),
|
|
n_embd_head_v (hparams.n_embd_head_v),
|
|
n_embd_v_gqa (hparams.n_embd_v_gqa()),
|
|
n_expert (hparams.n_expert),
|
|
n_expert_used (cparams.warmup ? hparams.n_expert : hparams.n_expert_used),
|
|
freq_base (cparams.rope_freq_base),
|
|
freq_scale (cparams.rope_freq_scale),
|
|
ext_factor (cparams.yarn_ext_factor),
|
|
attn_factor (cparams.yarn_attn_factor),
|
|
beta_fast (cparams.yarn_beta_fast),
|
|
beta_slow (cparams.yarn_beta_slow),
|
|
norm_eps (hparams.f_norm_eps),
|
|
norm_rms_eps (hparams.f_norm_rms_eps),
|
|
n_tokens (ubatch.n_tokens),
|
|
n_outputs (params.n_outputs),
|
|
n_ctx_orig (cparams.n_ctx_orig_yarn),
|
|
pooling_type (cparams.pooling_type),
|
|
rope_type (hparams.rope_type),
|
|
sched (params.sched),
|
|
backend_cpu (params.backend_cpu),
|
|
cvec (params.cvec),
|
|
loras (params.loras),
|
|
mctx (params.mctx),
|
|
cross (params.cross),
|
|
cb_func (params.cb),
|
|
res (params.res),
|
|
ctx0 (res->get_ctx()),
|
|
gf (res->get_gf()) {
|
|
res->set_params(params);
|
|
}
|
|
|
|
void llm_graph_context::cb(ggml_tensor * cur, const char * name, int il) const {
|
|
if (cb_func) {
|
|
cb_func(ubatch, cur, name, il);
|
|
}
|
|
}
|
|
|
|
ggml_tensor * llm_graph_context::build_cvec(
|
|
ggml_tensor * cur,
|
|
int il) const {
|
|
return cvec->apply_to(ctx0, cur, il);
|
|
}
|
|
|
|
ggml_tensor * llm_graph_context::build_lora_mm(
|
|
ggml_tensor * w,
|
|
ggml_tensor * cur) const {
|
|
ggml_tensor * res = ggml_mul_mat(ctx0, w, cur);
|
|
|
|
for (const auto & lora : *loras) {
|
|
llama_adapter_lora_weight * lw = lora.first->get_weight(w);
|
|
if (lw == nullptr) {
|
|
continue;
|
|
}
|
|
|
|
const float adapter_scale = lora.second;
|
|
const float scale = lw->get_scale(lora.first->alpha, adapter_scale);
|
|
|
|
ggml_tensor * ab_cur = ggml_mul_mat(
|
|
ctx0, lw->b,
|
|
ggml_mul_mat(ctx0, lw->a, cur)
|
|
);
|
|
|
|
ab_cur = ggml_scale(ctx0, ab_cur, scale);
|
|
res = ggml_add(ctx0, res, ab_cur);
|
|
}
|
|
|
|
return res;
|
|
}
|
|
|
|
ggml_tensor * llm_graph_context::build_lora_mm_id(
|
|
ggml_tensor * w, // ggml_tensor * as
|
|
ggml_tensor * cur, // ggml_tensor * b
|
|
ggml_tensor * ids) const {
|
|
ggml_tensor * res = ggml_mul_mat_id(ctx0, w, cur, ids);
|
|
for (const auto & lora : *loras) {
|
|
llama_adapter_lora_weight * lw = lora.first->get_weight(w);
|
|
if (lw == nullptr) {
|
|
continue;
|
|
}
|
|
|
|
const float alpha = lora.first->alpha;
|
|
const float rank = (float) lw->b->ne[0];
|
|
const float scale = alpha ? lora.second * alpha / rank : lora.second;
|
|
|
|
ggml_tensor * ab_cur = ggml_mul_mat_id(
|
|
ctx0, lw->b,
|
|
ggml_mul_mat_id(ctx0, lw->a, cur, ids),
|
|
ids
|
|
);
|
|
|
|
ab_cur = ggml_scale(ctx0, ab_cur, scale);
|
|
res = ggml_add(ctx0, res, ab_cur);
|
|
}
|
|
|
|
return res;
|
|
}
|
|
|
|
ggml_tensor * llm_graph_context::build_norm(
|
|
ggml_tensor * cur,
|
|
ggml_tensor * mw,
|
|
ggml_tensor * mb,
|
|
llm_norm_type type,
|
|
int il) const {
|
|
switch (type) {
|
|
case LLM_NORM: cur = ggml_norm (ctx0, cur, hparams.f_norm_eps); break;
|
|
case LLM_NORM_RMS: cur = ggml_rms_norm(ctx0, cur, hparams.f_norm_rms_eps); break;
|
|
case LLM_NORM_GROUP:
|
|
{
|
|
cur = ggml_reshape_3d(ctx0, cur, cur->ne[0], 1, cur->ne[1]);
|
|
cur = ggml_group_norm(ctx0, cur, hparams.n_norm_groups, hparams.f_norm_group_eps);
|
|
cur = ggml_reshape_2d(ctx0, cur, cur->ne[0], cur->ne[2]);
|
|
} break;
|
|
}
|
|
|
|
if (mw || mb) {
|
|
cb(cur, "norm", il);
|
|
}
|
|
|
|
if (mw) {
|
|
cur = ggml_mul(ctx0, cur, mw);
|
|
if (mb) {
|
|
cb(cur, "norm_w", il);
|
|
}
|
|
}
|
|
|
|
if (mb) {
|
|
cur = ggml_add(ctx0, cur, mb);
|
|
}
|
|
|
|
return cur;
|
|
}
|
|
|
|
ggml_tensor * llm_graph_context::build_ffn(
|
|
ggml_tensor * cur,
|
|
ggml_tensor * up,
|
|
ggml_tensor * up_b,
|
|
ggml_tensor * up_s,
|
|
ggml_tensor * gate,
|
|
ggml_tensor * gate_b,
|
|
ggml_tensor * gate_s,
|
|
ggml_tensor * down,
|
|
ggml_tensor * down_b,
|
|
ggml_tensor * down_s,
|
|
ggml_tensor * act_scales,
|
|
llm_ffn_op_type type_op,
|
|
llm_ffn_gate_type type_gate,
|
|
int il) const {
|
|
ggml_tensor * tmp = up ? build_lora_mm(up, cur) : cur;
|
|
cb(tmp, "ffn_up", il);
|
|
|
|
if (up_b) {
|
|
tmp = ggml_add(ctx0, tmp, up_b);
|
|
cb(tmp, "ffn_up_b", il);
|
|
}
|
|
|
|
if (up_s) {
|
|
tmp = ggml_mul(ctx0, tmp, up_s);
|
|
cb(tmp, "ffn_up_s", il);
|
|
}
|
|
|
|
if (gate) {
|
|
switch (type_gate) {
|
|
case LLM_FFN_SEQ:
|
|
{
|
|
cur = build_lora_mm(gate, tmp);
|
|
cb(cur, "ffn_gate", il);
|
|
} break;
|
|
case LLM_FFN_PAR:
|
|
{
|
|
cur = build_lora_mm(gate, cur);
|
|
cb(cur, "ffn_gate", il);
|
|
} break;
|
|
}
|
|
|
|
if (gate_b) {
|
|
cur = ggml_add(ctx0, cur, gate_b);
|
|
cb(cur, "ffn_gate_b", il);
|
|
}
|
|
|
|
if (gate_s) {
|
|
cur = ggml_mul(ctx0, cur, gate_s);
|
|
cb(cur, "ffn_gate_s", il);
|
|
}
|
|
|
|
} else {
|
|
cur = tmp;
|
|
}
|
|
|
|
switch (type_op) {
|
|
case LLM_FFN_SILU:
|
|
if (gate && type_gate == LLM_FFN_PAR) {
|
|
cur = ggml_swiglu_split(ctx0, cur, tmp);
|
|
cb(cur, "ffn_swiglu", il);
|
|
type_gate = LLM_FFN_SEQ;
|
|
} else {
|
|
cur = ggml_silu(ctx0, cur);
|
|
cb(cur, "ffn_silu", il);
|
|
} break;
|
|
case LLM_FFN_GELU:
|
|
if (gate && type_gate == LLM_FFN_PAR) {
|
|
cur = ggml_geglu_split(ctx0, cur, tmp);
|
|
cb(cur, "ffn_geglu", il);
|
|
type_gate = LLM_FFN_SEQ;
|
|
} else {
|
|
cur = ggml_gelu(ctx0, cur);
|
|
cb(cur, "ffn_gelu", il);
|
|
if (act_scales != NULL) {
|
|
cur = ggml_div(ctx0, cur, act_scales);
|
|
cb(cur, "ffn_act", il);
|
|
}
|
|
} break;
|
|
case LLM_FFN_RELU:
|
|
if (gate && type_gate == LLM_FFN_PAR) {
|
|
cur = ggml_reglu_split(ctx0, cur, tmp);
|
|
cb(cur, "ffn_reglu", il);
|
|
type_gate = LLM_FFN_SEQ;
|
|
} else {
|
|
cur = ggml_relu(ctx0, cur);
|
|
cb(cur, "ffn_relu", il);
|
|
} break;
|
|
case LLM_FFN_RELU_SQR:
|
|
{
|
|
cur = ggml_relu(ctx0, cur);
|
|
cb(cur, "ffn_relu", il);
|
|
|
|
cur = ggml_sqr(ctx0, cur);
|
|
cb(cur, "ffn_sqr(relu)", il);
|
|
} break;
|
|
case LLM_FFN_SWIGLU:
|
|
{
|
|
cur = ggml_swiglu(ctx0, cur);
|
|
cb(cur, "ffn_swiglu", il);
|
|
} break;
|
|
case LLM_FFN_GEGLU:
|
|
{
|
|
cur = ggml_geglu(ctx0, cur);
|
|
cb(cur, "ffn_geglu", il);
|
|
} break;
|
|
case LLM_FFN_REGLU:
|
|
{
|
|
cur = ggml_reglu(ctx0, cur);
|
|
cb(cur, "ffn_reglu", il);
|
|
} break;
|
|
default:
|
|
GGML_ABORT("fatal error");
|
|
}
|
|
|
|
if (gate && type_gate == LLM_FFN_PAR) {
|
|
cur = ggml_mul(ctx0, cur, tmp);
|
|
cb(cur, "ffn_gate_par", il);
|
|
}
|
|
|
|
if (down) {
|
|
cur = build_lora_mm(down, cur);
|
|
if (arch == LLM_ARCH_GLM4 || arch == LLM_ARCH_GLM4_MOE) {
|
|
// GLM4 and GLM4_MOE seem to have numerical issues with half-precision accumulators
|
|
ggml_mul_mat_set_prec(cur, GGML_PREC_F32);
|
|
}
|
|
}
|
|
|
|
if (down_b) {
|
|
cb(cur, "ffn_down", il);
|
|
}
|
|
|
|
if (down_b) {
|
|
cur = ggml_add(ctx0, cur, down_b);
|
|
}
|
|
|
|
if (down_s) {
|
|
cur = ggml_mul(ctx0, cur, down_s);
|
|
cb(cur, "ffn_down_s", il);
|
|
}
|
|
|
|
return cur;
|
|
}
|
|
|
|
ggml_tensor * llm_graph_context::build_moe_ffn(
|
|
ggml_tensor * cur,
|
|
ggml_tensor * gate_inp,
|
|
ggml_tensor * up_exps,
|
|
ggml_tensor * gate_exps,
|
|
ggml_tensor * down_exps,
|
|
ggml_tensor * exp_probs_b,
|
|
int64_t n_expert,
|
|
int64_t n_expert_used,
|
|
llm_ffn_op_type type_op,
|
|
bool norm_w,
|
|
bool scale_w,
|
|
float w_scale,
|
|
llama_expert_gating_func_type gating_op,
|
|
int il,
|
|
ggml_tensor * probs_in) const {
|
|
return build_moe_ffn(
|
|
cur,
|
|
gate_inp, /* gate_inp_b */ nullptr,
|
|
up_exps, /* up_exps_b */ nullptr,
|
|
gate_exps, /* gate_exps_b */ nullptr,
|
|
down_exps, /* down_exps_b */ nullptr,
|
|
exp_probs_b,
|
|
n_expert,
|
|
n_expert_used,
|
|
type_op,
|
|
norm_w,
|
|
scale_w,
|
|
w_scale,
|
|
gating_op,
|
|
il,
|
|
probs_in
|
|
);
|
|
}
|
|
|
|
ggml_tensor * llm_graph_context::build_moe_ffn(
|
|
ggml_tensor * cur,
|
|
ggml_tensor * gate_inp,
|
|
ggml_tensor * gate_inp_b,
|
|
ggml_tensor * up_exps,
|
|
ggml_tensor * up_exps_b,
|
|
ggml_tensor * gate_exps,
|
|
ggml_tensor * gate_exps_b,
|
|
ggml_tensor * down_exps,
|
|
ggml_tensor * down_exps_b,
|
|
ggml_tensor * exp_probs_b,
|
|
int64_t n_expert,
|
|
int64_t n_expert_used,
|
|
llm_ffn_op_type type_op,
|
|
bool norm_w,
|
|
bool scale_w,
|
|
float w_scale,
|
|
llama_expert_gating_func_type gating_op,
|
|
int il,
|
|
ggml_tensor * probs_in) const {
|
|
const int64_t n_embd = cur->ne[0];
|
|
const int64_t n_tokens = cur->ne[1];
|
|
const bool weight_before_ffn = arch == LLM_ARCH_LLAMA4; // for llama4, we apply the sigmoid-ed weights before the FFN
|
|
|
|
ggml_tensor * logits = nullptr;
|
|
|
|
if (probs_in == nullptr) {
|
|
logits = build_lora_mm(gate_inp, cur); // [n_expert, n_tokens]
|
|
cb(logits, "ffn_moe_logits", il);
|
|
} else {
|
|
logits = probs_in;
|
|
}
|
|
|
|
if (gate_inp_b) {
|
|
logits = ggml_add(ctx0, logits, gate_inp_b);
|
|
cb(logits, "ffn_moe_logits_biased", il);
|
|
}
|
|
|
|
ggml_tensor * probs = nullptr;
|
|
switch (gating_op) {
|
|
case LLAMA_EXPERT_GATING_FUNC_TYPE_SOFTMAX:
|
|
{
|
|
probs = ggml_soft_max(ctx0, logits); // [n_expert, n_tokens]
|
|
} break;
|
|
case LLAMA_EXPERT_GATING_FUNC_TYPE_SIGMOID:
|
|
{
|
|
probs = ggml_sigmoid(ctx0, logits); // [n_expert, n_tokens]
|
|
} break;
|
|
case LLAMA_EXPERT_GATING_FUNC_TYPE_SOFTMAX_WEIGHT:
|
|
{
|
|
probs = logits; // [n_expert, n_tokens]
|
|
} break;
|
|
default:
|
|
GGML_ABORT("fatal error");
|
|
}
|
|
cb(probs, "ffn_moe_probs", il);
|
|
|
|
// add experts selection bias - introduced in DeepSeek V3
|
|
// leave probs unbiased as it's later used to get expert weights
|
|
ggml_tensor * selection_probs = probs;
|
|
if (exp_probs_b != nullptr) {
|
|
selection_probs = ggml_add(ctx0, probs, exp_probs_b);
|
|
cb(selection_probs, "ffn_moe_probs_biased", il);
|
|
}
|
|
|
|
// llama4 doesn't have exp_probs_b, and sigmoid is only used after top_k
|
|
// see: https://github.com/meta-llama/llama-models/blob/699a02993512fb36936b1b0741e13c06790bcf98/models/llama4/moe.py#L183-L198
|
|
if (arch == LLM_ARCH_LLAMA4) {
|
|
selection_probs = logits;
|
|
}
|
|
|
|
// select experts
|
|
ggml_tensor * selected_experts = ggml_top_k(ctx0, selection_probs, n_expert_used); // [n_expert_used, n_tokens]
|
|
cb(selected_experts->src[0], "ffn_moe_argsort", il);
|
|
cb(selected_experts, "ffn_moe_topk", il);
|
|
|
|
ggml_tensor * weights = ggml_get_rows(ctx0,
|
|
ggml_reshape_3d(ctx0, probs, 1, n_expert, n_tokens), selected_experts); // [1, n_expert_used, n_tokens]
|
|
cb(weights, "ffn_moe_weights", il);
|
|
|
|
if (gating_op == LLAMA_EXPERT_GATING_FUNC_TYPE_SOFTMAX_WEIGHT) {
|
|
weights = ggml_reshape_2d(ctx0, weights, n_expert_used, n_tokens);
|
|
weights = ggml_soft_max(ctx0, weights); // [n_expert_used, n_tokens]
|
|
weights = ggml_reshape_3d(ctx0, weights, 1, n_expert_used, n_tokens);
|
|
cb(weights, "ffn_moe_weights_softmax", il);
|
|
}
|
|
|
|
if (norm_w) {
|
|
weights = ggml_reshape_2d(ctx0, weights, n_expert_used, n_tokens);
|
|
|
|
ggml_tensor * weights_sum = ggml_sum_rows(ctx0, weights); // [1, n_tokens]
|
|
cb(weights_sum, "ffn_moe_weights_sum", il);
|
|
|
|
weights = ggml_div(ctx0, weights, weights_sum); // [n_expert_used, n_tokens]
|
|
cb(weights, "ffn_moe_weights_norm", il);
|
|
|
|
weights = ggml_reshape_3d(ctx0, weights, 1, n_expert_used, n_tokens);
|
|
}
|
|
if (scale_w) {
|
|
weights = ggml_scale(ctx0, weights, w_scale);
|
|
cb(weights, "ffn_moe_weights_scaled", il);
|
|
}
|
|
|
|
cur = ggml_reshape_3d(ctx0, cur, n_embd, 1, n_tokens);
|
|
|
|
if (weight_before_ffn) {
|
|
// repeat cur to [n_embd, n_expert_used, n_tokens]
|
|
ggml_tensor * repeated = ggml_repeat_4d(ctx0, cur, n_embd, n_expert_used, n_tokens, 1);
|
|
cur = ggml_mul(ctx0, repeated, weights);
|
|
cb(cur, "ffn_moe_weighted", il);
|
|
}
|
|
|
|
ggml_tensor * up = build_lora_mm_id(up_exps, cur, selected_experts); // [n_ff, n_expert_used, n_tokens]
|
|
cb(up, "ffn_moe_up", il);
|
|
|
|
if (up_exps_b) {
|
|
up = ggml_add_id(ctx0, up, up_exps_b, selected_experts);
|
|
cb(up, "ffn_moe_up_biased", il);
|
|
}
|
|
|
|
ggml_tensor * experts = nullptr;
|
|
if (gate_exps) {
|
|
cur = build_lora_mm_id(gate_exps, cur, selected_experts); // [n_ff, n_expert_used, n_tokens]
|
|
cb(cur, "ffn_moe_gate", il);
|
|
} else {
|
|
cur = up;
|
|
}
|
|
|
|
if (gate_exps_b) {
|
|
cur = ggml_add_id(ctx0, cur, gate_exps_b, selected_experts);
|
|
cb(cur, "ffn_moe_gate_biased", il);
|
|
}
|
|
|
|
switch (type_op) {
|
|
case LLM_FFN_SILU:
|
|
if (gate_exps) {
|
|
cur = ggml_swiglu_split(ctx0, cur, up);
|
|
cb(cur, "ffn_moe_swiglu", il);
|
|
} else {
|
|
cur = ggml_silu(ctx0, cur);
|
|
cb(cur, "ffn_moe_silu", il);
|
|
} break;
|
|
case LLM_FFN_GELU:
|
|
if (gate_exps) {
|
|
cur = ggml_geglu_split(ctx0, cur, up);
|
|
cb(cur, "ffn_moe_geglu", il);
|
|
} else {
|
|
cur = ggml_gelu(ctx0, cur);
|
|
cb(cur, "ffn_moe_gelu", il);
|
|
} break;
|
|
case LLM_FFN_SWIGLU_OAI_MOE:
|
|
{
|
|
// TODO: move to hparams?
|
|
constexpr float alpha = 1.702f;
|
|
constexpr float limit = 7.0f;
|
|
cur = ggml_swiglu_oai(ctx0, cur, up, alpha, limit);
|
|
cb(cur, "ffn_moe_swiglu_oai", il);
|
|
} break;
|
|
case LLM_FFN_RELU:
|
|
if (gate_exps) {
|
|
cur = ggml_reglu_split(ctx0, cur, up);
|
|
cb(cur, "ffn_moe_reglu", il);
|
|
} else {
|
|
cur = ggml_relu(ctx0, cur);
|
|
cb(cur, "ffn_moe_relu", il);
|
|
} break;
|
|
default:
|
|
GGML_ABORT("fatal error");
|
|
}
|
|
|
|
experts = build_lora_mm_id(down_exps, cur, selected_experts); // [n_embd, n_expert_used, n_tokens]
|
|
cb(experts, "ffn_moe_down", il);
|
|
|
|
if (down_exps_b) {
|
|
experts = ggml_add_id(ctx0, experts, down_exps_b, selected_experts);
|
|
cb(experts, "ffn_moe_down_biased", il);
|
|
}
|
|
|
|
if (!weight_before_ffn) {
|
|
experts = ggml_mul(ctx0, experts, weights);
|
|
cb(cur, "ffn_moe_weighted", il);
|
|
}
|
|
|
|
ggml_tensor * cur_experts[LLAMA_MAX_EXPERTS] = { nullptr };
|
|
|
|
assert(n_expert_used > 0);
|
|
|
|
// order the views before the adds
|
|
for (uint32_t i = 0; i < hparams.n_expert_used; ++i) {
|
|
cur_experts[i] = ggml_view_2d(ctx0, experts, n_embd, n_tokens, experts->nb[2], i*experts->nb[1]);
|
|
|
|
ggml_build_forward_expand(gf, cur_experts[i]);
|
|
}
|
|
|
|
// aggregate experts
|
|
// note: here we explicitly use hparams.n_expert_used instead of n_expert_used
|
|
// to avoid potentially a large number of add nodes during warmup
|
|
// ref: https://github.com/ggml-org/llama.cpp/pull/14753
|
|
ggml_tensor * moe_out = cur_experts[0];
|
|
|
|
for (uint32_t i = 1; i < hparams.n_expert_used; ++i) {
|
|
moe_out = ggml_add(ctx0, moe_out, cur_experts[i]);
|
|
}
|
|
|
|
if (hparams.n_expert_used == 1) {
|
|
// avoid returning a non-contiguous tensor
|
|
moe_out = ggml_cont(ctx0, moe_out);
|
|
}
|
|
|
|
cb(moe_out, "ffn_moe_out", il);
|
|
|
|
return moe_out;
|
|
}
|
|
|
|
// input embeddings with optional lora
|
|
ggml_tensor * llm_graph_context::build_inp_embd(ggml_tensor * tok_embd) const {
|
|
const int64_t n_embd = hparams.n_embd;
|
|
|
|
auto inp = std::make_unique<llm_graph_input_embd>();
|
|
|
|
ggml_tensor * cur = nullptr;
|
|
|
|
if (ubatch.token) {
|
|
inp->tokens = ggml_new_tensor_1d(ctx0, GGML_TYPE_I32, ubatch.n_tokens);
|
|
//cb(inp->tokens, "inp_tokens", -1);
|
|
ggml_set_input(inp->tokens);
|
|
res->t_tokens = inp->tokens;
|
|
|
|
cur = ggml_get_rows(ctx0, tok_embd, inp->tokens);
|
|
|
|
// apply lora for embedding tokens if needed
|
|
for (const auto & lora : *loras) {
|
|
llama_adapter_lora_weight * lw = lora.first->get_weight(tok_embd);
|
|
if (lw == nullptr) {
|
|
continue;
|
|
}
|
|
|
|
const float adapter_scale = lora.second;
|
|
const float scale = lw->get_scale(lora.first->alpha, adapter_scale);
|
|
|
|
ggml_tensor * inpL_delta = ggml_scale(ctx0, ggml_mul_mat(
|
|
ctx0, lw->b, // non-transposed lora_b
|
|
ggml_get_rows(ctx0, lw->a, inp->tokens)
|
|
), scale);
|
|
|
|
cur = ggml_add(ctx0, cur, inpL_delta);
|
|
}
|
|
} else {
|
|
inp->embd = ggml_new_tensor_2d(ctx0, GGML_TYPE_F32, n_embd, ubatch.n_tokens);
|
|
ggml_set_input(inp->embd);
|
|
|
|
cur = inp->embd;
|
|
}
|
|
|
|
// For Granite architecture
|
|
if (hparams.f_embedding_scale != 0.0f) {
|
|
cur = ggml_scale(ctx0, cur, hparams.f_embedding_scale);
|
|
}
|
|
|
|
cb(cur, "inp_embd", -1);
|
|
|
|
res->add_input(std::move(inp));
|
|
|
|
return cur;
|
|
}
|
|
|
|
ggml_tensor * llm_graph_context::build_inp_pos() const {
|
|
auto inp = std::make_unique<llm_graph_input_pos>(hparams.n_pos_per_embd());
|
|
|
|
auto & cur = inp->pos;
|
|
|
|
cur = ggml_new_tensor_1d(ctx0, GGML_TYPE_I32, (int64_t)n_tokens*hparams.n_pos_per_embd());
|
|
ggml_set_input(cur);
|
|
|
|
res->add_input(std::move(inp));
|
|
|
|
return cur;
|
|
}
|
|
|
|
ggml_tensor * llm_graph_context::build_inp_attn_scale() const {
|
|
auto inp = std::make_unique<llm_graph_input_attn_temp>(hparams.n_attn_temp_floor_scale, hparams.f_attn_temp_scale);
|
|
|
|
auto & cur = inp->attn_scale;
|
|
|
|
// this needs to be 1x1xN for broadcasting
|
|
cur = ggml_new_tensor_3d(ctx0, GGML_TYPE_F32, 1, 1, n_tokens);
|
|
ggml_set_input(cur);
|
|
|
|
res->add_input(std::move(inp));
|
|
|
|
return cur;
|
|
}
|
|
|
|
ggml_tensor * llm_graph_context::build_inp_out_ids() const {
|
|
// note: when all tokens are output, we could skip this optimization to spare the ggml_get_rows() calls,
|
|
// but this would make the graph topology depend on the number of output tokens, which can interfere with
|
|
// features that require constant topology such as pipeline parallelism
|
|
// ref: https://github.com/ggml-org/llama.cpp/pull/14275#issuecomment-2987424471
|
|
//if (n_outputs < n_tokens) {
|
|
// return nullptr;
|
|
//}
|
|
|
|
auto inp = std::make_unique<llm_graph_input_out_ids>(hparams, cparams, n_outputs);
|
|
|
|
auto & cur = inp->out_ids;
|
|
|
|
cur = ggml_new_tensor_1d(ctx0, GGML_TYPE_I32, n_outputs);
|
|
ggml_set_input(cur);
|
|
|
|
res->add_input(std::move(inp));
|
|
|
|
return cur;
|
|
}
|
|
|
|
ggml_tensor * llm_graph_context::build_inp_mean() const {
|
|
auto inp = std::make_unique<llm_graph_input_mean>(cparams);
|
|
|
|
auto & cur = inp->mean;
|
|
|
|
cur = ggml_new_tensor_2d(ctx0, GGML_TYPE_F32, n_tokens, ubatch.n_seqs_unq);
|
|
ggml_set_input(cur);
|
|
|
|
res->add_input(std::move(inp));
|
|
|
|
return cur;
|
|
}
|
|
|
|
ggml_tensor * llm_graph_context::build_inp_cls() const {
|
|
auto inp = std::make_unique<llm_graph_input_cls>(cparams);
|
|
|
|
auto & cur = inp->cls;
|
|
|
|
cur = ggml_new_tensor_1d(ctx0, GGML_TYPE_I32, ubatch.n_seqs_unq);
|
|
ggml_set_input(cur);
|
|
|
|
res->add_input(std::move(inp));
|
|
|
|
return cur;
|
|
}
|
|
|
|
ggml_tensor * llm_graph_context::build_inp_cross_embd() const {
|
|
auto inp = std::make_unique<llm_graph_input_cross_embd>(cross);
|
|
|
|
auto & cur = inp->cross_embd;
|
|
|
|
// if we have the output embeddings from the encoder, use them directly
|
|
// TODO: needs more work to be correct, for now just use the tensor shape
|
|
//if (cross->t_embd) {
|
|
// cur = ggml_view_tensor(ctx0, cross->t_embd);
|
|
|
|
// return cur;
|
|
//}
|
|
|
|
const auto n_embd = !cross->v_embd.empty() ? cross->n_embd : hparams.n_embd;
|
|
const auto n_enc = !cross->v_embd.empty() ? cross->n_enc : hparams.n_ctx_train;
|
|
|
|
cur = ggml_new_tensor_2d(ctx0, GGML_TYPE_F32, n_embd, n_enc);
|
|
ggml_set_input(cur);
|
|
|
|
res->add_input(std::move(inp));
|
|
|
|
return cur;
|
|
}
|
|
|
|
ggml_tensor * llm_graph_context::build_inp_pos_bucket_enc() const {
|
|
auto inp = std::make_unique<llm_graph_input_pos_bucket>(hparams);
|
|
|
|
auto & cur = inp->pos_bucket;
|
|
|
|
cur = ggml_new_tensor_2d(ctx0, GGML_TYPE_I32, n_tokens, n_tokens);
|
|
ggml_set_input(cur);
|
|
|
|
res->add_input(std::move(inp));
|
|
|
|
return cur;
|
|
}
|
|
|
|
ggml_tensor * llm_graph_context::build_inp_pos_bucket_dec() const {
|
|
const auto * mctx_cur = static_cast<const llama_kv_cache_unified_context *>(mctx);
|
|
|
|
auto inp = std::make_unique<llm_graph_input_pos_bucket_kv>(hparams, mctx_cur);
|
|
|
|
const auto n_kv = mctx_cur->get_n_kv();
|
|
|
|
auto & cur = inp->pos_bucket;
|
|
|
|
cur = ggml_new_tensor_2d(ctx0, GGML_TYPE_I32, n_kv, n_tokens);
|
|
ggml_set_input(cur);
|
|
|
|
res->add_input(std::move(inp));
|
|
|
|
return cur;
|
|
}
|
|
|
|
ggml_tensor * llm_graph_context::build_pos_bias(ggml_tensor * pos_bucket, ggml_tensor * attn_rel_b) const {
|
|
ggml_tensor * pos_bucket_1d = ggml_reshape_1d(ctx0, pos_bucket, pos_bucket->ne[0] * pos_bucket->ne[1]);
|
|
cb(pos_bucket_1d, "pos_bucket_1d", -1);
|
|
|
|
ggml_tensor * pos_bias = ggml_get_rows(ctx0, attn_rel_b, pos_bucket_1d);
|
|
|
|
pos_bias = ggml_reshape_3d(ctx0, pos_bias, pos_bias->ne[0], pos_bucket->ne[0], pos_bucket->ne[1]);
|
|
pos_bias = ggml_permute (ctx0, pos_bias, 2, 0, 1, 3);
|
|
pos_bias = ggml_cont (ctx0, pos_bias);
|
|
|
|
cb(pos_bias, "pos_bias", -1);
|
|
|
|
return pos_bias;
|
|
}
|
|
|
|
ggml_tensor * llm_graph_context::build_attn_mha(
|
|
ggml_tensor * q,
|
|
ggml_tensor * k,
|
|
ggml_tensor * v,
|
|
ggml_tensor * kq_b,
|
|
ggml_tensor * kq_mask,
|
|
ggml_tensor * v_mla,
|
|
ggml_tensor * sinks,
|
|
float kq_scale) const {
|
|
const bool v_trans = v->nb[1] > v->nb[2];
|
|
|
|
// split the batch into streams if needed
|
|
const auto n_stream = k->ne[3];
|
|
|
|
q = ggml_reshape_4d(ctx0, q, q->ne[0], q->ne[1], q->ne[2]/n_stream, n_stream);
|
|
|
|
q = ggml_permute(ctx0, q, 0, 2, 1, 3);
|
|
k = ggml_permute(ctx0, k, 0, 2, 1, 3);
|
|
v = ggml_permute(ctx0, v, 0, 2, 1, 3);
|
|
|
|
const auto n_kv = k->ne[1];
|
|
|
|
ggml_tensor * cur;
|
|
|
|
// TODO: replace hardcoded padding with ggml-provided padding
|
|
if (cparams.flash_attn && (n_kv % 256 == 0) && kq_b == nullptr) {
|
|
GGML_ASSERT(kq_b == nullptr && "Flash attention does not support KQ bias yet");
|
|
|
|
if (v_trans) {
|
|
v = ggml_transpose(ctx0, v);
|
|
}
|
|
|
|
// this can happen when KV cache is not used (e.g. an embedding model with non-causal attn)
|
|
if (k->type == GGML_TYPE_F32) {
|
|
k = ggml_cast(ctx0, k, GGML_TYPE_F16);
|
|
}
|
|
|
|
if (v->type == GGML_TYPE_F32) {
|
|
v = ggml_cast(ctx0, v, GGML_TYPE_F16);
|
|
}
|
|
|
|
cur = ggml_flash_attn_ext(ctx0, q, k, v, kq_mask, kq_scale, hparams.f_max_alibi_bias,
|
|
hparams.attn_soft_cap ? hparams.f_attn_logit_softcapping : 0.0f);
|
|
|
|
ggml_flash_attn_ext_add_sinks(cur, sinks);
|
|
ggml_flash_attn_ext_set_prec (cur, GGML_PREC_F32);
|
|
|
|
if (v_mla) {
|
|
#if 0
|
|
// v_mla can be applied as a matrix-vector multiplication with broadcasting across dimension 3 == n_tokens.
|
|
// However, the code is optimized for dimensions 0 and 1 being large, so this is inefficient.
|
|
cur = ggml_reshape_4d(ctx0, cur, v_mla->ne[0], 1, n_head, n_tokens);
|
|
cur = ggml_mul_mat(ctx0, v_mla, cur);
|
|
#else
|
|
// It's preferable to do the calculation as a matrix-matrix multiplication with n_tokens in dimension 1.
|
|
// The permutations are noops and only change how the tensor data is interpreted.
|
|
cur = ggml_permute(ctx0, cur, 0, 2, 1, 3);
|
|
cur = ggml_mul_mat(ctx0, v_mla, cur);
|
|
cur = ggml_permute(ctx0, cur, 0, 2, 1, 3);
|
|
cur = ggml_cont(ctx0, cur); // Needed because ggml_reshape_2d expects contiguous inputs.
|
|
#endif
|
|
}
|
|
|
|
cur = ggml_reshape_2d(ctx0, cur, cur->ne[0]*cur->ne[1], cur->ne[2]*cur->ne[3]);
|
|
} else {
|
|
ggml_tensor * kq = ggml_mul_mat(ctx0, k, q);
|
|
|
|
// note: this op tends to require high floating point range
|
|
// while for some models F16 is enough, for others it is not, so we default to F32 here
|
|
ggml_mul_mat_set_prec(kq, GGML_PREC_F32);
|
|
|
|
if (arch == LLM_ARCH_GROK) {
|
|
// need to do the following:
|
|
// multiply by attn_output_multiplier of 0.08838834764831845
|
|
// and then :
|
|
// kq = 30 * tanh(kq / 30)
|
|
// before the softmax below
|
|
|
|
kq = ggml_tanh(ctx0, ggml_scale(ctx0, kq, 0.08838834764831845f/30.0f));
|
|
kq = ggml_scale(ctx0, kq, 30);
|
|
}
|
|
|
|
if (hparams.attn_soft_cap) {
|
|
kq = ggml_scale(ctx0, kq, 1.0f / hparams.f_attn_logit_softcapping);
|
|
kq = ggml_tanh (ctx0, kq);
|
|
kq = ggml_scale(ctx0, kq, hparams.f_attn_logit_softcapping);
|
|
}
|
|
|
|
if (kq_b) {
|
|
kq = ggml_add(ctx0, kq, kq_b);
|
|
}
|
|
|
|
kq = ggml_soft_max_ext(ctx0, kq, kq_mask, kq_scale, hparams.f_max_alibi_bias);
|
|
ggml_soft_max_add_sinks(kq, sinks);
|
|
|
|
if (!v_trans) {
|
|
// note: avoid this branch
|
|
v = ggml_cont(ctx0, ggml_transpose(ctx0, v));
|
|
}
|
|
|
|
ggml_tensor * kqv = ggml_mul_mat(ctx0, v, kq);
|
|
|
|
// for MLA with the absorption optimization, we need to "decompress" from MQA back to MHA
|
|
if (v_mla) {
|
|
kqv = ggml_mul_mat(ctx0, v_mla, kqv);
|
|
}
|
|
|
|
cur = ggml_permute(ctx0, kqv, 0, 2, 1, 3);
|
|
|
|
// recombine streams
|
|
cur = ggml_cont_2d(ctx0, cur, cur->ne[0]*cur->ne[1], cur->ne[2]*cur->ne[3]);
|
|
|
|
if (!cparams.offload_kqv) {
|
|
// all nodes between the KV store and the attention output are run on the CPU
|
|
ggml_backend_sched_set_tensor_backend(sched, cur, backend_cpu);
|
|
}
|
|
}
|
|
|
|
ggml_build_forward_expand(gf, cur);
|
|
|
|
return cur;
|
|
}
|
|
|
|
llm_graph_input_attn_no_cache * llm_graph_context::build_attn_inp_no_cache() const {
|
|
auto inp = std::make_unique<llm_graph_input_attn_no_cache>(hparams, cparams);
|
|
|
|
// note: there is no KV cache, so the number of KV values is equal to the number of tokens in the batch
|
|
inp->kq_mask = ggml_new_tensor_4d(ctx0, GGML_TYPE_F32, n_tokens, GGML_PAD(n_tokens, GGML_KQ_MASK_PAD), 1, 1);
|
|
ggml_set_input(inp->kq_mask);
|
|
|
|
inp->kq_mask_cnv = cparams.flash_attn ? ggml_cast(ctx0, inp->kq_mask, GGML_TYPE_F16) : inp->kq_mask;
|
|
|
|
return (llm_graph_input_attn_no_cache *) res->add_input(std::move(inp));
|
|
}
|
|
|
|
ggml_tensor * llm_graph_context::build_attn(
|
|
llm_graph_input_attn_no_cache * inp,
|
|
ggml_tensor * wo,
|
|
ggml_tensor * wo_b,
|
|
ggml_tensor * q_cur,
|
|
ggml_tensor * k_cur,
|
|
ggml_tensor * v_cur,
|
|
ggml_tensor * kq_b,
|
|
ggml_tensor * v_mla,
|
|
float kq_scale,
|
|
int il) const {
|
|
GGML_UNUSED(n_tokens);
|
|
|
|
// these nodes are added to the graph together so that they are not reordered
|
|
// by doing so, the number of splits in the graph is reduced
|
|
ggml_build_forward_expand(gf, q_cur);
|
|
ggml_build_forward_expand(gf, k_cur);
|
|
ggml_build_forward_expand(gf, v_cur);
|
|
|
|
const auto & kq_mask = inp->get_kq_mask();
|
|
|
|
// [TAG_NO_CACHE_PAD]
|
|
// TODO: if ubatch.equal_seqs() == true, we can split the three tensors below into ubatch.n_seqs_unq streams
|
|
assert(!ubatch.equal_seqs());
|
|
|
|
ggml_tensor * q = q_cur;
|
|
ggml_tensor * k = k_cur;
|
|
ggml_tensor * v = v_cur;
|
|
|
|
ggml_tensor * cur = build_attn_mha(q, k, v, kq_b, kq_mask, v_mla, nullptr, kq_scale);
|
|
cb(cur, "kqv_out", il);
|
|
|
|
if (wo) {
|
|
cur = build_lora_mm(wo, cur);
|
|
}
|
|
|
|
if (wo_b) {
|
|
//cb(cur, "kqv_wo", il);
|
|
}
|
|
|
|
if (wo_b) {
|
|
cur = ggml_add(ctx0, cur, wo_b);
|
|
}
|
|
|
|
return cur;
|
|
}
|
|
|
|
static std::unique_ptr<llm_graph_input_attn_kv_unified> build_attn_inp_kv_unified_impl(
|
|
ggml_context * ctx0,
|
|
const llama_ubatch & ubatch,
|
|
const llama_hparams & hparams,
|
|
const llama_cparams & cparams,
|
|
const llama_kv_cache_unified_context * mctx_cur) {
|
|
|
|
auto inp = std::make_unique<llm_graph_input_attn_kv_unified>(hparams, cparams, mctx_cur);
|
|
|
|
{
|
|
GGML_ASSERT(hparams.swa_type == LLAMA_SWA_TYPE_NONE && "Use llama_kv_cache_unified_iswa for SWA");
|
|
|
|
const auto n_kv = mctx_cur->get_n_kv();
|
|
const auto n_tokens = ubatch.n_tokens;
|
|
const auto n_stream = cparams.kv_unified ? 1 : ubatch.n_seqs_unq;
|
|
|
|
inp->self_k_idxs = mctx_cur->build_input_k_idxs(ctx0, ubatch);
|
|
inp->self_v_idxs = mctx_cur->build_input_v_idxs(ctx0, ubatch);
|
|
|
|
inp->self_kq_mask = ggml_new_tensor_4d(ctx0, GGML_TYPE_F32, n_kv, GGML_PAD(n_tokens/n_stream, GGML_KQ_MASK_PAD), 1, n_stream);
|
|
ggml_set_input(inp->self_kq_mask);
|
|
|
|
inp->self_kq_mask_cnv = cparams.flash_attn ? ggml_cast(ctx0, inp->self_kq_mask, GGML_TYPE_F16) : inp->self_kq_mask;
|
|
}
|
|
|
|
return inp;
|
|
}
|
|
|
|
llm_graph_input_attn_kv_unified * llm_graph_context::build_attn_inp_kv_unified() const {
|
|
const auto * mctx_cur = static_cast<const llama_kv_cache_unified_context *>(mctx);
|
|
|
|
auto inp = build_attn_inp_kv_unified_impl(ctx0, ubatch, hparams, cparams, mctx_cur);
|
|
|
|
return (llm_graph_input_attn_kv_unified *) res->add_input(std::move(inp));
|
|
}
|
|
|
|
ggml_tensor * llm_graph_context::build_attn(
|
|
llm_graph_input_attn_kv_unified * inp,
|
|
ggml_tensor * wo,
|
|
ggml_tensor * wo_b,
|
|
ggml_tensor * q_cur,
|
|
ggml_tensor * k_cur,
|
|
ggml_tensor * v_cur,
|
|
ggml_tensor * kq_b,
|
|
ggml_tensor * v_mla,
|
|
float kq_scale,
|
|
int il) const {
|
|
// these nodes are added to the graph together so that they are not reordered
|
|
// by doing so, the number of splits in the graph is reduced
|
|
ggml_build_forward_expand(gf, q_cur);
|
|
ggml_build_forward_expand(gf, k_cur);
|
|
ggml_build_forward_expand(gf, v_cur);
|
|
|
|
const auto * mctx_cur = inp->mctx;
|
|
|
|
// store to KV cache
|
|
{
|
|
const auto & k_idxs = inp->get_k_idxs();
|
|
const auto & v_idxs = inp->get_v_idxs();
|
|
|
|
ggml_build_forward_expand(gf, mctx_cur->cpy_k(ctx0, k_cur, k_idxs, il));
|
|
ggml_build_forward_expand(gf, mctx_cur->cpy_v(ctx0, v_cur, v_idxs, il));
|
|
}
|
|
|
|
const auto & kq_mask = inp->get_kq_mask();
|
|
|
|
ggml_tensor * q = q_cur;
|
|
ggml_tensor * k = mctx_cur->get_k(ctx0, il);
|
|
ggml_tensor * v = mctx_cur->get_v(ctx0, il);
|
|
|
|
ggml_tensor * cur = build_attn_mha(q, k, v, kq_b, kq_mask, v_mla, nullptr, kq_scale);
|
|
cb(cur, "kqv_out", il);
|
|
|
|
if (wo) {
|
|
cur = build_lora_mm(wo, cur);
|
|
if (arch == LLM_ARCH_GLM4 || arch == LLM_ARCH_GLM4_MOE) {
|
|
// GLM4 and GLM4_MOE seem to have numerical issues with half-precision accumulators
|
|
ggml_mul_mat_set_prec(cur, GGML_PREC_F32);
|
|
}
|
|
}
|
|
|
|
if (wo_b) {
|
|
cur = ggml_add(ctx0, cur, wo_b);
|
|
}
|
|
|
|
return cur;
|
|
}
|
|
|
|
ggml_tensor * llm_graph_context::build_attn(
|
|
llm_graph_input_attn_kv_unified_iswa * inp,
|
|
ggml_tensor * wo,
|
|
ggml_tensor * wo_b,
|
|
ggml_tensor * q_cur,
|
|
ggml_tensor * k_cur,
|
|
ggml_tensor * v_cur,
|
|
ggml_tensor * kq_b,
|
|
ggml_tensor * v_mla,
|
|
float kq_scale,
|
|
int il) const {
|
|
return build_attn_with_sinks(
|
|
inp,
|
|
wo,
|
|
wo_b,
|
|
q_cur,
|
|
k_cur,
|
|
v_cur,
|
|
kq_b,
|
|
v_mla,
|
|
nullptr,
|
|
kq_scale,
|
|
il);
|
|
}
|
|
|
|
ggml_tensor * llm_graph_context::build_attn_with_sinks(
|
|
llm_graph_input_attn_kv_unified_iswa * inp,
|
|
ggml_tensor * wo,
|
|
ggml_tensor * wo_b,
|
|
ggml_tensor * q_cur,
|
|
ggml_tensor * k_cur,
|
|
ggml_tensor * v_cur,
|
|
ggml_tensor * kq_b,
|
|
ggml_tensor * v_mla,
|
|
ggml_tensor * sinks,
|
|
float kq_scale,
|
|
int il) const {
|
|
// these nodes are added to the graph together so that they are not reordered
|
|
// by doing so, the number of splits in the graph is reduced
|
|
ggml_build_forward_expand(gf, q_cur);
|
|
|
|
if (k_cur) {
|
|
ggml_build_forward_expand(gf, k_cur);
|
|
}
|
|
|
|
if (v_cur) {
|
|
ggml_build_forward_expand(gf, v_cur);
|
|
}
|
|
|
|
const auto * mctx_iswa = inp->mctx;
|
|
|
|
const bool is_swa = hparams.is_swa(il);
|
|
|
|
const auto * mctx_cur = is_swa ? mctx_iswa->get_swa() : mctx_iswa->get_base();
|
|
|
|
// optionally store to KV cache
|
|
if (k_cur) {
|
|
const auto & k_idxs = is_swa ? inp->get_k_idxs_swa() : inp->get_k_idxs();
|
|
|
|
ggml_build_forward_expand(gf, mctx_cur->cpy_k(ctx0, k_cur, k_idxs, il));
|
|
}
|
|
|
|
if (v_cur) {
|
|
const auto & v_idxs = is_swa ? inp->get_v_idxs_swa() : inp->get_v_idxs();
|
|
|
|
ggml_build_forward_expand(gf, mctx_cur->cpy_v(ctx0, v_cur, v_idxs, il));
|
|
}
|
|
|
|
const auto & kq_mask = is_swa ? inp->get_kq_mask_swa() : inp->get_kq_mask();
|
|
|
|
ggml_tensor * q = q_cur;
|
|
ggml_tensor * k = mctx_cur->get_k(ctx0, il);
|
|
ggml_tensor * v = mctx_cur->get_v(ctx0, il);
|
|
|
|
ggml_tensor * cur = build_attn_mha(q, k, v, kq_b, kq_mask, v_mla, sinks, kq_scale);
|
|
cb(cur, "kqv_out", il);
|
|
|
|
if (wo) {
|
|
cur = build_lora_mm(wo, cur);
|
|
}
|
|
|
|
if (wo_b) {
|
|
//cb(cur, "kqv_wo", il);
|
|
}
|
|
|
|
if (wo_b) {
|
|
cur = ggml_add(ctx0, cur, wo_b);
|
|
}
|
|
|
|
return cur;
|
|
}
|
|
|
|
llm_graph_input_attn_cross * llm_graph_context::build_attn_inp_cross() const {
|
|
auto inp = std::make_unique<llm_graph_input_attn_cross>(cross);
|
|
|
|
const int32_t n_enc = !cross->v_embd.empty() ? cross->n_enc : hparams.n_ctx_train;
|
|
|
|
inp->cross_kq_mask = ggml_new_tensor_4d(ctx0, GGML_TYPE_F32, n_enc, GGML_PAD(n_tokens, GGML_KQ_MASK_PAD), 1, 1);
|
|
ggml_set_input(inp->cross_kq_mask);
|
|
|
|
inp->cross_kq_mask_cnv = cparams.flash_attn ? ggml_cast(ctx0, inp->cross_kq_mask, GGML_TYPE_F16) : inp->cross_kq_mask;
|
|
|
|
return (llm_graph_input_attn_cross *) res->add_input(std::move(inp));
|
|
}
|
|
|
|
ggml_tensor * llm_graph_context::build_attn(
|
|
llm_graph_input_attn_cross * inp,
|
|
ggml_tensor * wo,
|
|
ggml_tensor * wo_b,
|
|
ggml_tensor * q_cur,
|
|
ggml_tensor * k_cur,
|
|
ggml_tensor * v_cur,
|
|
ggml_tensor * kq_b,
|
|
ggml_tensor * v_mla,
|
|
float kq_scale,
|
|
int il) const {
|
|
// these nodes are added to the graph together so that they are not reordered
|
|
// by doing so, the number of splits in the graph is reduced
|
|
ggml_build_forward_expand(gf, q_cur);
|
|
ggml_build_forward_expand(gf, k_cur);
|
|
ggml_build_forward_expand(gf, v_cur);
|
|
|
|
const auto & kq_mask = inp->get_kq_mask_cross();
|
|
|
|
ggml_tensor * q = q_cur;
|
|
ggml_tensor * k = k_cur;
|
|
ggml_tensor * v = v_cur;
|
|
|
|
ggml_tensor * cur = build_attn_mha(q, k, v, kq_b, kq_mask, v_mla, nullptr, kq_scale);
|
|
cb(cur, "kqv_out", il);
|
|
|
|
if (wo) {
|
|
cur = build_lora_mm(wo, cur);
|
|
}
|
|
|
|
if (wo_b) {
|
|
//cb(cur, "kqv_wo", il);
|
|
}
|
|
|
|
if (wo_b) {
|
|
cur = ggml_add(ctx0, cur, wo_b);
|
|
}
|
|
|
|
return cur;
|
|
}
|
|
|
|
// TODO: maybe separate the inner implementation into a separate function
|
|
// like with the non-sliding window equivalent
|
|
// once sliding-window hybrid caches are a thing.
|
|
llm_graph_input_attn_kv_unified_iswa * llm_graph_context::build_attn_inp_kv_unified_iswa() const {
|
|
const auto * mctx_cur = static_cast<const llama_kv_cache_unified_iswa_context *>(mctx);
|
|
|
|
auto inp = std::make_unique<llm_graph_input_attn_kv_unified_iswa>(hparams, cparams, mctx_cur);
|
|
|
|
const auto n_stream = cparams.kv_unified ? 1 : ubatch.n_seqs_unq;
|
|
|
|
{
|
|
const auto n_kv = mctx_cur->get_base()->get_n_kv();
|
|
|
|
inp->self_k_idxs = mctx_cur->get_base()->build_input_k_idxs(ctx0, ubatch);
|
|
inp->self_v_idxs = mctx_cur->get_base()->build_input_v_idxs(ctx0, ubatch);
|
|
|
|
inp->self_kq_mask = ggml_new_tensor_4d(ctx0, GGML_TYPE_F32, n_kv, GGML_PAD(n_tokens/n_stream, GGML_KQ_MASK_PAD), 1, n_stream);
|
|
ggml_set_input(inp->self_kq_mask);
|
|
|
|
inp->self_kq_mask_cnv = cparams.flash_attn ? ggml_cast(ctx0, inp->self_kq_mask, GGML_TYPE_F16) : inp->self_kq_mask;
|
|
}
|
|
|
|
{
|
|
GGML_ASSERT(hparams.swa_type != LLAMA_SWA_TYPE_NONE && "Use llama_kv_cache_unified for non-SWA");
|
|
|
|
const auto n_kv = mctx_cur->get_swa()->get_n_kv();
|
|
|
|
inp->self_k_idxs_swa = mctx_cur->get_swa()->build_input_k_idxs(ctx0, ubatch);
|
|
inp->self_v_idxs_swa = mctx_cur->get_swa()->build_input_v_idxs(ctx0, ubatch);
|
|
|
|
inp->self_kq_mask_swa = ggml_new_tensor_4d(ctx0, GGML_TYPE_F32, n_kv, GGML_PAD(n_tokens/n_stream, GGML_KQ_MASK_PAD), 1, n_stream);
|
|
ggml_set_input(inp->self_kq_mask_swa);
|
|
|
|
inp->self_kq_mask_swa_cnv = cparams.flash_attn ? ggml_cast(ctx0, inp->self_kq_mask_swa, GGML_TYPE_F16) : inp->self_kq_mask_swa;
|
|
}
|
|
|
|
return (llm_graph_input_attn_kv_unified_iswa *) res->add_input(std::move(inp));
|
|
}

ggml_tensor * llm_graph_context::build_rs(
        ggml_tensor * s,
        ggml_tensor * state_copy_main,
        ggml_tensor * state_copy_extra,
        int32_t state_size,
        int32_t n_seqs,
        uint32_t n_rs,
        uint32_t rs_head,
        uint32_t rs_size,
        int32_t rs_zero,
        const llm_graph_get_rows_fn & get_state_rows) const {

    ggml_tensor * states = ggml_reshape_2d(ctx0, s, state_size, rs_size);

    // Clear a single state which will then be copied to the other cleared states.
    // Note that this is a no-op when the view is zero-sized.
    ggml_tensor * state_zero = ggml_view_1d(ctx0, states, state_size*(rs_zero >= 0), rs_zero*states->nb[1]*(rs_zero >= 0));
    ggml_build_forward_expand(gf, ggml_scale_inplace(ctx0, state_zero, 0));

    // copy states
    // NOTE: assuming the copy destinations are ALL contained between rs_head and rs_head + n_rs
    // {state_size, rs_size} -> {state_size, n_seqs}
    ggml_tensor * output_states = get_state_rows(ctx0, states, state_copy_main);
    ggml_build_forward_expand(gf, output_states);

    // copy extra states which won't be changed further (between n_seqs and n_rs)
    ggml_tensor * states_extra = ggml_get_rows(ctx0, states, state_copy_extra);
    ggml_build_forward_expand(gf,
        ggml_cpy(ctx0,
            states_extra,
            ggml_view_1d(ctx0, s, state_size*(n_rs - n_seqs), (rs_head + n_seqs)*state_size*ggml_element_size(s))));

    return output_states;
}

static std::unique_ptr<llm_graph_input_rs> build_rs_inp_impl(
        ggml_context * ctx0,
        const llama_ubatch & ubatch,
        const llama_memory_recurrent_context * mctx_cur) {
    auto inp = std::make_unique<llm_graph_input_rs>(mctx_cur);

    const int64_t n_rs   = mctx_cur->get_n_rs();
    const int64_t n_seqs = ubatch.n_seqs;

    inp->s_copy = ggml_new_tensor_1d(ctx0, GGML_TYPE_I32, n_rs);
    ggml_set_input(inp->s_copy);

    inp->s_copy_main  = ggml_view_1d(ctx0, inp->s_copy, n_seqs, 0);
    inp->s_copy_extra = ggml_view_1d(ctx0, inp->s_copy, n_rs - n_seqs, n_seqs * inp->s_copy->nb[0]);

    return inp;
}
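
// s_copy carries one source-row index per recurrent slot: s_copy_main covers the
// first n_seqs entries (the states consumed by this ubatch) and s_copy_extra the
// remaining n_rs - n_seqs entries, which build_rs copies back unchanged.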

llm_graph_input_rs * llm_graph_context::build_rs_inp() const {
    const auto * mctx_cur = static_cast<const llama_memory_recurrent_context *>(mctx);

    auto inp = build_rs_inp_impl(ctx0, ubatch, mctx_cur);

    return (llm_graph_input_rs *) res->add_input(std::move(inp));
}

ggml_tensor * llm_graph_context::build_rs(
        llm_graph_input_rs * inp,
        ggml_tensor * s,
        int32_t state_size,
        int32_t n_seqs,
        const llm_graph_get_rows_fn & get_state_rows) const {
    const auto * kv_state = inp->mctx;

    return build_rs(s, inp->s_copy_main, inp->s_copy_extra, state_size, n_seqs,
            kv_state->get_n_rs(), kv_state->get_head(), kv_state->get_size(), kv_state->get_rs_z(),
            get_state_rows);
}
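
// The RWKV token-shift state for layer il holds hparams.n_embd_r() values per
// sequence (token_shift_count embeddings of size n_embd): the load helper gathers
// it through build_rs and reshapes it to [n_embd, token_shift_count, n_seqs], and
// the store helper copies the updated values back into the cache at the current
// head.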

ggml_tensor * llm_graph_context::build_rwkv_token_shift_load(
        llm_graph_input_rs * inp,
        const llama_ubatch & ubatch,
        int il) const {
    const auto * mctx_cur = static_cast<const llama_memory_recurrent_context *>(mctx);

    const auto token_shift_count = hparams.token_shift_count;

    const int64_t n_seqs = ubatch.n_seqs;

    ggml_tensor * token_shift_all = mctx_cur->get_r_l(il);

    ggml_tensor * token_shift = build_rs(
            inp, token_shift_all,
            hparams.n_embd_r(), n_seqs);

    token_shift = ggml_reshape_3d(ctx0, token_shift, hparams.n_embd, token_shift_count, n_seqs);

    return token_shift;
}

ggml_tensor * llm_graph_context::build_rwkv_token_shift_store(
        ggml_tensor * token_shift,
        const llama_ubatch & ubatch,
        int il) const {
    const auto * mctx_cur = static_cast<const llama_memory_recurrent_context *>(mctx);

    const auto token_shift_count = hparams.token_shift_count;
    const auto n_embd = hparams.n_embd;

    const int64_t n_seqs = ubatch.n_seqs;

    const auto kv_head = mctx_cur->get_head();

    return ggml_cpy(
        ctx0,
        ggml_view_1d(ctx0, token_shift, n_embd * n_seqs * token_shift_count, 0),
        ggml_view_1d(ctx0, mctx_cur->get_r_l(il), hparams.n_embd_r()*n_seqs, hparams.n_embd_r()*kv_head*ggml_element_size(mctx_cur->get_r_l(il)))
    );
}

llm_graph_input_mem_hybrid * llm_graph_context::build_inp_mem_hybrid() const {
    const auto * mctx_cur = static_cast<const llama_memory_hybrid_context *>(mctx);

    auto inp_rs   = build_rs_inp_impl(ctx0, ubatch, mctx_cur->get_recr());
    auto inp_attn = build_attn_inp_kv_unified_impl(ctx0, ubatch, hparams, cparams, mctx_cur->get_attn());

    auto inp = std::make_unique<llm_graph_input_mem_hybrid>(std::move(inp_attn), std::move(inp_rs), mctx_cur);

    return (llm_graph_input_mem_hybrid *) res->add_input(std::move(inp));
}
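
// Pooling reduces the per-token embeddings in res->t_embd to one vector per
// sequence. For LLAMA_POOLING_TYPE_MEAN, inp_mean (see build_inp_mean) carries a
// weight of 1/n for each of the n tokens of a sequence and 0 elsewhere, so the
// matmul against the transposed embeddings yields the per-sequence mean in a
// single operation.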

void llm_graph_context::build_pooling(
        ggml_tensor * cls,
        ggml_tensor * cls_b,
        ggml_tensor * cls_out,
        ggml_tensor * cls_out_b) const {
    if (!cparams.embeddings) {
        return;
    }

    ggml_tensor * inp = res->t_embd;

    //// find result_norm tensor for input
    //for (int i = ggml_graph_n_nodes(gf) - 1; i >= 0; --i) {
    //    inp = ggml_graph_node(gf, i);
    //    if (strcmp(inp->name, "result_norm") == 0 || strcmp(inp->name, "result_embd") == 0) {
    //        break;
    //    }
    //
    //    inp = nullptr;
    //}

    GGML_ASSERT(inp != nullptr && "missing result_norm/result_embd tensor");

    ggml_tensor * cur;

    switch (pooling_type) {
        case LLAMA_POOLING_TYPE_NONE:
            {
                cur = inp;
            } break;
        case LLAMA_POOLING_TYPE_MEAN:
            {
                ggml_tensor * inp_mean = build_inp_mean();
                cur = ggml_mul_mat(ctx0, ggml_cont(ctx0, ggml_transpose(ctx0, inp)), inp_mean);
            } break;
        case LLAMA_POOLING_TYPE_CLS:
        case LLAMA_POOLING_TYPE_LAST:
            {
                ggml_tensor * inp_cls = build_inp_cls();
                cur = ggml_get_rows(ctx0, inp, inp_cls);
            } break;
        case LLAMA_POOLING_TYPE_RANK:
            {
                ggml_tensor * inp_cls = build_inp_cls();
                inp = ggml_get_rows(ctx0, inp, inp_cls);

                if (cls) {
                    // classification head
                    // https://github.com/huggingface/transformers/blob/5af7d41e49bbfc8319f462eb45253dcb3863dfb7/src/transformers/models/roberta/modeling_roberta.py#L1566
                    cur = ggml_mul_mat(ctx0, cls, inp);
                    if (cls_b) {
                        cur = ggml_add(ctx0, cur, cls_b);
                    }
                    cur = ggml_tanh(ctx0, cur);

                    // some models don't have `cls_out`, for example: https://huggingface.co/jinaai/jina-reranker-v1-tiny-en
                    // https://huggingface.co/jinaai/jina-reranker-v1-tiny-en/blob/cb5347e43979c3084a890e3f99491952603ae1b7/modeling_bert.py#L884-L896
                    if (cls_out) {
                        cur = ggml_mul_mat(ctx0, cls_out, cur);
                        if (cls_out_b) {
                            cur = ggml_add(ctx0, cur, cls_out_b);
                        }
                    }
                } else if (cls_out) {
                    // Single layer classification head (direct projection)
                    // https://github.com/huggingface/transformers/blob/f4fc42216cd56ab6b68270bf80d811614d8d59e4/src/transformers/models/bert/modeling_bert.py#L1476
                    cur = ggml_mul_mat(ctx0, cls_out, inp);
                    if (cls_out_b) {
                        cur = ggml_add(ctx0, cur, cls_out_b);
                    }
                } else {
                    GGML_ABORT("RANK pooling requires either cls+cls_b or cls_out+cls_out_b");
                }
            } break;
        default:
            {
                GGML_ABORT("unknown pooling type");
            }
    }

    cb(cur, "result_embd_pooled", -1);
    res->t_embd_pooled = cur;

    ggml_build_forward_expand(gf, cur);
}
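
// Worked example for the bucketing below: with n_buckets = 32 and
// bidirectional = true, the budget is halved to 16 buckets per direction and
// max_exact = 8. A relative position of +7 (x = 10, y = 3) stays in the exact
// range and maps to 16 + 7 = 23, while a distance of 97 (x = 100, y = 3) falls
// in the logarithmic range and maps to
// 16 + min(floor(8 + log(97/8)*(16-8)/log(128/8)), 15) = 16 + 15 = 31.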

int32_t llama_relative_position_bucket(llama_pos x, llama_pos y, uint64_t n_buckets, bool bidirectional) {
    // TODO: move to hparams if a T5 variant appears that uses a different value
    const int64_t max_distance = 128;

    if (bidirectional) {
        n_buckets >>= 1;
    }

    const int64_t max_exact = n_buckets >> 1;

    int32_t relative_position = x - y;
    int32_t relative_bucket = 0;

    if (bidirectional) {
        relative_bucket += (relative_position > 0) * n_buckets;
        relative_position = abs(relative_position);
    } else {
        relative_position = -std::min<int32_t>(relative_position, 0);
    }

    int32_t relative_position_if_large = floorf(max_exact + logf(1.0 * relative_position / max_exact) * (n_buckets - max_exact) / log(1.0 * max_distance / max_exact));
    relative_position_if_large = std::min<int32_t>(relative_position_if_large, n_buckets - 1);
    relative_bucket += (relative_position < max_exact ? relative_position : relative_position_if_large);

    return relative_bucket;
}