From 0000000000000000000000000000000000000000 Mon Sep 17 00:00:00 2001
From: Michael Yang <git@mxy.ng>
Date: Mon, 18 Aug 2025 16:58:39 -0700
Subject: [PATCH] decode: disable output_all
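
Do not force all tokens to be output when computing embeddings; keep
output_all disabled so the per-token output flags in the batch decide
which tokens produce output.

Illustrative sketch only (not part of this change), written against the
public llama_batch API in llama.h; tokens/n_tokens stand in for a
caller-provided prompt. A caller that still wants output for every
token sets the per-token flag itself:

    // request output for every token explicitly via batch.logits
    llama_batch batch = llama_batch_init(n_tokens, /*embd=*/0, /*n_seq_max=*/1);
    batch.n_tokens = n_tokens;
    for (int32_t i = 0; i < n_tokens; i++) {
        batch.token[i]     = tokens[i];  // caller-provided token ids
        batch.pos[i]       = i;
        batch.n_seq_id[i]  = 1;
        batch.seq_id[i][0] = 0;
        batch.logits[i]    = true;       // mark this token for output
    }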
---
 src/llama-context.cpp | 3 +--
 1 file changed, 1 insertion(+), 2 deletions(-)

diff --git a/src/llama-context.cpp b/src/llama-context.cpp
index 0b2b05c41..985f723db 100644
--- a/src/llama-context.cpp
+++ b/src/llama-context.cpp
@@ -1475,8 +1475,7 @@ int llama_context::decode(const llama_batch & batch_inp) {
     const int64_t n_vocab = vocab.n_tokens();
     const int64_t n_embd = hparams.n_embd_inp();
 
-    // when computing embeddings, all tokens are output
-    const bool output_all = cparams.embeddings;
+    const bool output_all = false;
     const bool has_samplers = !sampling.samplers.empty();
 
     const uint32_t n_seq_max = cparams.kv_unified ? LLAMA_MAX_SEQ : cparams.n_seq_max;