// llama.cpp/common/speculative.cpp
#include "speculative.h"
#include <cstring>
#include <algorithm>
#include <map>
#include <fstream>
#include "ggml.h"
#include "llama.h"
#include "log.h"
#include "common.h"
#include "ngram-cache.h"
#include "ngram-map.h"
#include "sampling.h"
#define SPEC_VOCAB_MAX_SIZE_DIFFERENCE 128
#define SPEC_VOCAB_CHECK_START_TOKEN_ID 5
const std::vector<enum common_speculative_type> common_speculative_types = {
COMMON_SPECULATIVE_TYPE_NONE,
COMMON_SPECULATIVE_TYPE_DRAFT,
COMMON_SPECULATIVE_TYPE_EAGLE3,
COMMON_SPECULATIVE_TYPE_NGRAM_SIMPLE,
COMMON_SPECULATIVE_TYPE_NGRAM_MAP_K,
COMMON_SPECULATIVE_TYPE_NGRAM_MAP_K4V,
COMMON_SPECULATIVE_TYPE_NGRAM_CACHE
};
const std::map<std::string, enum common_speculative_type> common_speculative_type_from_name_map = {
{"none", COMMON_SPECULATIVE_TYPE_NONE},
{"draft", COMMON_SPECULATIVE_TYPE_DRAFT},
{"eagle3", COMMON_SPECULATIVE_TYPE_EAGLE3},
{"ngram_simple", COMMON_SPECULATIVE_TYPE_NGRAM_SIMPLE},
{"ngram_map_k", COMMON_SPECULATIVE_TYPE_NGRAM_MAP_K},
{"ngram_map_k4v", COMMON_SPECULATIVE_TYPE_NGRAM_MAP_K4V},
{"ngram_cache", COMMON_SPECULATIVE_TYPE_NGRAM_CACHE}
};
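// base state shared by all speculative implementations; tracks per-implementation usage statistics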
struct common_speculative_state {
const enum common_speculative_type type;
size_t drafts_call_count = 0; // number of times this implementation was called.
size_t drafts_generated_count = 0; // number of times a draft or part was generated by this implementation.
size_t drafts_accepted_count = 0; // number of times a draft or part was accepted by the target model.
size_t drafts_generated_tokens = 0; // number of tokens generated by this implementation.
size_t drafts_accepted_tokens = 0; // number of tokens accepted by the target model.
virtual ~common_speculative_state() = default;
common_speculative_state(enum common_speculative_type type) : type(type) {}
};
struct common_speculative_state_draft : public common_speculative_state {
common_speculative_state_draft(enum common_speculative_type type) : common_speculative_state(type) {}
};
struct common_speculative_state_eagle3 : public common_speculative_state {
common_speculative_state_eagle3(enum common_speculative_type type) : common_speculative_state(type) {}
};
// state of self-speculation (simple implementation, not ngram-map)
struct common_speculative_state_ngram_simple : public common_speculative_state {
common_ngram_simple_state state;
common_speculative_state_ngram_simple(
enum common_speculative_type type,
common_ngram_simple_state state)
: common_speculative_state(type), state(std::move(state)) {}
};
struct common_speculative_state_ngram_map_k : public common_speculative_state {
common_ngram_map map; // draft ngram map for speculative decoding without draft model
common_speculative_state_ngram_map_k(
enum common_speculative_type type,
common_ngram_map map)
: common_speculative_state(type), map(std::move(map)) {}
};
struct common_speculative_state_ngram_map_k4v : public common_speculative_state_ngram_map_k {
common_speculative_state_ngram_map_k4v(
enum common_speculative_type type,
common_ngram_map map)
: common_speculative_state_ngram_map_k(type, std::move(map)) {}
};
struct common_speculative_state_ngram_cache : public common_speculative_state {
uint16_t n_draft;
bool save_dynamic;
bool save_static;
common_ngram_cache ngram_cache_context;
common_ngram_cache ngram_cache_dynamic;
common_ngram_cache ngram_cache_static;
size_t cache_size = 0; // number of context tokens already added to ngram_cache_context
common_speculative_state_ngram_cache(
const enum common_speculative_type type,
std::string & path_static,
std::string & path_dynamic,
uint16_t n_draft,
bool save_dynamic,
bool save_static)
: common_speculative_state(type)
, n_draft(n_draft)
, save_dynamic(save_dynamic)
, save_static(save_static)
{
if (!path_static.empty()) {
try {
ngram_cache_static = common_ngram_cache_load(path_static);
} catch (std::ifstream::failure const &) {
LOG_ERR("failed to open static lookup cache: %s", path_static.c_str());
GGML_ABORT("Couldn't read static lookup cache");
}
}
if (!path_dynamic.empty()) {
try {
ngram_cache_dynamic = common_ngram_cache_load(path_dynamic);
} catch (std::ifstream::failure const &) {
LOG_ERR("failed to open dynamic lookup cache: %s", path_dynamic.c_str());
GGML_ABORT("Couldn't read dynamic lookup cache");
}
}
}
};
struct common_speculative {
struct llama_context * ctx_tgt; // only used for retokenizing from ctx_dft
struct llama_context * ctx_dft;
struct common_sampler * smpl;
llama_batch batch;
llama_tokens prompt_dft;
bool vocab_dft_compatible = true; // whether draft and target vocabs match (if false, retokenization is needed)
std::map<std::string, std::string> tgt_dft_replacements = {};
std::vector<std::unique_ptr<common_speculative_state>> impls; // list of implementations to use and their states
common_speculative_state * curr_impl = nullptr; // current implementation in use (for stats)
};
common_ngram_map get_common_ngram_map(const common_speculative_config & config, uint16_t size_ngram, uint16_t size_mgram);
struct common_speculative_state_ngram_cache create_state_ngram_cache(
std::string path_static, std::string path_dynamic,
const common_speculative_config & config);
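// parse the optional ngram parameters (size_ngram, size_mgram, key_only, check_rate, min_hits) from the
// implementation config, falling back to the given defaults; throws std::invalid_argument on out-of-range values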
common_ngram_map get_common_ngram_map(const common_speculative_config & config, uint16_t size_ngram, uint16_t size_mgram) {
uint16_t size_key = size_ngram;
uint16_t size_value = size_mgram;
bool key_only = false;
uint16_t check_rate = 2;
uint16_t min_hits = 1;
const std::map<std::string, std::string> & cfg = config.config;
if (cfg.find("size_ngram") != cfg.end()) {
size_key = std::stoi(cfg.at("size_ngram"));
if (size_key < 1 || size_key > 1024) {
throw std::invalid_argument("size_ngram must be between 1 and 1024");
}
}
if (cfg.find("size_mgram") != cfg.end()) {
size_value = std::stoi(cfg.at("size_mgram"));
if (size_value < 1 || size_value > 1024) {
throw std::invalid_argument("size_mgram must be between 1 and 1024");
}
}
if (cfg.find("key_only") != cfg.end()) {
key_only = (cfg.at("key_only") == "true");
}
if (cfg.find("check_rate") != cfg.end()) {
check_rate = std::stoi(cfg.at("check_rate"));
if (check_rate < 1 || check_rate > 1024) {
throw std::invalid_argument("check_rate must be between 1 and 1024");
}
}
if (cfg.find("min_hits") != cfg.end()) {
min_hits = std::stoi(cfg.at("min_hits"));
if (min_hits < 1 || min_hits > 1024) {
throw std::invalid_argument("min_hits must be between 1 and 1024");
}
}
return common_ngram_map(size_key, size_value, key_only, check_rate, min_hits);
}
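// build the state for the ngram_cache implementation; the constructor loads the optional
// static/dynamic caches from disk and aborts if a configured cache file cannot be read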
struct common_speculative_state_ngram_cache create_state_ngram_cache(
std::string path_static, std::string path_dynamic,
const common_speculative_config & config) {
uint16_t n_draft = 8;
bool save_static = false;
bool save_dynamic = false;
const std::map<std::string, std::string> & cfg = config.config;
if (cfg.find("n_draft") != cfg.end()) {
n_draft = std::stoi(cfg.at("n_draft"));
if (n_draft < 1 || n_draft > 1024) {
throw std::invalid_argument("ngram-cache: n_draft must be between 1 and 1024");
}
}
if (cfg.find("save_static") != cfg.end()) {
save_static = (cfg.at("save_static") == "true");
}
if (cfg.find("save_dynamic") != cfg.end()) {
save_dynamic = (cfg.at("save_dynamic") == "true");
}
// note: the constructor takes save_dynamic before save_static
common_speculative_state_ngram_cache state(config.type,
path_static, path_dynamic, n_draft, save_dynamic, save_static);
return state;
}
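// comma-separated list of all known speculative type names (e.g. for help/usage messages)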
std::string common_speculative_type_name_str() {
std::string result;
for (size_t i = 0; i < common_speculative_types.size(); i++) {
if (i > 0) {
result += ", ";
}
result += common_speculative_type_to_str(common_speculative_types[i]);
}
return result;
}
std::string common_speculative_type_to_str(enum common_speculative_type type) {
switch (type) {
case COMMON_SPECULATIVE_TYPE_NONE: return "none";
case COMMON_SPECULATIVE_TYPE_DRAFT: return "draft";
case COMMON_SPECULATIVE_TYPE_EAGLE3: return "eagle3";
case COMMON_SPECULATIVE_TYPE_NGRAM_SIMPLE: return "ngram_simple";
case COMMON_SPECULATIVE_TYPE_NGRAM_MAP_K: return "ngram_map_k";
case COMMON_SPECULATIVE_TYPE_NGRAM_MAP_K4V: return "ngram_map_k4v";
case COMMON_SPECULATIVE_TYPE_NGRAM_CACHE: return "ngram_cache";
default: return "unknown";
}
}
enum common_speculative_type common_speculative_type_from_name(const std::string & name) {
const auto it = common_speculative_type_from_name_map.find(name);
if (it == common_speculative_type_from_name_map.end()) {
return COMMON_SPECULATIVE_TYPE_COUNT;
}
return it->second;
}
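// create the speculative state from the given configs;
// ctx_dft may be nullptr when only model-free implementations (ngram_*) are configured
//
// typical usage (sketch; the variable names of the surrounding decode loop are illustrative):
//   common_speculative * spec = common_speculative_init(params, ctx_tgt, ctx_dft);
//   llama_tokens draft = common_speculative_gen_draft(spec, spec_params, prompt_tgt, id_last);
//   // ... validate the draft against the target model ...
//   common_speculative_accept(spec, n_accepted);
//   common_speculative_free(spec);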
struct common_speculative * common_speculative_init(
struct common_params_speculative & params,
struct llama_context * ctx_tgt,
struct llama_context * ctx_dft
) {
std::vector<std::unique_ptr<common_speculative_state>> implementations = {};
for (const common_speculative_config & config : params.configs) {
LOG_INF("common_speculative_init: adding implementation %s\n", common_speculative_type_to_str(config.type).c_str());
switch (config.type) {
case COMMON_SPECULATIVE_TYPE_NONE:
break;
case COMMON_SPECULATIVE_TYPE_DRAFT: {
implementations.push_back(std::make_unique<common_speculative_state_draft>(config.type));
break;
}
case COMMON_SPECULATIVE_TYPE_EAGLE3: {
implementations.push_back(std::make_unique<common_speculative_state_eagle3>(config.type));
break;
}
case COMMON_SPECULATIVE_TYPE_NGRAM_SIMPLE: {
common_ngram_map ngram_map = get_common_ngram_map(config,
params.spec_ngram_size_n, params.spec_ngram_size_m);
uint16_t ngram_size_key = ngram_map.size_key;
uint16_t mgram_size_value = ngram_map.size_value;
uint16_t check_rate = ngram_map.check_rate;
auto config_simple = common_ngram_simple_config{
/* .size_ngram = */ ngram_size_key,
/* .size_mgram = */ mgram_size_value,
/* .check_rate = */ check_rate
};
auto state = std::make_unique<common_speculative_state_ngram_simple>(
/* .type = */ config.type,
/* .state = */ common_ngram_simple_state(config_simple)
);
implementations.push_back(std::move(state));
break;
}
case COMMON_SPECULATIVE_TYPE_NGRAM_MAP_K: {
implementations.push_back(std::make_unique<common_speculative_state_ngram_map_k>(
config.type,
get_common_ngram_map(config, params.spec_ngram_size_n, params.spec_ngram_size_m)
));
break;
}
case COMMON_SPECULATIVE_TYPE_NGRAM_MAP_K4V: {
implementations.push_back(std::make_unique<common_speculative_state_ngram_map_k4v>(
config.type,
get_common_ngram_map(config, params.spec_ngram_size_n, params.spec_ngram_size_m)
));
break;
}
case COMMON_SPECULATIVE_TYPE_NGRAM_CACHE: {
auto state = create_state_ngram_cache(
params.lookup_cache_static, params.lookup_cache_dynamic, config);
implementations.push_back(std::make_unique<common_speculative_state_ngram_cache>(state));
break;
}
default:
break;
}
}
auto * result = new common_speculative {
/* .ctx_tgt = */ ctx_tgt,
/* .ctx_dft = */ ctx_dft,
/* .smpl = */ nullptr,
/* .batch = */ llama_batch_init(ctx_dft ? llama_n_batch(ctx_dft) : 64, 0, 1),
/* .prompt_dft = */ {},
/* .vocab_dft_compatible = */ false,
/* .tgt_dft_replacements = */ {},
/* .impls = */ std::move(implementations)
};
// TODO: optimize or pass from outside?
#if 0
{
common_params_sampling params;
params.no_perf = false;
params.top_k = 40;
params.top_p = 0.9;
params.samplers = {
COMMON_SAMPLER_TYPE_TOP_K,
COMMON_SAMPLER_TYPE_TOP_P,
COMMON_SAMPLER_TYPE_INFILL,
};
result->smpl = common_sampler_init(llama_get_model(ctx_dft), params);
}
#else
{
common_params_sampling params;
params.no_perf = false;
params.top_k = 10;
params.samplers = {
COMMON_SAMPLER_TYPE_TOP_K,
};
if (ctx_dft) {
result->smpl = common_sampler_init(llama_get_model(ctx_dft), params);
}
}
#endif
result->vocab_dft_compatible = common_speculative_are_compatible(ctx_tgt, ctx_dft);
LOG_DBG("vocab_dft_compatible = %d\n", result->vocab_dft_compatible);
return result;
}
void common_speculative_free(struct common_speculative * spec) {
if (spec == nullptr) {
return;
}
common_sampler_free(spec->smpl);
llama_batch_free(spec->batch);
delete spec;
}
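// two contexts are compatible if drafts can be used without retokenization:
// same vocab type, matching BOS/EOS tokens and add-BOS/EOS behavior, vocab sizes within
// SPEC_VOCAB_MAX_SIZE_DIFFERENCE, and identical token texts from SPEC_VOCAB_CHECK_START_TOKEN_ID onwards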
bool common_speculative_are_compatible(
const struct llama_context * ctx_tgt,
const struct llama_context * ctx_dft) {
// without both contexts there is nothing to compare (and no retokenization to do)
if (ctx_tgt == nullptr || ctx_dft == nullptr) {
return true;
}
const struct llama_model * model_tgt = llama_get_model(ctx_tgt);
const struct llama_model * model_dft = llama_get_model(ctx_dft);
const struct llama_vocab * vocab_tgt = llama_model_get_vocab(model_tgt);
const struct llama_vocab * vocab_dft = llama_model_get_vocab(model_dft);
const enum llama_vocab_type vocab_type_tgt = llama_vocab_type(vocab_tgt);
LOG_DBG("%s: vocab_type tgt: %d\n", __func__, vocab_type_tgt);
const enum llama_vocab_type vocab_type_dft = llama_vocab_type(vocab_dft);
LOG_DBG("%s: vocab_type dft: %d\n", __func__, vocab_type_dft);
if (vocab_type_tgt != vocab_type_dft) {
LOG_DBG("%s: draft model vocab type must match target model to use speculation but ", __func__);
LOG_DBG("vocab_type_dft = %d while vocab_type_tgt = %d\n", vocab_type_dft, vocab_type_tgt);
return false;
}
if (
llama_vocab_get_add_bos(vocab_tgt) != llama_vocab_get_add_bos(vocab_dft) ||
llama_vocab_get_add_eos(vocab_tgt) != llama_vocab_get_add_eos(vocab_dft) ||
llama_vocab_bos(vocab_tgt) != llama_vocab_bos(vocab_dft) ||
llama_vocab_eos(vocab_tgt) != llama_vocab_eos(vocab_dft)
) {
LOG_DBG("%s: draft model special tokens must match target model to use speculation\n", __func__);
return false;
}
{
const int n_vocab_tgt = llama_vocab_n_tokens(vocab_tgt);
const int n_vocab_dft = llama_vocab_n_tokens(vocab_dft);
const int vocab_diff = n_vocab_tgt > n_vocab_dft
? n_vocab_tgt - n_vocab_dft
: n_vocab_dft - n_vocab_tgt;
if (vocab_diff > SPEC_VOCAB_MAX_SIZE_DIFFERENCE) {
LOG_DBG("%s: draft model vocab must closely match target model to use speculation but ", __func__);
LOG_DBG("target vocab size %d does not match draft vocab size %d - difference %d, max allowed %d\n",
n_vocab_tgt, llama_vocab_n_tokens(vocab_dft), vocab_diff, SPEC_VOCAB_MAX_SIZE_DIFFERENCE);
return false;
}
for (int i = SPEC_VOCAB_CHECK_START_TOKEN_ID; i < std::min(n_vocab_tgt, n_vocab_dft); ++i) {
const char * token_text_tgt = llama_vocab_get_text(vocab_tgt, i);
const char * token_text_dft = llama_vocab_get_text(vocab_dft, i);
if (std::strcmp(token_text_tgt, token_text_dft) != 0) {
LOG_DBG("%s: draft model vocab must match target model to use speculation but ", __func__);
LOG_DBG("token %d content differs - target '%s', draft '%s'\n", i,
common_token_to_piece(ctx_tgt, i).c_str(),
common_token_to_piece(ctx_dft, i).c_str());
return false;
}
}
}
return true;
}
void common_speculative_add_replacement_tgt_dft(
struct common_speculative * spec,
const char *source, const char *dest) {
spec->tgt_dft_replacements[source] = dest;
}
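// apply the registered target->draft string replacements to the input;
// replace_to_tgt below applies the inverse mapping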
static std::string replace_to_dft(
struct common_speculative * spec,
const std::string& input) {
std::string result = input;
for (const auto & pair : spec->tgt_dft_replacements) {
size_t pos = result.find(pair.first);
while (pos != std::string::npos) {
result.replace(pos, pair.first.length(), pair.second);
pos = result.find(pair.first, pos + pair.second.length());
}
}
return result;
}
static std::string replace_to_tgt(
struct common_speculative * spec,
const std::string& input) {
std::string result = input;
for (const auto& pair : spec->tgt_dft_replacements) {
size_t pos = result.find(pair.second);
while (pos != std::string::npos) {
result.replace(pos, pair.second.length(), pair.first);
pos = result.find(pair.second, pos + pair.first.length());
}
}
return result;
}
llama_tokens common_speculative_use_draft_model(
struct common_speculative * spec,
struct common_speculative_params params,
const llama_tokens & prompt_tgt_main_model, // specified in target model vocab
llama_token id_last);
llama_tokens common_speculative_gen_ngram_cache(
common_speculative_state_ngram_cache & state,
const llama_tokens & tokens, llama_token sampled);
llama_tokens common_speculative_gen_draft(
struct common_speculative * spec,
struct common_speculative_params params,
const llama_tokens & prompt_tgt_main_model, // specified in target model vocab
llama_token id_last) {
llama_tokens result = {};
spec->curr_impl = nullptr; // reset current implementation
for (auto & impl : spec->impls) {
impl->drafts_call_count++;
switch (impl->type) {
case COMMON_SPECULATIVE_TYPE_NONE:
{
break;
}
case COMMON_SPECULATIVE_TYPE_DRAFT:
{
// Create a draft using a draft model.
result = common_speculative_use_draft_model(spec, params, prompt_tgt_main_model, id_last);
break;
}
case COMMON_SPECULATIVE_TYPE_EAGLE3:
{
// Work in progress: https://github.com/ggml-org/llama.cpp/pull/18039
break;
}
case COMMON_SPECULATIVE_TYPE_NGRAM_SIMPLE:
{
// Use common_ngram_map_draft to generate a draft from the current context.
auto * state = dynamic_cast<struct common_speculative_state_ngram_simple *>(impl.get());
if (state) {
result = common_ngram_simple_draft(state->state, prompt_tgt_main_model, id_last);
} else {
GGML_ABORT("unexpected implementation in type %d", impl.get()->type);
}
break;
}
case COMMON_SPECULATIVE_TYPE_NGRAM_MAP_K:
case COMMON_SPECULATIVE_TYPE_NGRAM_MAP_K4V:
{
// Use common_ngram_map_draft to generate a draft from the current context.
// (the k4v state derives from the k state, so both types share this path)
auto * state = dynamic_cast<common_speculative_state_ngram_map_k *>(impl.get());
if (state) {
common_ngram_map_draft(state->map, prompt_tgt_main_model, id_last, result);
} else {
GGML_ABORT("unexpected state for implementation type %d", impl->type);
}
break;
}
case COMMON_SPECULATIVE_TYPE_NGRAM_CACHE:
{
auto * state = dynamic_cast<common_speculative_state_ngram_cache *>(impl.get());
if (state) {
result = common_speculative_gen_ngram_cache(*state, prompt_tgt_main_model, id_last);
} else {
GGML_ABORT("unexpected state for implementation type %d", impl->type);
}
break;
}
case COMMON_SPECULATIVE_TYPE_COUNT:
{
GGML_ABORT("invalid speculative type COUNT");
break;
}
}
if (!result.empty()) {
LOG_DBG("%s: called impl %s, hist size = %zu, call_count = %zu, gen = %zu\n", __func__,
common_speculative_type_to_str(impl.get()->type).c_str(),
prompt_tgt_main_model.size(),
impl.get()->drafts_call_count, result.size());
spec->curr_impl = impl.get(); // set current implementation for stats
impl->drafts_generated_count++;
impl->drafts_generated_tokens += result.size();
break; // We have a draft, so break out of the loop and return it.
}
}
return result;
}
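// generate a draft with the draft model:
// - retokenize the target prompt and id_last into the draft vocab if the vocabs are not compatible
// - reuse as much of the previous draft context as possible (shifting/trimming the draft KV cache)
// - decode any new prompt tokens, then sample up to params.n_draft tokens greedily,
//   stopping early once the top candidate's probability drops below params.p_min
// - retokenize the draft back into the target vocab if needed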
llama_tokens common_speculative_use_draft_model(
struct common_speculative * spec,
struct common_speculative_params params,
const llama_tokens & prompt_tgt_main_model, // specified in target model vocab
llama_token id_last) {
auto & batch = spec->batch;
auto & ctx_tgt = spec->ctx_tgt;
auto & ctx_dft = spec->ctx_dft;
auto & smpl = spec->smpl;
auto & prompt_dft = spec->prompt_dft;
auto * mem_dft = llama_get_memory(ctx_dft);
int reuse_i = 0;
int reuse_n = 0;
const int n_ctx = llama_n_ctx(ctx_dft) - params.n_draft;
llama_tokens prompt_tgt_draft_model;
if (!spec->vocab_dft_compatible) {
std::string text;
text = common_detokenize(ctx_tgt, prompt_tgt_main_model, true);
text = replace_to_dft(spec, text);
LOG_DBG("%s: main->draft detokenized string: '%s'\n", __func__, text.c_str());
prompt_tgt_draft_model = common_tokenize(ctx_dft, text, false, true);
// convert id_last to draft vocab. llama_detokenize is called directly to avoid an allocation
const auto * model_tgt = llama_get_model(ctx_tgt);
const auto * vocab_tgt = llama_model_get_vocab(model_tgt);
int32_t n_chars = llama_detokenize(vocab_tgt, &id_last, 1, nullptr, 0, false, false);
GGML_ASSERT(n_chars < 0 && "failed to detokenize id_last");
text.resize(-n_chars);
llama_detokenize(vocab_tgt, &id_last, 1, text.data(), text.size(), false, false);
text = replace_to_dft(spec, text);
LOG_DBG("main->draft detokenized id_last(%d): '%s'\n", id_last, text.c_str());
id_last = common_tokenize(ctx_dft, text, false, true)[0];
}
// prompt_tgt's tokens will always be compatible with ctx_dft
const llama_tokens &prompt_tgt =
spec->vocab_dft_compatible ? prompt_tgt_main_model : prompt_tgt_draft_model;
const int i_start = std::max<int>(0, (int) prompt_tgt.size() - n_ctx);
// reuse as much as possible from the old draft context
// ideally, the draft context should be as big as the target context and we will always reuse the entire prompt
for (int i = 0; i < (int) prompt_dft.size(); ++i) {
int cur = 0;
while (i_start + cur < (int) prompt_tgt.size() &&
i + cur < (int) prompt_dft.size() &&
prompt_tgt[i_start + cur] == prompt_dft[i + cur]) {
cur++;
}
if ((cur >= params.n_reuse || n_ctx >= (int) prompt_tgt.size()) && cur > reuse_n) {
reuse_i = i;
reuse_n = cur;
}
}
LOG_DBG("%s: reuse_i = %d, reuse_n = %d, prompt = %d\n", __func__, reuse_i, reuse_n, (int) prompt_dft.size());
llama_tokens result;
result.reserve(params.n_draft);
if (reuse_n == 0) {
llama_memory_clear(mem_dft, false);
prompt_dft.clear();
} else {
// this happens when a previous draft has been discarded (for example, due to being too small), but the
// target model agreed with it. in this case, we simply pass back the previous results to save compute
if (reuse_i + reuse_n < (int) prompt_dft.size() && prompt_dft[reuse_i + reuse_n] == id_last) {
for (int i = reuse_i + reuse_n + 1; i < (int) prompt_dft.size(); ++i) {
result.push_back(prompt_dft[i]);
if (params.n_draft <= (int) result.size()) {
break;
}
}
return result;
}
if (reuse_i > 0) {
llama_memory_seq_rm (mem_dft, 0, 0, reuse_i);
llama_memory_seq_add(mem_dft, 0, reuse_i, -1, -reuse_i);
prompt_dft.erase(prompt_dft.begin(), prompt_dft.begin() + reuse_i);
}
if (reuse_n < (int) prompt_dft.size()) {
llama_memory_seq_rm (mem_dft, 0, reuse_n, -1);
prompt_dft.erase(prompt_dft.begin() + reuse_n, prompt_dft.end());
}
}
// prepare a batch to evaluate any new tokens in the prompt
common_batch_clear(batch);
for (size_t i = i_start + reuse_n; i < prompt_tgt.size(); ++i) {
//LOG_DBG("i = %d, i_start = %d, reuse_n = %d, i - i_start = %d, id = %6d\n", i, i_start, reuse_n, i - i_start, prompt_tgt[i]);
common_batch_add(batch, prompt_tgt[i], i - i_start, { 0 }, false);
prompt_dft.push_back(prompt_tgt[i]);
}
// we should rarely end up here during normal decoding
if (batch.n_tokens > 0) {
//LOG_DBG("%s: draft prompt batch: %s\n", __func__, string_from(ctx, batch).c_str());
llama_decode(ctx_dft, batch);
}
const llama_pos n_past = prompt_dft.size();
LOG_DBG("%s: n_past = %d\n", __func__, n_past);
common_batch_clear(batch);
common_batch_add (batch, id_last, n_past, { 0 }, true);
prompt_dft.push_back(id_last);
LOG_DBG("%s: draft prompt: %s\n", __func__, string_from(ctx_dft, prompt_dft).c_str());
llama_decode(ctx_dft, batch);
common_sampler_reset(smpl);
// sample n_draft tokens from the draft model
for (int i = 0; i < params.n_draft; ++i) {
common_batch_clear(batch);
common_sampler_sample(smpl, ctx_dft, 0, true);
const auto * cur_p = common_sampler_get_candidates(smpl, true);
for (int k = 0; k < std::min(3, (int) cur_p->size); ++k) {
LOG_DBG(" - draft candidate %3d, pos %3d: %6d (%8.3f) '%s'\n",
k, i, cur_p->data[k].id, cur_p->data[k].p, common_token_to_piece(ctx_dft, cur_p->data[k].id).c_str());
}
// add drafted token for each sequence
const llama_token id = cur_p->data[0].id;
common_sampler_accept(smpl, id, true);
result.push_back(id);
if (params.n_draft <= (int) result.size()) {
break;
}
// only collect very high-confidence draft tokens
if (cur_p->data[0].p < params.p_min) {
break;
}
common_batch_add(batch, id, n_past + i + 1, { 0 }, true);
// evaluate the drafted tokens on the draft model
llama_decode(ctx_dft, batch);
prompt_dft.push_back(id);
}
if (!spec->vocab_dft_compatible) {
std::string detokenized = common_detokenize(ctx_dft, result, true);
detokenized = replace_to_tgt(spec, detokenized);
LOG_DBG("draft->main detokenized string: '%s'\n", detokenized.c_str());
result = common_tokenize(ctx_tgt, detokenized, false, true);
if (result.size() > (size_t)params.n_draft) {
result.resize(params.n_draft);
}
}
return result;
}
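// record how many tokens of the last draft were accepted by the target model
// and forward the count to the ngram-map implementations via common_ngram_map_accept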
void common_speculative_accept(struct common_speculative * spec, const uint16_t n_accepted) {
common_speculative_state * impl = spec->curr_impl;
if (impl != nullptr) {
if (n_accepted > 0) {
impl->drafts_accepted_count++;
impl->drafts_accepted_tokens += n_accepted;
}
if (impl->type == COMMON_SPECULATIVE_TYPE_NGRAM_MAP_K ||
impl->type == COMMON_SPECULATIVE_TYPE_NGRAM_MAP_K4V) {
auto * state = dynamic_cast<struct common_speculative_state_ngram_map_k *>(impl);
if (state) {
common_ngram_map_accept(state->map, n_accepted);
}
}
}
}
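// log per-implementation statistics: calls, drafts generated/accepted, tokens generated/accepted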
void common_speculative_print_stats(const struct common_speculative * spec) {
if (spec == nullptr) {
return;
}
for (const auto & impl : spec->impls) {
LOG_INF("statistics %s: #calls = %zu, #gen drafts = %zu, #acc drafts = %zu, #gen tokens = %zu, #acc tokens = %zu\n",
common_speculative_type_to_str(impl->type).c_str(),
impl->drafts_call_count,
impl->drafts_generated_count,
impl->drafts_accepted_count,
impl->drafts_generated_tokens,
impl->drafts_accepted_tokens);
}
}
// n-gram cache
//
/**
 * Generate a draft using the 3-tier n-gram cache (context, dynamic, static).
 *
 * @param state Current state of this implementation
 * @param tokens Token history to search in
 * @param sampled Last sampled token
 * @return Vector of draft tokens; empty if no draft is found
 */
llama_tokens common_speculative_gen_ngram_cache(
common_speculative_state_ngram_cache & state,
const llama_tokens & tokens, llama_token sampled) {
if (state.cache_size < tokens.size() + 1) {
llama_tokens tokens_new(tokens.begin() + state.cache_size, tokens.end());
tokens_new.push_back(sampled); // add the last sampled token
// Update context ngram cache with new tokens:
common_ngram_cache_update(state.ngram_cache_context, LLAMA_NGRAM_MIN, LLAMA_NGRAM_MAX,
tokens_new, tokens_new.size(), false);
state.cache_size = tokens.size() + 1;
}
llama_tokens inp;
inp.reserve(tokens.size() + 1);
inp.insert(inp.end(), tokens.begin(), tokens.end());
inp.push_back(sampled);
llama_tokens draft;
draft.push_back(sampled);
common_ngram_cache_draft(inp, draft, state.n_draft, LLAMA_NGRAM_MIN, LLAMA_NGRAM_MAX,
state.ngram_cache_context,
state.ngram_cache_dynamic,
state.ngram_cache_static);
if (draft.size() > 0) {
// delete first token in draft (which is the sampled token)
draft.erase(draft.begin());
}
return draft;
}