model: fix step3.5 n_rot (#20318)
parent 59db9a357d
commit 0842b9b465
@@ -7348,7 +7348,7 @@ bool llama_model::load_tensors(llama_model_loader & ml) {
             // ("rope_freqs.weight") and ggml uses only the first (n_rot_l/2) entries per layer.
             uint32_t n_rot_max = 0;
             for (int i = 0; i < n_layer; ++i) {
-                n_rot_max = std::max(n_rot_max, hparams.n_rot());
+                n_rot_max = std::max(n_rot_max, hparams.n_rot(i));
             }
             if (n_rot_max == 0) {
                 n_rot_max = n_rot;
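The one-token fix passes the loop index to the per-layer accessor. Before, every iteration called hparams.n_rot() with no layer index, which presumably resolved to a default layer (or the model-wide value), so n_rot_max never reflected a layer whose rotary dimension differs from the rest, as Step-3.5 layers can. A minimal self-contained sketch of why this matters; hparams_t, n_rot_default, and n_rot_get are hypothetical names that only illustrate the pattern, not the actual llama.cpp hparams API:

#include <algorithm>
#include <cstdint>
#include <cstdio>
#include <vector>

struct hparams_t {
    uint32_t n_rot_default = 64;        // model-wide rotary dimension
    std::vector<uint32_t> n_rot_arr;    // optional per-layer overrides

    // per-layer accessor: what the fixed loop body queries
    uint32_t n_rot_get(int il) const {
        return n_rot_arr.empty() ? n_rot_default : n_rot_arr[il];
    }
    // no-argument overload, assumed here to fall back to layer 0,
    // mirroring what the buggy call would have resolved to
    uint32_t n_rot_get() const { return n_rot_get(0); }
};

int main() {
    hparams_t hparams;
    hparams.n_rot_arr = {64, 64, 128, 64};  // one layer uses a larger rotary dim
    const int n_layer = (int) hparams.n_rot_arr.size();

    uint32_t n_rot_max_bug = 0;
    uint32_t n_rot_max_fix = 0;
    for (int i = 0; i < n_layer; ++i) {
        n_rot_max_bug = std::max(n_rot_max_bug, hparams.n_rot_get());   // bug: ignores i
        n_rot_max_fix = std::max(n_rot_max_fix, hparams.n_rot_get(i));  // fix: per-layer
    }
    std::printf("buggy max: %u, fixed max: %u\n", n_rot_max_bug, n_rot_max_fix);
    // prints: buggy max: 64, fixed max: 128
    return 0;
}

Under these assumptions, the buggy loop would undersize anything allocated from n_rot_max (here, the per-layer slice of "rope_freqs.weight") whenever some layer's rotary dimension exceeds the default.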