Commit 973053d

llama : fix loading models with shared tok_embd and output (#5651)
ggml-ci
1 parent 7c8bcc1 commit 973053d

File tree: 1 file changed (+4, -8)
llama.cpp

Lines changed: 4 additions & 8 deletions
@@ -2791,13 +2791,7 @@ struct llama_model_loader {
 
         std::vector<no_init<uint8_t>> read_buf;
 
-        for (int i = 0; i < gguf_get_n_tensors(ctx_gguf); i++) {
-            struct ggml_tensor * cur = ggml_get_tensor(ctx, gguf_get_tensor_name(ctx_gguf, i));
-            if (!cur) {
-                // some tensors may be allocated in a different context
-                continue;
-            }
-
+        for (struct ggml_tensor * cur = ggml_get_first_tensor(ctx); cur != NULL; cur = ggml_get_next_tensor(ctx, cur)) {
             if (progress_callback) {
                 if (!progress_callback((float) size_done / size_data, progress_callback_user_data)) {
                     return false;
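
The rewritten loop visits every tensor registered in the ggml context rather than only the entries listed in the GGUF file, so artificial duplicates such as the shared output tensor are loaded as well. Below is a minimal sketch of that traversal pattern, assuming a ggml context that already holds the tensor metadata; print_all_tensors is an illustrative helper, not part of llama.cpp:

    // Walk every tensor created in a ggml context, including duplicates
    // that have no entry of their own in the GGUF file.
    #include "ggml.h"
    #include <cstdio>

    static void print_all_tensors(struct ggml_context * ctx) {
        for (struct ggml_tensor * cur = ggml_get_first_tensor(ctx); cur != NULL; cur = ggml_get_next_tensor(ctx, cur)) {
            printf("%-40s %zu bytes\n", ggml_get_name(cur), ggml_nbytes(cur));
        }
    }
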
@@ -3722,7 +3716,7 @@ static bool llm_load_tensors(
     }
 
     // create one context per buffer type
-    size_t ctx_size = ggml_tensor_overhead()*ml.n_tensors;
+    size_t ctx_size = ggml_tensor_overhead()*(ml.n_tensors + 1); // +1 for models where tok_embd is duplicated as output
     std::map<ggml_backend_buffer_type_t, ggml_context *> ctx_map;
     for (auto & it : buft_layer_count) {
         struct ggml_init_params params = {
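
The +1 matters because ggml contexts are sized up front: every tensor created in a context consumes ggml_tensor_overhead() bytes from the pool handed to ggml_init. A rough sketch of that sizing pattern for a metadata-only (no_alloc) context follows; make_meta_ctx and n_tensors are illustrative names, not loader code:

    #include "ggml.h"
    #include <cstddef>

    // Reserve a metadata-only ggml context with room for n_tensors
    // plus one extra slot for a duplicated output tensor.
    static struct ggml_context * make_meta_ctx(size_t n_tensors) {
        struct ggml_init_params params = {
            /*.mem_size   =*/ ggml_tensor_overhead()*(n_tensors + 1),
            /*.mem_buffer =*/ NULL,
            /*.no_alloc   =*/ true, // metadata only; tensor data lives in backend buffers
        };
        return ggml_init(params);
    }
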
@@ -3860,6 +3854,7 @@ static bool llm_load_tensors(
                 } else {
                     model.output = ml.create_tensor(ctx_output_split, tn(LLM_TENSOR_TOKEN_EMBD, "weight"), {n_embd, n_vocab}); // needs to be on GPU
                     ml.n_created--; // artificial tensor
+                    ml.size_data += ggml_nbytes(model.output);
                 }
             }
 
@@ -4396,6 +4391,7 @@ static bool llm_load_tensors(
                 model.output_norm = ml.create_tensor(ctx_output, tn(LLM_TENSOR_OUTPUT_NORM, "weight"), {n_embd});
                 model.output = ml.create_tensor(ctx_output, tn(LLM_TENSOR_TOKEN_EMBD, "weight"), {n_embd, n_vocab}); // same as tok_embd, duplicated to allow offloading
                 ml.n_created--; // artificial tensor
+                ml.size_data += ggml_nbytes(model.output);
 
                 const int64_t n_ff = hparams.n_ff;
                 const int64_t n_embd_head_k = hparams.n_embd_head_k;
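
Both size_data additions keep the loader's byte accounting in line with the new loop: the duplicated output tensor is now loaded like any other tensor in the context, so its bytes must be part of the total that the progress callback divides by. A small worked example of the amount involved, using hypothetical Gemma-like dimensions (all numbers illustrative):

    #include "ggml.h"
    #include <cstdint>
    #include <cstdio>

    int main() {
        struct ggml_init_params params = {
            /*.mem_size   =*/ ggml_tensor_overhead()*2,
            /*.mem_buffer =*/ NULL,
            /*.no_alloc   =*/ true,
        };
        struct ggml_context * ctx = ggml_init(params);

        // assumed shape/type of the duplicated output tensor (same as tok_embd)
        const int64_t n_embd  = 2048;
        const int64_t n_vocab = 256000;
        struct ggml_tensor * output = ggml_new_tensor_2d(ctx, GGML_TYPE_F16, n_embd, n_vocab);

        // this is what ml.size_data grows by: 2048 * 256000 * 2 = 1,048,576,000 bytes
        printf("ggml_nbytes(output) = %zu\n", ggml_nbytes(output));

        ggml_free(ctx);
        return 0;
    }
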
