From 7ed03b8974269b6c48e55c4245d12fb3264a6cf5 Mon Sep 17 00:00:00 2001
From: Georgi Gerganov
Date: Fri, 5 Jul 2024 17:32:09 +0300
Subject: [PATCH] llama : fix compile warning (#8304)

---
 src/llama.cpp | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/src/llama.cpp b/src/llama.cpp
index 95184c1083a1c..18956d441409f 100644
--- a/src/llama.cpp
+++ b/src/llama.cpp
@@ -7261,7 +7261,7 @@ static bool llm_load_tensors(
 
                         layer.ffn_norm = ml.create_tensor(ctx_layer, tn(LLM_TENSOR_FFN_NORM, "weight", i), {n_embd});
 
-                        if (i < hparams.n_layer_dense_lead) {
+                        if (i < (int) hparams.n_layer_dense_lead) {
                             layer.ffn_gate = ml.create_tensor(ctx_split, tn(LLM_TENSOR_FFN_GATE, "weight", i), {n_embd, n_ff});
                             layer.ffn_down = ml.create_tensor(ctx_split, tn(LLM_TENSOR_FFN_DOWN, "weight", i), { n_ff, n_embd});
                             layer.ffn_up = ml.create_tensor(ctx_split, tn(LLM_TENSOR_FFN_UP, "weight", i), {n_embd, n_ff});
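
Note (not part of the patch): the warning being silenced here is most likely a signed/unsigned
comparison (-Wsign-compare), assuming the loop index `i` is a signed int while
`hparams.n_layer_dense_lead` is an unsigned field; the added `(int)` cast makes both operands
signed. A minimal, self-contained sketch of that situation, with hypothetical names, compiles
cleanly only with the cast applied:

    // sketch only: hparams_t and n_layer_dense_lead mirror the shape of the real
    // hparams field as suggested by the diff context, not the actual llama.cpp types
    #include <cstdint>
    #include <cstdio>

    struct hparams_t {
        uint32_t n_layer_dense_lead = 1;   // unsigned, like typical hparams counters
    };

    int main() {
        hparams_t hparams;
        for (int i = 0; i < 4; ++i) {
            // if (i < hparams.n_layer_dense_lead)      // warns: comparison of int and uint32_t
            if (i < (int) hparams.n_layer_dense_lead) { // cast keeps the comparison fully signed
                printf("layer %d uses dense FFN tensors\n", i);
            }
        }
        return 0;
    }

Casting the unsigned count to int (rather than widening `i` to unsigned) is safe here because
layer counts are small and never exceed INT_MAX.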