diff --git a/llama.cpp b/llama.cpp
index bc58ad9..69bfdc1 100644
--- a/llama.cpp
+++ b/llama.cpp
@@ -1455,6 +1455,14 @@ static bool llama_eval_internal(
         // When we implement Matrix x Matrix Metal multiplication, we can avoid this branch.
         // But for now, we have focused only on Matrix x Vector Metal multiplication.
         //
+        // TODO: avoid these syncs via shared memory (ref #1696)
+        //
+        if (lctx.ctx_metal) {
+            // We need to sync the GPU KV cache with the CPU KV cache
+            ggml_metal_get_tensor(lctx.ctx_metal, kv_self.k);
+            ggml_metal_get_tensor(lctx.ctx_metal, kv_self.v);
+        }
+
         ggml_graph_compute(ctx0, &gf);
 
         if (lctx.ctx_metal) {
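
For readers following along: the hunk above sits inside the `GGML_USE_METAL` fallback path of `llama_eval_internal`. The sketch below reconstructs the surrounding control flow implied by the diff's comments; it is a best-effort reading based on this patch and the public `ggml-metal.h` API of the time (`ggml_metal_graph_compute`, `ggml_metal_get_tensor`), so details like the `N == 1` batch-size check and the exact branch shape are assumptions, not part of this patch.

```cpp
// Hypothetical reconstruction of the surrounding branch (not part of this diff).
// Single-token decoding (Matrix x Vector) runs fully on the GPU; batched
// evaluation falls back to the CPU graph, which is why the GPU-resident
// KV cache must be copied back before ggml_graph_compute() runs.
#ifdef GGML_USE_METAL
    if (lctx.ctx_metal && N == 1) {
        // Matrix x Vector case: the Metal backend handles the whole graph,
        // and the KV cache stays on the GPU between tokens.
        ggml_metal_graph_compute(lctx.ctx_metal, &gf);
    } else {
        // Matrix x Matrix case: no Metal kernel yet, so compute on the CPU.
        // First pull the KV cache written by earlier GPU steps back to host
        // memory; otherwise the CPU graph would read stale K/V data.
        if (lctx.ctx_metal) {
            ggml_metal_get_tensor(lctx.ctx_metal, kv_self.k); // GPU -> CPU
            ggml_metal_get_tensor(lctx.ctx_metal, kv_self.v); // GPU -> CPU
        }
        ggml_graph_compute(ctx0, &gf);
    }
#endif
```

The TODO's mention of shared memory (ref #1696) presumably points at Apple's unified memory: if the CPU and GPU views alias the same buffer, these explicit copies become unnecessary.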