1 parent 827f5ed commit d1f563a
llama.cpp
@@ -1455,6 +1455,14 @@ static bool llama_eval_internal(
 // When we implement Matrix x Matrix Metal multiplication, we can avoid this branch.
 // But for now, we have focused only on Matrix x Vector Metal multiplication.
 //
+// TODO: avoid these syncs via shared memory (ref #1696)
+//
+if (lctx.ctx_metal) {
+    // We need to sync the GPU KV cache with the CPU KV cache
+    ggml_metal_get_tensor(lctx.ctx_metal, kv_self.k);
+    ggml_metal_get_tensor(lctx.ctx_metal, kv_self.v);
+}
+
 ggml_graph_compute(ctx0, &gf);

 if (lctx.ctx_metal) {
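The change covers the case where the graph falls back to CPU execution because Metal only accelerates Matrix x Vector multiplication: before the CPU runs the graph, the KV cache tensors last written on the GPU are copied back to host memory. Below is a minimal, standalone sketch of that pattern, not the actual llama.cpp code. The ggml_metal_get_tensor and ggml_graph_compute calls are the ones appearing in the diff; the wrapper function eval_graph_hybrid, its parameter names, and the assumption that the graph and KV tensors have already been built and registered with the Metal context are illustrative only.

#include "ggml.h"
#include "ggml-metal.h"

// Sketch of the hybrid CPU/Metal execution step, assuming `gf` was built in
// `ctx0` and the KV cache tensors `kv_k` / `kv_v` (standing in for
// kv_self.k / kv_self.v from the diff) are backed by Metal buffers.
static void eval_graph_hybrid(struct ggml_metal_context * ctx_metal,
                              struct ggml_context       * ctx0,
                              struct ggml_cgraph        * gf,
                              struct ggml_tensor        * kv_k,
                              struct ggml_tensor        * kv_v) {
    if (ctx_metal) {
        // The KV cache was last updated by Metal kernels, so copy the
        // device-side data back into the host buffers before the CPU reads it.
        ggml_metal_get_tensor(ctx_metal, kv_k);
        ggml_metal_get_tensor(ctx_metal, kv_v);
    }

    // Run the graph on the CPU - the path taken when the graph contains
    // Matrix x Matrix multiplications that Metal does not handle yet.
    ggml_graph_compute(ctx0, gf);
}

As the TODO in the diff notes, these per-eval copies are a stopgap: with shared (unified) memory between CPU and GPU (ref #1696), the explicit syncs could be avoided entirely.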