Mirror of https://git.adityakumar.xyz/llama.cpp.git
metal : disable graph concurrency optimization due to bug (#2413)
parent b5472ea0ad
commit 1a941869cb

1 changed file with 4 additions and 3 deletions
@@ -1722,9 +1722,10 @@ static bool llama_eval_internal(
 
 #ifdef GGML_USE_METAL
     if (lctx.ctx_metal && N == 1) {
-        if (!ggml_metal_if_optimized(lctx.ctx_metal)) {
-            ggml_metal_graph_find_concurrency(lctx.ctx_metal, gf);
-        }
+        // TODO: disabled until #2413 is resolved
+        //if (!ggml_metal_if_optimized(lctx.ctx_metal)) {
+        //    ggml_metal_graph_find_concurrency(lctx.ctx_metal, gf);
+        //}
         ggml_metal_set_n_cb     (lctx.ctx_metal, n_threads);
         ggml_metal_graph_compute(lctx.ctx_metal, gf);
         ggml_metal_get_tensor   (lctx.ctx_metal, cur);
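For context only, here is a minimal sketch (not part of this commit) of an alternative way to disable the pass: keeping the ggml_metal_if_optimized / ggml_metal_graph_find_concurrency calls but guarding them behind a hypothetical compile-time flag, here called LLAMA_METAL_CONCURRENCY (not an upstream option), so the optimization could be re-enabled once #2413 is resolved. It uses only identifiers that already appear in the hunk above.

// Sketch only: same code path as the hunk above, with the graph-concurrency
// pass behind a hypothetical LLAMA_METAL_CONCURRENCY flag (off by default).
#ifdef GGML_USE_METAL
    if (lctx.ctx_metal && N == 1) {
#ifdef LLAMA_METAL_CONCURRENCY
        // analyze the graph once per context to find ops that can run concurrently
        if (!ggml_metal_if_optimized(lctx.ctx_metal)) {
            ggml_metal_graph_find_concurrency(lctx.ctx_metal, gf);
        }
#endif
        ggml_metal_set_n_cb     (lctx.ctx_metal, n_threads);
        ggml_metal_graph_compute(lctx.ctx_metal, gf);
        ggml_metal_get_tensor   (lctx.ctx_metal, cur);
        // ... remainder of the branch unchanged

Compared with commenting the code out, a guard like this keeps the call compiling so it cannot silently rot, while still defaulting to the safe path; the commit itself simply comments the block out with a TODO pointing at #2413.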