Don't interfere with BLAS for large prompts by running only 1 thread
parent ab77d76312
commit 4640eff23d

1 changed file with 4 additions and 1 deletion
@@ -852,8 +852,11 @@ static bool llama_eval_internal(
     };
 
     struct ggml_context * ctx0 = ggml_init(params);
 
+    // for big prompts, if BLAS is enabled, it is better to use only one thread
+    // otherwise, the threads are spin-lock waiting for the BLAS calls and are degrading the performance
     ggml_cgraph gf = {};
-    gf.n_threads = n_threads;
+    gf.n_threads = N > 255 && ggml_cpu_has_blas() ? 1 : n_threads;
 
     struct ggml_tensor * embd = ggml_new_tensor_1d(ctx0, GGML_TYPE_I32, N);
     memcpy(embd->data, tokens, N*ggml_element_size(embd));
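
For context, the heuristic from the diff can be read as a standalone helper. This is a minimal sketch, not the actual llama.cpp code: the function pick_n_threads and its signature are hypothetical, while ggml_cpu_has_blas() is the real ggml feature probe referenced in the change above. The idea is that once a prompt is large enough for ggml to hand the big matrix multiplications to BLAS, extra worker threads only spin-lock waiting on those calls, so the graph evaluates faster single-threaded.

// Provided by ggml.h; returns non-zero when ggml was built with a BLAS backend.
extern int ggml_cpu_has_blas(void);

// Hypothetical helper mirroring the commit's heuristic: prompts longer than
// 255 tokens on a BLAS build evaluate the graph with a single thread, so
// worker threads do not spin-lock waiting for the BLAS calls; short batches
// keep the user-requested thread count.
static int pick_n_threads(int n_tokens, int n_threads) {
    if (n_tokens > 255 && ggml_cpu_has_blas()) {
        return 1;
    }
    return n_threads;
}

With n_threads = 8, for example, a 512-token prompt evaluation would drop to a single thread on a BLAS build, while short interactive batches keep all 8 threads.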