Mirror of https://git.adityakumar.xyz/llama.cpp.git (synced 2024-11-09 15:29:43 +00:00)
Better error when using both LoRA + GPU layers (#1861)
parent 254a7a7a5f
commit 6b8312e797
1 changed file with 8 additions and 0 deletions
@@ -412,6 +412,14 @@ bool gpt_params_parse(int argc, char ** argv, gpt_params & params) {
         gpt_print_usage(argc, argv, default_params);
         exit(1);
     }
+
+#ifdef GGML_USE_CUBLAS
+    if (!params.lora_adapter.empty() && params.n_gpu_layers > 0) {
+        fprintf(stderr, "%s: error: the simultaneous use of LoRAs and GPU acceleration is not supported", __func__);
+        exit(1);
+    }
+#endif // GGML_USE_CUBLAS
+
     if (escape_prompt) {
         process_escapes(params.prompt);
     }
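For context, a rough sketch of the new behavior (the model and adapter paths below are hypothetical, and this assumes a build with GGML_USE_CUBLAS defined): passing both a LoRA adapter and GPU offloading now makes argument parsing fail fast with the message added in this diff, since __func__ expands to gpt_params_parse:

$ ./main -m models/7B/ggml-model.bin --lora lora-adapter.bin --n-gpu-layers 32
gpt_params_parse: error: the simultaneous use of LoRAs and GPU acceleration is not supported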