fix server crashes (#2076)
commit 1cf14ccef1
parent cc45a7feb8
1 changed file with 2 additions and 2 deletions
@@ -906,7 +906,7 @@ int main(int argc, char ** argv) {
             while (llama.has_next_token) {
                 const completion_token_output token_with_probs = llama.doCompletion();
-                const std::string token_text = llama_token_to_str(llama.ctx, token_with_probs.tok);
+                const std::string token_text = token_with_probs.tok == -1 ? "" : llama_token_to_str(llama.ctx, token_with_probs.tok);

                 stop_pos = llama.findStoppingStrings(llama.generated_text,
                     token_text.size(), STOP_FULL);
@@ -933,7 +933,7 @@ int main(int argc, char ** argv) {
                 while (llama.has_next_token) {
                     const completion_token_output token_with_probs = llama.doCompletion();
-                    const std::string token_text = llama_token_to_str(llama.ctx, token_with_probs.tok);
+                    const std::string token_text = token_with_probs.tok == -1 ? "" : llama_token_to_str(llama.ctx, token_with_probs.tok);
                     if (llama.multibyte_pending > 0) {
                         continue;
                     }
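What the change does: llama.doCompletion() can hand back a token id of -1, and passing that id straight to llama_token_to_str() crashed the server; both call sites now map -1 to an empty string before converting the token to text. Below is a minimal, self-contained sketch of the same guard pattern. The types and the token_to_str() lookup are simplified stand-ins for illustration, not the real llama.cpp server API.

// Sketch of the guard this commit adds, with hypothetical stand-in types.
#include <iostream>
#include <string>

struct completion_token_output {
    int tok; // -1 stands in for "no valid token was produced"
};

// Stand-in for llama_token_to_str: indexing with an invalid id is
// undefined behavior, which is the kind of crash the commit prevents.
std::string token_to_str(int tok) {
    static const std::string vocab[] = {"hello", " world", "!"};
    return vocab[tok]; // out-of-bounds read if tok == -1
}

int main() {
    completion_token_output out{-1};

    // Before the fix: token_to_str(out.tok) would index with -1 and crash.
    // After the fix: an invalid token yields an empty string instead.
    const std::string token_text =
        out.tok == -1 ? "" : token_to_str(out.tok);

    std::cout << "token_text: \"" << token_text << "\"\n";
    return 0;
}

Mapping -1 to "" rather than skipping the loop iteration keeps the rest of the loop body (stop-string search, multibyte handling) running unchanged, since an empty token text is harmless there.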