794db3e7b9
A major rewrite for the server example.

Note that if you have built something on the previous server API, it will probably be incompatible. Check out the examples for how a typical chat app could work. This took a lot of effort; there are 24 PRs closed in the submitter's repo alone, over 160 commits, and a lot of comments and testing.

Summary of the changes:
- adds missing generation parameters: tfs_z, typical_p, repeat_last_n, repeat_penalty, presence_penalty, frequency_penalty, mirostat, penalize_nl, seed, ignore_eos
- applies missing top k sampler
- removes interactive mode/terminal-like behavior, removes exclude parameter
- moves threads and batch size to server command-line parameters
- adds LoRA loading and matches command line parameters with main example
- fixes stopping on EOS token and with the specified token amount with n_predict
- adds server timeouts, host, and port settings
- adds expanded generation complete response; adds generation settings, stop reason, prompt truncated, model used, and final text
- sets defaults for unspecified parameters between requests
- removes /next-token endpoint and as_loop parameter, adds stream parameter and server-sent events for streaming
- adds CORS headers to responses
- adds request logging, exception printing and optional verbose logging
- adds better stopping words handling when matching multiple tokens and while streaming, or when it finishes on a partial stop string
- adds printing an error when it can't bind to the host/port specified
- fixes multi-byte character handling and replaces invalid UTF-8 characters on responses
- prints timing and build info on startup
- adds logit bias to request parameters
- removes embedding mode
- updates documentation; adds streaming Node.js and Bash examples
- fixes code formatting
- sets server threads to 1 since the current global state doesn't work well with simultaneous requests
- adds truncation of the input prompt and better context reset
- removes token limit from the input prompt
- significantly simplifies the logic and removes a lot of variables

---------

Co-authored-by: anon998 <131767832+anon998@users.noreply.github.com>
Co-authored-by: Henri Vasserman <henv@hot.ee>
Co-authored-by: Felix Hellmann <privat@cirk2.de>
Co-authored-by: Johannes Gäßler <johannesg@5d6.de>
Co-authored-by: Lesaun Harvey <Lesaun@gmail.com>
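For reference, a minimal sketch of a single non-streaming request against the reworked API (assumptions: a server already listening on the default 127.0.0.1:8080, jq installed, and a reply shaped like the expanded response described above; inspect the JSON rather than relying on any particular field name here):

# One /completion request without streaming; the server answers with a single
# JSON object instead of server-sent events.
curl --silent --request POST --url "http://127.0.0.1:8080/completion" \
    --data-raw '{"prompt": "### Human: Hello, Assistant.\n### Assistant:", "n_predict": 64, "temperature": 0.2}' \
    | jq .
# The object carries the generated text plus the expanded metadata listed above
# (generation settings, stop reason, prompt truncated, model used, ...).

The script below demonstrates the streaming side of the same API.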
77 lines
1.8 KiB
Bash
#!/bin/bash

# Base URL of the llama.cpp server; can be overridden via the API_URL environment variable.
API_URL="${API_URL:-http://127.0.0.1:8080}"
# Seed conversation, stored as alternating human/assistant turns.
CHAT=(
    "Hello, Assistant."
    "Hello. How may I help you today?"
    "Please tell me the largest city in Europe."
    "Sure. The largest city in Europe is Moscow, the capital of Russia."
)

# Instruction prepended to every prompt.
INSTRUCTION="A chat between a curious human and an artificial intelligence assistant. The assistant gives helpful, detailed, and polite answers to the human's questions."
# Strip leading and trailing whitespace from the argument.
trim() {
    shopt -s extglob
    set -- "${1##+([[:space:]])}"
    printf "%s" "${1%%+([[:space:]])}"
}

# Strip only trailing whitespace from the argument.
trim_trailing() {
    shopt -s extglob
    printf "%s" "${1%%+([[:space:]])}"
}
# Build the full prompt: the instruction, the chat history, and the new question.
format_prompt() {
    echo -n "${INSTRUCTION}"
    printf "\n### Human: %s\n### Assistant: %s" "${CHAT[@]}" "$1"
}
# Ask the server to tokenize a string and print one token per line.
tokenize() {
    curl \
        --silent \
        --request POST \
        --url "${API_URL}/tokenize" \
        --data-raw "$(jq -ns --arg content "$1" '{content:$content}')" \
    | jq '.tokens[]'
}
# Number of prompt tokens to keep when the context fills up: the length of the instruction.
N_KEEP=$(tokenize "${INSTRUCTION}" | wc -l)
# Send a question to the server, stream the answer, and append both to the chat history.
chat_completion() {
    PROMPT="$(trim_trailing "$(format_prompt "$1")")"
    DATA="$(echo -n "$PROMPT" | jq -Rs --argjson n_keep $N_KEEP '{
        prompt: .,
        temperature: 0.2,
        top_k: 40,
        top_p: 0.9,
        n_keep: $n_keep,
        n_predict: 256,
        stop: ["\n### Human:"],
        stream: true
    }')"

    ANSWER=''

    # Read the server-sent events stream line by line and print tokens as they arrive.
    while IFS= read -r LINE; do
        if [[ $LINE = data:* ]]; then
            CONTENT="$(echo "${LINE:5}" | jq -r '.content')"
            printf "%s" "${CONTENT}"
            ANSWER+="${CONTENT}"
        fi
    done < <(curl \
        --silent \
        --no-buffer \
        --request POST \
        --url "${API_URL}/completion" \
        --data-raw "${DATA}")

    printf "\n"

    CHAT+=("$1" "$(trim "$ANSWER")")
}
# Main loop: read a question from the user and stream back the answer.
while true; do
    read -r -e -p "> " QUESTION
    chat_completion "${QUESTION}"
done
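Usage sketch, assuming the script is saved as chat.sh (a hypothetical filename) and a llama.cpp server is already running; only the API_URL override comes from the script itself:

# chat against the default server at http://127.0.0.1:8080
bash chat.sh

# or point the script at a server on another host/port (example address)
API_URL=http://192.168.0.10:8080 bash chat.sh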