#include "common.h"
#include "llama.h"
#include "build-info.h"

#include <cstdio>
#include <ctime>
#include <random>
#include <thread>
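
// Computes and prints the embedding vector for a single prompt.
// Typical usage (flags parsed by gpt_params_parse from common.h):
//
//   ./embedding -m models/7B/ggml-model.bin -p "some text to embed"
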
int main(int argc, char ** argv) {
    gpt_params params;

    if (gpt_params_parse(argc, argv, params) == false) {
        return 1;
    }
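
    // this example always runs in embedding mode, regardless of other flags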
    params.embedding = true;

    if (params.n_ctx > 2048) {
        fprintf(stderr, "%s: warning: model does not support context sizes greater than 2048 tokens (%d specified); "
                "expect poor results\n", __func__, params.n_ctx);
    }

    fprintf(stderr, "%s: build = %d (%s)\n", __func__, BUILD_NUMBER, BUILD_COMMIT);

    if (params.seed < 0) {
        params.seed = time(NULL);
    }

    fprintf(stderr, "%s: seed = %d\n", __func__, params.seed);
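
    // the RNG is only used to generate a prompt when --random-prompt is given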
    std::mt19937 rng(params.seed);
    if (params.random_prompt) {
        params.prompt = gpt_random_prompt(rng);
    }

    llama_context * ctx;

    // load the model
    ctx = llama_init_from_gpt_params(params);
    if (ctx == NULL) {
        fprintf(stderr, "%s: error: unable to load model\n", __func__);
        return 1;
    }

    // print system information
    {
        fprintf(stderr, "\n");
        fprintf(stderr, "system_info: n_threads = %d / %d | %s\n",
                params.n_threads, std::thread::hardware_concurrency(), llama_print_system_info());
    }
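
    // n_past counts the tokens the model has already evaluated; none so far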
    int n_past = 0;

    // Add a space in front of the first character to match OG llama tokenizer behavior
    params.prompt.insert(0, 1, ' ');

    // tokenize the prompt
    auto embd_inp = ::llama_tokenize(ctx, params.prompt, true);
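
    // optionally dump the tokenization for debugging (--verbose-prompt)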
    if (params.verbose_prompt) {
        fprintf(stderr, "\n");
        fprintf(stderr, "%s: prompt: '%s'\n", __func__, params.prompt.c_str());
        fprintf(stderr, "%s: number of tokens in prompt = %zu\n", __func__, embd_inp.size());
        for (int i = 0; i < (int) embd_inp.size(); i++) {
            fprintf(stderr, "%6d -> '%s'\n", embd_inp[i], llama_token_to_str(ctx, embd_inp[i]));
        }
        fprintf(stderr, "\n");
    }

    if (params.embedding) {
        if (embd_inp.size() > 0) {
            // a single eval over the whole prompt makes the embeddings available
            if (llama_eval(ctx, embd_inp.data(), embd_inp.size(), n_past, params.n_threads)) {
                fprintf(stderr, "%s : failed to eval\n", __func__);
                return 1;
            }
        }
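
        // fetch the n_embd-dimensional embedding vector and print it space-separated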
        const int n_embd = llama_n_embd(ctx);
        const auto embeddings = llama_get_embeddings(ctx);

        for (int i = 0; i < n_embd; i++) {
            printf("%f ", embeddings[i]);
        }
        printf("\n");
    }

    llama_print_timings(ctx);
    llama_free(ctx);

    return 0;
}