#include "ggml.h"

#include "utils.h"

#include <cassert>
#include <cinttypes>
#include <cmath>
#include <cstdio>
#include <cstring>
#include <fstream>
#include <iostream>
#include <map>
#include <string>
#include <vector>

#if defined (__unix__) || (defined (__APPLE__) && defined (__MACH__))
#include <signal.h>
#include <unistd.h>
#elif defined (_WIN32)
#include <signal.h>
#endif

#if defined (_WIN32)
#pragma comment(lib,"kernel32.lib")
extern "C" __declspec(dllimport) void* __stdcall GetStdHandle(unsigned long nStdHandle);
extern "C" __declspec(dllimport) int __stdcall GetConsoleMode(void* hConsoleHandle, unsigned long* lpMode);
extern "C" __declspec(dllimport) int __stdcall SetConsoleMode(void* hConsoleHandle, unsigned long dwMode);
#endif

#define ANSI_COLOR_RED     "\x1b[31m"
#define ANSI_COLOR_GREEN   "\x1b[32m"
#define ANSI_COLOR_YELLOW  "\x1b[33m"
#define ANSI_COLOR_BLUE    "\x1b[34m"
#define ANSI_COLOR_MAGENTA "\x1b[35m"
#define ANSI_COLOR_CYAN    "\x1b[36m"
#define ANSI_COLOR_RESET   "\x1b[0m"
#define ANSI_BOLD          "\x1b[1m"

static const int EOS_TOKEN_ID = 2;
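// (note: token id 2 is the sentencepiece end-of-sequence token "</s>" in the LLaMA vocabulary)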

// determine number of model parts based on the dimension
static const std::map<int, int> LLAMA_N_PARTS = {
    { 4096, 1 },
    { 5120, 2 },
    { 6656, 4 },
    { 8192, 8 },
};
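// (n_embd = 4096/5120/6656/8192 corresponds to the original 7B/13B/30B/65B checkpoints,
//  which were distributed as 1/2/4/8 consolidated.* files respectively)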

// default hparams (LLaMA 7B)
struct llama_hparams {
    int32_t n_vocab = 32000;
    int32_t n_ctx   = 512;   // this is provided as user input?
    int32_t n_embd  = 4096;
    int32_t n_mult  = 256;
    int32_t n_head  = 32;
    int32_t n_layer = 32;
    int32_t n_rot   = 64;
    int32_t f16     = 1;
};

struct llama_layer {
    // normalization
    struct ggml_tensor * attention_norm;

    // attention
    struct ggml_tensor * wq;
    struct ggml_tensor * wk;
    struct ggml_tensor * wv;
    struct ggml_tensor * wo;

    // normalization
    struct ggml_tensor * ffn_norm;

    // ff
    struct ggml_tensor * w1;
    struct ggml_tensor * w2;
    struct ggml_tensor * w3;
};

struct llama_model {
    llama_hparams hparams;

    struct ggml_tensor * tok_embeddings;

    struct ggml_tensor * norm;
    struct ggml_tensor * output;

    std::vector<llama_layer> layers;

    // key + value memory
    struct ggml_tensor * memory_k;
    struct ggml_tensor * memory_v;

    //
    struct ggml_context * ctx;
    std::map<std::string, struct ggml_tensor *> tensors;
};

// load the model's weights from a file
bool llama_model_load(const std::string & fname, llama_model & model, llama_vocab & vocab, int n_ctx, int n_parts, ggml_type memory_type = GGML_TYPE_F32) {
    fprintf(stderr, "%s: loading model from '%s' - please wait ...\n", __func__, fname.c_str());

    std::vector<char> f_buf(1024*1024);

    auto fin = std::ifstream(fname, std::ios::binary);
    fin.rdbuf()->pubsetbuf(f_buf.data(), f_buf.size());
    if (!fin) {
        fprintf(stderr, "%s: failed to open '%s'\n", __func__, fname.c_str());
        return false;
    }

    // verify magic
    {
        uint32_t magic;
        fin.read((char *) &magic, sizeof(magic));
        if (magic == FILE_MAGIC_UNVERSIONED) {
            fprintf(stderr, "%s: invalid model file '%s' (too old, regenerate your model files!)\n",
                    __func__, fname.c_str());
            return false;
        }
        if (magic != FILE_MAGIC) {
            fprintf(stderr, "%s: invalid model file '%s' (bad magic)\n", __func__, fname.c_str());
            return false;
        }

        uint32_t format_version;
        fin.read((char *) &format_version, sizeof(format_version));

        if (format_version != FILE_VERSION) {
            fprintf(stderr, "%s: invalid model file '%s' (unsupported format version %" PRIu32 ", expected %d)\n",
                    __func__, fname.c_str(), format_version, FILE_VERSION);
            return false;
        }
    }

    int n_ff = 0;

    // load hparams
    {
        auto & hparams = model.hparams;

        fin.read((char *) &hparams.n_vocab, sizeof(hparams.n_vocab));
        //fin.read((char *) &hparams.n_ctx,   sizeof(hparams.n_ctx));
        fin.read((char *) &hparams.n_embd,  sizeof(hparams.n_embd));
        fin.read((char *) &hparams.n_mult,  sizeof(hparams.n_mult));
        fin.read((char *) &hparams.n_head,  sizeof(hparams.n_head));
        fin.read((char *) &hparams.n_layer, sizeof(hparams.n_layer));
        fin.read((char *) &hparams.n_rot,   sizeof(hparams.n_rot));
        fin.read((char *) &hparams.f16,     sizeof(hparams.f16));

        hparams.n_ctx = n_ctx;

        n_ff = ((2*(4*hparams.n_embd)/3 + hparams.n_mult - 1)/hparams.n_mult)*hparams.n_mult;
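        // (the feed-forward width is 2/3 of 4*n_embd, rounded up to a multiple of n_mult;
        //  e.g. for the 7B model: 2*(4*4096)/3 = 10922 -> rounded up to 11008)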

        if (n_parts < 1) {
            n_parts = LLAMA_N_PARTS.at(hparams.n_embd);
        }

        // temp warning to tell the user to use "--n_parts"
        if (hparams.f16 == 4 && n_parts != 1) {
            fprintf(stderr, "%s: GPTQ model detected - are you sure n_parts should be %d? we normally expect it to be 1\n", __func__, n_parts);
            fprintf(stderr, "%s: use '--n_parts 1' if necessary\n", __func__);
        }

        fprintf(stderr, "%s: n_vocab = %d\n", __func__, hparams.n_vocab);
        fprintf(stderr, "%s: n_ctx   = %d\n", __func__, hparams.n_ctx);
        fprintf(stderr, "%s: n_embd  = %d\n", __func__, hparams.n_embd);
        fprintf(stderr, "%s: n_mult  = %d\n", __func__, hparams.n_mult);
        fprintf(stderr, "%s: n_head  = %d\n", __func__, hparams.n_head);
        fprintf(stderr, "%s: n_layer = %d\n", __func__, hparams.n_layer);
        fprintf(stderr, "%s: n_rot   = %d\n", __func__, hparams.n_rot);
        fprintf(stderr, "%s: f16     = %d\n", __func__, hparams.f16);
        fprintf(stderr, "%s: n_ff    = %d\n", __func__, n_ff);
        fprintf(stderr, "%s: n_parts = %d\n", __func__, n_parts);
    }

    // load vocab
    {
        std::string word;
        std::vector<char> tmp(64);

        for (int i = 0; i < model.hparams.n_vocab; i++) {
            uint32_t len;
            fin.read((char *) &len, sizeof(len));

            word.resize(len);
            if (len > 0) {
                tmp.resize(len);
                fin.read(tmp.data(), len);
                word.assign(tmp.data(), len);
            } else {
                word.clear();
            }

            float score;
            fin.read((char *) &score, sizeof(score));

            vocab.token_to_id[word] = i;
            vocab.id_to_token[i] = word;
            vocab.score[i] = score;
        }
    }

    // for the big tensors, we have the option to store the data in 16-bit floats or quantized
    // in order to save memory and also to speed up the computation
    // wtype is for per-layer weights, while vtype is for other weights
    ggml_type wtype, vtype;
    switch (model.hparams.f16) {
        case 0: wtype = vtype = GGML_TYPE_F32;  break;
        case 1: wtype = vtype = GGML_TYPE_F16;  break;
        case 2: wtype = vtype = GGML_TYPE_Q4_0; break;
        case 3: wtype = vtype = GGML_TYPE_Q4_1; break;
        case 4: wtype = GGML_TYPE_Q4_1; vtype = GGML_TYPE_F16; break;
        default:
                {
                    fprintf(stderr, "%s: invalid model file '%s' (bad f16 value %d)\n",
                            __func__, fname.c_str(), model.hparams.f16);
                    return false;
                }
    }
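    // (f16 == 4 marks models imported from GPTQ: the per-layer weights are q4_1 while
    //  tok_embeddings and output stay f16, hence the separate wtype/vtype)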

    auto & ctx = model.ctx;

    size_t ctx_size = 0;

    {
        const auto & hparams = model.hparams;

        const int n_embd  = hparams.n_embd;
        const int n_layer = hparams.n_layer;
        const int n_ctx   = hparams.n_ctx;
        const int n_vocab = hparams.n_vocab;
        ctx_size += n_embd*n_vocab*ggml_type_sizef(vtype); // tok_embeddings

        ctx_size += n_embd*ggml_type_sizef(GGML_TYPE_F32); // norm
        ctx_size += n_embd*n_vocab*ggml_type_sizef(vtype); // output

        ctx_size += n_layer*(n_embd*ggml_type_sizef(GGML_TYPE_F32)); // attention_norm

        ctx_size += n_layer*(n_embd*n_embd*ggml_type_sizef(wtype)); // wq
        ctx_size += n_layer*(n_embd*n_embd*ggml_type_sizef(wtype)); // wk
        ctx_size += n_layer*(n_embd*n_embd*ggml_type_sizef(wtype)); // wv
        ctx_size += n_layer*(n_embd*n_embd*ggml_type_sizef(wtype)); // wo

        ctx_size += n_layer*(n_embd*ggml_type_sizef(GGML_TYPE_F32)); // ffn_norm

        ctx_size += n_layer*(n_ff*n_embd*ggml_type_sizef(wtype)); // w1
        ctx_size += n_layer*(n_ff*n_embd*ggml_type_sizef(wtype)); // w2
        ctx_size += n_layer*(n_ff*n_embd*ggml_type_sizef(wtype)); // w3

        ctx_size += n_ctx*n_layer*n_embd*ggml_type_sizef(memory_type); // memory_k
        ctx_size += n_ctx*n_layer*n_embd*ggml_type_sizef(memory_type); // memory_v

        ctx_size += (5 + 10*n_layer)*256; // object overhead

        fprintf(stderr, "%s: ggml ctx size = %6.2f MB\n", __func__, ctx_size/(1024.0*1024.0));
    }

    // create the ggml context
    {
        struct ggml_init_params params = {
            /*.mem_size   =*/ ctx_size,
            /*.mem_buffer =*/ NULL,
        };

        model.ctx = ggml_init(params);
        if (!model.ctx) {
            fprintf(stderr, "%s: ggml_init() failed\n", __func__);
            return false;
        }
    }

    // prepare memory for the weights
    {
        const auto & hparams = model.hparams;

        const int n_embd  = hparams.n_embd;
        const int n_layer = hparams.n_layer;
        const int n_vocab = hparams.n_vocab;

        model.layers.resize(n_layer);
        model.tok_embeddings = ggml_new_tensor_2d(ctx, vtype, n_embd, n_vocab);

        model.norm   = ggml_new_tensor_1d(ctx, GGML_TYPE_F32, n_embd);
        model.output = ggml_new_tensor_2d(ctx, vtype, n_embd, n_vocab);

        // map by name
        model.tensors["tok_embeddings.weight"] = model.tok_embeddings;

        model.tensors["norm.weight"]   = model.norm;
        model.tensors["output.weight"] = model.output;

        for (int i = 0; i < n_layer; ++i) {
            auto & layer = model.layers[i];

            layer.attention_norm = ggml_new_tensor_1d(ctx, GGML_TYPE_F32, n_embd);

            layer.wq = ggml_new_tensor_2d(ctx, wtype, n_embd, n_embd);
            layer.wk = ggml_new_tensor_2d(ctx, wtype, n_embd, n_embd);
            layer.wv = ggml_new_tensor_2d(ctx, wtype, n_embd, n_embd);
            layer.wo = ggml_new_tensor_2d(ctx, wtype, n_embd, n_embd);

            layer.ffn_norm = ggml_new_tensor_1d(ctx, GGML_TYPE_F32, n_embd);

            layer.w1 = ggml_new_tensor_2d(ctx, wtype, n_embd, n_ff);
            layer.w2 = ggml_new_tensor_2d(ctx, wtype, n_ff, n_embd);
            layer.w3 = ggml_new_tensor_2d(ctx, wtype, n_embd, n_ff);

            // map by name
            model.tensors["layers." + std::to_string(i) + ".attention_norm.weight"] = layer.attention_norm;

            model.tensors["layers." + std::to_string(i) + ".attention.wq.weight"] = layer.wq;
            model.tensors["layers." + std::to_string(i) + ".attention.wk.weight"] = layer.wk;
            model.tensors["layers." + std::to_string(i) + ".attention.wv.weight"] = layer.wv;
            model.tensors["layers." + std::to_string(i) + ".attention.wo.weight"] = layer.wo;

            model.tensors["layers." + std::to_string(i) + ".ffn_norm.weight"] = layer.ffn_norm;

            model.tensors["layers." + std::to_string(i) + ".feed_forward.w1.weight"] = layer.w1;
            model.tensors["layers." + std::to_string(i) + ".feed_forward.w2.weight"] = layer.w2;
            model.tensors["layers." + std::to_string(i) + ".feed_forward.w3.weight"] = layer.w3;
        }
    }

    // key + value memory
    {
        const auto & hparams = model.hparams;

        const int n_embd  = hparams.n_embd;
        const int n_layer = hparams.n_layer;
        const int n_ctx   = hparams.n_ctx;

        const int n_mem      = n_layer*n_ctx;
        const int n_elements = n_embd*n_mem;

        model.memory_k = ggml_new_tensor_1d(ctx, memory_type, n_elements);
        model.memory_v = ggml_new_tensor_1d(ctx, memory_type, n_elements);

        const size_t memory_size = ggml_nbytes(model.memory_k) + ggml_nbytes(model.memory_v);

        fprintf(stderr, "%s: memory_size = %8.2f MB, n_mem = %d\n", __func__, memory_size/1024.0/1024.0, n_mem);
    }
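    // (the K and V caches each hold n_layer*n_ctx*n_embd elements, i.e. one n_embd vector
    //  per layer per context position, stored in memory_type (f16 or f32))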

    const size_t file_offset = fin.tellg();

    fin.close();

    std::vector<uint8_t> tmp;

    for (int i = 0; i < n_parts; ++i) {
        const int part_id = i;
        //const int part_id = n_parts - i - 1;

        std::string fname_part = fname;
        if (i > 0) {
            fname_part += "." + std::to_string(i);
        }

        fprintf(stderr, "%s: loading model part %d/%d from '%s'\n", __func__, i+1, n_parts, fname_part.c_str());

        fin = std::ifstream(fname_part, std::ios::binary);
        fin.rdbuf()->pubsetbuf(f_buf.data(), f_buf.size());
        fin.seekg(file_offset);

        // load weights
        {
            int n_tensors = 0;
            size_t total_size = 0;

            fprintf(stderr, "%s: ", __func__);

            while (true) {
                int32_t n_dims;
                int32_t length;
                int32_t ftype;

                fin.read(reinterpret_cast<char *>(&n_dims), sizeof(n_dims));
                fin.read(reinterpret_cast<char *>(&length), sizeof(length));
                fin.read(reinterpret_cast<char *>(&ftype),  sizeof(ftype));

                if (fin.eof()) {
                    break;
                }

                int32_t nelements = 1;
                int32_t ne[2] = { 1, 1 };
                for (int i = 0; i < n_dims; ++i) {
                    fin.read(reinterpret_cast<char *>(&ne[i]), sizeof(ne[i]));
                    nelements *= ne[i];
                }

                std::string name(length, 0);
                fin.read(&name[0], length);

                if (model.tensors.find(name.data()) == model.tensors.end()) {
                    fprintf(stderr, "%s: unknown tensor '%s' in model file\n", __func__, name.data());
                    return false;
                }

                // split_type = 0: split by columns
                // split_type = 1: split by rows
                int split_type = 0;

                // split_type = 0:
                // regex:
                //   - tok_embeddings.*
                //   - layers.*.attention.wo.weight
                //   - layers.*.feed_forward.w2.weight
                //
                // split_type = 1:
                // regex:
                //   - output.*
                //   - layers.*.attention.wq.weight
                //   - layers.*.attention.wk.weight
                //   - layers.*.attention.wv.weight
                //   - layers.*.feed_forward.w1.weight
                //   - layers.*.feed_forward.w3.weight
                if (name.find("tok_embeddings") != std::string::npos) {
                    split_type = 0;
                } else if (name.find("layers") != std::string::npos) {
                    if (name.find("attention.wo.weight") != std::string::npos) {
                        split_type = 0;
                    } else if (name.find("feed_forward.w2.weight") != std::string::npos) {
                        split_type = 0;
                    } else {
                        split_type = 1;
                    }
                } else if (name.find("output") != std::string::npos) {
                    split_type = 1;
                }
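                // (for example, layers.*.attention.wq.weight falls into the else branch and is
                //  split by rows, while attention.wo.weight and feed_forward.w2.weight are split by columns)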

                auto tensor = model.tensors[name.data()];

                if (n_dims == 1) {
                    if (ggml_nelements(tensor) != nelements) {
                        fprintf(stderr, "%s: tensor '%s' has wrong size in model file\n", __func__, name.data());
                        return false;
                    }
                } else {
                    if (ggml_nelements(tensor)/n_parts != nelements) {
                        fprintf(stderr, "%s: tensor '%s' has wrong size in model file\n", __func__, name.data());
                        return false;
                    }
                }

                if (n_dims == 1) {
                    if (tensor->ne[0] != ne[0] || tensor->ne[1] != ne[1]) {
                        fprintf(stderr, "%s: tensor '%s' has wrong shape in model file: got [%d, %d], expected [%d, %d]\n",
                                __func__, name.data(), tensor->ne[0], tensor->ne[1], ne[0], ne[1]);
                        return false;
                    }
                } else {
                    if (split_type == 0) {
                        if (tensor->ne[0]/n_parts != ne[0] || tensor->ne[1] != ne[1]) {
                            fprintf(stderr, "%s: tensor '%s' has wrong shape in model file: got [%d, %d], expected [%d, %d]\n",
                                    __func__, name.data(), tensor->ne[0]/n_parts, tensor->ne[1], ne[0], ne[1]);
                            return false;
                        }
                    } else {
                        if (tensor->ne[0] != ne[0] || tensor->ne[1]/n_parts != ne[1]) {
                            fprintf(stderr, "%s: tensor '%s' has wrong shape in model file: got [%d, %d], expected [%d, %d]\n",
                                    __func__, name.data(), tensor->ne[0], tensor->ne[1]/n_parts, ne[0], ne[1]);
                            return false;
                        }
                    }
                }

                if (0) {
                    static const char * ftype_str[] = { "f32", "f16", "q4_0", "q4_1", };
                    fprintf(stderr, "%24s - [%5d, %5d], type = %6s, split = %d\n", name.data(), ne[0], ne[1], ftype_str[ftype], split_type);
                }

                size_t bpe = 0;

                switch (ftype) {
                    case 0: bpe = ggml_type_size(GGML_TYPE_F32);  break;
                    case 1: bpe = ggml_type_size(GGML_TYPE_F16);  break;
                    case 2: bpe = ggml_type_size(GGML_TYPE_Q4_0); assert(ne[0] % 64 == 0); break;
                    case 3: bpe = ggml_type_size(GGML_TYPE_Q4_1); assert(ne[0] % 64 == 0); break;
                    default:
                            {
                                fprintf(stderr, "%s: unknown ftype %d in model file\n", __func__, ftype);
                                return false;
                            }
                };

                if (n_dims == 1 || n_parts == 1) {
                    if ((nelements*bpe)/ggml_blck_size(tensor->type) != ggml_nbytes(tensor)) {
                        fprintf(stderr, "%s: tensor '%s' has wrong size in model file: got %zu, expected %zu\n",
                                __func__, name.data(), ggml_nbytes(tensor), nelements*bpe);
                        return false;
                    }

                    if (part_id == 0) {
                        fin.read(reinterpret_cast<char *>(tensor->data), ggml_nbytes(tensor));
                    } else {
                        fin.seekg(ggml_nbytes(tensor), std::ios::cur);
                    }

                    total_size += ggml_nbytes(tensor);
                } else {
                    if ((nelements*bpe)/ggml_blck_size(tensor->type) != ggml_nbytes(tensor)/n_parts) {
                        fprintf(stderr, "%s: tensor '%s' has wrong size in model file: got %zu, expected %zu\n",
                                __func__, name.data(), ggml_nbytes(tensor)/n_parts, nelements*bpe);
                        return false;
                    }

                    if (split_type == 0) {
                        const int np0 = ne[0];

                        const size_t row_size = (tensor->ne[0]/ggml_blck_size(tensor->type))*ggml_type_size(tensor->type);
                        assert(row_size == tensor->nb[1]);

                        for (int i1 = 0; i1 < ne[1]; ++i1) {
                            const size_t offset_row = i1*row_size;
                            const size_t offset = offset_row + ((part_id*np0)/ggml_blck_size(tensor->type))*ggml_type_size(tensor->type);
                            fin.read(reinterpret_cast<char *>(tensor->data) + offset, row_size/n_parts);
                        }
                    } else {
                        const int np1 = ne[1];

                        const size_t row_size = (tensor->ne[0]/ggml_blck_size(tensor->type))*ggml_type_size(tensor->type);

                        for (int i1 = 0; i1 < ne[1]; ++i1) {
                            const size_t offset_row = (i1 + part_id*np1)*row_size;
                            fin.read(reinterpret_cast<char *>(tensor->data) + offset_row, row_size);
                        }
                    }

                    total_size += ggml_nbytes(tensor)/n_parts;
                }

                //fprintf(stderr, "%42s - [%5d, %5d], type = %6s, %6.2f MB\n", name.data(), ne[0], ne[1], ftype == 0 ? "float" : "f16", ggml_nbytes(tensor)/1024.0/1024.0);
                if (++n_tensors % 8 == 0) {
                    fprintf(stderr, ".");
                    fflush(stderr);
                }
            }

            fprintf(stderr, " done\n");

            fprintf(stderr, "%s: model size = %8.2f MB / num tensors = %d\n", __func__, total_size/1024.0/1024.0, n_tensors);
        }

        fin.close();
    }

    return true;
}

// evaluate the transformer
//
//   - model:     the model
//   - n_threads: number of threads to use
//   - n_past:    the context size so far
//   - embd_inp:  the embeddings of the tokens in the context
//   - embd_w:    the predicted logits for the next token
//
// The GPT-J model requires about 16MB of memory per input token.
//
bool llama_eval(
        const llama_model & model,
        const int n_threads,
        const int n_past,
        const std::vector<llama_vocab::id> & embd_inp,
              std::vector<float>           & embd_w,
              size_t                       & mem_per_token,
              bool return_all_logits = false) {
    const int N = embd_inp.size();

    const auto & hparams = model.hparams;

    const int n_embd  = hparams.n_embd;
    const int n_layer = hparams.n_layer;
    const int n_ctx   = hparams.n_ctx;
    const int n_head  = hparams.n_head;
    const int n_vocab = hparams.n_vocab;
    const int n_rot   = hparams.n_embd/hparams.n_head;

    // TODO: check if this size scales with n_ctx linearly and remove constant. somehow I feel it wasn't the case
    // static size_t buf_size = hparams.n_ctx*1024*1024;
    static size_t buf_size = 512u*1024*1024;
    static void * buf = malloc(buf_size);

    if (mem_per_token > 0 && mem_per_token*N > buf_size) {
        const size_t buf_size_new = 1.3*(mem_per_token*N); // add 30% to account for ggml object overhead
        //fprintf(stderr, "\n%s: reallocating buffer from %zu to %zu bytes\n", __func__, buf_size, buf_size_new);

        // reallocate
        buf_size = buf_size_new;
        buf = realloc(buf, buf_size);
        if (buf == nullptr) {
            fprintf(stderr, "%s: failed to allocate %zu bytes\n", __func__, buf_size);
            return false;
        }
    }

    struct ggml_init_params params = {
        /*.mem_size   =*/ buf_size,
        /*.mem_buffer =*/ buf,
    };

    struct ggml_context * ctx0 = ggml_init(params);
    ggml_cgraph gf = {};
    gf.n_threads = n_threads;

    struct ggml_tensor * embd = ggml_new_tensor_1d(ctx0, GGML_TYPE_I32, N);
    memcpy(embd->data, embd_inp.data(), N*ggml_element_size(embd));

    struct ggml_tensor * inpL = ggml_get_rows(ctx0, model.tok_embeddings, embd);

    for (int il = 0; il < n_layer; ++il) {
        struct ggml_tensor * inpSA = inpL;

        struct ggml_tensor * cur;

        // norm
        {
            cur = ggml_rms_norm(ctx0, inpL);

            // cur = attention_norm*cur
            cur = ggml_mul(ctx0,
                        ggml_repeat(ctx0, model.layers[il].attention_norm, cur),
                        cur);
        }

        // self-attention
        {
            struct ggml_tensor * Qcur = ggml_mul_mat(ctx0, model.layers[il].wq, cur);
            struct ggml_tensor * Kcur = ggml_mul_mat(ctx0, model.layers[il].wk, cur);
            struct ggml_tensor * Vcur = ggml_mul_mat(ctx0, model.layers[il].wv, cur);

            // store key and value to memory
            if (N >= 1) {
                struct ggml_tensor * k = ggml_view_1d(ctx0, model.memory_k, N*n_embd, (ggml_element_size(model.memory_k)*n_embd)*(il*n_ctx + n_past));
                struct ggml_tensor * v = ggml_view_1d(ctx0, model.memory_v, N*n_embd, (ggml_element_size(model.memory_v)*n_embd)*(il*n_ctx + n_past));

                ggml_build_forward_expand(&gf, ggml_cpy(ctx0, Kcur, k));
                ggml_build_forward_expand(&gf, ggml_cpy(ctx0, Vcur, v));
            }
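            // (each layer owns a contiguous slice of n_ctx positions in memory_k/memory_v;
            //  the offset il*n_ctx + n_past appends the N new vectors after the cached ones)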

            // Q = Qcur.contiguous().view(n_embd/n_head, n_head, N).permute(0, 2, 1, 3)
            struct ggml_tensor * Q =
                ggml_permute(ctx0,
                        ggml_rope(ctx0,
                            ggml_cpy(ctx0,
                                Qcur,
                                ggml_new_tensor_3d(ctx0, GGML_TYPE_F32, n_embd/n_head, n_head, N)),
                            n_past, n_rot, 0),
                        0, 2, 1, 3);

            // K = Kmem.view(n_embd/n_head, n_head, n_past + N).permute(0, 2, 1, 3)
            struct ggml_tensor * K =
                ggml_permute(ctx0,
                        ggml_rope(ctx0,
                            ggml_reshape_3d(ctx0,
                                ggml_view_1d(ctx0, model.memory_k, (n_past + N)*n_embd, il*n_ctx*ggml_element_size(model.memory_k)*n_embd),
                                n_embd/n_head, n_head, n_past + N),
                            n_past, n_rot, 1),
                        0, 2, 1, 3);

            // K * Q
            struct ggml_tensor * KQ = ggml_mul_mat(ctx0, K, Q);

            // KQ_scaled = KQ / sqrt(n_embd/n_head)
            struct ggml_tensor * KQ_scaled =
                ggml_scale(ctx0,
                        KQ,
                        ggml_new_f32(ctx0, 1.0f/sqrt(float(n_embd)/n_head))
                        );

            // KQ_masked = mask_past(KQ_scaled)
            struct ggml_tensor * KQ_masked = ggml_diag_mask_inf(ctx0, KQ_scaled, n_past);

            // KQ = soft_max(KQ_masked)
            struct ggml_tensor * KQ_soft_max = ggml_soft_max(ctx0, KQ_masked);

            // V_trans = Vmem.view(n_embd/n_head, n_head, n_past + N).permute(1, 2, 0, 3).contiguous()
            struct ggml_tensor * V_trans =
                ggml_permute(ctx0,
                        ggml_reshape_3d(ctx0,
                            ggml_view_1d(ctx0, model.memory_v, (n_past + N)*n_embd, il*n_ctx*ggml_element_size(model.memory_v)*n_embd),
                            n_embd/n_head, n_head, n_past + N),
                        1, 2, 0, 3);

            // KQV = transpose(V) * KQ_soft_max
            struct ggml_tensor * KQV = ggml_mul_mat(ctx0, V_trans, KQ_soft_max);

            // KQV_merged = KQV.permute(0, 2, 1, 3)
            struct ggml_tensor * KQV_merged = ggml_permute(ctx0, KQV, 0, 2, 1, 3);

            // cur = KQV_merged.contiguous().view(n_embd, N)
            cur = ggml_cpy(ctx0,
                    KQV_merged,
                    ggml_new_tensor_2d(ctx0, GGML_TYPE_F32, n_embd, N));

            // projection (no bias)
            cur = ggml_mul_mat(ctx0,
                    model.layers[il].wo,
                    cur);
        }

        struct ggml_tensor * inpFF = ggml_add(ctx0, cur, inpSA);

        // feed-forward network
        {
            // norm
            {
                cur = ggml_rms_norm(ctx0, inpFF);

                // cur = ffn_norm*cur
                cur = ggml_mul(ctx0,
                        ggml_repeat(ctx0, model.layers[il].ffn_norm, cur),
                        cur);
            }

            struct ggml_tensor * tmp = ggml_mul_mat(ctx0,
                    model.layers[il].w3,
                    cur);

            cur = ggml_mul_mat(ctx0,
                    model.layers[il].w1,
                    cur);

            // SILU activation
            cur = ggml_silu(ctx0, cur);

            cur = ggml_mul(ctx0, cur, tmp);

            cur = ggml_mul_mat(ctx0,
                    model.layers[il].w2,
                    cur);
        }

        cur = ggml_add(ctx0, cur, inpFF);

        // input for next layer
        inpL = cur;
    }

    // norm
    {
        inpL = ggml_rms_norm(ctx0, inpL);

        // inpL = norm*inpL
        inpL = ggml_mul(ctx0,
                    ggml_repeat(ctx0, model.norm, inpL),
                    inpL);
    }

    // lm_head
    {
        inpL = ggml_mul_mat(ctx0, model.output, inpL);
    }

    // logits -> probs
    //inpL = ggml_soft_max(ctx0, inpL);

    // run the computation
    ggml_build_forward_expand(&gf, inpL);
    ggml_graph_compute (ctx0, &gf);

    //if (n_past%100 == 0) {
    //    ggml_graph_print   (&gf);
    //    ggml_graph_dump_dot(&gf, NULL, "gpt-2.dot");
    //}

    //embd_w.resize(n_vocab*N);
    //memcpy(embd_w.data(), ggml_get_data(inpL), sizeof(float)*n_vocab*N);

    if (return_all_logits) {
        embd_w.resize(n_vocab * N);
        memcpy(embd_w.data(), (float *) ggml_get_data(inpL), sizeof(float)*n_vocab*N);
    } else {
        // return result for just the last token
        embd_w.resize(n_vocab);
        memcpy(embd_w.data(), (float *) ggml_get_data(inpL) + (n_vocab*(N-1)), sizeof(float)*n_vocab);
    }
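    // (the logits are laid out row-major as [N, n_vocab], so the last token's row
    //  starts at offset n_vocab*(N-1))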

    if (mem_per_token == 0) {
        mem_per_token = ggml_used_mem(ctx0)/N;
    }
    //fprintf(stderr, "used_mem = %zu\n", ggml_used_mem(ctx0));

    ggml_free(ctx0);

    return true;
}

std::vector<double> softmax(const std::vector<float>& logits) {
    std::vector<double> probs(logits.size());
    float max_logit = logits[0];
    for (float v : logits) max_logit = std::max(max_logit, v);
    double sum_exp = 0.0;
    for (size_t i = 0; i < logits.size(); i++) {
        // Subtract the maximum logit value from the current logit value for numerical stability
        float logit = logits[i] - max_logit;
        double exp_logit = std::exp(logit);
        sum_exp += exp_logit;
        probs[i] = exp_logit;
    }
    for (size_t i = 0; i < probs.size(); i++) probs[i] /= sum_exp;
    return probs;
}

void perplexity(const llama_vocab &vocab, const llama_model &model, const gpt_params &params, size_t mem_per_token) {
    // Download: https://s3.amazonaws.com/research.metamind.io/wikitext/wikitext-2-raw-v1.zip?ref=salesforce-research
    // Run `./main --perplexity -m models/7B/ggml-model-q4_0.bin -f wiki.test.raw`
    // Output: `perplexity: 13.5106 [114/114]`
    std::vector<llama_vocab::id> tokens = ::llama_tokenize(vocab, params.prompt, true);

    int count = 0;
    double nll = 0.0;
    int seq_count = tokens.size() / params.n_ctx;
    printf("Calculating perplexity over %d chunks\n", seq_count);
    for (int i = 0; i < seq_count; ++i) {
        int start = i * params.n_ctx;
        int end = start + params.n_ctx - 1;
        std::vector<llama_vocab::id> embd(tokens.begin() + start, tokens.begin() + end);
        std::vector<float> logits;
        auto start_t = std::chrono::high_resolution_clock::now();
        if (!llama_eval(model, params.n_threads, 0, embd, logits, mem_per_token, true)) {
            fprintf(stderr, "Failed to predict\n");
            return;
        }
        auto end_t = std::chrono::high_resolution_clock::now();
        if (i == 0) {
            double seconds = std::chrono::duration<double>(end_t - start_t).count();
            printf("%.2f seconds per pass - ETA %.2f hours\n", seconds, (seconds * seq_count) / (60.0*60.0));
        }
        // We get the logits for all the tokens in the context window (params.n_ctx)
        // from llama_eval above. Now, based on https://huggingface.co/docs/transformers/perplexity,
        // calculate the perplexity over the last half of the window (so the model always has
        // some context to predict the token).
        //
        // We rely on the fact that attention in the forward pass only looks at previous
        // tokens here, so the logits returned for each token are an accurate representation
        // of what the model would have predicted at that point.
        //
        // Example: with a context window of 512, we will compute perplexity for each of the
        // last 256 tokens. Then, we split the input up into context window size chunks to
        // process the entire prompt.
        for (int j = params.n_ctx / 2; j < params.n_ctx - 1; ++j) {
            // Calculate probability of next token, given the previous ones.
            int n_vocab = model.hparams.n_vocab;
            std::vector<float> tok_logits(
                logits.begin() + j * n_vocab,
                logits.begin() + (j + 1) * n_vocab);
            double prob = softmax(tok_logits)[tokens[start + j + 1]];
            nll += -std::log(prob);
            ++count;
        }
        // perplexity is e^(average negative log-likelihood)
        printf("[%d]%.4lf,", i + 1, std::exp(nll / count));
        fflush(stdout);
    }
    printf("\n");
}

static bool is_interacting = false;

#if defined (__unix__) || (defined (__APPLE__) && defined (__MACH__)) || defined (_WIN32)
void sigint_handler(int signo) {
    printf(ANSI_COLOR_RESET);
    printf("\n"); // this also force-flushes stdout
    if (signo == SIGINT) {
        if (!is_interacting) {
            is_interacting = true;
        } else {
            _exit(130);
        }
    }
}
#endif

const char * llama_print_system_info(void) {
    static std::string s;

    s  = "";
    s += "AVX = "       + std::to_string(ggml_cpu_has_avx())       + " | ";
    s += "AVX2 = "      + std::to_string(ggml_cpu_has_avx2())      + " | ";
    s += "AVX512 = "    + std::to_string(ggml_cpu_has_avx512())    + " | ";
    s += "FMA = "       + std::to_string(ggml_cpu_has_fma())       + " | ";
    s += "NEON = "      + std::to_string(ggml_cpu_has_neon())      + " | ";
    s += "ARM_FMA = "   + std::to_string(ggml_cpu_has_arm_fma())   + " | ";
    s += "F16C = "      + std::to_string(ggml_cpu_has_f16c())      + " | ";
    s += "FP16_VA = "   + std::to_string(ggml_cpu_has_fp16_va())   + " | ";
    s += "WASM_SIMD = " + std::to_string(ggml_cpu_has_wasm_simd()) + " | ";
    s += "BLAS = "      + std::to_string(ggml_cpu_has_blas())      + " | ";
    s += "SSE3 = "      + std::to_string(ggml_cpu_has_sse3())      + " | ";
    s += "VSX = "       + std::to_string(ggml_cpu_has_vsx())       + " | ";

    return s.c_str();
}
2023-03-10 18:40:58 +00:00
|
|
|
int main(int argc, char ** argv) {
|
2023-03-12 20:15:00 +00:00
|
|
|
ggml_time_init();
|
2023-03-10 18:40:58 +00:00
|
|
|
const int64_t t_main_start_us = ggml_time_us();
|
|
|
|
|
|
|
|
gpt_params params;
|
|
|
|
params.model = "models/llama-7B/ggml-model.bin";
|
|
|
|
|
|
|
|
if (gpt_params_parse(argc, argv, params) == false) {
|
|
|
|
return 1;
|
|
|
|
}
|
2023-03-19 16:37:02 +00:00
|
|
|
|
2023-03-19 00:10:47 +00:00
|
|
|
if (params.n_ctx > 2048) {
|
|
|
|
fprintf(stderr, "%s: warning: model does not support context sizes greater than 2048 tokens (%d specified);"
|
|
|
|
"expect poor results\n", __func__, params.n_ctx);
|
|
|
|
}
|
2023-03-10 18:40:58 +00:00
|
|
|
|
|
|
|
if (params.seed < 0) {
|
|
|
|
params.seed = time(NULL);
|
|
|
|
}
|
|
|
|
|
2023-03-13 16:39:56 +00:00
|
|
|
fprintf(stderr, "%s: seed = %d\n", __func__, params.seed);
|
2023-03-10 18:40:58 +00:00
|
|
|
|
|
|
|
std::mt19937 rng(params.seed);
|
2023-03-19 18:36:19 +00:00
|
|
|
if (params.random_prompt) {
|
2023-03-10 18:40:58 +00:00
|
|
|
params.prompt = gpt_random_prompt(rng);
|
|
|
|
}
|
|
|
|
|
2023-03-10 21:46:39 +00:00
|
|
|
// params.prompt = R"(// this function checks if the number n is prime
|
|
|
|
//bool is_prime(int n) {)";
|
|
|
|
|
2023-03-10 18:40:58 +00:00
|
|
|
int64_t t_load_us = 0;
|
|
|
|
|
2023-03-21 15:29:41 +00:00
|
|
|
llama_vocab vocab;
|
2023-03-10 18:40:58 +00:00
|
|
|
llama_model model;
|
|
|
|
|
|
|
|
// load the model
|
|
|
|
{
|
2023-03-19 17:57:00 +00:00
|
|
|
const ggml_type memory_type = params.memory_f16 ? GGML_TYPE_F16 : GGML_TYPE_F32;
|
2023-03-10 18:40:58 +00:00
|
|
|
const int64_t t_start_us = ggml_time_us();
|
2023-03-21 15:42:43 +00:00
|
|
|
if (!llama_model_load(params.model, model, vocab, params.n_ctx, params.n_parts, memory_type)) {
|
2023-03-10 18:40:58 +00:00
|
|
|
fprintf(stderr, "%s: failed to load model from '%s'\n", __func__, params.model.c_str());
|
|
|
|
return 1;
|
|
|
|
}
|
|
|
|
|
|
|
|
t_load_us = ggml_time_us() - t_start_us;
|
|
|
|
}
|
|
|
|
|
2023-03-13 17:15:08 +00:00
|
|
|
// print system information
|
|
|
|
{
|
|
|
|
fprintf(stderr, "\n");
|
|
|
|
fprintf(stderr, "system_info: n_threads = %d / %d | %s\n",
|
|
|
|
params.n_threads, std::thread::hardware_concurrency(), llama_print_system_info());
|
|
|
|
}
|
|
|
|
|
2023-03-21 16:27:42 +00:00
|
|
|
std::vector<float> logits;
|
|
|
|
|
|
|
|
// determine the required inference memory per token:
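    // (a short dummy eval over a few token ids is used purely to measure
    // mem_per_token before any real evaluation happens)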
|
|
|
|
size_t mem_per_token = 0;
|
|
|
|
llama_eval(model, params.n_threads, 0, { 0, 1, 2, 3 }, logits, mem_per_token);
|
|
|
|
|
|
|
|
if (params.perplexity) {
|
|
|
|
perplexity(vocab, model, params, mem_per_token);
|
|
|
|
exit(0);
|
|
|
|
}
|
|
|
|
|
2023-03-10 18:40:58 +00:00
|
|
|
int n_past = 0;
|
|
|
|
|
|
|
|
int64_t t_sample_us = 0;
|
|
|
|
int64_t t_predict_us = 0;
|
|
|
|
|
2023-03-17 20:05:58 +00:00
|
|
|
    // Add a space in front of the first character to match the original LLaMA tokenizer behavior
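    // (the reference LLaMA tokenizer is a SentencePiece model that treats a leading
    // space as part of the first word, so one is prepended here to match)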
|
|
|
|
params.prompt.insert(0, 1, ' ');
|
2023-03-10 18:40:58 +00:00
|
|
|
// tokenize the prompt
|
2023-03-21 15:29:41 +00:00
|
|
|
std::vector<llama_vocab::id> embd_inp = ::llama_tokenize(vocab, params.prompt, true);
|
2023-03-10 18:40:58 +00:00
|
|
|
|
|
|
|
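    // clamp n_predict so that the prompt plus the generated tokens fit in the context window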
params.n_predict = std::min(params.n_predict, model.hparams.n_ctx - (int) embd_inp.size());
|
|
|
|
|
2023-03-19 16:37:02 +00:00
|
|
|
// prefix & suffix for instruct mode
|
2023-03-21 15:29:41 +00:00
|
|
|
const std::vector<llama_vocab::id> inp_pfx = ::llama_tokenize(vocab, "\n\n### Instruction:\n\n", true);
|
|
|
|
const std::vector<llama_vocab::id> inp_sfx = ::llama_tokenize(vocab, "\n\n### Response:\n\n", false);
|
2023-03-19 16:37:02 +00:00
|
|
|
|
|
|
|
// in instruct mode, we inject a prefix and a suffix to each input by the user
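    // roughly, a user line such as "What is 2+2?" ends up in the context as
    //   "\n\n### Instruction:\n\nWhat is 2+2?\n" "\n\n### Response:\n\n"
    // (illustrative only; the exact whitespace depends on inp_pfx/inp_sfx above
    // and the newline appended to each input line)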
|
|
|
|
if (params.instruct) {
|
|
|
|
params.interactive = true;
|
2023-03-19 19:33:06 +00:00
|
|
|
params.antiprompt.push_back("### Instruction:\n\n");
|
2023-03-19 16:37:02 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
// enable interactive mode if reverse prompt is specified
|
2023-03-21 16:04:43 +00:00
|
|
|
if (params.antiprompt.size() != 0) {
|
2023-03-19 16:37:02 +00:00
|
|
|
params.interactive = true;
|
|
|
|
}
|
|
|
|
|
2023-03-13 16:39:56 +00:00
|
|
|
fprintf(stderr, "\n");
|
|
|
|
fprintf(stderr, "%s: prompt: '%s'\n", __func__, params.prompt.c_str());
|
|
|
|
fprintf(stderr, "%s: number of tokens in prompt = %zu\n", __func__, embd_inp.size());
|
2023-03-10 18:40:58 +00:00
|
|
|
for (int i = 0; i < (int) embd_inp.size(); i++) {
|
2023-03-13 16:39:56 +00:00
|
|
|
fprintf(stderr, "%6d -> '%s'\n", embd_inp[i], vocab.id_to_token.at(embd_inp[i]).c_str());
|
2023-03-10 18:40:58 +00:00
|
|
|
}
|
2023-03-13 16:39:56 +00:00
|
|
|
fprintf(stderr, "\n");
|
2023-03-12 21:13:28 +00:00
|
|
|
if (params.interactive) {
|
2023-03-13 03:08:01 +00:00
|
|
|
#if defined (__unix__) || (defined (__APPLE__) && defined (__MACH__))
|
2023-03-12 21:13:28 +00:00
|
|
|
struct sigaction sigint_action;
|
|
|
|
sigint_action.sa_handler = sigint_handler;
|
|
|
|
sigemptyset (&sigint_action.sa_mask);
|
2023-03-13 17:15:08 +00:00
|
|
|
sigint_action.sa_flags = 0;
|
2023-03-12 21:13:28 +00:00
|
|
|
sigaction(SIGINT, &sigint_action, NULL);
|
2023-03-15 19:56:24 +00:00
|
|
|
#elif defined (_WIN32)
|
|
|
|
signal(SIGINT, sigint_handler);
|
2023-03-13 03:08:01 +00:00
|
|
|
#endif
|
2023-03-12 21:13:28 +00:00
|
|
|
|
2023-03-13 16:39:56 +00:00
|
|
|
fprintf(stderr, "%s: interactive mode on.\n", __func__);
|
2023-03-12 21:13:28 +00:00
|
|
|
|
2023-03-21 16:04:43 +00:00
|
|
|
        if (params.antiprompt.size()) {
|
|
|
|
for (auto antiprompt : params.antiprompt) {
|
|
|
|
fprintf(stderr, "Reverse prompt: '%s'\n", antiprompt.c_str());
|
2023-03-12 21:13:28 +00:00
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
2023-03-13 16:39:56 +00:00
|
|
|
fprintf(stderr, "sampling parameters: temp = %f, top_k = %d, top_p = %f, repeat_last_n = %i, repeat_penalty = %f\n", params.temp, params.top_k, params.top_p, params.repeat_last_n, params.repeat_penalty);
|
|
|
|
fprintf(stderr, "\n\n");
|
2023-03-10 18:40:58 +00:00
|
|
|
|
2023-03-21 15:29:41 +00:00
|
|
|
std::vector<llama_vocab::id> embd;
|
2023-03-10 18:40:58 +00:00
|
|
|
|
2023-03-12 09:27:42 +00:00
|
|
|
int last_n_size = params.repeat_last_n;
|
2023-03-21 15:29:41 +00:00
|
|
|
std::vector<llama_vocab::id> last_n_tokens(last_n_size);
|
2023-03-12 09:27:42 +00:00
|
|
|
std::fill(last_n_tokens.begin(), last_n_tokens.end(), 0);
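    // last_n_tokens acts as a sliding window of the most recent token ids
    // (size repeat_last_n); it starts zero-filled and is fed to the sampler
    // below to apply the repetition penalty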
|
|
|
|
|
2023-03-12 21:13:28 +00:00
|
|
|
if (params.interactive) {
|
2023-03-13 16:39:56 +00:00
|
|
|
fprintf(stderr, "== Running in interactive mode. ==\n"
|
2023-03-15 19:56:24 +00:00
|
|
|
#if defined (__unix__) || (defined (__APPLE__) && defined (__MACH__)) || defined (_WIN32)
|
2023-03-12 21:13:28 +00:00
|
|
|
" - Press Ctrl+C to interject at any time.\n"
|
2023-03-13 03:08:01 +00:00
|
|
|
#endif
|
2023-03-12 21:13:28 +00:00
|
|
|
" - Press Return to return control to LLaMa.\n"
|
2023-03-19 16:37:02 +00:00
|
|
|
" - If you want to submit another line, end your input in '\\'.\n\n");
|
|
|
|
is_interacting = true;
|
2023-03-12 21:13:28 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
int input_consumed = 0;
|
|
|
|
bool input_noecho = false;
|
|
|
|
|
2023-03-19 16:37:02 +00:00
|
|
|
int remaining_tokens = params.n_predict;
|
2023-03-12 21:13:28 +00:00
|
|
|
|
2023-03-12 22:07:34 +00:00
|
|
|
// set the color for the prompt which will be output initially
|
2023-03-12 21:13:28 +00:00
|
|
|
if (params.use_color) {
|
2023-03-21 16:14:46 +00:00
|
|
|
#if defined (_WIN32)
|
|
|
|
// Enable ANSI colors on Windows 10+
|
|
|
|
unsigned long dwMode = 0;
|
|
|
|
void* hConOut = GetStdHandle((unsigned long)-11); // STD_OUTPUT_HANDLE (-11)
|
|
|
|
if (hConOut && hConOut != (void*)-1 && GetConsoleMode(hConOut, &dwMode) && !(dwMode & 0x4)) {
|
|
|
|
SetConsoleMode(hConOut, dwMode | 0x4); // ENABLE_VIRTUAL_TERMINAL_PROCESSING (0x4)
|
|
|
|
}
|
|
|
|
#endif
|
2023-03-12 21:13:28 +00:00
|
|
|
printf(ANSI_COLOR_YELLOW);
|
|
|
|
}
|
|
|
|
|
2023-03-19 16:37:02 +00:00
|
|
|
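    // main generation loop:
    //   1. evaluate any pending tokens in embd to advance the model state
    //   2. either sample the next token (once the prompt is fully consumed) or
    //      forward more prompt/user tokens in batches of params.n_batch
    //   3. echo new tokens, check reverse prompts and the end-of-text token,
    //      and hand control back to the user when running interactively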
while (remaining_tokens > 0 || params.interactive) {
|
2023-03-10 18:40:58 +00:00
|
|
|
// predict
|
|
|
|
if (embd.size() > 0) {
|
|
|
|
const int64_t t_start_us = ggml_time_us();
|
|
|
|
|
|
|
|
if (!llama_eval(model, params.n_threads, n_past, embd, logits, mem_per_token)) {
|
2023-03-13 16:39:56 +00:00
|
|
|
fprintf(stderr, "Failed to predict\n");
|
2023-03-10 18:40:58 +00:00
|
|
|
return 1;
|
|
|
|
}
|
|
|
|
|
|
|
|
t_predict_us += ggml_time_us() - t_start_us;
|
|
|
|
}
|
|
|
|
|
|
|
|
n_past += embd.size();
|
|
|
|
embd.clear();
|
|
|
|
|
2023-03-21 15:59:16 +00:00
|
|
|
if ((int) embd_inp.size() <= input_consumed) {
|
2023-03-12 22:07:34 +00:00
|
|
|
// out of user input, sample next token
|
2023-03-12 20:23:15 +00:00
|
|
|
const float top_k = params.top_k;
|
2023-03-10 18:40:58 +00:00
|
|
|
const float top_p = params.top_p;
|
|
|
|
const float temp = params.temp;
|
2023-03-12 09:27:42 +00:00
|
|
|
const float repeat_penalty = params.repeat_penalty;
|
2023-03-10 18:40:58 +00:00
|
|
|
|
|
|
|
const int n_vocab = model.hparams.n_vocab;
|
|
|
|
|
2023-03-21 15:29:41 +00:00
|
|
|
llama_vocab::id id = 0;
|
2023-03-10 18:40:58 +00:00
|
|
|
|
|
|
|
{
|
|
|
|
const int64_t t_start_sample_us = ggml_time_us();
|
|
|
|
|
2023-03-19 18:22:48 +00:00
|
|
|
if (params.ignore_eos) {
|
|
|
|
                    // set the logit of the eos token to zero to discourage sampling it
                    // (note: not a hard mask, since other logits may be negative)
|
|
|
|
logits[logits.size() - n_vocab + EOS_TOKEN_ID] = 0;
|
|
|
|
}
|
|
|
|
|
2023-03-12 20:23:15 +00:00
|
|
|
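                // note (based on this call site, not on the helper's internals):
                // llama_sample_top_p_top_k is expected to apply the repetition
                // penalty over last_n_tokens and restrict the candidates to the
                // top_k / top_p set before drawing a token at temperature temp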
id = llama_sample_top_p_top_k(vocab, logits.data() + (logits.size() - n_vocab), last_n_tokens, repeat_penalty, top_k, top_p, temp, rng);
|
2023-03-12 09:27:42 +00:00
|
|
|
|
|
|
|
last_n_tokens.erase(last_n_tokens.begin());
|
|
|
|
last_n_tokens.push_back(id);
|
2023-03-10 18:40:58 +00:00
|
|
|
|
|
|
|
t_sample_us += ggml_time_us() - t_start_sample_us;
|
|
|
|
}
|
|
|
|
|
|
|
|
// add it to the context
|
|
|
|
embd.push_back(id);
|
2023-03-12 21:13:28 +00:00
|
|
|
|
|
|
|
// echo this to console
|
|
|
|
input_noecho = false;
|
|
|
|
|
|
|
|
// decrement remaining sampling budget
|
|
|
|
--remaining_tokens;
|
2023-03-10 18:40:58 +00:00
|
|
|
} else {
|
2023-03-12 22:07:34 +00:00
|
|
|
// some user input remains from prompt or interaction, forward it to processing
|
2023-03-21 15:59:16 +00:00
|
|
|
while ((int) embd_inp.size() > input_consumed) {
|
2023-03-12 21:13:28 +00:00
|
|
|
embd.push_back(embd_inp[input_consumed]);
|
2023-03-12 09:27:42 +00:00
|
|
|
last_n_tokens.erase(last_n_tokens.begin());
|
2023-03-12 21:13:28 +00:00
|
|
|
last_n_tokens.push_back(embd_inp[input_consumed]);
|
|
|
|
++input_consumed;
|
2023-03-19 17:46:32 +00:00
|
|
|
if ((int) embd.size() >= params.n_batch) {
|
2023-03-10 18:40:58 +00:00
|
|
|
break;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
// display text
|
2023-03-12 21:13:28 +00:00
|
|
|
if (!input_noecho) {
|
|
|
|
for (auto id : embd) {
|
|
|
|
printf("%s", vocab.id_to_token[id].c_str());
|
|
|
|
}
|
|
|
|
fflush(stdout);
|
|
|
|
}
|
2023-03-19 19:44:30 +00:00
|
|
|
        // reset color to default if there is no pending user input
|
|
|
|
if (!input_noecho && params.use_color && (int)embd_inp.size() == input_consumed) {
|
|
|
|
printf(ANSI_COLOR_RESET);
|
|
|
|
}
|
2023-03-12 21:13:28 +00:00
|
|
|
|
|
|
|
// in interactive mode, and not currently processing queued inputs;
|
|
|
|
// check if we should prompt the user for more
|
2023-03-21 15:59:16 +00:00
|
|
|
if (params.interactive && (int) embd_inp.size() <= input_consumed) {
|
2023-03-12 21:13:28 +00:00
|
|
|
// check for reverse prompt
|
2023-03-21 16:04:43 +00:00
|
|
|
std::string last_output;
|
|
|
|
for (auto id : last_n_tokens) {
|
|
|
|
last_output += vocab.id_to_token[id];
|
|
|
|
}
|
|
|
|
|
|
|
|
// Check if each of the reverse prompts appears at the end of the output.
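            // (when last_output is shorter than an antiprompt, the unsigned start
            // position below wraps past the end and find() simply returns npos)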
|
|
|
|
for (std::string antiprompt : params.antiprompt) {
|
|
|
|
if (last_output.find(antiprompt.c_str(), last_output.length() - antiprompt.length(), antiprompt.length()) != std::string::npos) {
|
2023-03-19 19:33:06 +00:00
|
|
|
is_interacting = true;
|
|
|
|
break;
|
|
|
|
}
|
2023-03-12 21:13:28 +00:00
|
|
|
}
|
|
|
|
if (is_interacting) {
|
2023-03-19 16:37:02 +00:00
|
|
|
if (params.instruct) {
|
|
|
|
input_consumed = embd_inp.size();
|
|
|
|
embd_inp.insert(embd_inp.end(), inp_pfx.begin(), inp_pfx.end());
|
|
|
|
|
|
|
|
printf("\n> ");
|
|
|
|
}
|
|
|
|
|
2023-03-13 17:15:08 +00:00
|
|
|
// currently being interactive
|
2023-03-19 19:44:30 +00:00
|
|
|
if (params.use_color) printf(ANSI_BOLD ANSI_COLOR_GREEN);
|
|
|
|
std::string buffer;
|
|
|
|
std::string line;
|
2023-03-19 16:37:02 +00:00
|
|
|
bool another_line = true;
|
2023-03-19 19:44:30 +00:00
|
|
|
do {
|
|
|
|
std::getline(std::cin, line);
|
|
|
|
if (line.empty() || line.back() != '\\') {
|
2023-03-12 21:13:28 +00:00
|
|
|
another_line = false;
|
2023-03-19 19:44:30 +00:00
|
|
|
} else {
|
|
|
|
                    line.pop_back(); // Remove the continuation character
|
2023-03-19 16:37:02 +00:00
|
|
|
}
|
2023-03-19 19:44:30 +00:00
|
|
|
buffer += line + '\n'; // Append the line to the result
|
|
|
|
} while (another_line);
|
|
|
|
if (params.use_color) printf(ANSI_COLOR_RESET);
|
2023-03-19 16:37:02 +00:00
|
|
|
|
2023-03-21 15:29:41 +00:00
|
|
|
std::vector<llama_vocab::id> line_inp = ::llama_tokenize(vocab, buffer, false);
|
2023-03-19 19:44:30 +00:00
|
|
|
embd_inp.insert(embd_inp.end(), line_inp.begin(), line_inp.end());
|
2023-03-12 23:35:51 +00:00
|
|
|
|
2023-03-19 19:44:30 +00:00
|
|
|
if (params.instruct) {
|
|
|
|
embd_inp.insert(embd_inp.end(), inp_sfx.begin(), inp_sfx.end());
|
2023-03-12 21:13:28 +00:00
|
|
|
}
|
|
|
|
|
2023-03-19 19:44:30 +00:00
|
|
|
remaining_tokens -= line_inp.size();
|
|
|
|
|
|
|
|
input_noecho = true; // do not echo this again
|
2023-03-12 21:13:28 +00:00
|
|
|
}
|
2023-03-19 19:44:30 +00:00
|
|
|
is_interacting = false;
|
2023-03-10 18:40:58 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
// end of text token
|
2023-03-19 18:22:48 +00:00
|
|
|
if (embd.back() == EOS_TOKEN_ID) {
|
2023-03-19 16:37:02 +00:00
|
|
|
if (params.interactive) {
|
|
|
|
is_interacting = true;
|
|
|
|
} else {
|
|
|
|
fprintf(stderr, " [end of text]\n");
|
|
|
|
break;
|
|
|
|
}
|
2023-03-10 18:40:58 +00:00
|
|
|
}
|
2023-03-19 18:31:17 +00:00
|
|
|
|
|
|
|
// In interactive mode, respect the maximum number of tokens and drop back to user input when reached.
|
|
|
|
if (params.interactive && remaining_tokens <= 0) {
|
|
|
|
remaining_tokens = params.n_predict;
|
|
|
|
is_interacting = true;
|
|
|
|
}
|
2023-03-10 18:40:58 +00:00
|
|
|
}
|
|
|
|
|
2023-03-15 19:56:24 +00:00
|
|
|
#if defined (_WIN32)
|
|
|
|
signal(SIGINT, SIG_DFL);
|
|
|
|
#endif
|
2023-03-12 21:13:28 +00:00
|
|
|
|
2023-03-10 18:40:58 +00:00
|
|
|
// report timing
|
|
|
|
{
|
|
|
|
const int64_t t_main_end_us = ggml_time_us();
|
|
|
|
|
2023-03-13 16:39:56 +00:00
|
|
|
fprintf(stderr, "\n\n");
|
|
|
|
fprintf(stderr, "%s: mem per token = %8zu bytes\n", __func__, mem_per_token);
|
|
|
|
fprintf(stderr, "%s: load time = %8.2f ms\n", __func__, t_load_us/1000.0f);
|
|
|
|
fprintf(stderr, "%s: sample time = %8.2f ms\n", __func__, t_sample_us/1000.0f);
|
|
|
|
fprintf(stderr, "%s: predict time = %8.2f ms / %.2f ms per token\n", __func__, t_predict_us/1000.0f, t_predict_us/1000.0f/n_past);
|
|
|
|
fprintf(stderr, "%s: total time = %8.2f ms\n", __func__, (t_main_end_us - t_main_start_us)/1000.0f);
|
2023-03-10 18:40:58 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
ggml_free(model.ctx);
|
|
|
|
|
2023-03-15 19:39:38 +00:00
|
|
|
if (params.use_color) {
|
|
|
|
printf(ANSI_COLOR_RESET);
|
|
|
|
}
|
|
|
|
|
2023-03-10 18:40:58 +00:00
|
|
|
return 0;
|
|
|
|
}
|