Mirror of https://git.adityakumar.xyz/llama.cpp.git, synced 2024-11-09 15:29:43 +00:00
convert.py : Update to support 70B HF format model files (#2427)
* convert.py : fix llama 2 70b conversion from Huggingface
This commit is contained in:
parent 1a941869cb
commit 7c529cede6
1 changed file with 52 additions and 44 deletions
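In short: LLaMA v2 70B uses grouped-query attention, so its Hugging Face checkpoints store the K and V projections with num_key_value_heads (8) heads instead of num_attention_heads (64). The patch threads an optional n_kv_head through Params and every permute() path so the K projection is reshaped with the correct head count, and it widens the find_n_mult() search range, since the 70B feed-forward size is not reachable with any multiplier ≤ 256.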
convert.py (Executable file → Normal file)
@@ -133,7 +133,7 @@ TENSORS_SET = set(TENSORS_LIST)
 
 def find_n_mult(n_ff: int, n_embd: int) -> int:
     # hardcoded magic range
-    for n_mult in range(256, 1, -1):
+    for n_mult in range(8192, 1, -1):
         calc_ff = (((8*n_embd) // 3 + n_mult - 1) // n_mult)*n_mult
         if calc_ff == n_ff:
             return n_mult
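To see why the old cap of 256 no longer works: with the 70B dimensions n_embd = 8192 and n_ff = 28672 (values from the published Llama-2-70B config), the smallest multiplier that reproduces n_ff is 7168. A standalone check of the loop above:

n_embd, n_ff = 8192, 28672  # LLaMA v2 70B dimensions (illustrative)

for n_mult in range(8192, 1, -1):
    calc_ff = (((8 * n_embd) // 3 + n_mult - 1) // n_mult) * n_mult
    if calc_ff == n_ff:
        print(n_mult)  # 7168 -- unreachable with the old range(256, 1, -1)
        break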
@@ -146,6 +146,7 @@ class Params:
     n_mult: int
     n_head: int
     n_layer: int
+    n_kv_head: Optional[int]  # This parameter is only used for Llama 2
 
     @staticmethod
     def guessed(model: 'LazyModel') -> 'Params':
@@ -172,6 +173,7 @@ class Params:
             n_mult = 256,
             n_head = n_head,
             n_layer = n_layer,
+            n_kv_head = None,
         )
 
     @staticmethod
@@ -183,6 +185,7 @@ class Params:
         n_head = config["num_attention_heads"];
         n_layer = config["num_hidden_layers"];
         n_ff = config["intermediate_size"];
+        n_kv_head = config.get("num_key_value_heads")
 
         n_mult = find_n_mult(n_ff, n_embd);
 
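For reference, the relevant fields of the 70B config.json look roughly like this (values quoted from the published Llama-2-70B config, so treat as illustrative). Note the .get() above: older configs lack the key, so they yield n_kv_head = None.

config = {
    "hidden_size": 8192,         # n_embd
    "intermediate_size": 28672,  # n_ff
    "num_attention_heads": 64,   # n_head
    "num_hidden_layers": 80,     # n_layer
    "num_key_value_heads": 8,    # n_kv_head; absent in pre-GQA configs
}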
@@ -192,6 +195,7 @@ class Params:
             n_mult = n_mult,
             n_head = n_head,
             n_layer = n_layer,
+            n_kv_head = n_kv_head,
         )
 
     # LLaMA v2 70B params.json
@@ -215,6 +219,7 @@ class Params:
             n_mult = n_mult,
             n_head = n_head,
             n_layer = n_layer,
+            n_kv_head = None,
         )
 
     @staticmethod
@@ -317,7 +322,9 @@ class GGMLVocab:
 Vocab = Union[SentencePieceVocab, GGMLVocab]
 
 
-def permute(weights: NDArray, n_head: int) -> NDArray:
+def permute(weights: NDArray, n_head: int, n_kv_head: Optional[int] = None) -> NDArray:
+    if n_kv_head is not None and n_head != n_kv_head:
+        n_head //= n_kv_head
     return (weights.reshape(n_head, 2, weights.shape[0] // n_head // 2, *weights.shape[1:])
             .swapaxes(1, 2)
             .reshape(weights.shape))
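A minimal NumPy sketch of the new branch, using the 70B head counts but a made-up head_dim of 2 to keep the arrays small. With n_head = 64 and n_kv_head = 8, the K weight is permuted as 64 // 8 = 8 heads, which for the 70B happens to equal the 8 KV heads the tensor actually contains:

import numpy as np

def permute(weights, n_head, n_kv_head=None):
    # mirror of the patched permute(): interleave the two rotary
    # half-blocks within each head (HF layout -> GGML layout)
    if n_kv_head is not None and n_head != n_kv_head:
        n_head //= n_kv_head
    return (weights.reshape(n_head, 2, weights.shape[0] // n_head // 2, *weights.shape[1:])
            .swapaxes(1, 2)
            .reshape(weights.shape))

# GQA k_proj has n_kv_head * head_dim rows: 8 * 2 = 16 with toy head_dim = 2
wk = np.arange(16 * 16).reshape(16, 16)
out = permute(wk, n_head=64, n_kv_head=8)  # internally permuted as 8 heads
assert out.shape == wk.shape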
@@ -368,7 +375,7 @@ class Tensor(metaclass=ABCMeta):
     @abstractmethod
     def astype(self, data_type: DataType) -> 'Tensor': ...
     @abstractmethod
-    def permute(self, n_head: int) -> 'Tensor': ...
+    def permute(self, n_head: int, n_kv_head: Optional[int] = None) -> 'Tensor': ...
     @abstractmethod
     def permute_part(self, n_part: int, n_head: int) -> 'UnquantizedTensor': ...
     @abstractmethod
@@ -406,8 +413,8 @@ class UnquantizedTensor(Tensor):
         r = self.ndarray.shape[0] // 3
         return UnquantizedTensor(self.ndarray[r * n_part : r * n_part + r, ...])
 
-    def permute(self, n_head: int) -> 'UnquantizedTensor':
-        return UnquantizedTensor(permute(self.ndarray, n_head))
+    def permute(self, n_head: int, n_kv_head: Optional[int] = None) -> 'UnquantizedTensor':
+        return UnquantizedTensor(permute(self.ndarray, n_head, n_kv_head))
 
 
 def load_unquantized(lazy_tensor: 'LazyTensor', expected_dtype: Any = None, convert: bool = False) -> NDArray:
@@ -455,26 +462,27 @@ class GGMLQuantizedTensor(Tensor):
     def to_ggml(self) -> 'GGMLQuantizedTensor':
         return self
 
-    def permute(self, n_head: int) -> 'GGMLQuantizedTensor':
-        return GGMLQuantizedTensor(permute(self.ndarray, n_head), self.shape, self.data_type)
+    def permute(self, n_head: int, n_kv_head: Optional[int] = None) -> 'GGMLQuantizedTensor':
+        return GGMLQuantizedTensor(permute(self.ndarray, n_head, n_kv_head), self.shape, self.data_type)
 
 
 GGMLCompatibleTensor = Union[UnquantizedTensor, GGMLQuantizedTensor]
 
 
 class DeferredPermutedTensor(Tensor):
-    def __init__(self, base: Tensor, n_head: int) -> None:
+    def __init__(self, base: Tensor, n_head: int, n_kv_head: Optional[int] = None) -> None:
         self.base = base
         self.n_head = n_head
+        self.n_kv_head = n_kv_head
         self.data_type = self.base.data_type
 
     def astype(self, data_type: DataType) -> Tensor:
-        return self.base.astype(data_type).permute(self.n_head)
+        return self.base.astype(data_type).permute(self.n_head, self.n_kv_head)
 
     def to_ggml(self) -> GGMLCompatibleTensor:
-        return self.base.to_ggml().permute(self.n_head)
+        return self.base.to_ggml().permute(self.n_head, self.n_kv_head)
 
-    def permute(self, n_head: int) -> Tensor:
+    def permute(self, n_head: int, n_kv_head: Optional[int] = None) -> Tensor:
         raise Exception("shouldn't permute twice")
 
 
@@ -566,8 +574,8 @@ class GPTQForLLaMaQuantizedTensor(Tensor):
         ret.data_type = QuantizedDataType(groupsize=new_groupsize, have_addends=True, have_g_idx=False)
         return ret
 
-    def permute(self, n_head: int) -> Tensor:
-        return DeferredPermutedTensor(self, n_head)
+    def permute(self, n_head: int, n_kv_head: Optional[int] = None) -> Tensor:
+        return DeferredPermutedTensor(self, n_head, n_kv_head)
 
     def to_ggml(self) -> GGMLQuantizedTensor:
         # The output format looks like this:
@@ -698,10 +706,10 @@ def merge_multifile_models(models_plus: List[ModelPlus]) -> ModelPlus:
     return ModelPlus(model, paths, format, vocab)
 
 
-def permute_lazy(lazy_tensor: LazyTensor, n_head: int) -> LazyTensor:
+def permute_lazy(lazy_tensor: LazyTensor, n_head: int, n_kv_head: Optional[int] = None) -> LazyTensor:
     def load() -> Tensor:
-        return lazy_tensor.load().permute(n_head)
-    return LazyTensor(load, lazy_tensor.shape, lazy_tensor.data_type, f'permute({n_head}) ' + lazy_tensor.description)
+        return lazy_tensor.load().permute(n_head, n_kv_head)
+    return LazyTensor(load, lazy_tensor.shape, lazy_tensor.data_type, f'permute({n_head}, {n_kv_head}) ' + lazy_tensor.description)
 
 def permute_part_lazy(lazy_tensor: LazyTensor, n_part: int, n_head: int) -> LazyTensor:
     def load() -> Tensor:
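The lazy wrapper only captures the extra argument in a closure, so nothing is materialized until the tensor is written out. A simplified sketch of the pattern, with a plain callable standing in for the converter's LazyTensor (hypothetical helper, not the real types):

from typing import Callable, Optional
import numpy as np

def permute_lazy_sketch(load_fn: Callable[[], np.ndarray], n_head: int,
                        n_kv_head: Optional[int] = None) -> Callable[[], np.ndarray]:
    def load() -> np.ndarray:
        # deferred: permute() runs only when the returned loader is called
        return permute(load_fn(), n_head, n_kv_head)  # permute() as sketched above
    return load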
@@ -726,7 +734,7 @@ def convert_transformers_to_orig(model: LazyModel, params: Params) -> LazyModel:
     for i in itertools.count():
         if f"model.layers.{i}.self_attn.q_proj.weight" in model:
             out[f"layers.{i}.attention.wq.weight"] = permute_lazy(model[f"model.layers.{i}.self_attn.q_proj.weight"], params.n_head)
-            out[f"layers.{i}.attention.wk.weight"] = permute_lazy(model[f"model.layers.{i}.self_attn.k_proj.weight"], params.n_head)
+            out[f"layers.{i}.attention.wk.weight"] = permute_lazy(model[f"model.layers.{i}.self_attn.k_proj.weight"], params.n_head, params.n_kv_head)
             out[f"layers.{i}.attention.wv.weight"] = model[f"model.layers.{i}.self_attn.v_proj.weight"]
         elif f"model.layers.{i}.self_attn.W_pack.weight" in model:
             out[f"layers.{i}.attention.wq.weight"] = permute_part_lazy(model[f"model.layers.{i}.self_attn.W_pack.weight"], 0, params.n_head)
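Only wk needs the extra argument: in the 70B checkpoints q_proj keeps the full 64 heads, k_proj carries just the 8 KV heads, and v_proj passes through unpermuted. Illustrative shape arithmetic (dimensions from the published config):

n_embd, n_head, n_kv_head = 8192, 64, 8
head_dim = n_embd // n_head        # 128
q_rows = n_head * head_dim         # 8192 -> permute_lazy(..., n_head)
k_rows = n_kv_head * head_dim      # 1024 -> permute_lazy(..., n_head, n_kv_head)
# v_proj also has k_rows rows but is not rotary-permuted, so it is copied as-is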