mirror of https://git.adityakumar.xyz/llama.cpp.git
py : removed unused `model` variable and verified that the code functions correctly with the `vocab_only` setting; also confirmed reduced memory usage after deleting the no-longer-needed variable (#547)
This commit is contained in:
parent 96f9c0506f
commit 692ce3164e

1 changed file with 0 additions and 2 deletions
@@ -145,13 +145,11 @@ def main():
 
         print(f"Extracting only the vocab from '{fname_model}'\n")
 
-        model = torch.load(fname_model, map_location="cpu")
 
         with open(fname_out, "wb") as fout:
             write_header(fout, hparams, ftype)
             write_tokens(fout, tokenizer)
 
-        del model
 
         print(f"Done. Output file: {fname_out}\n")
 
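For context, here is a minimal sketch of what the vocab-only path looks like after this change. The helper names (extract_vocab, write_header, write_tokens) and the tokenizer/model file paths are illustrative stand-ins, not the conversion script's exact API; the tokenizer is assumed to be SentencePiece, as used by llama.cpp. The point of the commit is visible in the sketch: dumping the vocab only needs the tokenizer, so loading the model weights with torch.load (and the matching del model) was dead code.

import struct

from sentencepiece import SentencePieceProcessor  # tokenizer used by llama.cpp

def write_header(fout, hparams, ftype):
    # Placeholder: the real script writes the ggml magic, hparams and ftype.
    fout.write(struct.pack("i", 0x67676D6C))  # "ggml" magic, as an example

def write_tokens(fout, tokenizer):
    # Placeholder: the real script writes each token's bytes (and a score).
    for i in range(tokenizer.vocab_size()):
        text = tokenizer.id_to_piece(i).encode("utf-8")
        fout.write(struct.pack("i", len(text)))
        fout.write(text)

def extract_vocab(dir_model, hparams, ftype):
    fname_model = f"{dir_model}/consolidated.00.pth"
    fname_out = f"{dir_model}/ggml-vocab.bin"
    tokenizer = SentencePieceProcessor(f"{dir_model}/tokenizer.model")

    print(f"Extracting only the vocab from '{fname_model}'\n")

    # No torch.load(fname_model) here: writing the vocab never touches the
    # model weights, which is exactly why the commit could drop the variable.
    with open(fname_out, "wb") as fout:
        write_header(fout, hparams, ftype)
        write_tokens(fout, tokenizer)

    print(f"Done. Output file: {fname_out}\n")

Skipping the weight load also means the vocab-only path no longer pays the time and memory cost of deserializing the full checkpoint, which is the reduced memory usage the commit message refers to.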