convert-*.py: lint check and add back in type templates
mofosyne committed May 24, 2024
1 parent 9a6062d commit 77e928c
Showing 2 changed files with 17 additions and 10 deletions.
25 changes: 17 additions & 8 deletions convert-hf-to-gguf.py
@@ -12,7 +12,7 @@
 from enum import IntEnum
 from pathlib import Path
 from hashlib import sha256
-from typing import TYPE_CHECKING, Any, Callable, ContextManager, Iterable, Iterator, Sequence, TypeVar, cast
+from typing import TYPE_CHECKING, Any, Callable, ContextManager, Iterable, Iterator, Sequence, TypeVar, cast, Optional
 from dataclasses import dataclass
 
 import math
@@ -148,18 +148,27 @@ def get_model_name(metadata, hyperparameters, dir_model, model_arch):
                 return gguf.MODEL_ARCH_NAMES[model_arch]
         self.model_name = get_model_name(self.metadata, self.hparams, self.dir_model, self.model_arch)
 
-        # Generate default filename based on model specification and available metadata
-        def extract_encoding_scheme(ftype):
-            # Extracts and converts the encoding scheme from the given file type name.
-            # e.g. 'gguf.LlamaFileType.ALL_F32' --> 'F32'
-            return ftype.name.partition("_")[2].upper()
+        # Extracts and converts the encoding scheme from the given file type name. e.g. 'gguf.LlamaFileType.ALL_F32' --> 'F32'
+        encodingScheme = self.ftype.name.partition("_")[2]
+
         # Get Expert Count From Hyperparameters
         expert_count = self.hparams["num_local_experts"] if "num_local_experts" in self.hparams else None
-        self.fname_default = f"{gguf.naming_convention(self.model_name, self.metadata.version, expert_count, self.parameter_count(), extract_encoding_scheme(self.ftype))}"
+
+        # Generate default filename based on model specification and available metadata
+        self.fname_default = f"{gguf.naming_convention(self.model_name, self.metadata.version, expert_count, self.parameter_count(), encodingScheme)}"
+
         # Filename Output
         if fname_out is not None:
             # custom defined filename and path was provided
-            self.fname_out = fname_out
+            def fill_templated_filename(filename: str, encodingScheme: str):
+                # Given a file name fill in any type templates e.g. 'some-model-name.{ftype}.gguf'
+                ftype_uppercase: str = encodingScheme.upper()
+                ftype_lowercase: str = encodingScheme.lower()
+                return filename.format(ftype_lowercase,
+                                       outtype=ftype_lowercase, ftype=ftype_lowercase,
+                                       OUTTYPE=ftype_uppercase, FTYPE=ftype_uppercase)
+
+            self.fname_out = fname_out.parent / fill_templated_filename(fname_out.name, encodingScheme)
         else:
             # output in the same directory as the model by default
            self.fname_out = dir_model.parent / self.fname_default
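
As a quick illustration of the template filling added above, the sketch below reproduces fill_templated_filename() as a standalone function (the sample filenames are made up for demonstration): {ftype}/{outtype} placeholders are replaced with the lowercase encoding name, {FTYPE}/{OUTTYPE} with the uppercase one, and filenames without placeholders pass through unchanged.

def fill_templated_filename(filename: str, encodingScheme: str) -> str:
    # Substitute any type template in the file name with the encoding scheme,
    # lowercase for {ftype}/{outtype} and uppercase for {FTYPE}/{OUTTYPE}.
    ftype_uppercase: str = encodingScheme.upper()
    ftype_lowercase: str = encodingScheme.lower()
    return filename.format(ftype_lowercase,
                           outtype=ftype_lowercase, ftype=ftype_lowercase,
                           OUTTYPE=ftype_uppercase, FTYPE=ftype_uppercase)

print(fill_templated_filename("some-model-name.{ftype}.gguf", "F16"))     # some-model-name.f16.gguf
print(fill_templated_filename("some-model-name.{OUTTYPE}.gguf", "q8_0"))  # some-model-name.Q8_0.gguf
print(fill_templated_filename("plain-name.gguf", "F16"))                  # plain-name.gguf (unchanged)
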
2 changes: 0 additions & 2 deletions convert.py
@@ -1320,8 +1320,6 @@ def pick_output_type(model: LazyModel, output_type_str: str | None) -> GGMLFileT
 
 def model_parameter_count(model: LazyModel) -> int:
     # TODO: Ensure parameter count is accurate throughout various model type
-    # May currently overestimate parameter count in Mamba model because
-    # output weights is tied with token embeddings.
     total_model_parameters = 0
     for name, lazy_tensor in model.items():
         # Got A Tensor
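
For context, a minimal sketch of the idea behind model_parameter_count() follows: sum the element count of every tensor in the model. The plain dict of tensor shapes and the count_parameters name are illustrative stand-ins, not convert.py's actual LazyModel API.

import math

def count_parameters(tensor_shapes: dict[str, tuple[int, ...]]) -> int:
    # Each tensor contributes the product of its dimensions to the total.
    total = 0
    for shape in tensor_shapes.values():
        total += math.prod(shape)
    return total

print(count_parameters({"token_embd.weight": (32000, 4096),
                        "blk.0.attn_q.weight": (4096, 4096)}))  # 147849216
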
