Skip to content

Commit

Permalink
Move the file caching logic into the model's __init__ function.
Browse files Browse the repository at this point in the history
This allows the model loaded from the UI to also be cached.

PiperOrigin-RevId: 676052839
  • Loading branch information
bdu91 authored and LIT team committed Sep 18, 2024
1 parent ba4d975 commit 80d3a17
Show file tree
Hide file tree
Showing 3 changed files with 13 additions and 9 deletions.
8 changes: 8 additions & 0 deletions lit_nlp/examples/prompt_debugging/keras_lms.py
Original file line number Diff line number Diff line change
Expand Up @@ -12,6 +12,7 @@
from lit_nlp.api import types as lit_types
from lit_nlp.examples.prompt_debugging import constants as pd_constants
from lit_nlp.examples.prompt_debugging import utils as pd_utils
from lit_nlp.lib import file_cache
from lit_nlp.lib import utils as lit_utils


Expand Down Expand Up @@ -74,6 +75,13 @@ def __init__(
if model is not None:
self.model = model
elif model_name_or_path is not None:
if (
is_tar_gz := model_name_or_path.endswith(".tar.gz")
) or file_cache.is_remote(model_name_or_path):
model_name_or_path = file_cache.cached_path(
model_name_or_path,
extract_compressed_file=is_tar_gz,
)
self.model = keras_models.CausalLM.from_preset(model_name_or_path)
else:
raise ValueError("Must provide either model or model_name_or_path.")
Expand Down
7 changes: 0 additions & 7 deletions lit_nlp/examples/prompt_debugging/models.py
Original file line number Diff line number Diff line change
Expand Up @@ -8,7 +8,6 @@
from lit_nlp import app as lit_app
from lit_nlp.api import model as lit_model
from lit_nlp.api import types as lit_types
from lit_nlp.lib import file_cache


DEFAULT_BATCH_SIZE = 1
Expand Down Expand Up @@ -85,12 +84,6 @@ def get_models(
model_name, path = model_string.split(":", 1)
logging.info("Loading model '%s' from '%s'", model_name, path)

if path.endswith(".tar.gz") or file_cache.is_remote(path):
path = file_cache.cached_path(
path,
extract_compressed_file=path.endswith(".tar.gz"),
)

if dl_framework == "kerasnlp":
from lit_nlp.examples.prompt_debugging import keras_lms # pylint: disable=g-import-not-at-top # pytype: disable=import-error

Expand Down
7 changes: 5 additions & 2 deletions lit_nlp/examples/prompt_debugging/transformers_lms.py
Original file line number Diff line number Diff line change
Expand Up @@ -116,9 +116,12 @@ def __init__(
else:
# Normally path is a directory; if it's an archive file, download and
# extract to the transformers cache.
if model_name_or_path.endswith(".tar.gz"):
if (
is_tar_gz := model_name_or_path.endswith(".tar.gz")
) or file_cache.is_remote(model_name_or_path):
model_name_or_path = file_cache.cached_path(
model_name_or_path, extract_compressed_file=True
model_name_or_path,
extract_compressed_file=is_tar_gz,
)

# Note: we need to left-pad for generation to work properly.
Expand Down

0 comments on commit 80d3a17

Please sign in to comment.