diff --git a/private_gpt/settings/settings.py b/private_gpt/settings/settings.py index 785f7b40f4..4d6ddac6ed 100644 --- a/private_gpt/settings/settings.py +++ b/private_gpt/settings/settings.py @@ -92,6 +92,7 @@ class LocalSettings(BaseModel): llm_hf_repo_id: str llm_hf_model_file: str embedding_hf_model_name: str + llm_hf_resume_download: bool class SagemakerSettings(BaseModel): diff --git a/scripts/setup b/scripts/setup index 377bbe0b3d..534e3fae9b 100755 --- a/scripts/setup +++ b/scripts/setup @@ -24,6 +24,7 @@ hf_hub_download( filename=settings().local.llm_hf_model_file, cache_dir=models_cache_path, local_dir=models_path, + resume_download=settings().local.llm_hf_resume_download, ) print("LLM model downloaded!") diff --git a/settings.yaml b/settings.yaml index f686fdb54a..f88c9c535a 100644 --- a/settings.yaml +++ b/settings.yaml @@ -30,6 +30,7 @@ local: llm_hf_repo_id: TheBloke/Mistral-7B-Instruct-v0.1-GGUF llm_hf_model_file: mistral-7b-instruct-v0.1.Q4_K_M.gguf embedding_hf_model_name: BAAI/bge-small-en-v1.5 + llm_hf_resume_download: true sagemaker: llm_endpoint_name: huggingface-pytorch-tgi-inference-2023-09-25-19-53-32-140