Commit
adding fine tune example with s3 as the dataset store
deepanker13 committed Feb 19, 2024
1 parent e5bdd3c commit e0d1f69
Showing 6 changed files with 116 additions and 14 deletions.
96 changes: 92 additions & 4 deletions examples/sdk/train_api.ipynb
@@ -12,12 +12,13 @@
},
{
"cell_type": "code",
"execution_count": 15,
"execution_count": 8,
"metadata": {},
"outputs": [],
"source": [
"# import the libraries\n",
"from kubeflow.training.api.training_client import TrainingClient\n",
"from kubeflow.storage_initializer.s3 import S3DatasetParams\n",
"from kubeflow.storage_initializer.hugging_face import (\n",
" HuggingFaceModelParams,\n",
" HuggingFaceTrainParams,\n",
@@ -40,6 +41,13 @@
"client = TrainingClient()"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"1. USING HUGGING FACE AS THE DATASET STORE"
]
},
{
"cell_type": "code",
"execution_count": null,
@@ -48,7 +56,7 @@
"source": [
"# mention the model, datasets and training parameters\n",
"client.train(\n",
" name=\"huggingface-test\",\n",
" name=\"test\",\n",
" num_workers=2,\n",
" num_procs_per_worker=1,\n",
" # specify the storage class if you don't want to use the default one for the storage-initializer PVC\n",
@@ -102,14 +110,94 @@
")"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"2. USING S3 AS THE DATASET \n",
"Note - The dataset folder inside the bucket has to be named similar to how hugging face names its folder with the downloaded dataset, i.e (huggingface username or any name without a - or _ ) + triple underscore + dataset name\n",
"For example -> imdatta0___ultrachat_1k can be one possible name"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"# mention the model, datasets and training parameters\n",
"client.train(\n",
" name=\"test\",\n",
" num_workers=2,\n",
" num_procs_per_worker=1,\n",
" # specify the storage class if you don't want to use the default one for the storage-initializer PVC\n",
" # storage_config={\n",
" # \"size\": \"10Gi\",\n",
" # \"storage_class\": \"<your storage class>\",\n",
" # },\n",
" model_provider_parameters=HuggingFaceModelParams(\n",
" model_uri=\"hf://TinyLlama/TinyLlama-1.1B-Chat-v1.0\",\n",
" transformer_type=transformers.AutoModelForCausalLM,\n",
" ),\n",
" # it is assumed for text related tasks, you have 'text' column in the dataset.\n",
" # for more info on how dataset is loaded check load_and_preprocess_data function in sdk/python/kubeflow/trainer/hf_llm_training.py\n",
" dataset_provider_parameters=S3DatasetParams(\n",
" {\n",
" \"endpoint_url\": \"http://10.117.63.3\",\n",
" \"bucket_name\": \"test\",\n",
" \"file_key\": \"imdatta0___ultrachat_1k\",\n",
" \"region_name\": \"us-east-1\",\n",
" \"access_key\": \"qEMHyz8wNw\",\n",
" \"secret_key\": \"qIp_QN\",\n",
" }\n",
" ),\n",
" train_parameters=HuggingFaceTrainParams(\n",
" lora_config=LoraConfig(\n",
" r=8,\n",
" lora_alpha=8,\n",
" lora_dropout=0.1,\n",
" bias=\"none\",\n",
" task_type=\"CAUSAL_LM\",\n",
" ),\n",
" training_parameters=TrainingArguments(\n",
" num_train_epochs=1,\n",
" per_device_train_batch_size=1,\n",
" gradient_accumulation_steps=1,\n",
" gradient_checkpointing=True,\n",
" gradient_checkpointing_kwargs={\n",
" \"use_reentrant\": False\n",
" }, # this is mandatory if checkpointng is enabled\n",
" warmup_steps=0.02,\n",
" learning_rate=1,\n",
" lr_scheduler_type=\"cosine\",\n",
" bf16=False,\n",
" logging_steps=0.01,\n",
" output_dir=INIT_CONTAINER_MOUNT_PATH,\n",
" optim=f\"sgd\",\n",
" save_steps=0.01,\n",
" save_total_limit=3,\n",
" disable_tqdm=False,\n",
" resume_from_checkpoint=True,\n",
" remove_unused_columns=True,\n",
" ddp_backend=\"nccl\", # change the backend to gloo if you want cpu based training and remove the gpu key in resources_per_worker\n",
" ),\n",
" ),\n",
" resources_per_worker={\n",
" \"gpu\": 1,\n",
" \"cpu\": 8,\n",
" \"memory\": \"8Gi\",\n",
" }, # remove the gpu key if you don't want to attach gpus to the pods\n",
")"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"# check the logs of the job\n",
"client.get_job_logs(name=\"huggingface-test\", job_kind=constants.PYTORCHJOB_KIND)"
"client.get_job_logs(name=\"test\", job_kind=constants.PYTORCHJOB_KIND)"
]
}
],
@@ -129,7 +217,7 @@
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
"version": "3.11.4"
"version": "3.11.6"
}
},
"nbformat": 4,
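The markdown note in the notebook above requires the dataset to sit in the bucket under a Hugging Face-style folder name such as imdatta0___ultrachat_1k. Below is a minimal sketch of how such a prefix could be populated with boto3; it is not part of this commit, and the endpoint, credentials, local directory, and helper name are placeholder assumptions.

# Hypothetical helper (not part of this commit): upload a local dataset folder
# to S3 under the Hugging Face-style prefix expected by the storage initializer.
import os
import boto3

def upload_dataset_to_s3(local_dir, bucket_name, file_key, endpoint_url,
                         access_key, secret_key, region_name="us-east-1"):
    s3_client = boto3.client(
        "s3",
        endpoint_url=endpoint_url,
        aws_access_key_id=access_key,
        aws_secret_access_key=secret_key,
        region_name=region_name,
    )
    # Upload every file in local_dir under "<file_key>/", e.g.
    # "imdatta0___ultrachat_1k/train.parquet".
    for root, _, files in os.walk(local_dir):
        for name in files:
            local_path = os.path.join(root, name)
            rel_path = os.path.relpath(local_path, local_dir)
            s3_client.upload_file(local_path, bucket_name, f"{file_key}/{rel_path}")

upload_dataset_to_s3(
    local_dir="./ultrachat_1k",          # placeholder local copy of the dataset
    bucket_name="test",
    file_key="imdatta0___ultrachat_1k",  # must follow the naming note in the notebook
    endpoint_url="http://<your-s3-endpoint>",
    access_key="<access key>",
    secret_key="<secret key>",
)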
2 changes: 1 addition & 1 deletion sdk/python/kubeflow/storage_initializer/requirements.txt
@@ -2,7 +2,7 @@ einops>=0.6.1
transformers_stream_generator==0.0.4
boto3==1.33.9
transformers>=4.20.0
peft>=0.3.0
peft==0.3.0
huggingface_hub==0.16.4
datasets>=2.13.2

24 changes: 18 additions & 6 deletions sdk/python/kubeflow/storage_initializer/s3.py
@@ -48,10 +48,22 @@ def download_dataset(self):
region_name=self.config.region_name,
)

# Download the file
s3_client.download_file(
self.config.bucket_name,
self.config.file_key,
os.path.join(VOLUME_PATH_DATASET, self.config.file_key),
response = s3_client.list_objects_v2(
Bucket=self.config.bucket_name, Prefix=self.config.file_key
)
print(f"File downloaded to: {VOLUME_PATH_DATASET}")
# Download the file
for obj in response.get("Contents", []):
# Extract the object key (filename)
obj_key = obj["Key"]
os.makedirs(
os.path.join(VOLUME_PATH_DATASET, self.config.file_key), exist_ok=True
)
s3_client.download_file(
self.config.bucket_name,
obj_key,
os.path.join(
os.path.join(VOLUME_PATH_DATASET, self.config.file_key),
os.path.basename(obj_key),
),
)
print(f"Files downloaded")
2 changes: 1 addition & 1 deletion sdk/python/kubeflow/trainer/requirements.txt
@@ -1,4 +1,4 @@
peft>=0.3.0
peft==0.3.0
datasets==2.15.0
transformers>=4.20.0
bitsandbytes>=0.42.0
4 changes: 3 additions & 1 deletion sdk/python/kubeflow/training/api/training_client.py
@@ -172,8 +172,10 @@ def train(

if isinstance(dataset_provider_parameters, S3DatasetParams):
dp = "s3"
dataset_name = dataset_provider_parameters.file_key.replace("_" * 3, "/")
elif isinstance(dataset_provider_parameters, HfDatasetParams):
dp = "hf"
dataset_name = dataset_provider_parameters.repo_id
else:
raise ValueError(
f"Invalid dataset provider parameters {dataset_provider_parameters}"
@@ -210,7 +212,7 @@
"--dataset_dir",
VOLUME_PATH_DATASET,
"--dataset_name",
dataset_provider_parameters.repo_id,
dataset_name,
"--lora_config",
json.dumps(train_parameters.lora_config.__dict__, cls=utils.SetEncoder),
"--training_parameters",
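The added dataset_name logic recovers a Hugging Face-style repo id from the S3 file_key by turning the triple underscore back into a slash, which is what ties the bucket naming convention in the notebook to the trainer's --dataset_name argument. A quick illustration of that one line, using the example value from the notebook:

file_key = "imdatta0___ultrachat_1k"           # example value from the notebook
dataset_name = file_key.replace("_" * 3, "/")
print(dataset_name)                            # imdatta0/ultrachat_1k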
2 changes: 1 addition & 1 deletion sdk/python/setup.py
@@ -64,6 +64,6 @@
tests_require=TESTS_REQUIRES,
extras_require={
"test": TESTS_REQUIRES,
"huggingface": ["transformers>=4.20.0", "peft>=0.3.0"],
"huggingface": ["transformers>=4.20.0", "peft==0.3.0"],
},
)
