updated examples for train API (#2077)
Signed-off-by: shruti2522 <shruti.apc01@gmail.com>
shruti2522 authored Apr 26, 2024
1 parent 6ce4d57 commit dd1226c
Showing 3 changed files with 6 additions and 6 deletions.
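For reference, the renamed argument and class fit into the TrainingClient train call roughly as sketched here. This is a minimal sketch assembled from the snippets in the diffs below; the job name, model, resource settings, and LoRA values are illustrative assumptions and not part of this commit, and the model_provider_parameters field names are assumed from the SDK's Hugging Face params classes.

# Minimal sketch of the updated train API usage (assumptions noted in comments).
import transformers
from peft import LoraConfig

from kubeflow.training import TrainingClient
from kubeflow.storage_initializer.hugging_face import (
    HuggingFaceModelParams,
    HuggingFaceTrainerParams,
    HfDatasetParams,
)

TrainingClient().train(
    name="huggingface-example",  # assumed job name
    num_workers=1,
    num_procs_per_worker=1,
    # Model to fine-tune; model_uri and transformer_type are assumed field names.
    model_provider_parameters=HuggingFaceModelParams(
        model_uri="hf://google-bert/bert-base-cased",
        transformer_type=transformers.AutoModelForSequenceClassification,
    ),
    # Dataset, as in the language-modeling example below.
    dataset_provider_parameters=HfDatasetParams(repo_id="imdatta0/ultrachat_1k"),
    # Renamed in this commit:
    #   train_parameters        -> trainer_parameters
    #   HuggingFaceTrainParams  -> HuggingFaceTrainerParams
    trainer_parameters=HuggingFaceTrainerParams(
        lora_config=LoraConfig(r=8, lora_alpha=8),
        training_parameters=transformers.TrainingArguments(
            output_dir="test_trainer",
            save_strategy="no",
        ),
    ),
)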
4 changes: 2 additions & 2 deletions examples/pytorch/language-modeling/train_api_hf_dataset.ipynb
@@ -21,7 +21,7 @@
"from kubeflow.storage_initializer.s3 import S3DatasetParams\n",
"from kubeflow.storage_initializer.hugging_face import (\n",
" HuggingFaceModelParams,\n",
" HuggingFaceTrainParams,\n",
" HuggingFaceTrainerParams,\n",
" HfDatasetParams,\n",
")\n",
"from kubeflow.storage_initializer.constants import INIT_CONTAINER_MOUNT_PATH\n",
@@ -71,7 +71,7 @@
" # it is assumed for text related tasks, you have 'text' column in the dataset.\n",
" # for more info on how dataset is loaded check load_and_preprocess_data function in sdk/python/kubeflow/trainer/hf_llm_training.py\n",
" dataset_provider_parameters=HfDatasetParams(repo_id=\"imdatta0/ultrachat_1k\"),\n",
" train_parameters=HuggingFaceTrainParams(\n",
" trainer_parameters=HuggingFaceTrainerParams(\n",
" lora_config=LoraConfig(\n",
" r=8,\n",
" lora_alpha=8,\n",
4 changes: 2 additions & 2 deletions examples/pytorch/language-modeling/train_api_s3_dataset.ipynb
@@ -20,7 +20,7 @@
"from kubeflow.training.api.training_client import TrainingClient\n",
"from kubeflow.storage_initializer.hugging_face import (\n",
" HuggingFaceModelParams,\n",
" HuggingFaceTrainParams,\n",
" HuggingFaceTrainerParams,\n",
" HfDatasetParams,\n",
")\n",
"from kubeflow.storage_initializer.constants import INIT_CONTAINER_MOUNT_PATH\n",
@@ -90,7 +90,7 @@
" \"secret_key\": s3_secret_key,\n",
" }\n",
" ),\n",
" train_parameters=HuggingFaceTrainParams(\n",
" trainer_parameters=HuggingFaceTrainerParams(\n",
" lora_config=LoraConfig(\n",
" r=8,\n",
" lora_alpha=8,\n",
4 changes: 2 additions & 2 deletions examples/pytorch/text-classification/Fine-Tune-BERT-LLM.ipynb
@@ -613,7 +613,7 @@
"from kubeflow.training import TrainingClient\n",
"from kubeflow.storage_initializer.hugging_face import (\n",
" HuggingFaceModelParams,\n",
" HuggingFaceTrainParams,\n",
" HuggingFaceTrainerParams,\n",
" HfDatasetParams,\n",
")\n",
"\n",
@@ -651,7 +651,7 @@
" split=\"train[:3000]\",\n",
" ),\n",
" # Specify HuggingFace Trainer parameters. In this example, we will skip evaluation and model checkpoints.\n",
" train_parameters=HuggingFaceTrainParams(\n",
" trainer_parameters=HuggingFaceTrainerParams(\n",
" training_parameters=transformers.TrainingArguments(\n",
" output_dir=\"test_trainer\",\n",
" save_strategy=\"no\",\n",
