From d20a4888848d007a86855edf0bd4dd13df822a32 Mon Sep 17 00:00:00 2001 From: Shubham Krishna Date: Thu, 15 Feb 2024 21:26:05 +0000 Subject: [PATCH 01/21] PyTorch TPU Dockerfile --- .../2.0/transformers/4.37.2/py310/Dockerfile | 1 + .../2.1/transformers/4.37.2/py310/Dockerfile | 70 +++++++++++++++++++ 2 files changed, 71 insertions(+) create mode 100644 containers/pytorch/training/tpu/2.0/transformers/4.37.2/py310/Dockerfile create mode 100644 containers/pytorch/training/tpu/2.1/transformers/4.37.2/py310/Dockerfile diff --git a/containers/pytorch/training/tpu/2.0/transformers/4.37.2/py310/Dockerfile b/containers/pytorch/training/tpu/2.0/transformers/4.37.2/py310/Dockerfile new file mode 100644 index 00000000..c3346967 --- /dev/null +++ b/containers/pytorch/training/tpu/2.0/transformers/4.37.2/py310/Dockerfile @@ -0,0 +1 @@ +FROM us-central1-docker.pkg.dev/tpu-pytorch-releases/docker/xla:r2.1.0_3.10_tpuvm diff --git a/containers/pytorch/training/tpu/2.1/transformers/4.37.2/py310/Dockerfile b/containers/pytorch/training/tpu/2.1/transformers/4.37.2/py310/Dockerfile new file mode 100644 index 00000000..27f559e0 --- /dev/null +++ b/containers/pytorch/training/tpu/2.1/transformers/4.37.2/py310/Dockerfile @@ -0,0 +1,70 @@ +FROM us-central1-docker.pkg.dev/tpu-pytorch-releases/docker/xla:r2.1.0_3.10_tpuvm +# The image with PyTorch = 2.1, Python=3.10 +# Read more about it here: https://github.com/pytorch/xla?tab=readme-ov-file#docker + +LABEL maintainer="Hugging Face" +ARG DEBIAN_FRONTEND=noninteractive + +# Versions +ARG TRANSFORMERS='4.37.2' +ARG DIFFUSERS='0.26.1' +ARG PEFT='0.8.2' +ARG TRL='0.7.10' +ARG DATASETS='2.16.1' +ARG ACCELERATE='0.27.0' +ARG EVALUATE='0.4.1' +ARG SENTENCE_TRANSFORMERS='2.3.1' + +RUN apt-get update \ + && apt-get install -y \ + bzip2 \ + curl \ + git \ + git-lfs \ + tar \ + gcc \ + g++ \ + libaio-dev \ + # audio + libsndfile1-dev \ + ffmpeg \ + apt-transport-https \ + gnupg \ + ca-certificates \ + && apt-get clean autoremove --yes + +# Update pip +RUN pip install --upgrade pip + + +# Install Hugging Face Libraries +RUN pip install --upgrade --no-cache-dir \ + transformers[sklearn,sentencepiece,vision]==${TRANSFORMERS} \ + diffusers==${DIFFUSERS} \ + datasets==${DATASETS} \ + accelerate==${ACCELERATE} \ + evaluate==${EVALUATE} \ + peft==${PEFT} \ + trl==${TRL} \ + sentence-transformers==${SENTENCE_TRANSFORMERS} + +#Install Google Cloud Dependencies +RUN echo "deb [signed-by=/usr/share/keyrings/cloud.google.gpg] https://packages.cloud.google.com/apt cloud-sdk main" \ + | tee -a /etc/apt/sources.list.d/google-cloud-sdk.list && \ + curl https://packages.cloud.google.com/apt/doc/apt-key.gpg \ + | apt-key --keyring /usr/share/keyrings/cloud.google.gpg add - && \ + apt-get update -y && \ + apt-get install google-cloud-sdk -y + +RUN pip install --upgrade --no-cache-dir \ + google-cloud-storage \ + google-cloud-bigquery \ + google-cloud-aiplatform \ + google-cloud-pubsub \ + google-cloud-logging + +# Check if correct versions are installed +RUN python -c "import transformers, diffusers, datasets, accelerate, evaluate, peft, trl, sentence_transformers, torch; \ + assert all([mod.__version__ == version for mod, version in [(transformers, '${TRANSFORMERS}'), (diffusers, '${DIFFUSERS}'), \ + (datasets, '${DATASETS}'), (accelerate, '${ACCELERATE}'), (evaluate, '${EVALUATE}'), (peft, '${PEFT}'), (trl, '${TRL}'), \ + (sentence_transformers, '${SENTENCE_TRANSFORMERS}'), (torch, '2.1.0')]])" \ No newline at end of file From 84ca2b2d45dd6384c5f6df195c002c5e5605f4ea Mon Sep 17 00:00:00 2001 
From: Shubham Krishna Date: Thu, 15 Feb 2024 21:26:40 +0000 Subject: [PATCH 02/21] PyTorch TPU Dockerfile --- .../training/tpu/2.1/transformers/4.37.2/py310/Dockerfile | 1 - 1 file changed, 1 deletion(-) diff --git a/containers/pytorch/training/tpu/2.1/transformers/4.37.2/py310/Dockerfile b/containers/pytorch/training/tpu/2.1/transformers/4.37.2/py310/Dockerfile index 27f559e0..87a69147 100644 --- a/containers/pytorch/training/tpu/2.1/transformers/4.37.2/py310/Dockerfile +++ b/containers/pytorch/training/tpu/2.1/transformers/4.37.2/py310/Dockerfile @@ -36,7 +36,6 @@ RUN apt-get update \ # Update pip RUN pip install --upgrade pip - # Install Hugging Face Libraries RUN pip install --upgrade --no-cache-dir \ transformers[sklearn,sentencepiece,vision]==${TRANSFORMERS} \ From 42bbd210c2789f53bf72a919e7ab06722633bf6f Mon Sep 17 00:00:00 2001 From: Shubham Krishna Date: Fri, 16 Feb 2024 14:44:18 +0100 Subject: [PATCH 03/21] Add Notebook --- .../training/tpu/2.1/transformers/4.37.2/py310/Dockerfile | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/containers/pytorch/training/tpu/2.1/transformers/4.37.2/py310/Dockerfile b/containers/pytorch/training/tpu/2.1/transformers/4.37.2/py310/Dockerfile index 87a69147..86ea0115 100644 --- a/containers/pytorch/training/tpu/2.1/transformers/4.37.2/py310/Dockerfile +++ b/containers/pytorch/training/tpu/2.1/transformers/4.37.2/py310/Dockerfile @@ -14,6 +14,7 @@ ARG DATASETS='2.16.1' ARG ACCELERATE='0.27.0' ARG EVALUATE='0.4.1' ARG SENTENCE_TRANSFORMERS='2.3.1' +ARG NOTEBOOK='7.1.0' RUN apt-get update \ && apt-get install -y \ @@ -45,7 +46,8 @@ RUN pip install --upgrade --no-cache-dir \ evaluate==${EVALUATE} \ peft==${PEFT} \ trl==${TRL} \ - sentence-transformers==${SENTENCE_TRANSFORMERS} + sentence-transformers==${SENTENCE_TRANSFORMERS} \ + notebook==${NOTEBOOK} #Install Google Cloud Dependencies RUN echo "deb [signed-by=/usr/share/keyrings/cloud.google.gpg] https://packages.cloud.google.com/apt cloud-sdk main" \ From e679d997e2973ad633831bf0e109da15590550b6 Mon Sep 17 00:00:00 2001 From: Shubham Krishna Date: Fri, 16 Feb 2024 16:41:27 +0100 Subject: [PATCH 04/21] remove: unused dockerfile, sentence-transformers --- .../training/tpu/2.0/transformers/4.37.2/py310/Dockerfile | 1 - .../training/tpu/2.1/transformers/4.37.2/py310/Dockerfile | 6 ++---- 2 files changed, 2 insertions(+), 5 deletions(-) delete mode 100644 containers/pytorch/training/tpu/2.0/transformers/4.37.2/py310/Dockerfile diff --git a/containers/pytorch/training/tpu/2.0/transformers/4.37.2/py310/Dockerfile b/containers/pytorch/training/tpu/2.0/transformers/4.37.2/py310/Dockerfile deleted file mode 100644 index c3346967..00000000 --- a/containers/pytorch/training/tpu/2.0/transformers/4.37.2/py310/Dockerfile +++ /dev/null @@ -1 +0,0 @@ -FROM us-central1-docker.pkg.dev/tpu-pytorch-releases/docker/xla:r2.1.0_3.10_tpuvm diff --git a/containers/pytorch/training/tpu/2.1/transformers/4.37.2/py310/Dockerfile b/containers/pytorch/training/tpu/2.1/transformers/4.37.2/py310/Dockerfile index 86ea0115..29ca15e4 100644 --- a/containers/pytorch/training/tpu/2.1/transformers/4.37.2/py310/Dockerfile +++ b/containers/pytorch/training/tpu/2.1/transformers/4.37.2/py310/Dockerfile @@ -13,7 +13,6 @@ ARG TRL='0.7.10' ARG DATASETS='2.16.1' ARG ACCELERATE='0.27.0' ARG EVALUATE='0.4.1' -ARG SENTENCE_TRANSFORMERS='2.3.1' ARG NOTEBOOK='7.1.0' RUN apt-get update \ @@ -46,7 +45,6 @@ RUN pip install --upgrade --no-cache-dir \ evaluate==${EVALUATE} \ peft==${PEFT} \ trl==${TRL} \ - 
sentence-transformers==${SENTENCE_TRANSFORMERS} \ notebook==${NOTEBOOK} #Install Google Cloud Dependencies @@ -65,7 +63,7 @@ RUN pip install --upgrade --no-cache-dir \ google-cloud-logging # Check if correct versions are installed -RUN python -c "import transformers, diffusers, datasets, accelerate, evaluate, peft, trl, sentence_transformers, torch; \ +RUN python -c "import transformers, diffusers, datasets, accelerate, evaluate, peft, trl, torch; \ assert all([mod.__version__ == version for mod, version in [(transformers, '${TRANSFORMERS}'), (diffusers, '${DIFFUSERS}'), \ (datasets, '${DATASETS}'), (accelerate, '${ACCELERATE}'), (evaluate, '${EVALUATE}'), (peft, '${PEFT}'), (trl, '${TRL}'), \ - (sentence_transformers, '${SENTENCE_TRANSFORMERS}'), (torch, '2.1.0')]])" \ No newline at end of file + (torch, '2.1.0')]])" \ No newline at end of file From 54474039ae3841e5219669bb3273283003d0b78f Mon Sep 17 00:00:00 2001 From: Shubham Krishna Date: Wed, 21 Feb 2024 16:33:43 +0100 Subject: [PATCH 05/21] Add example with PyTorch_XLA TPU DLC --- .../text-classification/README.md | 101 ++++++++++++ ...-emotion-classification-pytorch-xla-tpu.py | 146 ++++++++++++++++++ 2 files changed, 247 insertions(+) create mode 100644 examples/google-cloud-tpu-vm/text-classification/README.md create mode 100644 examples/google-cloud-tpu-vm/text-classification/bert-emotion-classification-pytorch-xla-tpu.py diff --git a/examples/google-cloud-tpu-vm/text-classification/README.md b/examples/google-cloud-tpu-vm/text-classification/README.md new file mode 100644 index 00000000..90b1a85d --- /dev/null +++ b/examples/google-cloud-tpu-vm/text-classification/README.md @@ -0,0 +1,101 @@ +# Train BERT for emotion classification using Hugging Face PyTorch TPU DLC on Google Cloud TPU(v5e) + +This example demonstrates how to train a emotion classification model using Hugging Face's DLCs on Google Cloud TPU(v5e). We use the [transformers](https://huggingface.co/docs/transformers/) library to fine-tune a pre-trained BERT model for emotion classification. The dataset used for this example is the [dair-ai/emotion ](https://huggingface.co/datasets/dair-ai/emotion) dataset from Hugging Face's [datasets](https://huggingface.co/docs/datasets/en/index) library. + + + +## What are TPUs? + +Google Cloud TPUs are custom-designed AI accelerators, which are optimized for training and inference of large AI models. They are ideal for a variety of use cases, such as chatbots, code generation, media content generation, synthetic speech, vision services, recommendation engines, personalization models, among others. + +Advantages of using TPUs include: + +- Designed to scale cost-efficiently for a wide range of AI workloads, spanning training, fine-tuning, and inference. +- Optimized for TensorFlow, PyTorch, and JAX, and are available in a variety of form factors, including edge devices, workstations, and cloud-based infrastructure. +- TPUs are available in [Google Cloud](https://cloud.google.com/tpu/docs/intro-to-tpu), and has been integrated with [Vertex AI](https://cloud.google.com/vertex-ai/docs/training/training-with-tpu-vm), and [Google Kubernetes Engine (GKE)](https://cloud.google.com/tpu?hl=en#cloud-tpu-in-gke). +- + +## Before you begin + +Make sure you have the following: +- A Google Cloud project with billing enabled. + +- [Google Cloud CLI](https://cloud.google.com/sdk/docs/install#linux) installed on your local machine. 
+ +For installing Google Cloud CLI, you can use the following commands: + +```bash +curl https://sdk.cloud.google.com | bash +exec zsh -l +gcloud init +``` + +You can configure your Google Cloud project using the following command: + +```bash +gcloud auth login +gcloud config set project +gcloud auth application-default login +``` + +Enable the Compute Engine and Cloud TPU APIs using the following commands: + +```bash +gcloud services enable compute.googleapis.com +gcloud services enable tpu.googleapis.com +``` + + +## Spin up a TPU VM on Google Cloud + +We will be using [Cloud TPU v5e](https://cloud.google.com/tpu/docs/v5e-training), Google Cloud's latest generation AI accelerator. To [set up a TPU VM](https://cloud.google.com/tpu/docs/setup-gcp-account#set-up-env), follow the steps below: + + + +```bash +gcloud alpha compute tpus tpu-vm create dev-tpu-vm \ +--zone=us-west4-a \ +--accelerator-type=v5litepod-8 \ +--version v2-alpha-tpuv5-lite +``` + +After some time, the TPU VM will be created. You can see the list of TPU VMs in [Google Cloud console](https://console.cloud.google.com/compute/tpus). + + +## Set up the environment + +Once, the Cloud TPU VM is up and running, you can SSH into the VM using the following command: + +```bash +gcloud alpha compute tpus tpu-vm ssh dev-tpu-vm --zone=us-west4-a +``` + + +You now need to build the environment using Hugging Face's PyTorch TPU DLC [Dockerfile](https://github.com/huggingface/Google-Cloud-Containers/blob/feature/pytorch-tpu-container/containers/pytorch/training/tpu/2.1/transformers/4.37.2/py310/Dockerfile). You can use the following commands to build the environment: + +```bash +git clone https://github.com/huggingface/Google-Cloud-Containers.git +cd Google-Cloud-Containers +sudo docker build -t huggingface-pytorch-training-tpu-2.1.transformers.4.37.2.py310:latest -f containers/pytorch/training/tpu/2.1/transformers/4.37.2/py310/Dockerfile . +``` + +## Train the model +Once, the docker image is built, we need to run the docker container in order to activate the enviroment. 
You can use the following commands to run the docker container: + +```bash +sudo docker run -it -v $(pwd):/workspace --privileged huggingface-pytorch-training-tpu-2.1.transformers.4.37.2.py310:latest bash +``` + +Now, you can run the following commands to train the model: + +```bash +cd /workspace +python google-partnership/Google-Cloud-Containers/examples/google-cloud-tpu-vm/text-classification/train.py \ +--model_id bert-base-uncased \ +--num_epochs 3 \ +--num_workers 8 \ +--train_batch_size 16 \ +--test_batch_size 16 \ +--num_cores 8 \ +--lr 1e-2 +``` \ No newline at end of file diff --git a/examples/google-cloud-tpu-vm/text-classification/bert-emotion-classification-pytorch-xla-tpu.py b/examples/google-cloud-tpu-vm/text-classification/bert-emotion-classification-pytorch-xla-tpu.py new file mode 100644 index 00000000..4c3fde5b --- /dev/null +++ b/examples/google-cloud-tpu-vm/text-classification/bert-emotion-classification-pytorch-xla-tpu.py @@ -0,0 +1,146 @@ +import torch +import torch_xla +import torch_xla.core.xla_model as xm +import torch.distributed as dist +import torch_xla.distributed.xla_multiprocessing as xmp +from torch_xla import runtime as xr +import torch_xla.distributed.parallel_loader as pl +import argparse +from transformers import AutoTokenizer, AutoModelForSequenceClassification, AdamW, get_linear_schedule_with_warmup +from datasets import load_dataset +import numpy as np + + +def train_model(args): + raw_dataset = load_dataset("dair-ai/emotion") + raw_dataset = raw_dataset.rename_column("label", "labels") # to match Trainer requirements + # Load Tokenizer + tokenizer = AutoTokenizer.from_pretrained(args.model_id) + + def preprocess_dataset(raw_dataset): + # Tokenize helper function + def tokenize(batch): + return tokenizer(batch['text'], padding='max_length', truncation=True,return_tensors="pt") + tokenized_dataset = raw_dataset.map(tokenize, batched=True, remove_columns=["text"]) + tokenized_dataset = tokenized_dataset.with_format("torch") + return tokenized_dataset + + tokenized_dataset = preprocess_dataset(raw_dataset) + train_sampler, test_sampler = None, None + if xm.xrt_world_size() > 1: + xm.master_print(f"Training with: {xm.xrt_world_size()} TPU cores") + train_sampler = torch.utils.data.distributed.DistributedSampler( + tokenized_dataset["train"], + num_replicas=xm.xrt_world_size(), + rank=xm.get_ordinal(), + shuffle=True, + ) + test_sampler = torch.utils.data.distributed.DistributedSampler( + tokenized_dataset["test"], + num_replicas=xm.xrt_world_size(), + rank=xm.get_ordinal(), + shuffle=False) + + train_dataloader = torch.utils.data.DataLoader( + tokenized_dataset["train"], + sampler=train_sampler, + batch_size=args.train_batch_size, + drop_last=True, + num_workers=args.num_workers, + ) + + test_dataloader = torch.utils.data.DataLoader( + tokenized_dataset["test"], + sampler=test_sampler, + batch_size=args.test_batch_size, + drop_last=True, + num_workers=args.num_workers, + ) + + # Scale learning rate to num cores + lr = args.lr * xm.xrt_world_size() + device = xm.xla_device() + + # Prepare model labels - useful for inference + labels = tokenized_dataset["train"].features["labels"].names + num_labels = len(labels) + label2id, id2label = dict(), dict() + for i, label in enumerate(labels): + label2id[label] = str(i) + id2label[str(i)] = label + + # Download the model from huggingface.co/models + model = AutoModelForSequenceClassification.from_pretrained( + args.model_id, num_labels=num_labels, label2id=label2id, id2label=id2label + ) + model = 
model.to(device) + + # Synchronize model parameters across replicas manually. + if xr.using_pjrt(): + xm.broadcast_master_param(model) + num_train_steps = int(len(tokenized_dataset["train"]) / args.train_batch_size / xm.xrt_world_size() * args.num_epochs) + xm.master_print(f'num_train_steps = {num_train_steps}, world_size={xm.xrt_world_size()}') + + optimizer = AdamW(params=model.parameters(), lr=lr) + scheduler = get_linear_schedule_with_warmup( + optimizer, + num_warmup_steps=0, + num_training_steps=num_train_steps +) + def single_train_epoch(dataloader): + model.train() + for step, batch in enumerate(dataloader): + optimizer.zero_grad() + outputs = model(**batch) + loss = outputs.loss + loss.backward() + xm.optimizer_step(optimizer) + scheduler.step() + if step % 10 == 0: + xm.master_print(f'step={step}, loss={loss}') + def single_test_epoch(dataloader): + model.eval() + total_samples, num_corrects = 0, 0 + for step, batch in enumerate(dataloader): + with torch.no_grad(): + outputs = model(**batch) + preds = outputs.logits.argmax(dim=-1) + num_corrects += preds.eq(batch['labels'].view_as(preds)).sum() + total_samples += batch['labels'].size(0) + + acc = 100.0 * num_corrects.item() / total_samples + acc = xm.mesh_reduce('test_accuracy', acc, np.mean) + return acc + + train_device_loader = pl.MpDeviceLoader(train_dataloader, device) + test_device_loader = pl.MpDeviceLoader(test_dataloader, device) + + for epoch in range(args.num_epochs): + xm.master_print(f'Epoch {epoch} training begin') + single_train_epoch(train_device_loader) + xm.master_print(f'Epoch {epoch} training end') + xm.master_print(f'Epoch {epoch} testing begin') + acc = single_test_epoch(test_device_loader) + xm.master_print(f'Test-Accuracy: {acc:.2f}% after Epoch {epoch}') + + +def parse_args(): + parser = argparse.ArgumentParser() + parser.add_argument('--model_id', default='bert-base-uncased', type=str) + parser.add_argument('--num_workers', default=8, type=int) + parser.add_argument('--num_cores', default=8, type=int) + parser.add_argument('--num_epochs', default=3, type=int) + parser.add_argument('--test_batch_size', default=16, type=int) + parser.add_argument('--train_batch_size', default=16, type=int) + parser.add_argument('--lr', default=1e-4, type=float) + args = parser.parse_args() + return args + +def _mp_fn(index, args): + torch.set_default_dtype(torch.float32) + train_model(args) + + +if __name__ == '__main__': + args = parse_args() + xmp.spawn(_mp_fn, args=(args,), nprocs=args.num_cores) \ No newline at end of file From 9b2fb84c75d2cd531c9d352243f3c8ec66aed8d9 Mon Sep 17 00:00:00 2001 From: Shubham Krishna Date: Wed, 21 Feb 2024 17:27:10 +0100 Subject: [PATCH 06/21] Update README to add Single-Host --- examples/google-cloud-tpu-vm/text-classification/README.md | 7 ++++--- 1 file changed, 4 insertions(+), 3 deletions(-) diff --git a/examples/google-cloud-tpu-vm/text-classification/README.md b/examples/google-cloud-tpu-vm/text-classification/README.md index 90b1a85d..dec81df8 100644 --- a/examples/google-cloud-tpu-vm/text-classification/README.md +++ b/examples/google-cloud-tpu-vm/text-classification/README.md @@ -1,6 +1,6 @@ # Train BERT for emotion classification using Hugging Face PyTorch TPU DLC on Google Cloud TPU(v5e) -This example demonstrates how to train a emotion classification model using Hugging Face's DLCs on Google Cloud TPU(v5e). We use the [transformers](https://huggingface.co/docs/transformers/) library to fine-tune a pre-trained BERT model for emotion classification. 
The dataset used for this example is the [dair-ai/emotion ](https://huggingface.co/datasets/dair-ai/emotion) dataset from Hugging Face's [datasets](https://huggingface.co/docs/datasets/en/index) library. +This example demonstrates how to train a emotion classification model using Hugging Face's DLCs on Google Cloud single-host TPU(v5e) VM. We use the [transformers](https://huggingface.co/docs/transformers/) library to fine-tune a pre-trained BERT model for emotion classification. The dataset used for this example is the [dair-ai/emotion ](https://huggingface.co/datasets/dair-ai/emotion) dataset from Hugging Face's [datasets](https://huggingface.co/docs/datasets/en/index) library. @@ -12,7 +12,7 @@ Advantages of using TPUs include: - Designed to scale cost-efficiently for a wide range of AI workloads, spanning training, fine-tuning, and inference. - Optimized for TensorFlow, PyTorch, and JAX, and are available in a variety of form factors, including edge devices, workstations, and cloud-based infrastructure. -- TPUs are available in [Google Cloud](https://cloud.google.com/tpu/docs/intro-to-tpu), and has been integrated with [Vertex AI](https://cloud.google.com/vertex-ai/docs/training/training-with-tpu-vm), and [Google Kubernetes Engine (GKE)](https://cloud.google.com/tpu?hl=en#cloud-tpu-in-gke). +- TPUs are available in [Google Cloud](https://cloud.google.com/tpu/docs/intro-to-tpu), and have been integrated with [Vertex AI](https://cloud.google.com/vertex-ai/docs/training/training-with-tpu-vm), and [Google Kubernetes Engine (GKE)](https://cloud.google.com/tpu?hl=en#cloud-tpu-in-gke). - ## Before you begin @@ -48,7 +48,8 @@ gcloud services enable tpu.googleapis.com ## Spin up a TPU VM on Google Cloud -We will be using [Cloud TPU v5e](https://cloud.google.com/tpu/docs/v5e-training), Google Cloud's latest generation AI accelerator. To [set up a TPU VM](https://cloud.google.com/tpu/docs/setup-gcp-account#set-up-env), follow the steps below: +We will be using [Cloud TPU v5e](https://cloud.google.com/tpu/docs/v5e-training), Google Cloud's latest generation AI accelerator. We will setup a single-host TPU(v5e) VM to train the model. You can read more about Single-host and Multi-host TPU VMs on [Google Cloud TPU configurations](https://cloud.google.com/tpu/docs/supported-tpu-configurations). +To [set up a TPU VM](https://cloud.google.com/tpu/docs/setup-gcp-account#set-up-env), follow the steps below: From de6c186e0f646a84b11860c3e829585d46c35254 Mon Sep 17 00:00:00 2001 From: Shubham Krishna Date: Wed, 21 Feb 2024 17:33:39 +0100 Subject: [PATCH 07/21] Add more info about different hosts --- .../google-cloud-tpu-vm/text-classification/README.md | 8 ++++++-- 1 file changed, 6 insertions(+), 2 deletions(-) diff --git a/examples/google-cloud-tpu-vm/text-classification/README.md b/examples/google-cloud-tpu-vm/text-classification/README.md index dec81df8..30990c6d 100644 --- a/examples/google-cloud-tpu-vm/text-classification/README.md +++ b/examples/google-cloud-tpu-vm/text-classification/README.md @@ -3,7 +3,6 @@ This example demonstrates how to train a emotion classification model using Hugging Face's DLCs on Google Cloud single-host TPU(v5e) VM. We use the [transformers](https://huggingface.co/docs/transformers/) library to fine-tune a pre-trained BERT model for emotion classification. The dataset used for this example is the [dair-ai/emotion ](https://huggingface.co/datasets/dair-ai/emotion) dataset from Hugging Face's [datasets](https://huggingface.co/docs/datasets/en/index) library. 
- ## What are TPUs? Google Cloud TPUs are custom-designed AI accelerators, which are optimized for training and inference of large AI models. They are ideal for a variety of use cases, such as chatbots, code generation, media content generation, synthetic speech, vision services, recommendation engines, personalization models, among others. @@ -48,7 +47,12 @@ gcloud services enable tpu.googleapis.com ## Spin up a TPU VM on Google Cloud -We will be using [Cloud TPU v5e](https://cloud.google.com/tpu/docs/v5e-training), Google Cloud's latest generation AI accelerator. We will setup a single-host TPU(v5e) VM to train the model. You can read more about Single-host and Multi-host TPU VMs on [Google Cloud TPU configurations](https://cloud.google.com/tpu/docs/supported-tpu-configurations). +We will be using [Cloud TPU v5e](https://cloud.google.com/tpu/docs/v5e-training), Google Cloud's latest generation AI accelerator. We will setup a single-host TPU(v5e) VM to train the model. + +You can read more about Single-host(8 chips) and Multi-host(> 8 chips) TPU VMs on [Google Cloud TPU configurations](https://cloud.google.com/tpu/docs/supported-tpu-configurations). + +Note: Steps to run the example would differ for multi-host TPU VMs. One would need to use [SAX](https://github.com/google/saxml) for multi-host training and multi-host inference. + To [set up a TPU VM](https://cloud.google.com/tpu/docs/setup-gcp-account#set-up-env), follow the steps below: From 4680bcd410fad547b63e9cfd45a31f781808df67 Mon Sep 17 00:00:00 2001 From: Shubham Krishna Date: Thu, 22 Feb 2024 22:49:22 +0100 Subject: [PATCH 08/21] Replace manual training script with trainer --- ...-emotion-classification-pytorch-xla-tpu.py | 171 +++++++----------- 1 file changed, 68 insertions(+), 103 deletions(-) diff --git a/examples/google-cloud-tpu-vm/text-classification/bert-emotion-classification-pytorch-xla-tpu.py b/examples/google-cloud-tpu-vm/text-classification/bert-emotion-classification-pytorch-xla-tpu.py index 4c3fde5b..cbbaf736 100644 --- a/examples/google-cloud-tpu-vm/text-classification/bert-emotion-classification-pytorch-xla-tpu.py +++ b/examples/google-cloud-tpu-vm/text-classification/bert-emotion-classification-pytorch-xla-tpu.py @@ -1,66 +1,49 @@ +import argparse + import torch import torch_xla import torch_xla.core.xla_model as xm -import torch.distributed as dist import torch_xla.distributed.xla_multiprocessing as xmp -from torch_xla import runtime as xr -import torch_xla.distributed.parallel_loader as pl -import argparse -from transformers import AutoTokenizer, AutoModelForSequenceClassification, AdamW, get_linear_schedule_with_warmup from datasets import load_dataset -import numpy as np +from transformers import ( + AutoModelForSequenceClassification, + AutoTokenizer, + Trainer, + TrainingArguments, +) def train_model(args): raw_dataset = load_dataset("dair-ai/emotion") - raw_dataset = raw_dataset.rename_column("label", "labels") # to match Trainer requirements + raw_dataset = raw_dataset.rename_column( + "label", "labels" + ) # to match Trainer requirements # Load Tokenizer tokenizer = AutoTokenizer.from_pretrained(args.model_id) - + def preprocess_dataset(raw_dataset): # Tokenize helper function def tokenize(batch): - return tokenizer(batch['text'], padding='max_length', truncation=True,return_tensors="pt") - tokenized_dataset = raw_dataset.map(tokenize, batched=True, remove_columns=["text"]) + return tokenizer( + batch["text"], + padding="max_length", + truncation=True, + return_tensors="pt", + ) + + tokenized_dataset = 
raw_dataset.map( + tokenize, batched=True, remove_columns=["text"] + ) tokenized_dataset = tokenized_dataset.with_format("torch") return tokenized_dataset - + tokenized_dataset = preprocess_dataset(raw_dataset) - train_sampler, test_sampler = None, None - if xm.xrt_world_size() > 1: - xm.master_print(f"Training with: {xm.xrt_world_size()} TPU cores") - train_sampler = torch.utils.data.distributed.DistributedSampler( - tokenized_dataset["train"], - num_replicas=xm.xrt_world_size(), - rank=xm.get_ordinal(), - shuffle=True, - ) - test_sampler = torch.utils.data.distributed.DistributedSampler( - tokenized_dataset["test"], - num_replicas=xm.xrt_world_size(), - rank=xm.get_ordinal(), - shuffle=False) - - train_dataloader = torch.utils.data.DataLoader( - tokenized_dataset["train"], - sampler=train_sampler, - batch_size=args.train_batch_size, - drop_last=True, - num_workers=args.num_workers, - ) - - test_dataloader = torch.utils.data.DataLoader( - tokenized_dataset["test"], - sampler=test_sampler, - batch_size=args.test_batch_size, - drop_last=True, - num_workers=args.num_workers, - ) # Scale learning rate to num cores lr = args.lr * xm.xrt_world_size() device = xm.xla_device() - + xm.master_print(f"Current TpU: {device}, total-TPU={xm.xrt_world_size()}") + # Prepare model labels - useful for inference labels = tokenized_dataset["train"].features["labels"].names num_labels = len(labels) @@ -73,74 +56,56 @@ def tokenize(batch): model = AutoModelForSequenceClassification.from_pretrained( args.model_id, num_labels=num_labels, label2id=label2id, id2label=id2label ) - model = model.to(device) - - # Synchronize model parameters across replicas manually. - if xr.using_pjrt(): - xm.broadcast_master_param(model) - num_train_steps = int(len(tokenized_dataset["train"]) / args.train_batch_size / xm.xrt_world_size() * args.num_epochs) - xm.master_print(f'num_train_steps = {num_train_steps}, world_size={xm.xrt_world_size()}') - - optimizer = AdamW(params=model.parameters(), lr=lr) - scheduler = get_linear_schedule_with_warmup( - optimizer, - num_warmup_steps=0, - num_training_steps=num_train_steps -) - def single_train_epoch(dataloader): - model.train() - for step, batch in enumerate(dataloader): - optimizer.zero_grad() - outputs = model(**batch) - loss = outputs.loss - loss.backward() - xm.optimizer_step(optimizer) - scheduler.step() - if step % 10 == 0: - xm.master_print(f'step={step}, loss={loss}') - def single_test_epoch(dataloader): - model.eval() - total_samples, num_corrects = 0, 0 - for step, batch in enumerate(dataloader): - with torch.no_grad(): - outputs = model(**batch) - preds = outputs.logits.argmax(dim=-1) - num_corrects += preds.eq(batch['labels'].view_as(preds)).sum() - total_samples += batch['labels'].size(0) - - acc = 100.0 * num_corrects.item() / total_samples - acc = xm.mesh_reduce('test_accuracy', acc, np.mean) - return acc - - train_device_loader = pl.MpDeviceLoader(train_dataloader, device) - test_device_loader = pl.MpDeviceLoader(test_dataloader, device) - - for epoch in range(args.num_epochs): - xm.master_print(f'Epoch {epoch} training begin') - single_train_epoch(train_device_loader) - xm.master_print(f'Epoch {epoch} training end') - xm.master_print(f'Epoch {epoch} testing begin') - acc = single_test_epoch(test_device_loader) - xm.master_print(f'Test-Accuracy: {acc:.2f}% after Epoch {epoch}') - - + + num_train_steps = int( + len(tokenized_dataset["train"]) + / args.train_batch_size + / xm.xrt_world_size() + * args.num_epochs + ) + + ## Define training arguments + training_args = 
TrainingArguments( + output_dir="output", + per_device_train_batch_size=args.train_batch_size, + per_device_eval_batch_size=args.test_batch_size, + learning_rate=lr, + num_train_epochs=args.num_epochs, + evaluation_strategy="epoch", + logging_strategy="steps", + logging_steps=10, + ) + # Initialize our Trainer + trainer = Trainer( + model=model, + args=training_args, + train_dataset=tokenized_dataset["train"], + eval_dataset=tokenized_dataset["test"], + tokenizer=tokenizer, + ) + + # Train the model + trainer.train() + + def parse_args(): parser = argparse.ArgumentParser() - parser.add_argument('--model_id', default='bert-base-uncased', type=str) - parser.add_argument('--num_workers', default=8, type=int) - parser.add_argument('--num_cores', default=8, type=int) - parser.add_argument('--num_epochs', default=3, type=int) - parser.add_argument('--test_batch_size', default=16, type=int) - parser.add_argument('--train_batch_size', default=16, type=int) - parser.add_argument('--lr', default=1e-4, type=float) + parser.add_argument("--model_id", default="bert-base-uncased", type=str) + parser.add_argument("--num_workers", default=8, type=int) + parser.add_argument("--num_cores", default=8, type=int) + parser.add_argument("--num_epochs", default=3, type=int) + parser.add_argument("--test_batch_size", default=16, type=int) + parser.add_argument("--train_batch_size", default=16, type=int) + parser.add_argument("--lr", default=1e-4, type=float) args = parser.parse_args() return args + def _mp_fn(index, args): torch.set_default_dtype(torch.float32) train_model(args) - -if __name__ == '__main__': + +if __name__ == "__main__": args = parse_args() - xmp.spawn(_mp_fn, args=(args,), nprocs=args.num_cores) \ No newline at end of file + xmp.spawn(_mp_fn, args=(args,), nprocs=args.num_cores) From 0901c5904e436ed70fcee31ea49bbdb4388f47bc Mon Sep 17 00:00:00 2001 From: Shubham Krishna Date: Fri, 23 Feb 2024 15:17:49 +0100 Subject: [PATCH 09/21] Add Dolly, use TRL and OPT-350M --- .../README.md | 14 +-- .../peft_lora_trl_dolly_clm.py | 107 +++++++++++++++++ ...-emotion-classification-pytorch-xla-tpu.py | 111 ------------------ 3 files changed, 113 insertions(+), 119 deletions(-) rename examples/google-cloud-tpu-vm/{text-classification => causal-language-modeling}/README.md (85%) create mode 100644 examples/google-cloud-tpu-vm/causal-language-modeling/peft_lora_trl_dolly_clm.py delete mode 100644 examples/google-cloud-tpu-vm/text-classification/bert-emotion-classification-pytorch-xla-tpu.py diff --git a/examples/google-cloud-tpu-vm/text-classification/README.md b/examples/google-cloud-tpu-vm/causal-language-modeling/README.md similarity index 85% rename from examples/google-cloud-tpu-vm/text-classification/README.md rename to examples/google-cloud-tpu-vm/causal-language-modeling/README.md index 30990c6d..f703906b 100644 --- a/examples/google-cloud-tpu-vm/text-classification/README.md +++ b/examples/google-cloud-tpu-vm/causal-language-modeling/README.md @@ -1,6 +1,6 @@ -# Train BERT for emotion classification using Hugging Face PyTorch TPU DLC on Google Cloud TPU(v5e) +# Finetune Facebook OPT-350M on Dolly using Hugging Face PyTorch TPU DLC on Google Cloud TPU(v5e) -This example demonstrates how to train a emotion classification model using Hugging Face's DLCs on Google Cloud single-host TPU(v5e) VM. We use the [transformers](https://huggingface.co/docs/transformers/) library to fine-tune a pre-trained BERT model for emotion classification. 
The dataset used for this example is the [dair-ai/emotion ](https://huggingface.co/datasets/dair-ai/emotion) dataset from Hugging Face's [datasets](https://huggingface.co/docs/datasets/en/index) library. +This example demonstrates how to finetune [Facebook OPT-350M](https://huggingface.co/facebook/opt-350m) using Hugging Face's DLCs on Google Cloud single-host TPU(v5e) VM. We use the [transformers](https://huggingface.co/docs/transformers/), [TRL](https://huggingface.co/docs/trl/en/index), and [PEFT](https://huggingface.co/docs/peft/index) library to fine-tune. The dataset used for this example is the [Doly-15k](databricks/databricks-dolly-15k) dataset which can be easily accessed from Hugging Face's [Datasets](https://huggingface.co/datasets) Hub. ## What are TPUs? @@ -95,12 +95,10 @@ Now, you can run the following commands to train the model: ```bash cd /workspace -python google-partnership/Google-Cloud-Containers/examples/google-cloud-tpu-vm/text-classification/train.py \ ---model_id bert-base-uncased \ +python google-partnership/Google-Cloud-Containers/examples/google-cloud-tpu-vm/causal-language-modeling/peft_lora_trl_dolly_clm.py \ +--model_id facebook/opt-350m \ --num_epochs 3 \ ---num_workers 8 \ ---train_batch_size 16 \ ---test_batch_size 16 \ +--train_batch_size 8 \ --num_cores 8 \ ---lr 1e-2 +--lr 3e-4 ``` \ No newline at end of file diff --git a/examples/google-cloud-tpu-vm/causal-language-modeling/peft_lora_trl_dolly_clm.py b/examples/google-cloud-tpu-vm/causal-language-modeling/peft_lora_trl_dolly_clm.py new file mode 100644 index 00000000..82dd1e6c --- /dev/null +++ b/examples/google-cloud-tpu-vm/causal-language-modeling/peft_lora_trl_dolly_clm.py @@ -0,0 +1,107 @@ +import argparse + +import torch +import torch_xla +import torch_xla.core.xla_model as xm +import torch_xla.distributed.xla_multiprocessing as xmp +from datasets import load_dataset +from peft import LoraConfig, TaskType +from transformers import ( + AutoModelForCausalLM, + AutoTokenizer, + DataCollatorForLanguageModeling, + TrainingArguments, +) +from trl import SFTTrainer + + +def train_model(args): + raw_dataset = load_dataset("databricks/databricks-dolly-15k", split="train") + + def format_dolly(sample): + instruction = f"### Instruction\n{sample['instruction']}" + context = ( + f"### Context\n{sample['context']}" if len(sample["context"]) > 0 else None + ) + response = f"### Answer\n{sample['response']}" + # join all the parts together + prompt = "\n\n".join( + [i for i in [instruction, context, response] if i is not None] + ) + sample["text"] = prompt + return sample + + # apply prompt template + format_dataset = raw_dataset.map( + format_dolly, remove_columns=list(raw_dataset.features) + ) + + # Load Tokenizer + tokenizer = AutoTokenizer.from_pretrained(args.model_id) + tokenizer.pad_token = tokenizer.eos_token + + # Scale learning rate to num cores + lr = args.lr * xm.xrt_world_size() + device = xm.xla_device() + + # Load model + model = AutoModelForCausalLM.from_pretrained( + args.model_id, torch_dtype=torch.bfloat16 + ) + lora_config = LoraConfig( + r=16, + target_modules=["q_proj", "v_proj"], + task_type=TaskType.CAUSAL_LM, + lora_alpha=32, + lora_dropout=0.05, + ) + + # Define training arguments + training_args = TrainingArguments( + output_dir="output", + per_device_train_batch_size=args.train_batch_size, + per_device_eval_batch_size=args.test_batch_size, + learning_rate=lr, + gradient_accumulation_steps=2, # number of steps before performing a backward/update pass + gradient_checkpointing=True, # use 
gradient checkpointing to save memory + optim="adamw_torch_fused", + num_train_epochs=args.num_epochs, + logging_strategy="steps", + logging_steps=10, + bf16=True, + ) + + # Initialize our Trainer + trainer = SFTTrainer( + model=model, + peft_config=lora_config, + args=training_args, + dataset_text_field="text", + packing=True, + train_dataset=format_dataset, + tokenizer=tokenizer, + data_collator=DataCollatorForLanguageModeling(tokenizer=tokenizer, mlm=False), + ) + # Train the model + trainer.train() + + +def parse_args(): + parser = argparse.ArgumentParser() + parser.add_argument("--model_id", default="facebook/opt-125m", type=str) + parser.add_argument("--num_cores", default=8, type=int) + parser.add_argument("--num_epochs", default=3, type=int) + parser.add_argument("--train_batch_size", default=16, type=int) + parser.add_argument("--lr", default=3e-4, type=float) + args = parser.parse_args() + return args + + +def _mp_fn(index, args): + torch.set_default_dtype(torch.bfloat16) + train_model(args) + + +if __name__ == "__main__": + args = parse_args() + xmp.spawn(_mp_fn, args=(args,), nprocs=args.num_cores) diff --git a/examples/google-cloud-tpu-vm/text-classification/bert-emotion-classification-pytorch-xla-tpu.py b/examples/google-cloud-tpu-vm/text-classification/bert-emotion-classification-pytorch-xla-tpu.py deleted file mode 100644 index cbbaf736..00000000 --- a/examples/google-cloud-tpu-vm/text-classification/bert-emotion-classification-pytorch-xla-tpu.py +++ /dev/null @@ -1,111 +0,0 @@ -import argparse - -import torch -import torch_xla -import torch_xla.core.xla_model as xm -import torch_xla.distributed.xla_multiprocessing as xmp -from datasets import load_dataset -from transformers import ( - AutoModelForSequenceClassification, - AutoTokenizer, - Trainer, - TrainingArguments, -) - - -def train_model(args): - raw_dataset = load_dataset("dair-ai/emotion") - raw_dataset = raw_dataset.rename_column( - "label", "labels" - ) # to match Trainer requirements - # Load Tokenizer - tokenizer = AutoTokenizer.from_pretrained(args.model_id) - - def preprocess_dataset(raw_dataset): - # Tokenize helper function - def tokenize(batch): - return tokenizer( - batch["text"], - padding="max_length", - truncation=True, - return_tensors="pt", - ) - - tokenized_dataset = raw_dataset.map( - tokenize, batched=True, remove_columns=["text"] - ) - tokenized_dataset = tokenized_dataset.with_format("torch") - return tokenized_dataset - - tokenized_dataset = preprocess_dataset(raw_dataset) - - # Scale learning rate to num cores - lr = args.lr * xm.xrt_world_size() - device = xm.xla_device() - xm.master_print(f"Current TpU: {device}, total-TPU={xm.xrt_world_size()}") - - # Prepare model labels - useful for inference - labels = tokenized_dataset["train"].features["labels"].names - num_labels = len(labels) - label2id, id2label = dict(), dict() - for i, label in enumerate(labels): - label2id[label] = str(i) - id2label[str(i)] = label - - # Download the model from huggingface.co/models - model = AutoModelForSequenceClassification.from_pretrained( - args.model_id, num_labels=num_labels, label2id=label2id, id2label=id2label - ) - - num_train_steps = int( - len(tokenized_dataset["train"]) - / args.train_batch_size - / xm.xrt_world_size() - * args.num_epochs - ) - - ## Define training arguments - training_args = TrainingArguments( - output_dir="output", - per_device_train_batch_size=args.train_batch_size, - per_device_eval_batch_size=args.test_batch_size, - learning_rate=lr, - num_train_epochs=args.num_epochs, - 
evaluation_strategy="epoch", - logging_strategy="steps", - logging_steps=10, - ) - # Initialize our Trainer - trainer = Trainer( - model=model, - args=training_args, - train_dataset=tokenized_dataset["train"], - eval_dataset=tokenized_dataset["test"], - tokenizer=tokenizer, - ) - - # Train the model - trainer.train() - - -def parse_args(): - parser = argparse.ArgumentParser() - parser.add_argument("--model_id", default="bert-base-uncased", type=str) - parser.add_argument("--num_workers", default=8, type=int) - parser.add_argument("--num_cores", default=8, type=int) - parser.add_argument("--num_epochs", default=3, type=int) - parser.add_argument("--test_batch_size", default=16, type=int) - parser.add_argument("--train_batch_size", default=16, type=int) - parser.add_argument("--lr", default=1e-4, type=float) - args = parser.parse_args() - return args - - -def _mp_fn(index, args): - torch.set_default_dtype(torch.float32) - train_model(args) - - -if __name__ == "__main__": - args = parse_args() - xmp.spawn(_mp_fn, args=(args,), nprocs=args.num_cores) From c2e8a3b597d5b61e6f758e88dafeb5ca81e77793 Mon Sep 17 00:00:00 2001 From: Shubham Krishna Date: Fri, 23 Feb 2024 21:24:25 +0100 Subject: [PATCH 10/21] Push local changes --- .../causal-language-modeling/peft_lora_trl_dolly_clm.py | 4 ---- 1 file changed, 4 deletions(-) diff --git a/examples/google-cloud-tpu-vm/causal-language-modeling/peft_lora_trl_dolly_clm.py b/examples/google-cloud-tpu-vm/causal-language-modeling/peft_lora_trl_dolly_clm.py index 82dd1e6c..b36ca38a 100644 --- a/examples/google-cloud-tpu-vm/causal-language-modeling/peft_lora_trl_dolly_clm.py +++ b/examples/google-cloud-tpu-vm/causal-language-modeling/peft_lora_trl_dolly_clm.py @@ -60,11 +60,7 @@ def format_dolly(sample): training_args = TrainingArguments( output_dir="output", per_device_train_batch_size=args.train_batch_size, - per_device_eval_batch_size=args.test_batch_size, learning_rate=lr, - gradient_accumulation_steps=2, # number of steps before performing a backward/update pass - gradient_checkpointing=True, # use gradient checkpointing to save memory - optim="adamw_torch_fused", num_train_epochs=args.num_epochs, logging_strategy="steps", logging_steps=10, From e935b17afe77c65b6f008c8ff96a4e5523da2649 Mon Sep 17 00:00:00 2001 From: Shubham Krishna Date: Mon, 26 Feb 2024 11:56:31 +0100 Subject: [PATCH 11/21] Change name, add transformers trainer example --- .../causal-language-modeling/README.md | 2 +- .../causal-language-modeling/dolly-clm.py | 101 ++++++++++++++++++ ...olly_clm.py => peft-lora-trl-dolly-clm.py} | 0 3 files changed, 102 insertions(+), 1 deletion(-) create mode 100644 examples/google-cloud-tpu-vm/causal-language-modeling/dolly-clm.py rename examples/google-cloud-tpu-vm/causal-language-modeling/{peft_lora_trl_dolly_clm.py => peft-lora-trl-dolly-clm.py} (100%) diff --git a/examples/google-cloud-tpu-vm/causal-language-modeling/README.md b/examples/google-cloud-tpu-vm/causal-language-modeling/README.md index f703906b..08cba97e 100644 --- a/examples/google-cloud-tpu-vm/causal-language-modeling/README.md +++ b/examples/google-cloud-tpu-vm/causal-language-modeling/README.md @@ -95,7 +95,7 @@ Now, you can run the following commands to train the model: ```bash cd /workspace -python google-partnership/Google-Cloud-Containers/examples/google-cloud-tpu-vm/causal-language-modeling/peft_lora_trl_dolly_clm.py \ +python google-partnership/Google-Cloud-Containers/examples/google-cloud-tpu-vm/causal-language-modeling/peft-lora-trl-dolly-clm.py \ --model_id 
facebook/opt-350m \ --num_epochs 3 \ --train_batch_size 8 \ diff --git a/examples/google-cloud-tpu-vm/causal-language-modeling/dolly-clm.py b/examples/google-cloud-tpu-vm/causal-language-modeling/dolly-clm.py new file mode 100644 index 00000000..0b7c46e2 --- /dev/null +++ b/examples/google-cloud-tpu-vm/causal-language-modeling/dolly-clm.py @@ -0,0 +1,101 @@ +import argparse + +import torch +import torch_xla +import torch_xla.core.xla_model as xm +import torch_xla.distributed.xla_multiprocessing as xmp +from datasets import load_dataset +from transformers import ( + AutoModelForCausalLM, + AutoTokenizer, + DataCollatorForLanguageModeling, + Trainer, + TrainingArguments, +) + + +def train_model(args): + raw_dataset = load_dataset("databricks/databricks-dolly-15k", split="train") + + def format_dolly(sample): + instruction = f"### Instruction\n{sample['instruction']}" + context = ( + f"### Context\n{sample['context']}" if len(sample["context"]) > 0 else None + ) + response = f"### Answer\n{sample['response']}" + # join all the parts together + prompt = "\n\n".join( + [i for i in [instruction, context, response] if i is not None] + ) + sample["text"] = prompt + return sample + + # apply prompt template + format_dataset = raw_dataset.map( + format_dolly, remove_columns=list(raw_dataset.features) + ) + + # Load Tokenizer + tokenizer = AutoTokenizer.from_pretrained(args.model_id) + tokenizer.pad_token = tokenizer.eos_token + + # Tokenize the dataset + tokenized_train_dataset = format_dataset.map( + lambda example: tokenizer( + example["text"], padding="max_length", truncation=True, max_length=1024 + ), + batched=True, + remove_columns=format_dataset.features, + ) + + # Scale learning rate to num cores + lr = args.lr * xm.xrt_world_size() + device = xm.xla_device() + + # Load model + model = AutoModelForCausalLM.from_pretrained( + args.model_id, torch_dtype=torch.bfloat16 + ) + + ## Define training arguments + training_args = TrainingArguments( + output_dir="output", + per_device_train_batch_size=args.train_batch_size, + learning_rate=lr, + num_train_epochs=args.num_epochs, + logging_strategy="steps", + logging_steps=10, + bf16=True, + ) + # Initialize our Trainer + trainer = Trainer( + model=model, + args=training_args, + train_dataset=tokenized_train_dataset, + tokenizer=tokenizer, + data_collator=DataCollatorForLanguageModeling(tokenizer=tokenizer, mlm=False), + ) + + # Train the model + trainer.train() + + +def parse_args(): + parser = argparse.ArgumentParser() + parser.add_argument("--model_id", default="facebook/opt-125m", type=str) + parser.add_argument("--num_cores", default=8, type=int) + parser.add_argument("--num_epochs", default=3, type=int) + parser.add_argument("--train_batch_size", default=16, type=int) + parser.add_argument("--lr", default=1e-4, type=float) + args = parser.parse_args() + return args + + +def _mp_fn(index, args): + torch.set_default_dtype(torch.bfloat16) + train_model(args) + + +if __name__ == "__main__": + args = parse_args() + xmp.spawn(_mp_fn, args=(args,), nprocs=args.num_cores) diff --git a/examples/google-cloud-tpu-vm/causal-language-modeling/peft_lora_trl_dolly_clm.py b/examples/google-cloud-tpu-vm/causal-language-modeling/peft-lora-trl-dolly-clm.py similarity index 100% rename from examples/google-cloud-tpu-vm/causal-language-modeling/peft_lora_trl_dolly_clm.py rename to examples/google-cloud-tpu-vm/causal-language-modeling/peft-lora-trl-dolly-clm.py From 67476115307bd948926c56117353b21232fdad4b Mon Sep 17 00:00:00 2001 From: Shubham Krishna Date: Wed, 28 
Feb 2024 13:07:39 +0100 Subject: [PATCH 12/21] Update dockerfile and example according to nightly version --- .../transformers/4.38.1}/py310/Dockerfile | 18 ++-- .../causal-language-modeling/README.md | 15 ++- .../causal-language-modeling/dolly-clm.py | 101 ------------------ ...ly-clm.py => finetune-gemma-lora-dolly.py} | 41 +++---- 4 files changed, 38 insertions(+), 137 deletions(-) rename containers/pytorch/training/tpu/{2.1/transformers/4.37.2 => 2.3/transformers/4.38.1}/py310/Dockerfile (83%) delete mode 100644 examples/google-cloud-tpu-vm/causal-language-modeling/dolly-clm.py rename examples/google-cloud-tpu-vm/causal-language-modeling/{peft-lora-trl-dolly-clm.py => finetune-gemma-lora-dolly.py} (73%) diff --git a/containers/pytorch/training/tpu/2.1/transformers/4.37.2/py310/Dockerfile b/containers/pytorch/training/tpu/2.3/transformers/4.38.1/py310/Dockerfile similarity index 83% rename from containers/pytorch/training/tpu/2.1/transformers/4.37.2/py310/Dockerfile rename to containers/pytorch/training/tpu/2.3/transformers/4.38.1/py310/Dockerfile index 29ca15e4..829f24eb 100644 --- a/containers/pytorch/training/tpu/2.1/transformers/4.37.2/py310/Dockerfile +++ b/containers/pytorch/training/tpu/2.3/transformers/4.38.1/py310/Dockerfile @@ -1,19 +1,19 @@ -FROM us-central1-docker.pkg.dev/tpu-pytorch-releases/docker/xla:r2.1.0_3.10_tpuvm -# The image with PyTorch = 2.1, Python=3.10 +FROM us-central1-docker.pkg.dev/tpu-pytorch-releases/docker/xla@sha256:8f1dcd5b03f993e4da5c20d17c77aff6a5f22d5455f8eb042d2e4b16ac460526 +# The nightly image with PyTorch = 2.3, Python=3.10 # Read more about it here: https://github.com/pytorch/xla?tab=readme-ov-file#docker LABEL maintainer="Hugging Face" ARG DEBIAN_FRONTEND=noninteractive # Versions -ARG TRANSFORMERS='4.37.2' -ARG DIFFUSERS='0.26.1' +ARG TRANSFORMERS='4.38.1' +ARG DIFFUSERS='0.26.3' ARG PEFT='0.8.2' -ARG TRL='0.7.10' -ARG DATASETS='2.16.1' -ARG ACCELERATE='0.27.0' +ARG TRL='0.7.11' +ARG DATASETS='2.17.1' +ARG ACCELERATE='0.27.2' ARG EVALUATE='0.4.1' -ARG NOTEBOOK='7.1.0' +ARG NOTEBOOK='7.1.1' RUN apt-get update \ && apt-get install -y \ @@ -66,4 +66,4 @@ RUN pip install --upgrade --no-cache-dir \ RUN python -c "import transformers, diffusers, datasets, accelerate, evaluate, peft, trl, torch; \ assert all([mod.__version__ == version for mod, version in [(transformers, '${TRANSFORMERS}'), (diffusers, '${DIFFUSERS}'), \ (datasets, '${DATASETS}'), (accelerate, '${ACCELERATE}'), (evaluate, '${EVALUATE}'), (peft, '${PEFT}'), (trl, '${TRL}'), \ - (torch, '2.1.0')]])" \ No newline at end of file + (torch, '2.3.0')]])" \ No newline at end of file diff --git a/examples/google-cloud-tpu-vm/causal-language-modeling/README.md b/examples/google-cloud-tpu-vm/causal-language-modeling/README.md index 08cba97e..8c65440c 100644 --- a/examples/google-cloud-tpu-vm/causal-language-modeling/README.md +++ b/examples/google-cloud-tpu-vm/causal-language-modeling/README.md @@ -1,6 +1,6 @@ -# Finetune Facebook OPT-350M on Dolly using Hugging Face PyTorch TPU DLC on Google Cloud TPU(v5e) +# Finetune Gemma-2B using Hugging Face PyTorch TPU DLC on Google Cloud TPU(v5e) -This example demonstrates how to finetune [Facebook OPT-350M](https://huggingface.co/facebook/opt-350m) using Hugging Face's DLCs on Google Cloud single-host TPU(v5e) VM. We use the [transformers](https://huggingface.co/docs/transformers/), [TRL](https://huggingface.co/docs/trl/en/index), and [PEFT](https://huggingface.co/docs/peft/index) library to fine-tune. 
The dataset used for this example is the [Doly-15k](databricks/databricks-dolly-15k) dataset which can be easily accessed from Hugging Face's [Datasets](https://huggingface.co/datasets) Hub. +This example demonstrates how to finetune [gemma-2b](https://huggingface.co/google/gemma-2b) using Hugging Face's DLCs on Google Cloud single-host TPU(v5e) VM. We use the [transformers](https://huggingface.co/docs/transformers/), [TRL](https://huggingface.co/docs/trl/en/index), and [PEFT](https://huggingface.co/docs/peft/index) library to fine-tune. The dataset used for this example is the [Dolly-15k](databricks/databricks-dolly-15k) dataset which can be easily accessed from Hugging Face's [Datasets](https://huggingface.co/datasets) Hub. ## What are TPUs? @@ -81,24 +81,23 @@ You now need to build the environment using Hugging Face's PyTorch TPU DLC [Dock ```bash git clone https://github.com/huggingface/Google-Cloud-Containers.git cd Google-Cloud-Containers -sudo docker build -t huggingface-pytorch-training-tpu-2.1.transformers.4.37.2.py310:latest -f containers/pytorch/training/tpu/2.1/transformers/4.37.2/py310/Dockerfile . +sudo docker build -t huggingface-pytorch-training-tpu-2.3.transformers.4.38.1.py310:latest -f containers/pytorch/training/tpu/2.3/transformers/4.38.1/py310/Dockerfile . ``` ## Train the model Once, the docker image is built, we need to run the docker container in order to activate the enviroment. You can use the following commands to run the docker container: ```bash -sudo docker run -it -v $(pwd):/workspace --privileged huggingface-pytorch-training-tpu-2.1.transformers.4.37.2.py310:latest bash +sudo docker run -it -v $(pwd):/workspace --privileged huggingface-pytorch-training-tpu-2.3.transformers.4.38.1.py310:latest bash ``` Now, you can run the following commands to train the model: ```bash +export PJRT_DEVICE=TPU XLA_USE_BF16=1 XLA_USE_SPMD=1 cd /workspace -python google-partnership/Google-Cloud-Containers/examples/google-cloud-tpu-vm/causal-language-modeling/peft-lora-trl-dolly-clm.py \ ---model_id facebook/opt-350m \ +python examples/google-cloud-tpu-vm/causal-language-modeling/finetune-gemma-lora-dolly.py \ --num_epochs 3 \ ---train_batch_size 8 \ ---num_cores 8 \ +--train_batch_size 16 \ --lr 3e-4 ``` \ No newline at end of file diff --git a/examples/google-cloud-tpu-vm/causal-language-modeling/dolly-clm.py b/examples/google-cloud-tpu-vm/causal-language-modeling/dolly-clm.py deleted file mode 100644 index 0b7c46e2..00000000 --- a/examples/google-cloud-tpu-vm/causal-language-modeling/dolly-clm.py +++ /dev/null @@ -1,101 +0,0 @@ -import argparse - -import torch -import torch_xla -import torch_xla.core.xla_model as xm -import torch_xla.distributed.xla_multiprocessing as xmp -from datasets import load_dataset -from transformers import ( - AutoModelForCausalLM, - AutoTokenizer, - DataCollatorForLanguageModeling, - Trainer, - TrainingArguments, -) - - -def train_model(args): - raw_dataset = load_dataset("databricks/databricks-dolly-15k", split="train") - - def format_dolly(sample): - instruction = f"### Instruction\n{sample['instruction']}" - context = ( - f"### Context\n{sample['context']}" if len(sample["context"]) > 0 else None - ) - response = f"### Answer\n{sample['response']}" - # join all the parts together - prompt = "\n\n".join( - [i for i in [instruction, context, response] if i is not None] - ) - sample["text"] = prompt - return sample - - # apply prompt template - format_dataset = raw_dataset.map( - format_dolly, remove_columns=list(raw_dataset.features) - ) - - # Load 
Tokenizer - tokenizer = AutoTokenizer.from_pretrained(args.model_id) - tokenizer.pad_token = tokenizer.eos_token - - # Tokenize the dataset - tokenized_train_dataset = format_dataset.map( - lambda example: tokenizer( - example["text"], padding="max_length", truncation=True, max_length=1024 - ), - batched=True, - remove_columns=format_dataset.features, - ) - - # Scale learning rate to num cores - lr = args.lr * xm.xrt_world_size() - device = xm.xla_device() - - # Load model - model = AutoModelForCausalLM.from_pretrained( - args.model_id, torch_dtype=torch.bfloat16 - ) - - ## Define training arguments - training_args = TrainingArguments( - output_dir="output", - per_device_train_batch_size=args.train_batch_size, - learning_rate=lr, - num_train_epochs=args.num_epochs, - logging_strategy="steps", - logging_steps=10, - bf16=True, - ) - # Initialize our Trainer - trainer = Trainer( - model=model, - args=training_args, - train_dataset=tokenized_train_dataset, - tokenizer=tokenizer, - data_collator=DataCollatorForLanguageModeling(tokenizer=tokenizer, mlm=False), - ) - - # Train the model - trainer.train() - - -def parse_args(): - parser = argparse.ArgumentParser() - parser.add_argument("--model_id", default="facebook/opt-125m", type=str) - parser.add_argument("--num_cores", default=8, type=int) - parser.add_argument("--num_epochs", default=3, type=int) - parser.add_argument("--train_batch_size", default=16, type=int) - parser.add_argument("--lr", default=1e-4, type=float) - args = parser.parse_args() - return args - - -def _mp_fn(index, args): - torch.set_default_dtype(torch.bfloat16) - train_model(args) - - -if __name__ == "__main__": - args = parse_args() - xmp.spawn(_mp_fn, args=(args,), nprocs=args.num_cores) diff --git a/examples/google-cloud-tpu-vm/causal-language-modeling/peft-lora-trl-dolly-clm.py b/examples/google-cloud-tpu-vm/causal-language-modeling/finetune-gemma-lora-dolly.py similarity index 73% rename from examples/google-cloud-tpu-vm/causal-language-modeling/peft-lora-trl-dolly-clm.py rename to examples/google-cloud-tpu-vm/causal-language-modeling/finetune-gemma-lora-dolly.py index b36ca38a..fdd2cc23 100644 --- a/examples/google-cloud-tpu-vm/causal-language-modeling/peft-lora-trl-dolly-clm.py +++ b/examples/google-cloud-tpu-vm/causal-language-modeling/finetune-gemma-lora-dolly.py @@ -3,7 +3,6 @@ import torch import torch_xla import torch_xla.core.xla_model as xm -import torch_xla.distributed.xla_multiprocessing as xmp from datasets import load_dataset from peft import LoraConfig, TaskType from transformers import ( @@ -15,8 +14,9 @@ from trl import SFTTrainer -def train_model(args): +def train_gemma(args): raw_dataset = load_dataset("databricks/databricks-dolly-15k", split="train") + model_id = "google/gemma-2b" def format_dolly(sample): instruction = f"### Instruction\n{sample['instruction']}" @@ -37,17 +37,13 @@ def format_dolly(sample): ) # Load Tokenizer - tokenizer = AutoTokenizer.from_pretrained(args.model_id) + tokenizer = AutoTokenizer.from_pretrained(model_id) tokenizer.pad_token = tokenizer.eos_token - # Scale learning rate to num cores - lr = args.lr * xm.xrt_world_size() device = xm.xla_device() # Load model - model = AutoModelForCausalLM.from_pretrained( - args.model_id, torch_dtype=torch.bfloat16 - ) + model = AutoModelForCausalLM.from_pretrained(model_id, torch_dtype=torch.bfloat16) lora_config = LoraConfig( r=16, target_modules=["q_proj", "v_proj"], @@ -56,15 +52,28 @@ def format_dolly(sample): lora_dropout=0.05, ) + # Set up the FSDP config. 
To enable FSDP via SPMD, set xla_fsdp_v2 to True. + fsdp_config = { + "fsdp_transformer_layer_cls_to_wrap": [ + "GemmaDecoderLayer" # Specify the layer to wrap according to the model's config + ], + "xla": True, + "xla_fsdp_v2": True, + "xla_fsdp_grad_ckpt": True, + } + # Define training arguments training_args = TrainingArguments( output_dir="output", per_device_train_batch_size=args.train_batch_size, - learning_rate=lr, + learning_rate=args.lr, num_train_epochs=args.num_epochs, logging_strategy="steps", - logging_steps=10, - bf16=True, + logging_steps=20, + save_steps=args.save_steps, + dataloader_drop_last=True, # Required for SPMD. + fsdp="full_shard", + fsdp_config=fsdp_config, ) # Initialize our Trainer @@ -84,20 +93,14 @@ def format_dolly(sample): def parse_args(): parser = argparse.ArgumentParser() - parser.add_argument("--model_id", default="facebook/opt-125m", type=str) - parser.add_argument("--num_cores", default=8, type=int) parser.add_argument("--num_epochs", default=3, type=int) parser.add_argument("--train_batch_size", default=16, type=int) parser.add_argument("--lr", default=3e-4, type=float) + parser.add_argument("--save_steps", default=100, type=int) args = parser.parse_args() return args -def _mp_fn(index, args): - torch.set_default_dtype(torch.bfloat16) - train_model(args) - - if __name__ == "__main__": args = parse_args() - xmp.spawn(_mp_fn, args=(args,), nprocs=args.num_cores) + train_gemma(args) From 68f94b4598e7a044772f44ccc76fb68627f48493 Mon Sep 17 00:00:00 2001 From: Shubham Krishna Date: Wed, 28 Feb 2024 13:09:19 +0100 Subject: [PATCH 13/21] Add info to add token --- examples/google-cloud-tpu-vm/causal-language-modeling/README.md | 1 + 1 file changed, 1 insertion(+) diff --git a/examples/google-cloud-tpu-vm/causal-language-modeling/README.md b/examples/google-cloud-tpu-vm/causal-language-modeling/README.md index 8c65440c..d9b79b69 100644 --- a/examples/google-cloud-tpu-vm/causal-language-modeling/README.md +++ b/examples/google-cloud-tpu-vm/causal-language-modeling/README.md @@ -95,6 +95,7 @@ Now, you can run the following commands to train the model: ```bash export PJRT_DEVICE=TPU XLA_USE_BF16=1 XLA_USE_SPMD=1 +export HF_TOKEN= cd /workspace python examples/google-cloud-tpu-vm/causal-language-modeling/finetune-gemma-lora-dolly.py \ --num_epochs 3 \ From 79872fdb492590184fe0f09f3c631534ca6daafa Mon Sep 17 00:00:00 2001 From: Shubham Krishna Date: Thu, 29 Feb 2024 19:50:11 +0100 Subject: [PATCH 14/21] checkpointing is faster with this base image --- .../training/tpu/2.3/transformers/4.38.1/py310/Dockerfile | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/containers/pytorch/training/tpu/2.3/transformers/4.38.1/py310/Dockerfile b/containers/pytorch/training/tpu/2.3/transformers/4.38.1/py310/Dockerfile index 829f24eb..eaa849f4 100644 --- a/containers/pytorch/training/tpu/2.3/transformers/4.38.1/py310/Dockerfile +++ b/containers/pytorch/training/tpu/2.3/transformers/4.38.1/py310/Dockerfile @@ -1,4 +1,4 @@ -FROM us-central1-docker.pkg.dev/tpu-pytorch-releases/docker/xla@sha256:8f1dcd5b03f993e4da5c20d17c77aff6a5f22d5455f8eb042d2e4b16ac460526 +FROM us-central1-docker.pkg.dev/tpu-pytorch-releases/docker/xla:nightly_3.10_tpuvm_20240229 # The nightly image with PyTorch = 2.3, Python=3.10 # Read more about it here: https://github.com/pytorch/xla?tab=readme-ov-file#docker From 2a80fe255b14e84b8190d9c59865258acbe7196a Mon Sep 17 00:00:00 2001 From: Shubham Krishna Date: Thu, 14 Mar 2024 16:25:01 +0000 Subject: [PATCH 15/21] Build transformers and trl from 
main for checkpointing and sfttrainer fix --- .../{4.38.1 => 4.39.0.dev0}/py310/Dockerfile | 9 ++++++--- .../finetune-gemma-lora-dolly.py | 12 ++++++------ 2 files changed, 12 insertions(+), 9 deletions(-) rename containers/pytorch/training/tpu/2.3/transformers/{4.38.1 => 4.39.0.dev0}/py310/Dockerfile (88%) diff --git a/containers/pytorch/training/tpu/2.3/transformers/4.38.1/py310/Dockerfile b/containers/pytorch/training/tpu/2.3/transformers/4.39.0.dev0/py310/Dockerfile similarity index 88% rename from containers/pytorch/training/tpu/2.3/transformers/4.38.1/py310/Dockerfile rename to containers/pytorch/training/tpu/2.3/transformers/4.39.0.dev0/py310/Dockerfile index eaa849f4..d52df1c0 100644 --- a/containers/pytorch/training/tpu/2.3/transformers/4.38.1/py310/Dockerfile +++ b/containers/pytorch/training/tpu/2.3/transformers/4.39.0.dev0/py310/Dockerfile @@ -45,7 +45,10 @@ RUN pip install --upgrade --no-cache-dir \ evaluate==${EVALUATE} \ peft==${PEFT} \ trl==${TRL} \ - notebook==${NOTEBOOK} + notebook==${NOTEBOOK} + +RUN pip install --upgrade git+https://github.com/huggingface/transformers.git \ + git+https://github.com/huggingface/trl.git #Install Google Cloud Dependencies RUN echo "deb [signed-by=/usr/share/keyrings/cloud.google.gpg] https://packages.cloud.google.com/apt cloud-sdk main" \ @@ -64,6 +67,6 @@ RUN pip install --upgrade --no-cache-dir \ # Check if correct versions are installed RUN python -c "import transformers, diffusers, datasets, accelerate, evaluate, peft, trl, torch; \ - assert all([mod.__version__ == version for mod, version in [(transformers, '${TRANSFORMERS}'), (diffusers, '${DIFFUSERS}'), \ - (datasets, '${DATASETS}'), (accelerate, '${ACCELERATE}'), (evaluate, '${EVALUATE}'), (peft, '${PEFT}'), (trl, '${TRL}'), \ + assert all([mod.__version__ == version for mod, version in [(diffusers, '${DIFFUSERS}'), \ + (datasets, '${DATASETS}'), (accelerate, '${ACCELERATE}'), (evaluate, '${EVALUATE}'), (peft, '${PEFT}'), \ (torch, '2.3.0')]])" \ No newline at end of file diff --git a/examples/google-cloud-tpu-vm/causal-language-modeling/finetune-gemma-lora-dolly.py b/examples/google-cloud-tpu-vm/causal-language-modeling/finetune-gemma-lora-dolly.py index fdd2cc23..d573993f 100644 --- a/examples/google-cloud-tpu-vm/causal-language-modeling/finetune-gemma-lora-dolly.py +++ b/examples/google-cloud-tpu-vm/causal-language-modeling/finetune-gemma-lora-dolly.py @@ -8,7 +8,6 @@ from transformers import ( AutoModelForCausalLM, AutoTokenizer, - DataCollatorForLanguageModeling, TrainingArguments, ) from trl import SFTTrainer @@ -45,10 +44,10 @@ def format_dolly(sample): # Load model model = AutoModelForCausalLM.from_pretrained(model_id, torch_dtype=torch.bfloat16) lora_config = LoraConfig( - r=16, + r=8, target_modules=["q_proj", "v_proj"], task_type=TaskType.CAUSAL_LM, - lora_alpha=32, + lora_alpha=16, lora_dropout=0.05, ) @@ -69,7 +68,7 @@ def format_dolly(sample): learning_rate=args.lr, num_train_epochs=args.num_epochs, logging_strategy="steps", - logging_steps=20, + logging_steps=args.logging_steps, save_steps=args.save_steps, dataloader_drop_last=True, # Required for SPMD. 
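        # Editor's note (explanatory comment, not part of the original patch): with
        # xla_fsdp_v2=True the Trainer runs FSDP through SPMD, i.e. parameters and every input
        # batch are sharded across all TPU devices. dataloader_drop_last=True keeps each step's
        # global batch evenly divisible across those devices, "fsdp_transformer_layer_cls_to_wrap"
        # names the decoder block to shard (GemmaDecoderLayer here), and xla_fsdp_grad_ckpt
        # recomputes activations of the wrapped layers during backward to save memory.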
fsdp="full_shard", @@ -85,18 +84,19 @@ def format_dolly(sample): packing=True, train_dataset=format_dataset, tokenizer=tokenizer, - data_collator=DataCollatorForLanguageModeling(tokenizer=tokenizer, mlm=False), ) # Train the model trainer.train() + trainer.save_model() def parse_args(): parser = argparse.ArgumentParser() parser.add_argument("--num_epochs", default=3, type=int) - parser.add_argument("--train_batch_size", default=16, type=int) + parser.add_argument("--train_batch_size", default=64, type=int) parser.add_argument("--lr", default=3e-4, type=float) parser.add_argument("--save_steps", default=100, type=int) + parser.add_argument("--logging_steps", default=20, type=int) args = parser.parse_args() return args From b53b2007324c88840b7624a430273c004b8ca418 Mon Sep 17 00:00:00 2001 From: Shubham Krishna Date: Thu, 14 Mar 2024 16:31:36 +0000 Subject: [PATCH 16/21] Update README --- .../causal-language-modeling/README.md | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/examples/google-cloud-tpu-vm/causal-language-modeling/README.md b/examples/google-cloud-tpu-vm/causal-language-modeling/README.md index d9b79b69..4b154e44 100644 --- a/examples/google-cloud-tpu-vm/causal-language-modeling/README.md +++ b/examples/google-cloud-tpu-vm/causal-language-modeling/README.md @@ -76,19 +76,19 @@ gcloud alpha compute tpus tpu-vm ssh dev-tpu-vm --zone=us-west4-a ``` -You now need to build the environment using Hugging Face's PyTorch TPU DLC [Dockerfile](https://github.com/huggingface/Google-Cloud-Containers/blob/feature/pytorch-tpu-container/containers/pytorch/training/tpu/2.1/transformers/4.37.2/py310/Dockerfile). You can use the following commands to build the environment: +You now need to build the environment using Hugging Face's PyTorch TPU DLC [Dockerfile](https://github.com/huggingface/Google-Cloud-Containers/blob/main/containers/pytorch/training/tpu/2.3/transformers/4.39.0.dev0/py310/Dockerfile). You can use the following commands to build the environment: ```bash git clone https://github.com/huggingface/Google-Cloud-Containers.git cd Google-Cloud-Containers -sudo docker build -t huggingface-pytorch-training-tpu-2.3.transformers.4.38.1.py310:latest -f containers/pytorch/training/tpu/2.3/transformers/4.38.1/py310/Dockerfile . +sudo docker build -t huggingface-pytorch-training-tpu-2.3.transformers.4.39.0.dev0.py310:latest -f containers/pytorch/training/tpu/2.3/transformers/4.39.0.dev0/py310/Dockerfile . ``` ## Train the model Once, the docker image is built, we need to run the docker container in order to activate the enviroment. 
You can use the following commands to run the docker container: ```bash -sudo docker run -it -v $(pwd):/workspace --privileged huggingface-pytorch-training-tpu-2.3.transformers.4.38.1.py310:latest bash +sudo docker run -it -v $(pwd):/workspace --privileged huggingface-pytorch-training-tpu-2.3.transformers.4.39.0.dev0.py310:latest bash ``` Now, you can run the following commands to train the model: @@ -99,6 +99,6 @@ export HF_TOKEN= cd /workspace python examples/google-cloud-tpu-vm/causal-language-modeling/finetune-gemma-lora-dolly.py \ --num_epochs 3 \ ---train_batch_size 16 \ +--train_batch_size 64 \ --lr 3e-4 ``` \ No newline at end of file From e75a3dde5da01622fbef3762830e8f210e32332d Mon Sep 17 00:00:00 2001 From: Shubham Krishna Date: Thu, 14 Mar 2024 20:14:06 +0000 Subject: [PATCH 17/21] Change parameters --- .../causal-language-modeling/finetune-gemma-lora-dolly.py | 8 ++++++-- 1 file changed, 6 insertions(+), 2 deletions(-) diff --git a/examples/google-cloud-tpu-vm/causal-language-modeling/finetune-gemma-lora-dolly.py b/examples/google-cloud-tpu-vm/causal-language-modeling/finetune-gemma-lora-dolly.py index d573993f..738fd250 100644 --- a/examples/google-cloud-tpu-vm/causal-language-modeling/finetune-gemma-lora-dolly.py +++ b/examples/google-cloud-tpu-vm/causal-language-modeling/finetune-gemma-lora-dolly.py @@ -45,7 +45,7 @@ def format_dolly(sample): model = AutoModelForCausalLM.from_pretrained(model_id, torch_dtype=torch.bfloat16) lora_config = LoraConfig( r=8, - target_modules=["q_proj", "v_proj"], + target_modules="all-linear", task_type=TaskType.CAUSAL_LM, lora_alpha=16, lora_dropout=0.05, @@ -70,6 +70,10 @@ def format_dolly(sample): logging_strategy="steps", logging_steps=args.logging_steps, save_steps=args.save_steps, + bf16=True, + max_grad_norm=0.3, # max gradient norm based on QLoRA paper + warmup_ratio=0.03, # warmup ratio based on QLoRA paper + lr_scheduler_type="constant", dataloader_drop_last=True, # Required for SPMD. 
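        # Editor's note (explanatory comment, not part of the original patch): max_grad_norm=0.3,
        # warmup_ratio=0.03 and the constant schedule mirror the settings reported in the QLoRA
        # paper; since this run is plain bf16 LoRA on TPU rather than 4-bit QLoRA, treat them as
        # reasonable starting points rather than tuned values.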
fsdp="full_shard", fsdp_config=fsdp_config, @@ -93,7 +97,7 @@ def format_dolly(sample): def parse_args(): parser = argparse.ArgumentParser() parser.add_argument("--num_epochs", default=3, type=int) - parser.add_argument("--train_batch_size", default=64, type=int) + parser.add_argument("--train_batch_size", default=32, type=int) parser.add_argument("--lr", default=3e-4, type=float) parser.add_argument("--save_steps", default=100, type=int) parser.add_argument("--logging_steps", default=20, type=int) From e3041ad1745ec284fdd8c97ff5e793c68e1d6052 Mon Sep 17 00:00:00 2001 From: Shubham Krishna Date: Fri, 15 Mar 2024 08:06:26 +0000 Subject: [PATCH 18/21] Change paramters, add example for LLama --- .../finetune-gemma-lora-dolly.py | 14 ++- .../finetune-llama2-lora-guanaco.py | 87 +++++++++++++++++++ 2 files changed, 92 insertions(+), 9 deletions(-) create mode 100644 examples/google-cloud-tpu-vm/causal-language-modeling/finetune-llama2-lora-guanaco.py diff --git a/examples/google-cloud-tpu-vm/causal-language-modeling/finetune-gemma-lora-dolly.py b/examples/google-cloud-tpu-vm/causal-language-modeling/finetune-gemma-lora-dolly.py index 738fd250..5cf4a5d4 100644 --- a/examples/google-cloud-tpu-vm/causal-language-modeling/finetune-gemma-lora-dolly.py +++ b/examples/google-cloud-tpu-vm/causal-language-modeling/finetune-gemma-lora-dolly.py @@ -5,11 +5,7 @@ import torch_xla.core.xla_model as xm from datasets import load_dataset from peft import LoraConfig, TaskType -from transformers import ( - AutoModelForCausalLM, - AutoTokenizer, - TrainingArguments, -) +from transformers import AutoModelForCausalLM, AutoTokenizer, TrainingArguments from trl import SFTTrainer @@ -71,9 +67,9 @@ def format_dolly(sample): logging_steps=args.logging_steps, save_steps=args.save_steps, bf16=True, - max_grad_norm=0.3, # max gradient norm based on QLoRA paper - warmup_ratio=0.03, # warmup ratio based on QLoRA paper - lr_scheduler_type="constant", + max_grad_norm=0.3, # max gradient norm based on QLoRA paper + warmup_ratio=0.03, # warmup ratio based on QLoRA paper + lr_scheduler_type="constant", dataloader_drop_last=True, # Required for SPMD. 
fsdp="full_shard", fsdp_config=fsdp_config, @@ -100,7 +96,7 @@ def parse_args(): parser.add_argument("--train_batch_size", default=32, type=int) parser.add_argument("--lr", default=3e-4, type=float) parser.add_argument("--save_steps", default=100, type=int) - parser.add_argument("--logging_steps", default=20, type=int) + parser.add_argument("--logging_steps", default=1, type=int) args = parser.parse_args() return args diff --git a/examples/google-cloud-tpu-vm/causal-language-modeling/finetune-llama2-lora-guanaco.py b/examples/google-cloud-tpu-vm/causal-language-modeling/finetune-llama2-lora-guanaco.py new file mode 100644 index 00000000..b64e2f40 --- /dev/null +++ b/examples/google-cloud-tpu-vm/causal-language-modeling/finetune-llama2-lora-guanaco.py @@ -0,0 +1,87 @@ +import argparse + +import torch +import torch_xla +import torch_xla.core.xla_model as xm +from datasets import load_dataset +from peft import LoraConfig, TaskType +from transformers import AutoModelForCausalLM, AutoTokenizer, TrainingArguments +from trl import SFTTrainer + + +def train_gemma(args): + raw_dataset = load_dataset("timdettmers/openassistant-guanaco", split="train") + model_id = "meta-llama/Llama-2-7b-hf" + + # Load Tokenizer + tokenizer = AutoTokenizer.from_pretrained(model_id) + tokenizer.pad_token = tokenizer.eos_token + + device = xm.xla_device() + + # Load model + model = AutoModelForCausalLM.from_pretrained(model_id, torch_dtype=torch.bfloat16) + lora_config = LoraConfig( + r=8, + task_type=TaskType.CAUSAL_LM, + lora_alpha=16, + lora_dropout=0.05, + ) + + # Set up the FSDP config. To enable FSDP via SPMD, set xla_fsdp_v2 to True. + fsdp_config = { + "fsdp_transformer_layer_cls_to_wrap": [ + "LlamaDecoderLayer" # Specify the layer to wrap according to the model's config + ], + "xla": True, + "xla_fsdp_v2": True, + "xla_fsdp_grad_ckpt": True, + } + + # Define training arguments + training_args = TrainingArguments( + output_dir="output", + per_device_train_batch_size=args.train_batch_size, + learning_rate=args.lr, + num_train_epochs=args.num_epochs, + logging_strategy="steps", + logging_steps=args.logging_steps, + save_steps=args.save_steps, + bf16=True, + max_grad_norm=0.3, # max gradient norm based on QLoRA paper + warmup_ratio=0.03, # warmup ratio based on QLoRA paper + lr_scheduler_type="constant", + dataloader_drop_last=True, # Required for SPMD. 
+ fsdp="full_shard", + fsdp_config=fsdp_config, + ) + + # Initialize our Trainer + trainer = SFTTrainer( + model=model, + peft_config=lora_config, + args=training_args, + dataset_text_field="text", + packing=True, + train_dataset=raw_dataset, + tokenizer=tokenizer, + ) + # Train the model + trainer.train() + trainer.save_model() + + +def parse_args(): + parser = argparse.ArgumentParser() + parser.add_argument("--num_epochs", default=3, type=int) + parser.add_argument("--train_batch_size", default=16, type=int) + parser.add_argument("--lr", default=3e-4, type=float) + parser.add_argument("--save_steps", default=100, type=int) + parser.add_argument("--logging_steps", default=1, type=int) + args = parser.parse_args() + return args + + +if __name__ == "__main__": + args = parse_args() + train_gemma(args) From 7f2e378828c40cf9a3d06e8c418c457d112030a3 Mon Sep 17 00:00:00 2001 From: Shubham Krishna Date: Fri, 15 Mar 2024 08:10:30 +0000 Subject: [PATCH 19/21] Rename folder --- .../causal-language-modeling/README.md | 4 ++-- .../causal-language-modeling/finetune-gemma-lora-dolly.py | 0 .../causal-language-modeling/finetune-llama2-lora-guanaco.py | 0 3 files changed, 2 insertions(+), 2 deletions(-) rename examples/{google-cloud-tpu-vm => tpu-examples}/causal-language-modeling/README.md (97%) rename examples/{google-cloud-tpu-vm => tpu-examples}/causal-language-modeling/finetune-gemma-lora-dolly.py (100%) rename examples/{google-cloud-tpu-vm => tpu-examples}/causal-language-modeling/finetune-llama2-lora-guanaco.py (100%) diff --git a/examples/google-cloud-tpu-vm/causal-language-modeling/README.md b/examples/tpu-examples/causal-language-modeling/README.md similarity index 97% rename from examples/google-cloud-tpu-vm/causal-language-modeling/README.md rename to examples/tpu-examples/causal-language-modeling/README.md index 4b154e44..f694cd31 100644 --- a/examples/google-cloud-tpu-vm/causal-language-modeling/README.md +++ b/examples/tpu-examples/causal-language-modeling/README.md @@ -97,8 +97,8 @@ Now, you can run the following commands to train the model: export PJRT_DEVICE=TPU XLA_USE_BF16=1 XLA_USE_SPMD=1 export HF_TOKEN= cd /workspace -python examples/google-cloud-tpu-vm/causal-language-modeling/finetune-gemma-lora-dolly.py \ +python examples/tpu-examples/causal-language-modeling/finetune-gemma-lora-dolly.py \ --num_epochs 3 \ ---train_batch_size 64 \ +--train_batch_size 32 \ --lr 3e-4 ``` \ No newline at end of file diff --git a/examples/google-cloud-tpu-vm/causal-language-modeling/finetune-gemma-lora-dolly.py b/examples/tpu-examples/causal-language-modeling/finetune-gemma-lora-dolly.py similarity index 100% rename from examples/google-cloud-tpu-vm/causal-language-modeling/finetune-gemma-lora-dolly.py rename to examples/tpu-examples/causal-language-modeling/finetune-gemma-lora-dolly.py diff --git a/examples/google-cloud-tpu-vm/causal-language-modeling/finetune-llama2-lora-guanaco.py b/examples/tpu-examples/causal-language-modeling/finetune-llama2-lora-guanaco.py similarity index 100% rename from examples/google-cloud-tpu-vm/causal-language-modeling/finetune-llama2-lora-guanaco.py rename to examples/tpu-examples/causal-language-modeling/finetune-llama2-lora-guanaco.py From 6bb2197f726a07282ab12b84f4fa20a1a2ef7961 Mon Sep 17 00:00:00 2001 From: Shubham Krishna Date: Fri, 15 Mar 2024 09:01:15 +0000 Subject: [PATCH 20/21] llama-example: name of function from train_gemma to train_llama --- .../causal-language-modeling/finetune-llama2-lora-guanaco.py | 4 ++-- 1 file changed, 2 insertions(+), 2 
deletions(-) diff --git a/examples/tpu-examples/causal-language-modeling/finetune-llama2-lora-guanaco.py b/examples/tpu-examples/causal-language-modeling/finetune-llama2-lora-guanaco.py index b64e2f40..5da11a6a 100644 --- a/examples/tpu-examples/causal-language-modeling/finetune-llama2-lora-guanaco.py +++ b/examples/tpu-examples/causal-language-modeling/finetune-llama2-lora-guanaco.py @@ -9,7 +9,7 @@ from trl import SFTTrainer -def train_gemma(args): +def train_llama(args): raw_dataset = load_dataset("timdettmers/openassistant-guanaco", split="train") model_id = "meta-llama/Llama-2-7b-hf" @@ -84,4 +84,4 @@ def parse_args(): if __name__ == "__main__": args = parse_args() - train_gemma(args) + train_llama(args) From b119483817d4550914c5ba5153f3f630e638306f Mon Sep 17 00:00:00 2001 From: Shubham Krishna Date: Mon, 25 Mar 2024 13:00:37 +0000 Subject: [PATCH 21/21] update(tpu-examples): change model to gemma-7b, add inference at the end of training. --- .../transformers/4.39.0.dev0/py310/Dockerfile | 21 +++++----- .../causal-language-modeling/README.md | 14 +++---- .../finetune-gemma-lora-dolly.py | 38 ++++++++++++----- .../finetune-llama2-lora-guanaco.py | 42 +++++++++++++------ 4 files changed, 76 insertions(+), 39 deletions(-) diff --git a/containers/pytorch/training/tpu/2.3/transformers/4.39.0.dev0/py310/Dockerfile b/containers/pytorch/training/tpu/2.3/transformers/4.39.0.dev0/py310/Dockerfile index d52df1c0..81f92c3a 100644 --- a/containers/pytorch/training/tpu/2.3/transformers/4.39.0.dev0/py310/Dockerfile +++ b/containers/pytorch/training/tpu/2.3/transformers/4.39.0.dev0/py310/Dockerfile @@ -6,11 +6,11 @@ LABEL maintainer="Hugging Face" ARG DEBIAN_FRONTEND=noninteractive # Versions -ARG TRANSFORMERS='4.38.1' -ARG DIFFUSERS='0.26.3' -ARG PEFT='0.8.2' -ARG TRL='0.7.11' -ARG DATASETS='2.17.1' +ARG TRANSFORMERS='4.39.0' +ARG DIFFUSERS='0.27.1' +ARG PEFT='0.9.0' +ARG TRL='0.8.1' +ARG DATASETS='2.18.0' ARG ACCELERATE='0.27.2' ARG EVALUATE='0.4.1' ARG NOTEBOOK='7.1.1' @@ -45,10 +45,11 @@ RUN pip install --upgrade --no-cache-dir \ evaluate==${EVALUATE} \ peft==${PEFT} \ trl==${TRL} \ - notebook==${NOTEBOOK} - -RUN pip install --upgrade git+https://github.com/huggingface/transformers.git \ - git+https://github.com/huggingface/trl.git + notebook==${NOTEBOOK} + +# fix about saving checkpoints on TPU when using FSDPv2 +# can be removed once https://github.com/huggingface/transformers/pull/29780 is merged and new release is made +RUN pip install --upgrade git+https://github.com/shub-kris/transformers.git@fix/checkpointing-on-tpu-with-fsdpv2 #Install Google Cloud Dependencies RUN echo "deb [signed-by=/usr/share/keyrings/cloud.google.gpg] https://packages.cloud.google.com/apt cloud-sdk main" \ @@ -68,5 +69,5 @@ RUN pip install --upgrade --no-cache-dir \ # Check if correct versions are installed RUN python -c "import transformers, diffusers, datasets, accelerate, evaluate, peft, trl, torch; \ assert all([mod.__version__ == version for mod, version in [(diffusers, '${DIFFUSERS}'), \ - (datasets, '${DATASETS}'), (accelerate, '${ACCELERATE}'), (evaluate, '${EVALUATE}'), (peft, '${PEFT}'), \ + (datasets, '${DATASETS}'), (accelerate, '${ACCELERATE}'), (evaluate, '${EVALUATE}'), (peft, '${PEFT}'), (trl, '${TRL}'), \ (torch, '2.3.0')]])" \ No newline at end of file diff --git a/examples/tpu-examples/causal-language-modeling/README.md b/examples/tpu-examples/causal-language-modeling/README.md index f694cd31..003c2fe0 100644 --- a/examples/tpu-examples/causal-language-modeling/README.md +++ 
b/examples/tpu-examples/causal-language-modeling/README.md @@ -1,6 +1,6 @@ -# Finetune Gemma-2B using Hugging Face PyTorch TPU DLC on Google Cloud TPU(v5e) +# Finetune Gemma-2B using Hugging Face PyTorch TPU DLC on Google Cloud TPU(v3-8) -This example demonstrates how to finetune [gemma-2b](https://huggingface.co/google/gemma-2b) using Hugging Face's DLCs on Google Cloud single-host TPU(v5e) VM. We use the [transformers](https://huggingface.co/docs/transformers/), [TRL](https://huggingface.co/docs/trl/en/index), and [PEFT](https://huggingface.co/docs/peft/index) library to fine-tune. The dataset used for this example is the [Dolly-15k](databricks/databricks-dolly-15k) dataset which can be easily accessed from Hugging Face's [Datasets](https://huggingface.co/datasets) Hub. +This example demonstrates how to finetune [gemma-7b](https://huggingface.co/google/gemma-7b) using Hugging Face's DLCs on Google Cloud single-host TPU(v3-8) VM. We use the [transformers](https://huggingface.co/docs/transformers/), [TRL](https://huggingface.co/docs/trl/en/index), and [PEFT](https://huggingface.co/docs/peft/index) library to fine-tune. The dataset used for this example is the [Dolly-15k](databricks/databricks-dolly-15k) dataset which can be easily accessed from Hugging Face's [Datasets](https://huggingface.co/datasets) Hub. ## What are TPUs? @@ -47,7 +47,7 @@ gcloud services enable tpu.googleapis.com ## Spin up a TPU VM on Google Cloud -We will be using [Cloud TPU v5e](https://cloud.google.com/tpu/docs/v5e-training), Google Cloud's latest generation AI accelerator. We will setup a single-host TPU(v5e) VM to train the model. +We will be using [Cloud TPU v3-8](https://cloud.google.com/tpu/docs/v3), Google Cloud's generation AI accelerator. We will setup a single-host TPU(v3-8) VM to train the model. You can read more about Single-host(8 chips) and Multi-host(> 8 chips) TPU VMs on [Google Cloud TPU configurations](https://cloud.google.com/tpu/docs/supported-tpu-configurations). @@ -58,10 +58,10 @@ To [set up a TPU VM](https://cloud.google.com/tpu/docs/setup-gcp-account#set-up- ```bash -gcloud alpha compute tpus tpu-vm create dev-tpu-vm \ +gcloud compute tpus tpu-vm create dev-tpu-vm \ --zone=us-west4-a \ ---accelerator-type=v5litepod-8 \ ---version v2-alpha-tpuv5-lite +--accelerator-type=v3-8 \ +--version tpu-ubuntu2204-base ``` After some time, the TPU VM will be created. You can see the list of TPU VMs in [Google Cloud console](https://console.cloud.google.com/compute/tpus). @@ -72,7 +72,7 @@ After some time, the TPU VM will be created. 
You can see the list of TPU VMs in Once, the Cloud TPU VM is up and running, you can SSH into the VM using the following command: ```bash -gcloud alpha compute tpus tpu-vm ssh dev-tpu-vm --zone=us-west4-a +gcloud compute tpus tpu-vm ssh dev-tpu-vm --zone=us-west4-a ``` diff --git a/examples/tpu-examples/causal-language-modeling/finetune-gemma-lora-dolly.py b/examples/tpu-examples/causal-language-modeling/finetune-gemma-lora-dolly.py index 5cf4a5d4..f979deff 100644 --- a/examples/tpu-examples/causal-language-modeling/finetune-gemma-lora-dolly.py +++ b/examples/tpu-examples/causal-language-modeling/finetune-gemma-lora-dolly.py @@ -2,16 +2,32 @@ import torch import torch_xla -import torch_xla.core.xla_model as xm from datasets import load_dataset -from peft import LoraConfig, TaskType +from peft import LoraConfig, PeftModel, TaskType from transformers import AutoModelForCausalLM, AutoTokenizer, TrainingArguments from trl import SFTTrainer +def inference(model, tokenizer): + prompts = [ + "Why can camels survive for long without water?", + "Are the following items candy bars or gum: trident, Twix, hubba bubba, snickers, three musketeers, and wrigleys.", + ] + + for prompt in prompts: + text = f"### Instruction\n {prompt}" + device = "cpu" + inputs = tokenizer(text, return_tensors="pt").to(device) + outputs = model.generate( + **inputs, max_new_tokens=50 + ) # model.generate only supported on GPU and CPU + print(tokenizer.decode(outputs[0], skip_special_tokens=True)) + print("\n\n") + + def train_gemma(args): raw_dataset = load_dataset("databricks/databricks-dolly-15k", split="train") - model_id = "google/gemma-2b" + model_id = args.model_id def format_dolly(sample): instruction = f"### Instruction\n{sample['instruction']}" @@ -33,9 +49,7 @@ def format_dolly(sample): # Load Tokenizer tokenizer = AutoTokenizer.from_pretrained(model_id) - tokenizer.pad_token = tokenizer.eos_token - - device = xm.xla_device() + tokenizer.padding_side = "right" # to prevent warnings # Load model model = AutoModelForCausalLM.from_pretrained(model_id, torch_dtype=torch.bfloat16) @@ -66,10 +80,8 @@ def format_dolly(sample): logging_strategy="steps", logging_steps=args.logging_steps, save_steps=args.save_steps, + optim="adafactor", bf16=True, - max_grad_norm=0.3, # max gradient norm based on QLoRA paper - warmup_ratio=0.03, # warmup ratio based on QLoRA paper - lr_scheduler_type="constant", dataloader_drop_last=True, # Required for SPMD. 
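        # Editor's note (explanatory comment, not part of the original patch): switching the
        # optimizer to Adafactor is presumably a memory trade-off for the larger gemma-7b model,
        # since Adafactor keeps factored second-moment statistics instead of AdamW's two
        # full-size state tensors; the QLoRA-style max_grad_norm / warmup_ratio /
        # constant-schedule settings are dropped in the same change.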
fsdp="full_shard", fsdp_config=fsdp_config, @@ -89,13 +101,19 @@ def format_dolly(sample): trainer.train() trainer.save_model() + # Inference + model = AutoModelForCausalLM.from_pretrained(model_id) + trained_peft_model = PeftModel.from_pretrained(model, "output") + inference(trained_peft_model, tokenizer) + def parse_args(): parser = argparse.ArgumentParser() + parser.add_argument("--model_id", default="google/gemma-7b", type=str) parser.add_argument("--num_epochs", default=3, type=int) parser.add_argument("--train_batch_size", default=32, type=int) parser.add_argument("--lr", default=3e-4, type=float) - parser.add_argument("--save_steps", default=100, type=int) + parser.add_argument("--save_steps", default=500, type=int) parser.add_argument("--logging_steps", default=1, type=int) args = parser.parse_args() return args diff --git a/examples/tpu-examples/causal-language-modeling/finetune-llama2-lora-guanaco.py b/examples/tpu-examples/causal-language-modeling/finetune-llama2-lora-guanaco.py index 5da11a6a..b81ab03e 100644 --- a/examples/tpu-examples/causal-language-modeling/finetune-llama2-lora-guanaco.py +++ b/examples/tpu-examples/causal-language-modeling/finetune-llama2-lora-guanaco.py @@ -2,22 +2,37 @@ import torch import torch_xla -import torch_xla.core.xla_model as xm from datasets import load_dataset -from peft import LoraConfig, TaskType +from peft import LoraConfig, PeftModel, TaskType from transformers import AutoModelForCausalLM, AutoTokenizer, TrainingArguments from trl import SFTTrainer +def inference(model, tokenizer): + prompts = [ + "### Human: From now on, you will act as a nutritionist. I will ask questions about nutrition and you will reply with an explanation on how I can apply it to my daily basis. My first request: What is the main benefit of doing intermittent fastening regularly?", + "### Human: Was kannst Du im Vergleich zu anderen Large Language Models?", + ] + + for prompt in prompts: + device = "cpu" + inputs = tokenizer(prompt, return_tensors="pt").to(device) + outputs = model.generate( + **inputs, max_new_tokens=50 + ) # model.generate only supported on GPU and CPU + print(tokenizer.decode(outputs[0], skip_special_tokens=True)) + print("\n\n") + + def train_llama(args): raw_dataset = load_dataset("timdettmers/openassistant-guanaco", split="train") - model_id = "meta-llama/Llama-2-7b-hf" + model_id = args.model_id # Load Tokenizer tokenizer = AutoTokenizer.from_pretrained(model_id) - tokenizer.pad_token = tokenizer.eos_token - - device = xm.xla_device() + tokenizer.pad_token = ( + tokenizer.eos_token + ) # Set the padding token to be the same as the end-of-sequence token # Load model model = AutoModelForCausalLM.from_pretrained(model_id, torch_dtype=torch.bfloat16) @@ -48,9 +63,6 @@ def train_llama(args): logging_steps=args.logging_steps, save_steps=args.save_steps, bf16=True, - max_grad_norm=0.3, # max gradient norm based on QLoRA paper - warmup_ratio=0.03, # warmup ratio based on QLoRA paper - lr_scheduler_type="constant", dataloader_drop_last=True, # Required for SPMD. 
fsdp="full_shard", fsdp_config=fsdp_config, @@ -70,13 +82,19 @@ def train_llama(args): trainer.train() trainer.save_model() + # Inference + model = AutoModelForCausalLM.from_pretrained(model_id) + trained_peft_model = PeftModel.from_pretrained(model, "output") + inference(trained_peft_model, tokenizer) + def parse_args(): parser = argparse.ArgumentParser() + parser.add_argument("--model-id", default="meta-llama/Llama-2-7b-hf", type=str) parser.add_argument("--num_epochs", default=3, type=int) - parser.add_argument("--train_batch_size", default=16, type=int) - parser.add_argument("--lr", default=3e-4, type=float) - parser.add_argument("--save_steps", default=100, type=int) + parser.add_argument("--train_batch_size", default=32, type=int) + parser.add_argument("--lr", default=2e-4, type=float) + parser.add_argument("--save_steps", default=500, type=int) parser.add_argument("--logging_steps", default=1, type=int) args = parser.parse_args() return args