diff --git a/tutorials/nlp/lora.ipynb b/tutorials/nlp/lora.ipynb
index 0694f1718bd3..006199e4fb67 100644
--- a/tutorials/nlp/lora.ipynb
+++ b/tutorials/nlp/lora.ipynb
@@ -2,32 +2,35 @@
  "cells": [
   {
    "cell_type": "markdown",
+   "metadata": {
+    "collapsed": false
+   },
    "source": [
     "Currently, this notebook must be run in a NeMo container.\n",
     "An example command to launch the container:\n",
     "```bash\n",
     "docker run --gpus all -it --rm -v :/NeMo --shm-size=8g -p 8888:8888 -p 6006:6006 --ulimit memlock=-1 --ulimit stack=67108864 \n",
     "```"
-   ],
-   "metadata": {
-    "collapsed": false
-   }
+   ]
   },
   {
    "cell_type": "code",
    "execution_count": null,
+   "metadata": {
+    "collapsed": false
+   },
    "outputs": [],
    "source": [
     "# Update megatron version to the newest.\n",
     "!cd /workspace && python -m pip install -e git+https://github.com/NVIDIA/Megatron-LM#egg=megatron-core"
-   ],
-   "metadata": {
-    "collapsed": false
-   }
+   ]
   },
   {
    "cell_type": "code",
    "execution_count": null,
+   "metadata": {
+    "collapsed": false
+   },
    "outputs": [],
    "source": [
     "%cd /NeMo/tutorials/nlp\n",
@@ -36,10 +39,7 @@
     "import wget\n",
     "import sys\n",
     "sys.path.insert(0, \"../..\") # find the local nemo first before the installed nemo"
-   ],
-   "metadata": {
-    "collapsed": false
-   }
+   ]
   },
   {
    "attachments": {},
@@ -325,13 +325,13 @@
   {
    "cell_type": "code",
    "execution_count": null,
+   "metadata": {
+    "collapsed": false
+   },
    "outputs": [],
    "source": [
     "!wget -nc --content-disposition {megatron_gpt_345m_nemo_url} -O {NEMO_DIR}/{gpt_file_name}"
-   ],
-   "metadata": {
-    "collapsed": false
-   }
+   ]
   },
   {
    "attachments": {},
@@ -537,6 +537,9 @@
   },
   {
    "cell_type": "markdown",
+   "metadata": {
+    "collapsed": false
+   },
    "source": [
     "Simply substitute with the `MegatronT5SFTModel` class to use T5 instead of GPT.\n",
     "\n",
@@ -544,10 +547,7 @@
     "`model.add_adapter([LoraPEFTConfig(model_cfg), PtuningPEFTConfig(model_cfg)])`\n",
     "\n",
     "We're now ready to start training."
-   ],
-   "metadata": {
-    "collapsed": false
-   }
+   ]
   },
   {
    "cell_type": "code",
@@ -597,6 +597,9 @@
   {
    "cell_type": "code",
    "execution_count": null,
+   "metadata": {
+    "collapsed": false
+   },
    "outputs": [],
    "source": [
     "# reimport libraries and classes in case one wants to only run cells from the Inference section\n",
@@ -612,31 +615,28 @@
     "DATA_DIR = \"data\"\n",
     "CONFIG_DIR = os.path.join(NEMO_DIR, \"conf\")\n",
     "SQUAD_DIR = os.path.join(DATA_DIR, \"SQuAD\")\n"
-   ],
-   "metadata": {
-    "collapsed": false
-   }
+   ]
   },
   {
    "cell_type": "markdown",
-   "source": [
-    "First, we will load and modify a config file that will be used for inference.\n"
-   ],
    "metadata": {
     "collapsed": false
-   }
+   },
+   "source": [
+    "First, we will load and modify a config file that will be used for inference.\n"
+   ]
   },
   {
    "cell_type": "code",
    "execution_count": null,
+   "metadata": {
+    "collapsed": false
+   },
    "outputs": [],
    "source": [
     "# Download the example config file\n",
     "wget.download(f'https://raw.githubusercontent.com/NVIDIA/NeMo/{BRANCH}/examples/nlp/language_modeling/tuning/conf/megatron_gpt_generate_config.yaml', CONFIG_DIR)"
-   ],
-   "metadata": {
-    "collapsed": false
-   }
+   ]
   },
   {
    "cell_type": "code",
@@ -711,30 +711,30 @@
   },
   {
    "cell_type": "markdown",
-   "source": [
-    "The cell below is required if you are running the notebook end-to-end, and if you use a different batch size for training and evaluation. In this case, the microbatch calculator needs to be rest. If you are running training only or inference only, feel free to ignore this cell."
-   ],
    "metadata": {
     "collapsed": false
-   }
+   },
+   "source": [
+    "The cell below is required if you are running the notebook end-to-end, and if you use a different batch size for training and evaluation. In this case, the microbatch calculator needs to be reset. If you are running training only or inference only, feel free to ignore this cell."
+   ]
   },
   {
    "cell_type": "code",
    "execution_count": null,
+   "metadata": {
+    "collapsed": false
+   },
    "outputs": [],
    "source": [
-    "from nemo.utils.apex_utils import _reconfigure_microbatch_calculator\n",
-    "_reconfigure_microbatch_calculator(\n",
+    "from megatron.core.num_microbatches_calculator import reconfigure_num_microbatches_calculator\n",
+    "reconfigure_num_microbatches_calculator(\n",
     "    rank=0,\n",
     "    rampup_batch_size=None,\n",
     "    global_batch_size=config_eval.model.global_batch_size,\n",
     "    micro_batch_size=config_eval.model.micro_batch_size,\n",
     "    data_parallel_size=1,\n",
     ")"
-   ],
-   "metadata": {
-    "collapsed": false
-   }
+   ]
   },
   {
    "attachments": {},