diff --git a/intel_extension_for_transformers/neural_chat/docker/Dockerfile b/intel_extension_for_transformers/neural_chat/docker/Dockerfile
index 5329f56a22a..fed3fce8351 100644
--- a/intel_extension_for_transformers/neural_chat/docker/Dockerfile
+++ b/intel_extension_for_transformers/neural_chat/docker/Dockerfile
@@ -144,3 +144,49 @@ RUN cd /intel-extension-for-transformers/intel_extension_for_transformers/neural
 
 WORKDIR /intel-extension-for-transformers/intel_extension_for_transformers/neural_chat/
 
+## NVIDIA GPU environment
+FROM nvidia/cuda:12.2.2-runtime-ubuntu22.04 as nvgpu
+
+ARG ITREX_VER=main
+ARG PYTHON_VERSION=3.9
+ARG REPO=https://github.com/intel/intel-extension-for-transformers.git
+
+# See http://bugs.python.org/issue19846
+ENV LANG=C.UTF-8
+
+# Install system dependencies
+SHELL ["/bin/bash", "--login", "-c"]
+RUN apt-get update \
+    && apt-get install -y build-essential \
+    && apt-get install -y wget numactl git \
+    && apt-get install -y openssh-server && mkdir -p /var/run/sshd \
+    && apt-get clean \
+    && rm -rf /var/lib/apt/lists/*
+
+# Download ITREX code
+RUN git clone --single-branch --branch=${ITREX_VER} ${REPO} intel-extension-for-transformers
+
+# Install miniconda
+ENV CONDA_DIR=/opt/conda
+RUN wget --quiet https://repo.anaconda.com/miniconda/Miniconda3-latest-Linux-x86_64.sh -O ~/miniconda.sh && \
+    /bin/bash ~/miniconda.sh -b -p /opt/conda && rm ~/miniconda.sh
+
+# Put conda in path so we can use conda activate
+SHELL ["/bin/bash", "--login", "-c"]
+ENV PATH=$CONDA_DIR/bin:$PATH
+RUN conda init bash && \
+    unset -f conda && \
+    export PATH=$CONDA_DIR/bin/:${PATH} && \
+    conda config --add channels intel && \
+    conda create -yn neuralchat python=${PYTHON_VERSION} && \
+    echo "conda activate neuralchat" >> ~/.bashrc && \
+    echo "export PYTHONPATH=/intel-extension-for-transformers" >> ~/.bashrc && \
+    source ~/.bashrc
+
+RUN source activate && conda activate neuralchat && cd /intel-extension-for-transformers && \
+    pip install --no-cache-dir -r 
./intel_extension_for_transformers/neural_chat/examples/instruction_tuning/requirements.txt && \
+    pip install --no-cache-dir -r ./intel_extension_for_transformers/neural_chat/requirements.txt
+
+WORKDIR /intel-extension-for-transformers/intel_extension_for_transformers/neural_chat/
+
+CMD ["/usr/sbin/sshd", "-D"]
\ No newline at end of file
diff --git a/intel_extension_for_transformers/neural_chat/docker/finetuning/README.md b/intel_extension_for_transformers/neural_chat/docker/finetuning/README.md
index 3197ef4a8bf..2b61061d651 100644
--- a/intel_extension_for_transformers/neural_chat/docker/finetuning/README.md
+++ b/intel_extension_for_transformers/neural_chat/docker/finetuning/README.md
@@ -60,6 +60,20 @@ docker build --build-arg UBUNTU_VER=22.04 -f /path/to/workspace/intel-extension-for-transformers/intel_extension_for_transformers/neural_chat/docker/finetuning/Dockerfile -t chatbot_finetune . --target hpu
 ```
+### On Nvidia GPU Environment
+
+If you need to set proxy settings:
+
+```bash
+docker build --network=host --build-arg https_proxy=$https_proxy --build-arg http_proxy=$http_proxy -f /path/to/workspace/intel-extension-for-transformers/intel_extension_for_transformers/neural_chat/docker/Dockerfile -t chatbot_finetune . --target nvgpu
+```
+
+If you don't need to set proxy settings:
+
+```bash
+docker build -f /path/to/workspace/intel-extension-for-transformers/intel_extension_for_transformers/neural_chat/docker/Dockerfile -t chatbot_finetune . --target nvgpu
+```
+
 ## 5. Create Docker Container
 
 Before creating your docker container, make sure the model has been downloaded to local.
@@ -72,6 +86,10 @@ docker run -it --disable-content-trust --privileged --name="chatbot" --hostname=
 ```bash
 docker run -it --runtime=habana -e HABANA_VISIBLE_DEVICES=all -e OMPI_MCA_btl_vader_single_copy_mechanism=none -e https_proxy -e http_proxy -e HTTPS_PROXY -e HTTP_PROXY -e no_proxy -e NO_PROXY -v /dev/shm:/dev/shm -v /absolute/path/to/flan-t5-xl:/flan -v /absolute/path/to/alpaca_data.json:/dataset/alpaca_data.json --cap-add=sys_nice --net=host --ipc=host chatbot_finetuning:latest
 ```
+### On Nvidia GPU Environment
+```bash
+docker run --gpus all -it --disable-content-trust --privileged --name="chatbot" --hostname="chatbot-container" --network=host -e https_proxy -e http_proxy -e HTTPS_PROXY -e HTTP_PROXY -e no_proxy -e NO_PROXY -v /dev/shm:/dev/shm -v /absolute/path/to/flan-t5-xl:/flan -v /absolute/path/to/alpaca_data.json:/dataset/alpaca_data.json "chatbot_finetune"
+```
 
 # Finetune