From e19c9ffc9a75363415dbce9fa83e75a5355cd427 Mon Sep 17 00:00:00 2001
From: rohithkrn
Date: Sat, 30 Jul 2022 20:20:47 -0700
Subject: [PATCH 1/3] deprecate neuron Dockerfile in favor of DLC

---
 docker/Dockerfile.neuron.dev | 11 ++++++++---
 1 file changed, 8 insertions(+), 3 deletions(-)

diff --git a/docker/Dockerfile.neuron.dev b/docker/Dockerfile.neuron.dev
index ce31c434c0..3b38c9b501 100644
--- a/docker/Dockerfile.neuron.dev
+++ b/docker/Dockerfile.neuron.dev
@@ -1,13 +1,18 @@
+## THIS DOCKERFILE HAS BEEN DEPRECATED ##
+# please refer to the deep learning containers repository for torchserve containers
+# that run on inferentia processors and have up-to-date drivers
+# https://github.com/aws/deep-learning-containers/blob/master/available_images.md
+
 # syntax = docker/dockerfile:experimental
 #
 # Following comments have been shamelessly copied from https://github.com/pytorch/pytorch/blob/master/Dockerfile
-#
+#
 # NOTE: To build this you will need a docker version > 18.06 with
 # experimental enabled and DOCKER_BUILDKIT=1
 #
 # If you do not use buildkit you are not going to have a good time
 #
-# For reference: 
+# For reference:
 # https://docs.docker.com/develop/develop-images/build_enhancements/
 
 ARG BASE_IMAGE=ubuntu:18.04
@@ -74,7 +79,7 @@ RUN if [ "$MACHINE_TYPE" = "gpu" ]; then export USE_CUDA=1; fi \
     && chown -R model-server /home/model-server \
     && cp docker/config.properties /home/model-server/config.properties \
     && mkdir /home/model-server/model-store && chown -R model-server /home/model-server/model-store \
-    && pip install torch-neuron 'neuron-cc[tensorflow]' --extra-index-url=https://pip.repos.neuron.amazonaws.com 
+    && pip install torch-neuron 'neuron-cc[tensorflow]' --extra-index-url=https://pip.repos.neuron.amazonaws.com
 
 EXPOSE 8080 8081 8082 7070 7071
 USER model-server

From 4f14b959c4d06be6472e0cc392a09c2d6447bf6c Mon Sep 17 00:00:00 2001
From: rohithkrn
Date: Tue, 2 Aug 2022 13:35:22 -0700
Subject: [PATCH 2/3] add deprecation notice in README

---
 docker/README.md | 9 ++++++---
 1 file changed, 6 insertions(+), 3 deletions(-)

diff --git a/docker/README.md b/docker/README.md
index bc61bef547..478dcf6a58 100644
--- a/docker/README.md
+++ b/docker/README.md
@@ -1,3 +1,6 @@
+### Deprecation notice:
+[Dockerfile.neuron.dev](https://github.com/pytorch/serve/blob/master/docker/Dockerfile.neuron.dev) has been deprecated. Please refer to the [deep learning containers](https://github.com/aws/deep-learning-containers/blob/master/available_images.md) repository for neuron torchserve containers.
+
 ## Contents of this Document
 
 * [Prerequisites](#prerequisites)
@@ -12,7 +15,7 @@
 * For base Ubuntu with GPU, install following nvidia container toolkit and driver-
   * [Nvidia container toolkit](https://docs.nvidia.com/datacenter/cloud-native/container-toolkit/install-guide.html#installing-on-ubuntu-and-debian)
   * [Nvidia driver](https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/install-nvidia-driver.html)
- 
+
 * NOTE - Dockerfiles have not been tested on windows native platform.
 
 ## First things first
@@ -283,7 +286,7 @@ You may want to consider the following aspects / docker options when deploying t
 
     The current ulimit values can be viewed by executing ```ulimit -a```.
A more exhaustive set of options for resource constraining can be found in the Docker Documentation [here](https://docs.docker.com/config/containers/resource_constraints/), [here](https://docs.docker.com/engine/reference/commandline/run/#set-ulimits-in-container---ulimit) and [here](https://docs.docker.com/engine/reference/run/#runtime-constraints-on-resources) * Exposing specific ports / volumes between the host & docker env. - * ```-p8080:8080 -p8081:8081 -p 8082:8082 -p 7070:7070 -p 7071:7071 ``` + * ```-p8080:8080 -p8081:8081 -p 8082:8082 -p 7070:7070 -p 7071:7071 ``` TorchServe uses default ports 8080 / 8081 / 8082 for REST based inference, management & metrics APIs and 7070 / 7071 for gRPC APIs. You may want to expose these ports to the host for HTTP & gRPC Requests between Docker & Host. * The model store is passed to torchserve with the --model-store option. You may want to consider using a shared volume if you prefer pre populating models in model-store directory. @@ -298,5 +301,5 @@ docker run --rm --shm-size=1g \ -p8082:8082 \ -p7070:7070 \ -p7071:7071 \ - --mount type=bind,source=/path/to/model/store,target=/tmp/models torchserve --model-store=/tmp/models + --mount type=bind,source=/path/to/model/store,target=/tmp/models torchserve --model-store=/tmp/models ``` From cf855f35f4d8309b1b5253617168d72dd475c967 Mon Sep 17 00:00:00 2001 From: rohithkrn Date: Wed, 10 Aug 2022 19:11:32 -0700 Subject: [PATCH 3/3] build trigger
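
For illustration, a minimal sketch of how the resource-constraint, port, and model-store options described in the README excerpt above might be combined into one `docker run` command. The `pytorch/torchserve` image name and the specific CPU, memory, and ulimit values are assumptions chosen for the example, not values taken from the patches.

```bash
# Illustrative sketch only: image name and limit values are placeholder assumptions.
# --cpus / --memory / --shm-size bound CPU, RAM, and shared memory for the container,
# --ulimit raises the open-file limit, -p exposes the REST (8080-8082) and gRPC
# (7070-7071) ports, and --mount binds a host model store into the container.
docker run --rm -it \
  --cpus=4 \
  --memory=8g \
  --shm-size=1g \
  --ulimit nofile=65535:65535 \
  -p 8080:8080 -p 8081:8081 -p 8082:8082 \
  -p 7070:7070 -p 7071:7071 \
  --mount type=bind,source=/path/to/model/store,target=/tmp/models \
  pytorch/torchserve \
  torchserve --model-store=/tmp/models
```

Any of the resource flags covered in the linked Docker documentation can be substituted here; the values shown are only common starting points to adjust for your hardware.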