This repository has been archived by the owner on Oct 25, 2024. It is now read-only.
-
Notifications
You must be signed in to change notification settings - Fork 211
Commit
This commit does not belong to any branch on this repository, and may belong to a fork outside of the repository.
[NeuralChat] Support Neuralchat-TGI serving with Docker (#1208)
Signed-off-by: LetongHan <letong.han@intel.com> Co-authored-by: VincyZhang <wenxin.zhang@intel.com>
- Loading branch information
1 parent
ac0ea1e
commit 8ebff39
Showing
7 changed files
with
198 additions
and
4 deletions.
There are no files selected for viewing
97 changes: 97 additions & 0 deletions
97
intel_extension_for_transformers/neural_chat/docker/tgi_serving/Dockerfile_tgi
This file contains bidirectional Unicode text that may be interpreted or compiled differently than what appears below. To review, open the file in an editor that reveals hidden Unicode characters.
Learn more about bidirectional Unicode characters
Original file line number | Diff line number | Diff line change |
---|---|---|
@@ -0,0 +1,97 @@ | ||
# Copyright (c) 2023 Intel Corporation | ||
# | ||
# Licensed under the Apache License, Version 2.0 (the "License"); | ||
# you may not use this file except in compliance with the License. | ||
# You may obtain a copy of the License at | ||
# | ||
# http://www.apache.org/licenses/LICENSE-2.0 | ||
# | ||
# Unless required by applicable law or agreed to in writing, software | ||
# distributed under the License is distributed on an "AS IS" BASIS, | ||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. | ||
# See the License for the specific language governing permissions and | ||
# limitations under the License. | ||
# ============================================================================ | ||
# | ||
# THIS IS A GENERATED DOCKERFILE. | ||
# | ||
# This file was assembled from multiple pieces, whose use is documented | ||
# throughout. Please refer to the TensorFlow dockerfiles documentation | ||
# for more information. | ||
# | ||
# ============================================================================ | ||
|
||
|
||
## SPR environment
ARG UBUNTU_VER=22.04
FROM ubuntu:${UBUNTU_VER} AS cpu

ARG ITREX_VER=main
ARG PYTHON_VERSION=3.10
ARG REPO=https://github.com/intel/intel-extension-for-transformers.git
ARG REPO_PATH=""
ARG SSHD_PORT=22
# key=value form; the legacy space-separated ENV syntax is deprecated.
ENV SSHD_PORT=${SSHD_PORT}

# See http://bugs.python.org/issue19846
ENV LANG=C.UTF-8

# Login bash shell so bashisms used below (e.g. `==` inside [ ]) behave consistently.
SHELL ["/bin/bash", "--login", "-c"]

# Install system dependencies.
#  - apt-get (not apt) for stable scripted behavior
#  - --no-install-recommends keeps the image small (ca-certificates added
#    explicitly since recommends no longer pull it in for git/wget over https)
#  - apt lists removed in the same layer so they never reach the image
RUN apt-get update \
    && apt-get install -y --no-install-recommends \
        build-essential \
        ca-certificates \
        git \
        google-perftools \
        libgl1-mesa-glx \
        libjemalloc2 \
        numactl \
        openssh-server \
        python${PYTHON_VERSION} \
        python3-pip \
        wget \
    && pip install --no-cache-dir --upgrade pip setuptools wheel \
    && apt-get clean \
    && rm -rf /var/lib/apt/lists/*

# Provide the conventional `python` name expected by downstream scripts.
RUN ln -s /usr/bin/python3 /usr/bin/python

# Install Docker client prerequisites (this image drives the host Docker
# daemon through the socket mounted at /var/run/docker.sock at run time).
RUN apt-get update \
    && apt-get install -y --no-install-recommends \
        apt-transport-https \
        ca-certificates \
        curl \
        gnupg \
        lsb-release \
        software-properties-common \
    && rm -rf /var/lib/apt/lists/*

# apt-key is deprecated on Ubuntu 22.04; keep Docker's signing key in a
# dedicated keyring and bind the repository to it with signed-by.
RUN install -m 0755 -d /etc/apt/keyrings \
    && curl -fsSL https://download.docker.com/linux/ubuntu/gpg \
        | gpg --dearmor -o /etc/apt/keyrings/docker.gpg \
    && chmod a+r /etc/apt/keyrings/docker.gpg \
    && echo "deb [arch=amd64 signed-by=/etc/apt/keyrings/docker.gpg] https://download.docker.com/linux/ubuntu $(lsb_release -cs) stable" \
        > /etc/apt/sources.list.d/docker.list

RUN apt-get update \
    && apt-get install -y --no-install-recommends docker-ce-cli \
    && rm -rf /var/lib/apt/lists/*

# Bring in the ITREX sources: use the local checkout in REPO_PATH when the
# build arg is supplied, otherwise wipe the copied placeholder and clone the
# requested branch from GitHub.
# NOTE(review): with the default REPO_PATH="" this COPY appears to rely on the
# builder falling back to the build context as the source — confirm how the
# image is built when REPO_PATH is not supplied.
RUN mkdir -p /intel-extension-for-transformers
COPY ${REPO_PATH} /intel-extension-for-transformers
RUN if [ "$REPO_PATH" == "" ]; then rm -rf intel-extension-for-transformers/* && rm -rf intel-extension-for-transformers/.* ; git clone --single-branch --branch=${ITREX_VER} ${REPO} intel-extension-for-transformers ; fi
WORKDIR /intel-extension-for-transformers

# Install ITREX and NeuralChat Python dependencies.
# --no-cache-dir keeps pip's download cache out of the image layer.
RUN pip install --no-cache-dir oneccl_bind_pt --extra-index-url https://pytorch-extension.intel.com/release-whl/stable/cpu/us/ && \
    cd /intel-extension-for-transformers && pip install --no-cache-dir -r requirements.txt && \
    pip install --no-cache-dir -v . && \
    cd ./intel_extension_for_transformers/neural_chat/examples/finetuning/instruction && pip install --no-cache-dir -r requirements.txt && \
    cd /intel-extension-for-transformers/intel_extension_for_transformers/neural_chat && pip install --no-cache-dir -r requirements_cpu.txt && \
    pip install --no-cache-dir astunparse ninja pyyaml mkl mkl-include setuptools cmake cffi future six requests dataclasses && \
    pip install --no-cache-dir typing_extensions datasets accelerate SentencePiece evaluate nltk rouge_score protobuf==3.20.1 tokenizers einops peft

# Enable passwordless ssh for mpirun.
# SECURITY NOTE: root login with an empty password is enabled here — this is
# acceptable only on an isolated, trusted network between worker containers.
RUN mkdir /var/run/sshd
RUN passwd -d root
RUN sed -i'' -e 's/^#PermitRootLogin prohibit-password$/PermitRootLogin yes/' /etc/ssh/sshd_config \
    && sed -i'' -e 's/^#PasswordAuthentication yes$/PasswordAuthentication yes/' /etc/ssh/sshd_config \
    && sed -i'' -e 's/^#PermitEmptyPasswords no$/PermitEmptyPasswords yes/' /etc/ssh/sshd_config \
    && sed -i'' -e 's/^UsePAM yes/UsePAM no/' /etc/ssh/sshd_config \
    && echo "Port ${SSHD_PORT}" >> /etc/ssh/sshd_config \
    && echo "Host *" >> /etc/ssh/ssh_config \
    && echo " Port ${SSHD_PORT}" >> /etc/ssh/ssh_config \
    && echo " StrictHostKeyChecking no" >> /etc/ssh/ssh_config
# EXPOSE is documentation only; run with --net=host or -p to publish.
EXPOSE ${SSHD_PORT}

# Start the NeuralChat server; the TGI config is expected to be mounted at
# /tgi.yaml (see README: docker run ... -v ./tgi.yaml:/tgi.yaml).
ENTRYPOINT ["neuralchat_server"]
CMD ["start", "--config_file", "/tgi.yaml"]
|
38 changes: 38 additions & 0 deletions
38
intel_extension_for_transformers/neural_chat/docker/tgi_serving/README.md
This file contains bidirectional Unicode text that may be interpreted or compiled differently than what appears below. To review, open the file in an editor that reveals hidden Unicode characters.
Learn more about bidirectional Unicode characters
Original file line number | Diff line number | Diff line change |
---|---|---|
@@ -0,0 +1,38 @@ | ||
Intel Neural Chat Inference Dockerfile installer for Ubuntu 22.04
|
||
# Start NeuralChat and TGI serving with Docker | ||
|
||
## Environment Setup | ||
|
||
### Setup Xeon SPR Environment | ||
Use Dockerfile_tgi to build Docker image in your environment. | ||
```bash | ||
docker build . -f Dockerfile_tgi -t neuralchat_tgi:latest | ||
``` | ||
If you need to set proxy settings, add `--build-arg https_proxy=$https_proxy --build-arg http_proxy=$http_proxy` like below. | ||
```bash | ||
docker build . -f Dockerfile_tgi -t neuralchat_tgi:latest --build-arg https_proxy=$https_proxy --build-arg http_proxy=$http_proxy | ||
``` | ||
|
||
### Start NeuralChat Service | ||
Before starting NeuralChat services, you need to configure `tgi.yaml` according to your real environment.
Make sure the specified `port` is available, `device` is `cpu` (`auto` will not work). | ||
Other detailed parameters please refer to `intel_extension_for_transformers/neural_chat/examples/serving/TGI/README.md` | ||
|
||
```bash | ||
docker run -it --net=host --ipc=host -v /var/run/docker.sock:/var/run/docker.sock -v ./tgi.yaml:/tgi.yaml neuralchat_tgi:latest | ||
``` | ||
|
||
|
||
## Consume the Service | ||
When the `docker run` command has executed successfully, you can consume the HTTP services offered by NeuralChat.
|
||
Here is an example of consuming the TGI service; remember to substitute your real IP address and port.
```bash | ||
curl ${your_ip}:${your_port}/v1/tgi/generate \ | ||
-X POST \ | ||
-d '{"inputs":"What is Deep Learning?","parameters":{"max_new_tokens":17, "do_sample": true}}' \ | ||
-H 'Content-Type: application/json' | ||
``` | ||
|
||
|
33 changes: 33 additions & 0 deletions
33
intel_extension_for_transformers/neural_chat/docker/tgi_serving/tgi.yaml
This file contains bidirectional Unicode text that may be interpreted or compiled differently than what appears below. To review, open the file in an editor that reveals hidden Unicode characters.
Learn more about bidirectional Unicode characters
Original file line number | Diff line number | Diff line change |
---|---|---|
@@ -0,0 +1,33 @@ | ||
# Copyright (c) 2023 Intel Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

# This is the parameter configuration file for NeuralChat Serving with TGI.

#################################################################################
#                                SERVER SETTING                                 #
#################################################################################
# Address and port the NeuralChat HTTP server binds to. The port must be
# available on the host when the container runs with --net=host (see README).
host: 0.0.0.0
port: 8000

# Model served through TGI. `device` must be "cpu" for this image; the README
# notes that "auto" will not work.
model_name_or_path: "Intel/neural-chat-7b-v3-1"
device: "cpu"

# Serving backend selection.
serving:
    framework: "tgi"

# Tasks exposed by the server (text chat plus the /v1/tgi/* endpoints).
tasks_list: ['textchat', 'tgi']
This file contains bidirectional Unicode text that may be interpreted or compiled differently than what appears below. To review, open the file in an editor that reveals hidden Unicode characters.
Learn more about bidirectional Unicode characters
This file contains bidirectional Unicode text that may be interpreted or compiled differently than what appears below. To review, open the file in an editor that reveals hidden Unicode characters.
Learn more about bidirectional Unicode characters
This file contains bidirectional Unicode text that may be interpreted or compiled differently than what appears below. To review, open the file in an editor that reveals hidden Unicode characters.
Learn more about bidirectional Unicode characters
This file contains bidirectional Unicode text that may be interpreted or compiled differently than what appears below. To review, open the file in an editor that reveals hidden Unicode characters.
Learn more about bidirectional Unicode characters