From 81eb227791585ff9c37202ce1497776ba887e22c Mon Sep 17 00:00:00 2001
From: jhnwnd <43957968+jhnwnd@users.noreply.github.com>
Date: Fri, 18 Oct 2019 15:33:46 +0200
Subject: [PATCH] Ubuntu 18.04 docker files (#25)

* Ubuntu 18.04 docker files

* Ubuntu 18.04 docker files

* docker files update
---
 README.md                                     |  12 +-
 docker/Dockerfile.centos7-gpu                 |  10 +-
 ...{Dockerfile.ubuntu16 => Dockerfile.ubuntu} |   6 +-
 docker/Dockerfile.ubuntu-gpu                  | 117 ++++++++++++++++++
 docker/build_all.sh                           |  35 +++++-
 5 files changed, 160 insertions(+), 20 deletions(-)
 rename docker/{Dockerfile.ubuntu16 => Dockerfile.ubuntu} (96%)
 create mode 100644 docker/Dockerfile.ubuntu-gpu

diff --git a/README.md b/README.md
index 50bd05658..216744e04 100644
--- a/README.md
+++ b/README.md
@@ -128,7 +128,7 @@ See the existing converters implementation which could be used as a template.
 Docker images are currently the recommended way to use the project as they embed all dependencies and are optimized. The GPU image supports both CPU and GPU execution:
 
 ```bash
-docker pull opennmt/ctranslate2:latest-centos7-gpu
+docker pull opennmt/ctranslate2:latest-ubuntu18-gpu
 ```
 
 The library has several entrypoints which are briefly introduced below. The examples use the English-German model prepared in [Converting models](#converting-models). This model requires a SentencePiece tokenization.
@@ -139,17 +139,17 @@ The library has several entrypoints which are briefly introduced below. The exam
 
 ```bash
 echo "▁H ello ▁world !" | docker run -i --rm -v $PWD:/data \
-    opennmt/ctranslate2:latest-centos7-gpu --model /data/ende_ctranslate2
+    opennmt/ctranslate2:latest-ubuntu18-gpu --model /data/ende_ctranslate2
 ```
 
 #### GPU
 
 ```bash
 echo "▁H ello ▁world !" | nvidia-docker run -i --rm -v $PWD:/data \
-    opennmt/ctranslate2:latest-centos7-gpu --model /data/ende_ctranslate2 --device cuda
+    opennmt/ctranslate2:latest-ubuntu18-gpu --model /data/ende_ctranslate2 --device cuda
 ```
 
-*See `docker run --rm opennmt/ctranslate2:latest-centos7-gpu --help` for additional options.*
+*See `docker run --rm opennmt/ctranslate2:latest-ubuntu18-gpu --help` for additional options.*
 
 ### With the Python API
 
@@ -193,7 +193,7 @@ int main() {
 The Docker images are self contained and build the code from the active directory. The `build` command should be run from the project root directory, e.g.:
 
 ```bash
-docker build -t opennmt/ctranslate2:latest-ubuntu16 -f docker/Dockerfile.ubuntu16 .
+docker build -t opennmt/ctranslate2:latest-ubuntu18 -f docker/Dockerfile.ubuntu .
 ```
 
 See the `docker/` directory for available images.
@@ -202,7 +202,7 @@ See the `docker/` directory for available images.
 
 The minimum requirements for building CTranslate2 binaries are Intel MKL and the Boost `program_options` module. The instructions below assume an Ubuntu system.
 
-**Note:** This minimal installation only enables CPU execution. For GPU support, see how the [GPU Dockerfile](docker/Dockerfile.centos7-gpu) is defined.
+**Note:** This minimal installation only enables CPU execution. For GPU support, see how the [GPU Dockerfile](docker/Dockerfile.ubuntu-gpu) is defined.
 
 #### Install Intel MKL

diff --git a/docker/Dockerfile.centos7-gpu b/docker/Dockerfile.centos7-gpu
index 48e901f9c..736ccdeb1 100644
--- a/docker/Dockerfile.centos7-gpu
+++ b/docker/Dockerfile.centos7-gpu
@@ -1,4 +1,4 @@
-FROM nvidia/cuda:10.0-devel-centos7 as builder
+FROM nvidia/cuda:10.0-cudnn7-devel-centos7 as builder
 
 RUN yum install -y \
         boost-static \
@@ -37,12 +37,6 @@ RUN wget https://github.com/intel/mkl-dnn/archive/v$MKLDNN_VERSION.tar.gz && \
     make -j4 && make install && \
     cd ../.. && rm -r mkl-dnn-*
 
-ENV CUDNN_VERSION_SHORT=7.6.4
-ENV CUDNN_VERSION=${CUDNN_VERSION_SHORT}.38
-RUN curl -fsSL http://developer.download.nvidia.com/compute/redist/cudnn/v${CUDNN_VERSION_SHORT}/cudnn-10.0-linux-x64-v${CUDNN_VERSION}.tgz -O && \
-    tar --no-same-owner -xzf cudnn-10.0-linux-x64-v${CUDNN_VERSION}.tgz -C /usr/local && \
-    rm cudnn-10.0-linux-x64-v${CUDNN_VERSION}.tgz
-
 ENV TENSORRT_MAJOR_VERSION=5
 ENV TENSORRT_VERSION=${TENSORRT_MAJOR_VERSION}.1.5
 RUN curl -fsSL https://developer.download.nvidia.com/compute/machine-learning/repos/rhel7/x86_64/libnvinfer-devel-${TENSORRT_VERSION}-1.cuda10.0.x86_64.rpm -O && \
@@ -93,7 +87,7 @@ WORKDIR /root
 RUN cp /opt/intel/lib/intel64/libiomp5.so /root/ctranslate2/lib64 && \
     cp -P /root/mkl-dnn/lib64/libmkldnn.so* /root/ctranslate2/lib64 && \
     cp -P /usr/lib64/libboost_python*.so* /root/ctranslate2/lib64 && \
-    cp -P /usr/local/cuda/lib*/libcudnn.so* /root/ctranslate2/lib64 && \
+    cp -P /usr/local/cuda/lib64/libcudnn.so* /root/ctranslate2/lib64 && \
     cp -P /lib64/libnvinfer.so* /root/ctranslate2/lib64 && \
     cp /root/ctranslate2-dev/python/dist/*whl /root/ctranslate2
diff --git a/docker/Dockerfile.ubuntu16 b/docker/Dockerfile.ubuntu
similarity index 96%
rename from docker/Dockerfile.ubuntu16
rename to docker/Dockerfile.ubuntu
index 74e02566c..8d1632896 100644
--- a/docker/Dockerfile.ubuntu16
+++ b/docker/Dockerfile.ubuntu
@@ -1,4 +1,5 @@
-FROM ubuntu:16.04 as builder
+ARG UBUNTU_VERSION=18.04
+FROM ubuntu:${UBUNTU_VERSION} as builder
 
 RUN apt-get update && \
     apt-get install -y --no-install-recommends \
@@ -8,6 +9,7 @@ RUN apt-get update && \
         libboost-program-options-dev \
         libboost-python-dev \
         python-pip \
+        gnupg2 \
         wget && \
     apt-get clean && \
     rm -rf /var/lib/apt/lists/*
@@ -75,7 +77,7 @@ RUN cp /opt/intel/lib/intel64/libiomp5.so /root/ctranslate2/lib && \
     cp -P /usr/lib/x86_64-linux-gnu/libboost_python*.so* /root/ctranslate2/lib && \
     cp /root/ctranslate2-dev/python/dist/*whl /root/ctranslate2
 
-FROM ubuntu:16.04
+FROM ubuntu:${UBUNTU_VERSION}
 
 RUN apt-get update && \
     apt-get install -y --no-install-recommends \
diff --git a/docker/Dockerfile.ubuntu-gpu b/docker/Dockerfile.ubuntu-gpu
new file mode 100644
index 000000000..75ff468fd
--- /dev/null
+++ b/docker/Dockerfile.ubuntu-gpu
@@ -0,0 +1,117 @@
+ARG UBUNTU_VERSION=18.04
+FROM nvidia/cuda:10.0-cudnn7-devel-ubuntu${UBUNTU_VERSION} as builder
+
+ENV TENSORRT_MAJOR_VERSION=5
+ENV TENSORRT_VERSION=${TENSORRT_MAJOR_VERSION}.1.5
+
+RUN apt-get update && \
+    apt-get install -y --no-install-recommends \
+        apt-transport-https \
+        build-essential \
+        ca-certificates \
+        libboost-program-options-dev \
+        libboost-python-dev \
+        python-pip \
+        gnupg2 \
+        curl \
+        libnvinfer${TENSORRT_MAJOR_VERSION}=${TENSORRT_VERSION}-1+cuda10.0 \
+        libnvinfer-dev=${TENSORRT_VERSION}-1+cuda10.0 \
+        wget && \
+    apt-get clean && \
+    rm -rf /var/lib/apt/lists/*
+
+WORKDIR /root
+
+RUN wget https://cmake.org/files/v3.12/cmake-3.12.2-Linux-x86_64.tar.gz
+RUN tar xf cmake-3.12.2-Linux-x86_64.tar.gz && \
+    rm cmake-3.12.2-Linux-x86_64.tar.gz
+ENV PATH=$PATH:/root/cmake-3.12.2-Linux-x86_64/bin
+
+ENV MKL_VERSION=2019
+ENV MKL_UPDATE=5
+ENV MKL_BUILD=075
+RUN wget https://apt.repos.intel.com/intel-gpg-keys/GPG-PUB-KEY-INTEL-SW-PRODUCTS-2019.PUB && \
+    apt-key add GPG-PUB-KEY-INTEL-SW-PRODUCTS-*.PUB && \
+    rm GPG-PUB-KEY-INTEL-SW-PRODUCTS-*.PUB && \
+    echo "deb https://apt.repos.intel.com/mkl all main" > /etc/apt/sources.list.d/intel-mkl.list && \
+    apt-get update && \
+    apt-get install -y --no-install-recommends \
+        intel-mkl-64bit-$MKL_VERSION.$MKL_UPDATE.$MKL_BUILD && \
+    apt-get clean && \
+    rm -rf /var/lib/apt/lists/*
+
+ENV MKLDNN_ROOT=/root/mkl-dnn
+ENV MKLDNN_VERSION=0.21
+RUN wget https://github.com/intel/mkl-dnn/archive/v$MKLDNN_VERSION.tar.gz && \
+    tar xf v$MKLDNN_VERSION.tar.gz && rm v$MKLDNN_VERSION.tar.gz && \
+    cd mkl-dnn-* && \
+    mkdir build && cd build && \
+    cmake -DCMAKE_INSTALL_PREFIX=${MKLDNN_ROOT} -DARCH_OPT_FLAGS="" \
+          -DMKLROOT=/opt/intel/mkl -DMKLDNN_USE_MKL=FULL:STATIC -DMKLDNN_THREADING=OMP:INTEL \
+          -DWITH_TEST=OFF -DWITH_EXAMPLE=OFF .. && \
+    make -j4 && make install && \
+    cd ../.. && rm -r mkl-dnn-*
+
+ENV CUB_VERSION=1.8.0
+ENV CUB_ROOT=/root/cub
+RUN wget https://github.com/NVlabs/cub/archive/v${CUB_VERSION}.tar.gz && \
+    tar xf v${CUB_VERSION}.tar.gz && \
+    mv cub-${CUB_VERSION} ${CUB_ROOT} && \
+    rm v${CUB_VERSION}.tar.gz
+
+WORKDIR /root/ctranslate2-dev
+
+COPY cli cli
+COPY include include
+COPY src src
+COPY tests tests
+COPY CMakeLists.txt .
+
+ARG CXX_FLAGS
+ENV CXX_FLAGS=${CXX_FLAGS}
+ARG CUDA_NVCC_FLAGS
+ENV CUDA_NVCC_FLAGS=${CUDA_NVCC_FLAGS:-"-Xfatbin -compress-all"}
+ARG CUDA_ARCH_LIST
+ENV CUDA_ARCH_LIST=${CUDA_ARCH_LIST:-"Common"}
+
+RUN mkdir build && \
+    cd build && \
+    cmake -DCMAKE_INSTALL_PREFIX=/root/ctranslate2 \
+          -DCMAKE_PREFIX_PATH="${CUB_ROOT};${MKLDNN_ROOT}" \
+          -DWITH_CUDA=ON -DWITH_MKLDNN=ON \
+          -DCMAKE_BUILD_TYPE=Release -DCMAKE_CXX_FLAGS="${CXX_FLAGS}" \
+          -DCUDA_NVCC_FLAGS="${CUDA_NVCC_FLAGS}" -DCUDA_ARCH_LIST="${CUDA_ARCH_LIST}" .. && \
+    VERBOSE=1 make -j4 && \
+    make install
+
+COPY python python
+
+WORKDIR /root/ctranslate2-dev/python
+RUN pip --no-cache-dir install setuptools wheel
+RUN CTRANSLATE2_ROOT=/root/ctranslate2 python setup.py bdist_wheel
+
+WORKDIR /root
+RUN cp /opt/intel/lib/intel64/libiomp5.so /root/ctranslate2/lib && \
+    cp -P /root/mkl-dnn/lib/libmkldnn.so* /root/ctranslate2/lib && \
+    cp -P /usr/lib/x86_64-linux-gnu/libboost_python*.so* /root/ctranslate2/lib && \
+    cp -P /usr/lib/x86_64-linux-gnu/libcudnn.so* /root/ctranslate2/lib && \
+    cp -P /usr/lib/x86_64-linux-gnu/libnvinfer.so* /root/ctranslate2/lib && \
+    cp /root/ctranslate2-dev/python/dist/*whl /root/ctranslate2
+
+FROM nvidia/cuda:10.0-base-ubuntu${UBUNTU_VERSION}
+
+RUN apt-get update && \
+    apt-get install -y --no-install-recommends \
+        cuda-cublas-$CUDA_PKG_VERSION \
+        python-pip && \
+    apt-get clean && \
+    rm -rf /var/lib/apt/lists/*
+
+COPY --from=builder /root/ctranslate2 /opt/ctranslate2
+RUN pip --no-cache-dir install /opt/ctranslate2/*.whl
+
+WORKDIR /opt
+
+ENV LD_LIBRARY_PATH=$LD_LIBRARY_PATH:/opt/ctranslate2/lib
+
+ENTRYPOINT ["/opt/ctranslate2/bin/translate"]
diff --git a/docker/build_all.sh b/docker/build_all.sh
index 1e44ae704..0dbb00e43 100755
--- a/docker/build_all.sh
+++ b/docker/build_all.sh
@@ -8,6 +8,16 @@
 
 set -e
 
+# allow user to run this script from anywhere
+# from https://stackoverflow.com/a/246128
+# one-liner which will give you the full directory name
+# of the script no matter where it is being called from
+unset CDPATH
+DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" >/dev/null 2>&1 && pwd )"
+
+ROOT_DIR=$DIR/..
+cd $ROOT_DIR
+
 VERSION=${1:-latest}
 PUSH=${2:-0}
 IMAGE=opennmt/ctranslate2
@@ -15,9 +25,23 @@ IMAGE=opennmt/ctranslate2
 build()
 {
     PLAT=$1
-    LATEST=$IMAGE:latest-$PLAT
-    TAGGED=$IMAGE:$VERSION-$PLAT
-    docker build --no-cache -t $LATEST -f docker/Dockerfile.$PLAT .
+
+    if [ "$#" -eq 2 ]; then
+        UBUNTU_VERSION_ARG="--build-arg UBUNTU_VERSION=$2"
+        UBUNTU_MAJOR_VERSION="${2%.*}"
+        if [[ "$PLAT" = *-* ]]; then
+            TAG_SUFFIX="${PLAT%-*}${UBUNTU_MAJOR_VERSION}-${PLAT##*-}"
+        else
+            TAG_SUFFIX=$PLAT$UBUNTU_MAJOR_VERSION
+        fi
+    else
+        UBUNTU_VERSION_ARG=""
+        TAG_SUFFIX=$PLAT
+    fi
+
+    LATEST=$IMAGE:latest-$TAG_SUFFIX
+    TAGGED=$IMAGE:$VERSION-$TAG_SUFFIX
+    docker build --no-cache $UBUNTU_VERSION_ARG -t $LATEST -f docker/Dockerfile.$PLAT .
     if [ $PUSH -eq 1 ]; then
         docker push $LATEST
     fi
@@ -29,6 +53,9 @@ build()
     fi
 }
 
-build ubuntu16
+build ubuntu 16.04
+build ubuntu-gpu 16.04
+build ubuntu 18.04
+build ubuntu-gpu 18.04
 build centos7
 build centos7-gpu
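
For reviewers who want to sanity-check the new image locally, the commands below replay what `build_all.sh` now does for the 18.04 GPU variant (minus `--no-cache`); the `ubuntu18-gpu` tag follows the `TAG_SUFFIX` logic above, and the `ende_ctranslate2` model path is only the README's running example, not something this patch ships.

```bash
# Build the Ubuntu 18.04 GPU image from the project root. UBUNTU_VERSION is the
# build arg introduced in docker/Dockerfile.ubuntu-gpu; it already defaults to
# 18.04, so the flag is shown only for clarity.
docker build --build-arg UBUNTU_VERSION=18.04 \
    -t opennmt/ctranslate2:latest-ubuntu18-gpu \
    -f docker/Dockerfile.ubuntu-gpu .

# Translate one pre-tokenized line on GPU, mounting a converted model from $PWD.
echo "▁H ello ▁world !" | nvidia-docker run -i --rm -v $PWD:/data \
    opennmt/ctranslate2:latest-ubuntu18-gpu --model /data/ende_ctranslate2 --device cuda
```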