Skip to content
This repository has been archived by the owner on Nov 17, 2023. It is now read-only.

[CI] Upgrade unix gpu toolchain #18186

Merged
merged 8 commits into from
May 12, 2020
Merged
Show file tree
Hide file tree
Changes from 4 commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
5 changes: 3 additions & 2 deletions ci/build.py
Original file line number Diff line number Diff line change
Expand Up @@ -45,7 +45,7 @@
# NOTE: Temporary whitelist used until all Dockerfiles are refactored for docker compose
DOCKER_COMPOSE_WHITELIST = ('centos7_cpu', 'centos7_gpu_cu92', 'centos7_gpu_cu100',
'centos7_gpu_cu101', 'centos7_gpu_cu102', 'ubuntu_cpu',
'ubuntu_build_cuda', 'ubuntu_gpu_cu101', 'publish.test.centos7_cpu',
'ubuntu_gpu_cu101', 'publish.test.centos7_cpu',
'publish.test.centos7_gpu')
# Files for docker compose
DOCKER_COMPOSE_FILES = set(('docker/build.centos7', 'docker/build.ubuntu', 'docker/publish.test.centos7'))
Expand Down Expand Up @@ -222,8 +222,9 @@ def container_run(docker_client: SafeDockerClient,

# Equivalent command
docker_cmd_list = [
"nvidia-docker" if nvidia_runtime else "docker",
"docker",
'run',
"--gpus all" if nvidia_runtime else "",
"--cap-add",
"SYS_PTRACE", # Required by ASAN
'--rm',
Expand Down
6 changes: 0 additions & 6 deletions ci/docker/Dockerfile.build.ubuntu
Original file line number Diff line number Diff line change
Expand Up @@ -160,9 +160,3 @@ RUN export DEBIAN_FRONTEND=noninteractive && \
apt install -y --no-install-recommends \
cuda-10-1 && \
rm -rf /var/lib/apt/lists/*


FROM gpu as gpuwithcompatenv
# TVMOP requires /usr/local/cuda/compat is no LD_LIBRARY_PATH.
# This should be fixed and deleted.
ENV LD_LIBRARY_PATH=${LD_LIBRARY_PATH}:/usr/local/cuda/compat
13 changes: 1 addition & 12 deletions ci/docker/docker-compose.yml
Original file line number Diff line number Diff line change
Expand Up @@ -110,23 +110,12 @@ services:
build:
context: .
dockerfile: Dockerfile.build.ubuntu
target: gpuwithcompatenv
target: gpu
args:
BASE_IMAGE: nvidia/cuda:10.1-cudnn7-devel-ubuntu18.04
cache_from:
- build.ubuntu_gpu_cu101:latest
- mxnetci/build.ubuntu_gpu_cu101:latest
ubuntu_build_cuda:
image: build.ubuntu_build_cuda:latest
build:
context: .
dockerfile: Dockerfile.build.ubuntu
target: gpuwithcudaruntimelibs
args:
BASE_IMAGE: nvidia/cuda:10.1-cudnn7-devel-ubuntu18.04
cache_from:
- build.ubuntu_build_cuda:latest
- mxnetci/build.ubuntu_build_cuda:latest
###################################################################################################
# Dockerfile.publish.test based images used for testing binary artifacts on minimal systems.
###################################################################################################
Expand Down
12 changes: 6 additions & 6 deletions ci/jenkins/Jenkins_steps.groovy
Original file line number Diff line number Diff line change
Expand Up @@ -225,7 +225,7 @@ def compile_unix_mkldnn_gpu() {
ws('workspace/build-mkldnn-gpu') {
timeout(time: max_time, unit: 'MINUTES') {
utils.init_git()
utils.docker_run('ubuntu_build_cuda', 'build_ubuntu_gpu_mkldnn', false)
utils.docker_run('ubuntu_gpu_cu101', 'build_ubuntu_gpu_mkldnn', false)
utils.pack_lib('mkldnn_gpu', mx_mkldnn_lib)
}
}
Expand All @@ -239,7 +239,7 @@ def compile_unix_mkldnn_nocudnn_gpu() {
ws('workspace/build-mkldnn-gpu-nocudnn') {
timeout(time: max_time, unit: 'MINUTES') {
utils.init_git()
utils.docker_run('ubuntu_build_cuda', 'build_ubuntu_gpu_mkldnn_nocudnn', false)
utils.docker_run('ubuntu_gpu_cu101', 'build_ubuntu_gpu_mkldnn_nocudnn', false)
utils.pack_lib('mkldnn_gpu_nocudnn', mx_mkldnn_lib)
}
}
Expand All @@ -253,7 +253,7 @@ def compile_unix_full_gpu() {
ws('workspace/build-gpu') {
timeout(time: max_time, unit: 'MINUTES') {
utils.init_git()
utils.docker_run('ubuntu_build_cuda', 'build_ubuntu_gpu_cuda101_cudnn7', false)
utils.docker_run('ubuntu_gpu_cu101', 'build_ubuntu_gpu_cuda101_cudnn7', false)
utils.pack_lib('gpu', mx_lib_cpp_examples)
}
}
Expand All @@ -267,7 +267,7 @@ def compile_unix_full_gpu_make() {
ws('workspace/build-gpu') {
timeout(time: max_time, unit: 'MINUTES') {
utils.init_git()
utils.docker_run('ubuntu_build_cuda', 'build_ubuntu_gpu_cuda101_cudnn7_make', false)
utils.docker_run('ubuntu_gpu_cu101', 'build_ubuntu_gpu_cuda101_cudnn7_make', false)
utils.pack_lib('gpu_make', mx_lib_cpp_examples_make)
}
}
Expand All @@ -281,7 +281,7 @@ def compile_unix_full_gpu_mkldnn_cpp_test() {
ws('workspace/build-gpu-mkldnn-cpp') {
timeout(time: max_time, unit: 'MINUTES') {
utils.init_git()
utils.docker_run('ubuntu_build_cuda', 'build_ubuntu_gpu_cuda101_cudnn7_mkldnn_cpp_test', false)
utils.docker_run('ubuntu_gpu_cu101', 'build_ubuntu_gpu_cuda101_cudnn7_mkldnn_cpp_test', false)
utils.pack_lib('gpu_mkldnn_cpp_test_make', mx_lib_cpp_capi_make)
}
}
Expand All @@ -295,7 +295,7 @@ def compile_unix_full_gpu_no_tvm_op() {
ws('workspace/build-gpu-no-tvm-op') {
timeout(time: max_time, unit: 'MINUTES') {
utils.init_git()
utils.docker_run('ubuntu_build_cuda', 'build_ubuntu_gpu_cuda101_cudnn7_no_tvm_op', false)
utils.docker_run('ubuntu_gpu_cu101', 'build_ubuntu_gpu_cuda101_cudnn7_no_tvm_op', false)
utils.pack_lib('gpu_no_tvm_op', mx_lib_cpp_examples_no_tvm_op)
}
}
Expand Down
2 changes: 1 addition & 1 deletion tests/nightly/JenkinsfileForBinaries
Original file line number Diff line number Diff line change
Expand Up @@ -34,7 +34,7 @@ core_logic: {
node(NODE_LINUX_CPU) {
ws('workspace/build-mkldnn-gpu') {
utils.init_git()
utils.docker_run('ubuntu_build_cuda', 'build_ubuntu_gpu_mkldnn', false)
utils.docker_run('ubuntu_gpu_cu101', 'build_ubuntu_gpu_mkldnn', false)
utils.pack_lib('gpu', mx_lib)
}
}
Expand Down
2 changes: 2 additions & 0 deletions tests/python/unittest/test_numpy_interoperability.py
Original file line number Diff line number Diff line change
Expand Up @@ -3250,13 +3250,15 @@ def test_np_memory_array_function():
assert op(data_mx, np.ones((5, 0))) == op(data_np, _np.ones((5, 0)))


@unittest.skip("Flaky test https://github.com/apache/incubator-mxnet/issues/17840")
@with_seed()
@use_np
@with_array_function_protocol
def test_np_array_function_protocol():
check_interoperability(_NUMPY_ARRAY_FUNCTION_LIST)


@unittest.skip("Flaky test https://github.com/apache/incubator-mxnet/issues/17840")
@with_seed()
@use_np
@with_array_ufunc_protocol
Expand Down
1 change: 1 addition & 0 deletions tests/python/unittest/test_numpy_ndarray.py
Original file line number Diff line number Diff line change
Expand Up @@ -259,6 +259,7 @@ def check_identity_array_creation(shape, dtype):
assert type(y[1]) == np.ndarray


@unittest.skip("Flaky test https://github.com/apache/incubator-mxnet/issues/17840")
@with_seed()
def test_np_ndarray_binary_element_wise_ops():
np_op_map = {
Expand Down
4 changes: 4 additions & 0 deletions tests/python/unittest/test_numpy_op.py
Original file line number Diff line number Diff line change
Expand Up @@ -636,6 +636,7 @@ def hybrid_forward(self, F, a, b):
assert_almost_equal(b.grad.asnumpy(), np_backward[1], rtol=1e-2, atol=1e-2)


@unittest.skip("Flaky test https://github.com/apache/incubator-mxnet/issues/17840")
@with_seed()
@use_np
def test_np_sum():
Expand Down Expand Up @@ -1029,6 +1030,7 @@ def avg_backward(a, w, avg, axes, init_a_grad=None, init_w_grad=None):
assert_almost_equal(mx_out.asnumpy(), np_out.astype(dtype), rtol=rtol, atol=atol)


@unittest.skip("Flaky test https://github.com/apache/incubator-mxnet/issues/17840")
@with_seed()
@use_np
def test_np_mean():
Expand Down Expand Up @@ -2220,6 +2222,7 @@ def hybrid_forward(self, F, x):
assert same(ret_mx.asnumpy(), ret_np)


@unittest.skip("Flaky test https://github.com/apache/incubator-mxnet/issues/17840")
ChaiBapchya marked this conversation as resolved.
Show resolved Hide resolved
@with_seed()
@use_np
def test_np_unary_funcs():
Expand Down Expand Up @@ -2442,6 +2445,7 @@ def hybrid_forward(self, F, a, *args, **kwargs):
check_unary_func(func, ref_grad, shape, low, high)


@unittest.skip("Flaky test https://github.com/apache/incubator-mxnet/issues/17840")
@with_seed()
@use_np
def test_np_binary_funcs():
Expand Down