Commit
Simplify runtests.jl (#531)
* cl/test

* reinstate cpu tests on CUDA worker
CarloLucibello authored Sep 11, 2023
1 parent 1b30040 commit 83df642
Showing 4 changed files with 110 additions and 136 deletions.
10 changes: 6 additions & 4 deletions .buildkite/pipeline.yml
@@ -14,9 +14,10 @@ steps:
       queue: "juliagpu"
       cuda: "*"
     env:
-      NNLIB_TEST_CUDA: true
+      NNLIB_TEST_CUDA: "true"
+      NNLIB_TEST_CPU: "false"
     if: build.message !~ /\[skip tests\]/
-    timeout_in_minutes: 60
+    timeout_in_minutes: 180
     matrix:
       setup:
         julia:
@@ -47,10 +48,11 @@ steps:
       JULIA_AMDGPU_CORE_MUST_LOAD: "1"
       JULIA_AMDGPU_HIP_MUST_LOAD: "1"
       JULIA_AMDGPU_DISABLE_ARTIFACTS: "1"
-      NNLIB_TEST_AMDGPU: true
+      NNLIB_TEST_AMDGPU: "true"
+      NNLIB_TEST_CPU: "true" # Could be useful to uncover multithreading related issues
       # Buildkite workers have more threads.
       JULIA_NUM_THREADS: 4
-
+
   - label: "Benchmarks"
     plugins:
       - JuliaCI/julia#v1:
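For local runs, the same switches can be set outside Buildkite. A minimal sketch, assuming NNlib is available (for example dev'ed) in the active Julia environment; the particular combination of flags is just an illustration mirroring the CUDA CI job above:

using Pkg
# Enable the CUDA test groups and skip the CPU-only groups for this run.
withenv("NNLIB_TEST_CUDA" => "true", "NNLIB_TEST_CPU" => "false") do
    Pkg.test("NNlib")
end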
20 changes: 20 additions & 0 deletions Project.toml
@@ -35,3 +35,23 @@ GPUArraysCore = "0.1"
 KernelAbstractions = "0.9.2"
 Requires = "1.0"
 julia = "1.9"
+
+[extras]
+AMDGPU = "21141c5a-9bdb-4563-92ae-f87d6854732e"
+CUDA = "052768ef-5323-5732-b1bb-66c8b64840ba"
+ChainRulesTestUtils = "cdddcdb0-9152-4a09-a978-84456f9df70a"
+Documenter = "e30172f5-a6a5-5a46-863b-614d45cd2de4"
+FiniteDifferences = "26cc04aa-876d-5657-8c51-4c34ba976000"
+ForwardDiff = "f6369f11-7733-5829-9624-2563aa707210"
+Logging = "56ddb016-857b-54e1-b83d-db4d58db5568"
+ReverseDiff = "37e2e3b7-166d-5795-8a7a-e32c996b4267"
+StableRNGs = "860ef19b-820b-49d6-a774-d7a799459cd3"
+Test = "8dfed614-e22c-5e08-85e1-65c5234f0b40"
+UnicodePlots = "b8865327-cd53-5732-bb35-84acbb429228"
+Zygote = "e88e6eb3-aa80-5325-afca-941959d7151f"
+cuDNN = "02a925ec-e4fe-4b08-9a7e-0d78e3d38ccd"
+
+[targets]
+test = ["AMDGPU", "CUDA", "ChainRulesTestUtils", "Documenter",
+"FiniteDifferences", "ForwardDiff", "Logging", "ReverseDiff",
+"StableRNGs", "Test", "UnicodePlots", "Zygote", "cuDNN"]
20 changes: 0 additions & 20 deletions test/Project.toml

This file was deleted.

196 changes: 84 additions & 112 deletions test/runtests.jl
@@ -13,8 +13,9 @@ import ReverseDiff as RD # used in `pooling.jl`
 
 DocMeta.setdocmeta!(NNlib, :DocTestSetup, :(using NNlib, UnicodePlots); recursive=true)
 
-ENV["NNLIB_TEST_CUDA"] = true # uncomment to run CUDA tests
-# ENV["NNLIB_TEST_AMDGPU"] = true # uncomment to run AMDGPU tests
+# ENV["NNLIB_TEST_CUDA"] = "true" # uncomment to run CUDA tests
+# ENV["NNLIB_TEST_AMDGPU"] = "true" # uncomment to run AMDGPU tests
+# ENV["NNLIB_TEST_CPU"] = "false" # uncomment to skip CPU tests
 
 const rng = StableRNG(123)
 include("test_utils.jl")
@@ -49,138 +50,109 @@ function nnlib_testsuite(Backend; skip_tests = Set{String}())
     end
 end
 
-@testset "NNlib.jl" verbose=true begin
-    @testset verbose=true "Test Suite" begin
-        @testset "CPU" begin
-            nnlib_testsuite(CPU)
-        end
+@testset verbose=true "NNlib.jl" begin
 
-        if get(ENV, "NNLIB_TEST_CUDA", "false") == "true"
-            using CUDA
-            if CUDA.functional()
-                @testset "CUDABackend" begin
-                    nnlib_testsuite(CUDABackend; skip_tests=Set(("Scatter", "Gather")))
-                end
-            else
-                @info "CUDA.jl is not functional. Skipping test suite for CUDABackend."
+    if get(ENV, "NNLIB_TEST_CPU", "true") == "true"
+        @testset "CPU" begin
+            @testset "Doctests" begin
+                doctest(NNlib, manual=false)
             end
-        else
-            @info "Skipping CUDA tests, set NNLIB_TEST_CUDA=true to run them."
-        end
 
-        if get(ENV, "NNLIB_TEST_AMDGPU", "false") == "true"
-            import Pkg
-            test_info = Pkg.project()
-            Pkg.develop("AMDGPU")
-
-            using AMDGPU
-            if AMDGPU.functional()
-                @testset "ROCBackend" begin
-                    nnlib_testsuite(ROCBackend)
-                    AMDGPU.synchronize(; blocking=false)
-                end
-            else
-                @info "AMDGPU.jl is not functional. Skipping test suite for ROCBackend."
-            end
-        else
-            @info "Skipping AMDGPU tests, set NNLIB_TEST_AMDGPU=true to run them."
-        end
-    end
+            nnlib_testsuite(CPU)
 
-    @testset verbose=true "Tests" begin
-        if get(ENV, "NNLIB_TEST_CUDA", "false") == "true"
-            using CUDA
-            if CUDA.functional()
-                @testset "CUDA" begin
-                    include("ext_cuda/runtests.jl")
-                end
-            else
-                @info "Insufficient version or CUDA not found; Skipping CUDA tests"
+            @testset "Activation Functions" begin
+                include("activations.jl")
+                include("bias_act.jl")
             end
-        else
-            @info "Skipping CUDA tests, set NNLIB_TEST_CUDA=true to run them"
-        end
 
-        if get(ENV, "NNLIB_TEST_AMDGPU", "false") == "true"
-            import Pkg
-            test_info = Pkg.project()
-            Pkg.develop("AMDGPU")
-
-            using AMDGPU
-            AMDGPU.versioninfo()
-            if AMDGPU.functional() && AMDGPU.functional(:MIOpen)
-                @show AMDGPU.MIOpen.version()
-                @testset "AMDGPU" begin
-                    include("ext_amdgpu/runtests.jl")
-                    AMDGPU.synchronize(; blocking=false)
-                end
-            else
-                @info "AMDGPU.jl package is not functional. Skipping AMDGPU tests."
+            @testset "Attention" begin
+                include("attention.jl")
             end
-        else
-            @info "Skipping AMDGPU tests, set NNLIB_TEST_AMDGPU=true to run them."
-        end
 
-        @testset "Doctests" begin
-            doctest(NNlib, manual=false)
-        end
-
-        @testset "Activation Functions" begin
-            include("activations.jl")
-            include("bias_act.jl")
-        end
+            @testset "Batched Multiplication" begin
+                include("batchedmul.jl")
+            end
 
-        @testset "Attention" begin
-            include("attention.jl")
-        end
+            @testset "Convolution" begin
+                include("conv.jl")
+                include("conv_bias_act.jl")
+            end
 
-        @testset "Batched Multiplication" begin
-            include("batchedmul.jl")
-        end
+            @testset "CTC Loss" begin
+                include("ctc.jl")
+            end
 
-        @testset "Convolution" begin
-            include("conv.jl")
-            include("conv_bias_act.jl")
-        end
+            @testset "Dropout" begin
+                include("dropout.jl")
+            end
 
-        @testset "CTC Loss" begin
-            include("ctc.jl")
-        end
+            @testset "Fold/Unfold" begin
+                include("fold.jl")
+            end
 
-        @testset "Dropout" begin
-            include("dropout.jl")
-        end
+            @testset "Inference" begin
+                include("inference.jl")
+            end
 
-        @testset "Fold/Unfold" begin
-            include("fold.jl")
-        end
+            @testset "Pooling" begin
+                include("pooling.jl")
+            end
 
-        @testset "Inference" begin
-            include("inference.jl")
-        end
+            @testset "Padding" begin
+                include("padding.jl")
+            end
 
-        @testset "Pooling" begin
-            include("pooling.jl")
-        end
+            @testset "Softmax" begin
+                include("softmax.jl")
+            end
 
-        @testset "Padding" begin
-            include("padding.jl")
-        end
+            @testset "Utilities" begin
+                include("utils.jl")
+            end
 
-        @testset "Softmax" begin
-            include("softmax.jl")
-        end
+            @testset "Grid Sampling" begin
+                include("sampling.jl")
+            end
 
-        @testset "Utilities" begin
-            include("utils.jl")
+            @testset "Functions" begin
+                include("functions.jl")
+            end
         end
+    else
+        @info "Skipping CPU tests, set NNLIB_TEST_CPU=true to run them."
+    end
 
-        @testset "Grid Sampling" begin
-            include("sampling.jl")
-        end
+    if get(ENV, "NNLIB_TEST_CUDA", "false") == "true"
+        using CUDA
+        if CUDA.functional()
+            @testset "CUDA" begin
+                nnlib_testsuite(CUDABackend; skip_tests=Set(("Scatter", "Gather")))
 
-        @testset "Functions" begin
-            include("functions.jl")
+                include("ext_cuda/runtests.jl")
+            end
+        else
+            @info "Insufficient version or CUDA not found; Skipping CUDA tests"
         end
+    else
+        @info "Skipping CUDA tests, set NNLIB_TEST_CUDA=true to run them"
     end
+
+    if get(ENV, "NNLIB_TEST_AMDGPU", "false") == "true"
+        using AMDGPU
+        AMDGPU.versioninfo()
+        if AMDGPU.functional() && AMDGPU.functional(:MIOpen)
+            @show AMDGPU.MIOpen.version()
+            @testset "AMDGPU" begin
+                nnlib_testsuite(ROCBackend)
+                AMDGPU.synchronize(; blocking=false)
+
+                include("ext_amdgpu/runtests.jl")
+                AMDGPU.synchronize(; blocking=false)
+            end
+        else
+            @info "AMDGPU.jl package is not functional. Skipping AMDGPU tests."
+        end
+    else
+        @info "Skipping AMDGPU tests, set NNLIB_TEST_AMDGPU=true to run them."
+    end
 end
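Each backend section in the new runtests.jl uses the same gating idiom. A small illustration of how the switches are read (the run_* names below are only local to this sketch): ENV values reach Julia as strings, so each flag is compared against the literal string "true"; CPU tests are on by default, GPU tests are opt-in.

run_cpu    = get(ENV, "NNLIB_TEST_CPU",    "true")  == "true"   # on by default
run_cuda   = get(ENV, "NNLIB_TEST_CUDA",   "false") == "true"   # opt-in
run_amdgpu = get(ENV, "NNLIB_TEST_AMDGPU", "false") == "true"   # opt-in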
