From e79edc6d427d6ec5b08928d2f314ef9d0e115b53 Mon Sep 17 00:00:00 2001 From: CarloLucibello Date: Sun, 1 Dec 2024 09:12:05 +0100 Subject: [PATCH] fix --- GNNlib/test/msgpass.jl | 10 +++------- GNNlib/test/test_module.jl | 16 ++++++++++++++- GraphNeuralNetworks/test/layers/conv.jl | 2 +- GraphNeuralNetworks/test/test_module.jl | 26 ++++++++++++++++++++++--- 4 files changed, 42 insertions(+), 12 deletions(-) diff --git a/GNNlib/test/msgpass.jl b/GNNlib/test/msgpass.jl index 5741cdb5a..a9dff3a56 100644 --- a/GNNlib/test/msgpass.jl +++ b/GNNlib/test/msgpass.jl @@ -168,8 +168,7 @@ end @testset "copy_xj +" begin for g in TEST_GRAPHS - dev = gpu_device(force=true) - broken = get_graph_type(g) == :sparse && dev isa AMDGPUDevice + broken = get_graph_type(g) == :sparse && gpu_backend() == "AMDGPU" f(g, x) = propagate(copy_xj, g, +, xj = x) @test test_gradients( f, g, g.x; test_gpu=true, test_grad_f=false, compare_finite_diff=false @@ -179,8 +178,7 @@ end @testset "copy_xj mean" begin for g in TEST_GRAPHS - dev = gpu_device(force=true) - broken = get_graph_type(g) == :sparse && dev isa AMDGPUDevice + broken = get_graph_type(g) == :sparse && gpu_backend() == "AMDGPU" f(g, x) = propagate(copy_xj, g, mean, xj = x) @test test_gradients( f, g, g.x; test_gpu=true, test_grad_f=false, compare_finite_diff=false @@ -190,8 +188,7 @@ end @testset "e_mul_xj +" begin for g in TEST_GRAPHS - dev = gpu_device(force=true) - broken = get_graph_type(g) == :sparse && dev isa AMDGPUDevice + broken = get_graph_type(g) == :sparse && gpu_backend() == "AMDGPU" e = rand(Float32, size(g.x, 1), g.num_edges) f(g, x, e) = propagate(e_mul_xj, g, +; xj = x, e) @test test_gradients( @@ -207,7 +204,6 @@ end g = set_edge_weight(g, w) return propagate(w_mul_xj, g, +, xj = x) end - dev = gpu_device(force=true) # @show get_graph_type(g) has_isolated_nodes(g) # broken = get_graph_type(g) == :sparse broken = true diff --git a/GNNlib/test/test_module.jl b/GNNlib/test/test_module.jl index 27a83154c..b6894cdfa 100644 
--- a/GNNlib/test/test_module.jl +++ b/GNNlib/test/test_module.jl @@ -45,7 +45,7 @@ using Flux: Flux # from this module export D_IN, D_OUT, GRAPH_TYPES, TEST_GRAPHS, test_gradients, finitediff_withgradient, - check_equal_leaves + check_equal_leaves, gpu_backend const D_IN = 3 @@ -177,4 +177,18 @@ TEST_GRAPHS = [generate_test_graphs(:coo)..., generate_test_graphs(:dense)..., generate_test_graphs(:sparse)...] + +function gpu_backend() + dev = gpu_device() + if dev isa CUDADevice + return "CUDA" + elseif dev isa AMDGPUDevice + return "AMDGPU" + elseif dev isa MetalDevice + return "Metal" + else + return "Unknown" + end +end + end # module \ No newline at end of file diff --git a/GraphNeuralNetworks/test/layers/conv.jl b/GraphNeuralNetworks/test/layers/conv.jl index 8ea2a48db..4e7d87fd7 100644 --- a/GraphNeuralNetworks/test/layers/conv.jl +++ b/GraphNeuralNetworks/test/layers/conv.jl @@ -102,7 +102,7 @@ end l = ChebConv(D_IN => D_OUT, k) for g in TEST_GRAPHS has_isolated_nodes(g) && continue - broken = get_graph_type(g) == :sparse || gpu_device() isa AMDGPUDevice + broken = get_graph_type(g) == :sparse || gpu_backend() == "AMDGPU" @test size(l(g, g.x)) == (D_OUT, g.num_nodes) broken=broken @test test_gradients( l, g, g.x, rtol = RTOL_LOW, test_gpu = true, compare_finite_diff = false diff --git a/GraphNeuralNetworks/test/test_module.jl b/GraphNeuralNetworks/test/test_module.jl index fa50e5821..0cd77f7f8 100644 --- a/GraphNeuralNetworks/test/test_module.jl +++ b/GraphNeuralNetworks/test/test_module.jl @@ -38,9 +38,15 @@ using SparseArrays # from Base export mean, randn, SparseArrays, AbstractSparseMatrix -# from other packages -export Flux, gradient, Dense, Chain, relu, random_regular_graph, erdos_renyi, - BatchNorm, LayerNorm, Dropout, Parallel +# from Flux.jl +export Flux, gradient, Dense, Chain, relu, + BatchNorm, LayerNorm, Dropout, Parallel, + gpu_device, cpu_device, get_device, + CPUDevice, CUDADevice, AMDGPUDevice, MetalDevice, + gpu_backend + +# from Graphs.jl 
+export random_regular_graph, erdos_renyi # from this module export D_IN, D_OUT, GRAPH_TYPES, TEST_GRAPHS, @@ -178,5 +184,19 @@ TEST_GRAPHS = [generate_test_graphs(:coo)..., generate_test_graphs(:dense)..., generate_test_graphs(:sparse)...] + +function gpu_backend() + dev = gpu_device() + if dev isa CUDADevice + return "CUDA" + elseif dev isa AMDGPUDevice + return "AMDGPU" + elseif dev isa MetalDevice + return "Metal" + else + return "Unknown" + end +end + end # testmodule