Additional GPU tests (#7126)
Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com>
Co-authored-by: rusty1s <matthias.fey@tu-dortmund.de>
3 people authored Apr 9, 2023
1 parent 8f40846 commit 271c113
Showing 4 changed files with 109 additions and 62 deletions.
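
Every test touched below follows the same recipe: decorate with @withCUDA from torch_geometric.testing, accept a device argument, create inputs with device=device, and move modules with .to(device). As a rough sketch of what such a decorator can look like (an illustration based on its observable behavior, not the shipped implementation):

import pytest
import torch

def with_cuda(func):
    # Hypothetical stand-in for torch_geometric.testing.withCUDA:
    # parametrize the test's `device` argument over CPU and, when
    # available, the first CUDA device.
    devices = [torch.device('cpu')]
    if torch.cuda.is_available():
        devices.append(torch.device('cuda:0'))
    return pytest.mark.parametrize('device', devices)(func)

Each decorated test then runs once per available device, which is how the extra device parameter appears in the signatures below.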
17 changes: 12 additions & 5 deletions test/loader/test_dataloader.py
@@ -7,7 +7,7 @@
 
 from torch_geometric.data import Data, HeteroData
 from torch_geometric.loader import DataLoader
-from torch_geometric.testing import get_random_edge_index
+from torch_geometric.testing import get_random_edge_index, withCUDA
 
 with_mp = sys.platform not in ['win32']
 num_workers_list = [0, 2] if with_mp else [0]
@@ -16,25 +16,32 @@
     multiprocessing.set_start_method('spawn')
 
 
+@withCUDA
 @pytest.mark.parametrize('num_workers', num_workers_list)
-def test_dataloader(num_workers):
+def test_dataloader(num_workers, device):
+    if num_workers > 0 and device != torch.device('cpu'):
+        return
+
     x = torch.Tensor([[1], [1], [1]])
     edge_index = torch.tensor([[0, 1, 1, 2], [1, 0, 2, 1]])
     face = torch.tensor([[0], [1], [2]])
     y = 2.
     z = torch.tensor(0.)
     name = 'data'
 
-    data = Data(x=x, edge_index=edge_index, y=y, z=z, name=name)
-    assert str(data) == (
-        "Data(x=[3, 1], edge_index=[2, 4], y=2.0, z=0.0, name='data')")
+    data = Data(x=x, edge_index=edge_index, y=y, z=z, name=name).to(device)
+    assert str(data) == ("Data(x=[3, 1], edge_index=[2, 4], y=2.0, z=0.0, "
+                         "name='data')")
     data.face = face
 
     loader = DataLoader([data, data, data, data], batch_size=2, shuffle=False,
                         num_workers=num_workers)
     assert len(loader) == 2
 
     for batch in loader:
+        assert batch.x.device == device
+        assert batch.edge_index.device == device
+        assert batch.z.device == device
         assert batch.num_graphs == len(batch) == 2
         assert batch.batch.tolist() == [0, 0, 0, 1, 1, 1]
         assert batch.ptr.tolist() == [0, 3, 6]
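
The new loop assertions pin down that DataLoader collation keeps batched tensors on the device of the input Data objects, while the early return skips the multi-worker case on GPU, since spawned DataLoader workers that exchange CUDA tensors need special inter-process handling. A standalone sketch of the asserted property (illustrative, not part of the commit; guarded so it is a no-op without a GPU):

import torch
from torch_geometric.data import Data
from torch_geometric.loader import DataLoader

if torch.cuda.is_available():
    device = torch.device('cuda:0')
    data = Data(
        x=torch.randn(3, 1, device=device),
        edge_index=torch.tensor([[0, 1], [1, 0]], device=device),
    )
    loader = DataLoader([data, data], batch_size=2)
    batch = next(iter(loader))
    assert batch.x.device == device  # collation did not move tensors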
17 changes: 9 additions & 8 deletions test/nn/conv/test_nn_conv.py
@@ -5,20 +5,21 @@
 
 import torch_geometric.typing
 from torch_geometric.nn import NNConv
-from torch_geometric.testing import is_full_test
+from torch_geometric.testing import is_full_test, withCUDA
 from torch_geometric.typing import SparseTensor
 from torch_geometric.utils import to_torch_coo_tensor
 
 
-def test_nn_conv():
-    x1 = torch.randn(4, 8)
-    x2 = torch.randn(2, 16)
-    edge_index = torch.tensor([[0, 1, 2, 3], [0, 0, 1, 1]])
-    value = torch.rand(edge_index.size(1), 3)
+@withCUDA
+def test_nn_conv(device):
+    x1 = torch.randn(4, 8, device=device)
+    x2 = torch.randn(2, 16, device=device)
+    edge_index = torch.tensor([[0, 1, 2, 3], [0, 0, 1, 1]], device=device)
+    value = torch.rand(edge_index.size(1), 3, device=device)
     adj1 = to_torch_coo_tensor(edge_index, value, size=(4, 4))
 
     nn = Seq(Lin(3, 32), ReLU(), Lin(32, 8 * 32))
-    conv = NNConv(8, 32, nn=nn)
+    conv = NNConv(8, 32, nn=nn).to(device)
     assert str(conv) == (
         'NNConv(8, 32, aggr=add, nn=Sequential(\n'
         '  (0): Linear(in_features=3, out_features=32, bias=True)\n'
@@ -49,7 +50,7 @@ def test_nn_conv():
     # Test bipartite message passing:
     adj1 = to_torch_coo_tensor(edge_index, value, size=(4, 2))
 
-    conv = NNConv((8, 16), 32, nn=nn)
+    conv = NNConv((8, 16), 32, nn=nn).to(device)
     assert str(conv) == (
         'NNConv((8, 16), 32, aggr=add, nn=Sequential(\n'
         '  (0): Linear(in_features=3, out_features=32, bias=True)\n'
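
Only the module construction lines change here: torch.nn.Module.to() applies recursively to registered submodules, so calling .to(device) on the NNConv also moves the wrapped Sequential edge network. A small check of that behavior (a sketch; it relies only on .to() recursion and on NNConv exposing the edge network as its nn attribute, which is visible in the repr above):

import torch
from torch.nn import Linear, ReLU, Sequential
from torch_geometric.nn import NNConv

device = torch.device('cuda:0' if torch.cuda.is_available() else 'cpu')
edge_nn = Sequential(Linear(3, 32), ReLU(), Linear(32, 8 * 32))
conv = NNConv(8, 32, nn=edge_nn).to(device)
# .to() moved the parameters of the nested edge network as well:
assert all(p.device.type == device.type for p in conv.nn.parameters())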
46 changes: 28 additions & 18 deletions test/nn/conv/test_rgcn_conv.py
@@ -3,32 +3,38 @@
 
 import torch_geometric.typing
 from torch_geometric.nn import FastRGCNConv, RGCNConv
-from torch_geometric.testing import is_full_test
+from torch_geometric.testing import is_full_test, withCUDA
 from torch_geometric.typing import SparseTensor
 
 classes = [RGCNConv, FastRGCNConv]
 confs = [(None, None), (2, None), (None, 2)]
 
 
+@withCUDA
 @pytest.mark.parametrize('conf', confs)
-def test_rgcn_conv_equality(conf):
+def test_rgcn_conv_equality(conf, device):
     num_bases, num_blocks = conf
 
-    x1 = torch.randn(4, 4)
-    edge_index = torch.tensor([[0, 1, 1, 2, 2, 3], [0, 0, 1, 0, 1, 1]])
-    edge_type = torch.tensor([0, 1, 1, 0, 0, 1])
+    x1 = torch.randn(4, 4, device=device)
+    edge_index = torch.tensor([
+        [0, 1, 1, 2, 2, 3],
+        [0, 0, 1, 0, 1, 1],
+    ], device=device)
+    edge_type = torch.tensor([0, 1, 1, 0, 0, 1], device=device)
 
     edge_index = torch.tensor([
         [0, 1, 1, 2, 2, 3, 0, 1, 1, 2, 2, 3],
         [0, 0, 1, 0, 1, 1, 0, 0, 1, 0, 1, 1],
-    ])
-    edge_type = torch.tensor([0, 1, 1, 0, 0, 1, 2, 3, 3, 2, 2, 3])
+    ], device=device)
+    edge_type = torch.tensor([0, 1, 1, 0, 0, 1, 2, 3, 3, 2, 2, 3],
+                             device=device)
 
     torch.manual_seed(12345)
-    conv1 = RGCNConv(4, 32, 4, num_bases, num_blocks, aggr='sum')
+    conv1 = RGCNConv(4, 32, 4, num_bases, num_blocks, aggr='sum').to(device)
 
     torch.manual_seed(12345)
-    conv2 = FastRGCNConv(4, 32, 4, num_bases, num_blocks, aggr='sum')
+    conv2 = FastRGCNConv(4, 32, 4, num_bases, num_blocks,
+                         aggr='sum').to(device)
 
     out1 = conv1(x1, edge_index, edge_type)
     out2 = conv2(x1, edge_index, edge_type)
@@ -40,19 +46,23 @@ def test_rgcn_conv_equality(conf):
     assert torch.allclose(out1, out2, atol=1e-6)
 
 
+@withCUDA
 @pytest.mark.parametrize('cls', classes)
 @pytest.mark.parametrize('conf', confs)
-def test_rgcn_conv(cls, conf):
+def test_rgcn_conv(cls, conf, device):
     num_bases, num_blocks = conf
 
-    x1 = torch.randn(4, 4)
-    x2 = torch.randn(2, 16)
-    idx1 = torch.arange(4)
-    idx2 = torch.arange(2)
-    edge_index = torch.tensor([[0, 1, 1, 2, 2, 3], [0, 0, 1, 0, 1, 1]])
-    edge_type = torch.tensor([0, 1, 1, 0, 0, 1])
+    x1 = torch.randn(4, 4, device=device)
+    x2 = torch.randn(2, 16, device=device)
+    idx1 = torch.arange(4, device=device)
+    idx2 = torch.arange(2, device=device)
+    edge_index = torch.tensor([
+        [0, 1, 1, 2, 2, 3],
+        [0, 0, 1, 0, 1, 1],
+    ], device=device)
+    edge_type = torch.tensor([0, 1, 1, 0, 0, 1], device=device)
 
-    conv = cls(4, 32, 2, num_bases, num_blocks, aggr='sum')
+    conv = cls(4, 32, 2, num_bases, num_blocks, aggr='sum').to(device)
     assert str(conv) == f'{cls.__name__}(4, 32, num_relations=2)'
 
     out1 = conv(x1, edge_index, edge_type)
@@ -87,7 +97,7 @@ def test_rgcn_conv(cls, conf):
         assert torch.allclose(jit(None, adj.t()), out2, atol=1e-6)
 
     # Test bipartite message passing:
-    conv = cls((4, 16), 32, 2, num_bases, num_blocks, aggr='sum')
+    conv = cls((4, 16), 32, 2, num_bases, num_blocks, aggr='sum').to(device)
     assert str(conv) == f'{cls.__name__}((4, 16), 32, num_relations=2)'
 
     out1 = conv((x1, x2), edge_index, edge_type)
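
test_rgcn_conv_equality relies on reseeding rather than copying weights: calling torch.manual_seed(12345) immediately before each construction makes RGCNConv and FastRGCNConv draw identical initial parameters, so the allclose on the outputs compares the two forward implementations alone. A minimal illustration of that seeding idiom (not from the commit):

import torch
from torch.nn import Linear

torch.manual_seed(12345)
a = Linear(4, 8)
torch.manual_seed(12345)
b = Linear(4, 8)
# Same seed and same sampling order, hence identical initialization:
assert torch.equal(a.weight, b.weight) and torch.equal(a.bias, b.bias)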
91 changes: 60 additions & 31 deletions test/nn/dense/test_linear.py
@@ -7,36 +7,41 @@
 
 import torch_geometric.typing
 from torch_geometric.nn import HeteroDictLinear, HeteroLinear, Linear
-from torch_geometric.testing import withPackage
+from torch_geometric.testing import withCUDA, withPackage
 
 weight_inits = ['glorot', 'kaiming_uniform', None]
 bias_inits = ['zeros', None]
 
 
+@withCUDA
 @pytest.mark.parametrize('weight', weight_inits)
 @pytest.mark.parametrize('bias', bias_inits)
-def test_linear(weight, bias):
-    x = torch.randn(3, 4, 16)
+def test_linear(weight, bias, device):
+    x = torch.randn(3, 4, 16, device=device)
     lin = Linear(16, 32, weight_initializer=weight, bias_initializer=bias)
+    lin = lin.to(device)
     assert str(lin) == 'Linear(16, 32, bias=True)'
     assert lin(x).size() == (3, 4, 32)
 
 
+@withCUDA
 @pytest.mark.parametrize('weight', weight_inits)
 @pytest.mark.parametrize('bias', bias_inits)
-def test_lazy_linear(weight, bias):
-    x = torch.randn(3, 4, 16)
+def test_lazy_linear(weight, bias, device):
+    x = torch.randn(3, 4, 16, device=device)
     lin = Linear(-1, 32, weight_initializer=weight, bias_initializer=bias)
+    lin = lin.to(device)
     assert str(lin) == 'Linear(-1, 32, bias=True)'
     assert lin(x).size() == (3, 4, 32)
     assert str(lin) == 'Linear(16, 32, bias=True)'
 
 
+@withCUDA
 @pytest.mark.parametrize('dim1', [-1, 16])
 @pytest.mark.parametrize('dim2', [-1, 16])
-def test_load_lazy_linear(dim1, dim2):
-    lin1 = Linear(dim1, 32)
-    lin2 = Linear(dim1, 32)
+def test_load_lazy_linear(dim1, dim2, device):
+    lin1 = Linear(dim1, 32).to(device)
+    lin2 = Linear(dim1, 32).to(device)
     lin2.load_state_dict(lin1.state_dict())
 
     if dim1 != -1:
@@ -78,19 +83,20 @@ def test_copy_unintialized_parameter():
         copy.deepcopy(weight)
 
 
+@withCUDA
 @pytest.mark.parametrize('lazy', [True, False])
-def test_copy_linear(lazy):
-    lin = Linear(-1 if lazy else 16, 32)
+def test_copy_linear(lazy, device):
+    lin = Linear(-1 if lazy else 16, 32).to(device)
 
-    copied_lin = copy.copy(lin)
+    copied_lin = copy.copy(lin).to(device)
     assert id(copied_lin) != id(lin)
     assert id(copied_lin.weight) == id(lin.weight)
     if not isinstance(copied_lin.weight, UninitializedParameter):
         assert copied_lin.weight.data_ptr() == lin.weight.data_ptr()
     assert id(copied_lin.bias) == id(lin.bias)
     assert copied_lin.bias.data_ptr() == lin.bias.data_ptr()
 
-    copied_lin = copy.deepcopy(lin)
+    copied_lin = copy.deepcopy(lin).to(device)
     assert id(copied_lin) != id(lin)
     assert id(copied_lin.weight) != id(lin.weight)
     if not isinstance(copied_lin.weight, UninitializedParameter):
@@ -102,11 +108,12 @@ def test_copy_linear(lazy):
     assert torch.allclose(copied_lin.bias, lin.bias)
 
 
-def test_hetero_linear():
-    x = torch.randn(3, 16)
-    type_vec = torch.tensor([0, 1, 2])
+@withCUDA
+def test_hetero_linear(device):
+    x = torch.randn(3, 16, device=device)
+    type_vec = torch.tensor([0, 1, 2], device=device)
 
-    lin = HeteroLinear(16, 32, num_types=3)
+    lin = HeteroLinear(16, 32, num_types=3).to(device)
     assert str(lin) == 'HeteroLinear(16, 32, num_types=3, bias=True)'
 
     out = lin(x, type_vec)
@@ -116,22 +123,27 @@ def test_hetero_linear():
     assert torch.allclose(jit(x, type_vec), out)
 
 
-def test_lazy_hetero_linear():
-    x = torch.randn(3, 16)
-    type_vec = torch.tensor([0, 1, 2])
+@withCUDA
+def test_lazy_hetero_linear(device):
+    x = torch.randn(3, 16, device=device)
+    type_vec = torch.tensor([0, 1, 2], device=device)
 
-    lin = HeteroLinear(-1, 32, num_types=3)
+    lin = HeteroLinear(-1, 32, num_types=3).to(device)
     assert str(lin) == 'HeteroLinear(-1, 32, num_types=3, bias=True)'
 
     out = lin(x, type_vec)
     assert out.size() == (3, 32)
 
 
+@withCUDA
 @pytest.mark.parametrize('bias', [True, False])
-def test_hetero_dict_linear(bias):
-    x_dict = {'v': torch.randn(3, 16), 'w': torch.randn(2, 8)}
+def test_hetero_dict_linear(bias, device):
+    x_dict = {
+        'v': torch.randn(3, 16, device=device),
+        'w': torch.randn(2, 8, device=device),
+    }
 
-    lin = HeteroDictLinear({'v': 16, 'w': 8}, 32, bias=bias)
+    lin = HeteroDictLinear({'v': 16, 'w': 8}, 32, bias=bias).to(device)
     assert str(lin) == (f"HeteroDictLinear({{'v': 16, 'w': 8}}, 32, "
                         f"bias={bias})")
 
@@ -140,9 +152,12 @@ def test_hetero_dict_linear(bias):
     assert out_dict['v'].size() == (3, 32)
     assert out_dict['w'].size() == (2, 32)
 
-    x_dict = {'v': torch.randn(3, 16), 'w': torch.randn(2, 16)}
+    x_dict = {
+        'v': torch.randn(3, 16, device=device),
+        'w': torch.randn(2, 16, device=device),
+    }
 
-    lin = HeteroDictLinear(16, 32, types=['v', 'w'], bias=bias)
+    lin = HeteroDictLinear(16, 32, types=['v', 'w'], bias=bias).to(device)
     assert str(lin) == (f"HeteroDictLinear({{'v': 16, 'w': 16}}, 32, "
                         f"bias={bias})")
 
@@ -151,6 +166,15 @@ def test_hetero_dict_linear(bias):
     assert out_dict['v'].size() == (3, 32)
     assert out_dict['w'].size() == (2, 32)
 
+
+def test_hetero_dict_linear_jit():
+    x_dict = {
+        'v': torch.randn(3, 16),
+        'w': torch.randn(2, 8),
+    }
+
+    lin = HeteroDictLinear({'v': 16, 'w': 8}, 32)
+
     if torch_geometric.typing.WITH_GMM:
         # See: https://github.com/pytorch/pytorch/pull/97960
         with pytest.raises(RuntimeError, match="Unknown builtin op"):
@@ -160,10 +184,14 @@ def test_hetero_dict_linear(bias):
         assert len(jit(x_dict)) == 2
 
 
-def test_lazy_hetero_dict_linear():
-    x_dict = {'v': torch.randn(3, 16), 'w': torch.randn(2, 8)}
+@withCUDA
+def test_lazy_hetero_dict_linear(device):
+    x_dict = {
+        'v': torch.randn(3, 16, device=device),
+        'w': torch.randn(2, 8, device=device),
+    }
 
-    lin = HeteroDictLinear(-1, 32, types=['v', 'w'])
+    lin = HeteroDictLinear(-1, 32, types=['v', 'w']).to(device)
     assert str(lin) == "HeteroDictLinear({'v': -1, 'w': -1}, 32, bias=True)"
 
     out_dict = lin(x_dict)
@@ -172,15 +200,16 @@ def test_lazy_hetero_dict_linear():
     assert out_dict['v'].size() == (3, 32)
     assert out_dict['w'].size() == (2, 32)
 
 
+@withCUDA
 @withPackage('pyg_lib')
 @pytest.mark.parametrize('type_vec', [
     torch.tensor([0, 0, 1, 1, 2, 2]),
     torch.tensor([0, 1, 2, 0, 1, 2]),
 ])
-def test_hetero_linear_sort(type_vec):
-    x = torch.randn(type_vec.numel(), 16)
+def test_hetero_linear_sort(type_vec, device):
+    x = torch.randn(type_vec.numel(), 16, device=device)
 
-    lin = HeteroLinear(16, 32, num_types=3)
+    lin = HeteroLinear(16, 32, num_types=3).to(device)
     out = lin(x, type_vec)
 
     for i in range(type_vec.numel()):
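
test_hetero_linear_sort feeds HeteroLinear both a sorted and an unsorted type_vec, and its (truncated) loop checks the fused output row by row. As a sketch of the reference semantics being exercised (the lin.weight / lin.bias layout of shape [num_types, in, out] and [num_types, out] is an assumption about the module's internals, not confirmed by this diff):

import torch
from torch_geometric.nn import HeteroLinear

type_vec = torch.tensor([0, 1, 2, 0, 1, 2])
x = torch.randn(type_vec.numel(), 16)
lin = HeteroLinear(16, 32, num_types=3)
out = lin(x, type_vec)

for i in range(type_vec.numel()):
    # Row i is transformed by the weight and bias of its own type:
    expected = x[i] @ lin.weight[type_vec[i]] + lin.bias[type_vec[i]]
    assert torch.allclose(out[i], expected, atol=1e-6)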
