Skip to content
New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

[Benchmark] Add inference test for current benchmarking and record performance #4892

Merged
merged 29 commits into from
Jul 13, 2022
Merged
Show file tree
Hide file tree
Changes from 11 commits
Commits
Show all changes
29 commits
Select commit Hold shift + click to select a range
8b7b50a
[Benchmark] Add inference and profile in citation
yanbing-j Jun 27, 2022
064d529
Print end-to-end time of inference
yanbing-j Jun 29, 2022
fcd08fb
Print end-to-end time of one epoch
yanbing-j Jun 29, 2022
0a0d349
Add inference.sh
yanbing-j Jun 30, 2022
5eb0491
Add inference and profile for to_hetero_mag
yanbing-j Jul 1, 2022
ac33b48
[pre-commit.ci] auto fixes from pre-commit.com hooks
pre-commit-ci[bot] Jul 1, 2022
d5dfd35
Add inference for pna
yanbing-j Jul 4, 2022
5cb18c8
[pre-commit.ci] auto fixes from pre-commit.com hooks
pre-commit-ci[bot] Jul 4, 2022
c8d3b8c
Add inference for benchmark/points/edge_cnn
yanbing-j Jul 4, 2022
e563390
[pre-commit.ci] auto fixes from pre-commit.com hooks
pre-commit-ci[bot] Jul 4, 2022
b129f9d
Fix error
yanbing-j Jul 4, 2022
c3c3dae
Merge branch 'master' into yanbing/benchmark_inference
rusty1s Jul 9, 2022
1b69353
Merge branch 'yanbing/benchmark_inference' of https://github.com/yanb…
yanbing-j Jul 11, 2022
b80cd41
Update scripts
yanbing-j Jul 11, 2022
9bd310a
Add profile test to increase code coverage
yanbing-j Jul 11, 2022
5e90e81
Update script of points benchmark
yanbing-j Jul 11, 2022
f888faf
Update script for missing rename
yanbing-j Jul 12, 2022
55c5c42
Update scripts according to the comments
yanbing-j Jul 12, 2022
00bca65
Add CPU test for profile
yanbing-j Jul 12, 2022
b9a7593
Merge branch 'master' into yanbing/benchmark_inference
yanbing-j Jul 12, 2022
72f7eab
Merge branch 'master' into yanbing/benchmark_inference
yanbing-j Jul 13, 2022
cef334d
update
rusty1s Jul 13, 2022
e07d068
update
rusty1s Jul 13, 2022
b1a4620
update
rusty1s Jul 13, 2022
e6ffafe
update
rusty1s Jul 13, 2022
fea0f43
reset
rusty1s Jul 13, 2022
fd39e26
update
rusty1s Jul 13, 2022
7fd675b
changelog
rusty1s Jul 13, 2022
014e277
Merge branch 'master' into yanbing/benchmark_inference
rusty1s Jul 13, 2022
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
14 changes: 13 additions & 1 deletion benchmark/citation/appnp.py
Original file line number Diff line number Diff line change
Expand Up @@ -20,6 +20,9 @@
parser.add_argument('--normalize_features', type=bool, default=True)
parser.add_argument('--K', type=int, default=10)
parser.add_argument('--alpha', type=float, default=0.1)
parser.add_argument('--inference', type=bool, default=False)
rusty1s marked this conversation as resolved.
Show resolved Hide resolved
parser.add_argument('--profile', type=bool,
default=False) # Currently support profile in inference
rusty1s marked this conversation as resolved.
Show resolved Hide resolved
args = parser.parse_args()


Expand All @@ -46,5 +49,14 @@ def forward(self, data):

dataset = get_planetoid_dataset(args.dataset, args.normalize_features)
permute_masks = random_planetoid_splits if args.random_splits else None
print("appnp-{}-{}:".format(args.dataset, args.random_splits), end=' ')
lightaime marked this conversation as resolved.
Show resolved Hide resolved
run(dataset, Net(dataset), args.runs, args.epochs, args.lr, args.weight_decay,
args.early_stopping, permute_masks)
args.early_stopping, args.inference, args.profile, permute_masks)

if args.profile:
rusty1s marked this conversation as resolved.
Show resolved Hide resolved
import os
import pathlib
profile_dir = str(pathlib.Path.cwd()) + '/'
timeline_file = profile_dir + 'profile-citation-APPNP-' + args.dataset + '-random_splits-' + str(
lightaime marked this conversation as resolved.
Show resolved Hide resolved
args.random_splits) + '.json'
os.rename('timeline.json', timeline_file)
14 changes: 13 additions & 1 deletion benchmark/citation/arma.py
Original file line number Diff line number Diff line change
Expand Up @@ -21,6 +21,9 @@
parser.add_argument('--num_layers', type=int, default=1)
parser.add_argument('--shared_weights', type=bool, default=False)
parser.add_argument('--skip_dropout', type=float, default=0.75)
parser.add_argument('--inference', type=bool, default=False)
parser.add_argument('--profile', type=bool,
default=False) # Currently support profile in inference
args = parser.parse_args()


Expand Down Expand Up @@ -48,5 +51,14 @@ def forward(self, data):

dataset = get_planetoid_dataset(args.dataset, args.normalize_features)
permute_masks = random_planetoid_splits if args.random_splits else None
print("arma-{}-{}:".format(args.dataset, args.random_splits), end=' ')
run(dataset, Net(dataset), args.runs, args.epochs, args.lr, args.weight_decay,
args.early_stopping, permute_masks)
args.early_stopping, args.inference, args.profile, permute_masks)

if args.profile:
import os
import pathlib
profile_dir = str(pathlib.Path.cwd()) + '/'
timeline_file = profile_dir + 'profile-citation-ARMA-' + args.dataset + '-random_splits-' + str(
args.random_splits) + '.json'
os.rename('timeline.json', timeline_file)
14 changes: 13 additions & 1 deletion benchmark/citation/cheb.py
Original file line number Diff line number Diff line change
Expand Up @@ -18,6 +18,9 @@
parser.add_argument('--dropout', type=float, default=0.5)
parser.add_argument('--normalize_features', type=bool, default=True)
parser.add_argument('--num_hops', type=int, default=3)
parser.add_argument('--inference', type=bool, default=False)
parser.add_argument('--profile', type=bool,
default=False) # Currently support profile in inference
args = parser.parse_args()


Expand All @@ -41,5 +44,14 @@ def forward(self, data):

dataset = get_planetoid_dataset(args.dataset, args.normalize_features)
permute_masks = random_planetoid_splits if args.random_splits else None
print("cheby-{}-{}:".format(args.dataset, args.random_splits), end=' ')
run(dataset, Net(dataset), args.runs, args.epochs, args.lr, args.weight_decay,
args.early_stopping, permute_masks)
args.early_stopping, args.inference, args.profile, permute_masks)

if args.profile:
import os
import pathlib
profile_dir = str(pathlib.Path.cwd()) + '/'
timeline_file = profile_dir + 'profile-citation-CHEBY-' + args.dataset + '-random_splits-' + str(
args.random_splits) + '.json'
os.rename('timeline.json', timeline_file)
14 changes: 13 additions & 1 deletion benchmark/citation/gat.py
Original file line number Diff line number Diff line change
Expand Up @@ -19,6 +19,9 @@
parser.add_argument('--normalize_features', type=bool, default=True)
parser.add_argument('--heads', type=int, default=8)
parser.add_argument('--output_heads', type=int, default=1)
parser.add_argument('--inference', type=bool, default=False)
parser.add_argument('--profile', type=bool,
default=False) # Currently support profile in inference
args = parser.parse_args()


Expand Down Expand Up @@ -46,5 +49,14 @@ def forward(self, data):

dataset = get_planetoid_dataset(args.dataset, args.normalize_features)
permute_masks = random_planetoid_splits if args.random_splits else None
print("gat-{}-{}:".format(args.dataset, args.random_splits), end=' ')
run(dataset, Net(dataset), args.runs, args.epochs, args.lr, args.weight_decay,
args.early_stopping, permute_masks)
args.early_stopping, args.inference, args.profile, permute_masks)

if args.profile:
import os
import pathlib
profile_dir = str(pathlib.Path.cwd()) + '/'
timeline_file = profile_dir + 'profile-citation-GAT-' + args.dataset + '-random_splits-' + str(
args.random_splits) + '.json'
os.rename('timeline.json', timeline_file)
14 changes: 13 additions & 1 deletion benchmark/citation/gcn.py
Original file line number Diff line number Diff line change
Expand Up @@ -17,6 +17,9 @@
parser.add_argument('--hidden', type=int, default=16)
parser.add_argument('--dropout', type=float, default=0.5)
parser.add_argument('--normalize_features', type=bool, default=True)
parser.add_argument('--inference', type=bool, default=False)
parser.add_argument('--profile', type=bool,
default=False) # Currently support profile in inference
args = parser.parse_args()


Expand All @@ -40,5 +43,14 @@ def forward(self, data):

dataset = get_planetoid_dataset(args.dataset, args.normalize_features)
permute_masks = random_planetoid_splits if args.random_splits else None
print("gcn-{}-{}:".format(args.dataset, args.random_splits), end=' ')
run(dataset, Net(dataset), args.runs, args.epochs, args.lr, args.weight_decay,
args.early_stopping, permute_masks)
args.early_stopping, args.inference, args.profile, permute_masks)

if args.profile:
import os
import pathlib
profile_dir = str(pathlib.Path.cwd()) + '/'
timeline_file = profile_dir + 'profile-citation-GCN-' + args.dataset + '-random_splits-' + str(
args.random_splits) + '.json'
os.rename('timeline.json', timeline_file)
118 changes: 118 additions & 0 deletions benchmark/citation/inference.sh
Original file line number Diff line number Diff line change
@@ -0,0 +1,118 @@
#!/bin/sh
# Inference benchmark driver for the citation models (GCN, GAT, Cheby, SGC,
# ARMA, APPNP) on the Cora, CiteSeer and PubMed datasets.
#
# Each model is exercised in four configurations:
#   1. public split,  inference only
#   2. random splits, inference only
#   3. public split,  inference + profiler
#   4. random splits, inference + profiler
# When --profile=True is passed, each script writes 'timeline.json' and then
# renames it to a per-model/per-dataset file, so profile runs don't clobber
# each other.
#
# NOTE(review): the Python scripts declare these flags with argparse
# type=bool, so any non-empty value parses as True (even "False");
# "--inference=True" works, but omit the flag entirely to disable it.

echo "Cora"
echo "===="

echo "GCN"
python gcn.py --dataset=Cora --inference=True
python gcn.py --dataset=Cora --random_splits=True --inference=True
python gcn.py --dataset=Cora --inference=True --profile=True
python gcn.py --dataset=Cora --random_splits=True --inference=True --profile=True

echo "GAT"
python gat.py --dataset=Cora --inference=True
python gat.py --dataset=Cora --random_splits=True --inference=True
python gat.py --dataset=Cora --inference=True --profile=True
python gat.py --dataset=Cora --random_splits=True --inference=True --profile=True

echo "Cheby"
python cheb.py --dataset=Cora --num_hops=3 --inference=True
python cheb.py --dataset=Cora --num_hops=3 --random_splits=True --inference=True
python cheb.py --dataset=Cora --num_hops=3 --inference=True --profile=True
python cheb.py --dataset=Cora --num_hops=3 --random_splits=True --inference=True --profile=True

echo "SGC"
python sgc.py --dataset=Cora --K=3 --weight_decay=0.0005 --inference=True
python sgc.py --dataset=Cora --K=3 --weight_decay=0.0005 --random_splits=True --inference=True
python sgc.py --dataset=Cora --K=3 --weight_decay=0.0005 --inference=True --profile=True
python sgc.py --dataset=Cora --K=3 --weight_decay=0.0005 --random_splits=True --inference=True --profile=True

echo "ARMA"
python arma.py --dataset=Cora --num_stacks=2 --num_layers=1 --shared_weights=True --inference=True
python arma.py --dataset=Cora --num_stacks=3 --num_layers=1 --shared_weights=True --random_splits=True --inference=True
python arma.py --dataset=Cora --num_stacks=2 --num_layers=1 --shared_weights=True --inference=True --profile=True
python arma.py --dataset=Cora --num_stacks=3 --num_layers=1 --shared_weights=True --random_splits=True --inference=True --profile=True

echo "APPNP"
python appnp.py --dataset=Cora --alpha=0.1 --inference=True
python appnp.py --dataset=Cora --alpha=0.1 --random_splits=True --inference=True
python appnp.py --dataset=Cora --alpha=0.1 --inference=True --profile=True
python appnp.py --dataset=Cora --alpha=0.1 --random_splits=True --inference=True --profile=True

echo "CiteSeer"
echo "========"

echo "GCN"
python gcn.py --dataset=CiteSeer --inference=True
python gcn.py --dataset=CiteSeer --random_splits=True --inference=True
python gcn.py --dataset=CiteSeer --inference=True --profile=True
python gcn.py --dataset=CiteSeer --random_splits=True --inference=True --profile=True

echo "GAT"
python gat.py --dataset=CiteSeer --inference=True
python gat.py --dataset=CiteSeer --random_splits=True --inference=True
python gat.py --dataset=CiteSeer --inference=True --profile=True
python gat.py --dataset=CiteSeer --random_splits=True --inference=True --profile=True

echo "Cheby"
python cheb.py --dataset=CiteSeer --num_hops=2 --inference=True
python cheb.py --dataset=CiteSeer --num_hops=3 --random_splits=True --inference=True
python cheb.py --dataset=CiteSeer --num_hops=2 --inference=True --profile=True
python cheb.py --dataset=CiteSeer --num_hops=3 --random_splits=True --inference=True --profile=True

echo "SGC"
python sgc.py --dataset=CiteSeer --K=2 --weight_decay=0.005 --inference=True
python sgc.py --dataset=CiteSeer --K=2 --weight_decay=0.005 --random_splits=True --inference=True
python sgc.py --dataset=CiteSeer --K=2 --weight_decay=0.005 --inference=True --profile=True
python sgc.py --dataset=CiteSeer --K=2 --weight_decay=0.005 --random_splits=True --inference=True --profile=True

echo "ARMA"
python arma.py --dataset=CiteSeer --num_stacks=3 --num_layers=1 --shared_weights=True --inference=True
python arma.py --dataset=CiteSeer --num_stacks=3 --num_layers=1 --shared_weights=True --random_splits=True --inference=True
python arma.py --dataset=CiteSeer --num_stacks=3 --num_layers=1 --shared_weights=True --inference=True --profile=True
python arma.py --dataset=CiteSeer --num_stacks=3 --num_layers=1 --shared_weights=True --random_splits=True --inference=True --profile=True

echo "APPNP"
python appnp.py --dataset=CiteSeer --alpha=0.1 --inference=True
python appnp.py --dataset=CiteSeer --alpha=0.1 --random_splits=True --inference=True
python appnp.py --dataset=CiteSeer --alpha=0.1 --inference=True --profile=True
python appnp.py --dataset=CiteSeer --alpha=0.1 --random_splits=True --inference=True --profile=True

echo "PubMed"
echo "======"

echo "GCN"
python gcn.py --dataset=PubMed --inference=True
python gcn.py --dataset=PubMed --random_splits=True --inference=True
python gcn.py --dataset=PubMed --inference=True --profile=True
python gcn.py --dataset=PubMed --random_splits=True --inference=True --profile=True

echo "GAT"
python gat.py --dataset=PubMed --lr=0.01 --weight_decay=0.001 --output_heads=8 --inference=True
python gat.py --dataset=PubMed --lr=0.01 --weight_decay=0.001 --output_heads=8 --random_splits=True --inference=True
python gat.py --dataset=PubMed --lr=0.01 --weight_decay=0.001 --output_heads=8 --inference=True --profile=True
python gat.py --dataset=PubMed --lr=0.01 --weight_decay=0.001 --output_heads=8 --random_splits=True --inference=True --profile=True

echo "Cheby"
python cheb.py --dataset=PubMed --num_hops=2 --inference=True
python cheb.py --dataset=PubMed --num_hops=2 --random_splits=True --inference=True
python cheb.py --dataset=PubMed --num_hops=2 --inference=True --profile=True
python cheb.py --dataset=PubMed --num_hops=2 --random_splits=True --inference=True --profile=True

echo "SGC"
python sgc.py --dataset=PubMed --K=2 --weight_decay=0.0005 --inference=True
python sgc.py --dataset=PubMed --K=2 --weight_decay=0.0005 --random_splits=True --inference=True
python sgc.py --dataset=PubMed --K=2 --weight_decay=0.0005 --inference=True --profile=True
python sgc.py --dataset=PubMed --K=2 --weight_decay=0.0005 --random_splits=True --inference=True --profile=True

echo "ARMA"
python arma.py --dataset=PubMed --num_stacks=2 --num_layers=1 --skip_dropout=0 --inference=True
python arma.py --dataset=PubMed --num_stacks=2 --num_layers=1 --skip_dropout=0.5 --random_splits=True --inference=True
python arma.py --dataset=PubMed --num_stacks=2 --num_layers=1 --skip_dropout=0 --inference=True --profile=True
python arma.py --dataset=PubMed --num_stacks=2 --num_layers=1 --skip_dropout=0.5 --random_splits=True --inference=True --profile=True

echo "APPNP"
python appnp.py --dataset=PubMed --alpha=0.1 --inference=True
python appnp.py --dataset=PubMed --alpha=0.1 --random_splits=True --inference=True
python appnp.py --dataset=PubMed --alpha=0.1 --inference=True --profile=True
python appnp.py --dataset=PubMed --alpha=0.1 --random_splits=True --inference=True --profile=True
14 changes: 13 additions & 1 deletion benchmark/citation/sgc.py
Original file line number Diff line number Diff line change
Expand Up @@ -16,6 +16,9 @@
parser.add_argument('--early_stopping', type=int, default=10)
parser.add_argument('--normalize_features', type=bool, default=False)
parser.add_argument('--K', type=int, default=2)
parser.add_argument('--inference', type=bool, default=False)
parser.add_argument('--profile', type=bool,
default=False) # Currently support profile in inference
args = parser.parse_args()


Expand All @@ -36,5 +39,14 @@ def forward(self, data):

dataset = get_planetoid_dataset(args.dataset, args.normalize_features)
permute_masks = random_planetoid_splits if args.random_splits else None
print("sgc-{}-{}:".format(args.dataset, args.random_splits), end=' ')
run(dataset, Net(dataset), args.runs, args.epochs, args.lr, args.weight_decay,
args.early_stopping, permute_masks)
args.early_stopping, args.inference, args.profile, permute_masks)

if args.profile:
import os
import pathlib
profile_dir = str(pathlib.Path.cwd()) + '/'
timeline_file = profile_dir + 'profile-citation-SGC-' + args.dataset + '-random_splits-' + str(
args.random_splits) + '.json'
os.rename('timeline.json', timeline_file)
Loading