predict.py
# this file is based on code publicly available at
# https://github.com/locuslab/smoothing
# written by Jeremy Cohen.
""" This script loads a base classifier and then runs PREDICT on many examples from a dataset."""
import argparse
import datetime
from time import time
import setGPU
import torch
from third_party.core import Smooth
from architectures import get_architecture
from datasets import get_dataset, DATASETS, get_num_classes
parser = argparse.ArgumentParser(description='Predict on many examples')
parser.add_argument("dataset", choices=DATASETS, help="which dataset")
parser.add_argument("base_classifier", type=str, help="path to saved pytorch model of base classifier")
parser.add_argument("sigma", type=float, help="noise hyperparameter")
parser.add_argument("outfile", type=str, help="output file")
parser.add_argument("--batch", type=int, default=1000, help="batch size")
parser.add_argument("--skip", type=int, default=1, help="how many examples to skip")
parser.add_argument("--max", type=int, default=-1, help="stop after this many examples")
parser.add_argument("--split", choices=["train", "test"], default="test", help="train or test set")
parser.add_argument("--N", type=int, default=100000, help="number of samples to use")
parser.add_argument("--alpha", type=float, default=0.001, help="failure probability")
args = parser.parse_args()
if __name__ == "__main__":
    # load the base classifier
    checkpoint = torch.load(args.base_classifier)
    base_classifier = get_architecture(checkpoint["arch"], args.dataset)
    base_classifier.load_state_dict(checkpoint['state_dict'])

    # create the smoothed classifier g
    smoothed_classifier = Smooth(base_classifier, get_num_classes(args.dataset), args.sigma)
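    # g(x) is the class the base classifier is most likely to predict when isotropic
    # Gaussian noise with standard deviation sigma is added to the input
    # (randomized smoothing, Cohen et al. 2019)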
    # prepare output file
    f = open(args.outfile, 'w')
    print("idx\tlabel\tpredict\tcorrect\ttime", file=f, flush=True)

    # iterate through the dataset
    dataset = get_dataset(args.dataset, args.split)
    for i in range(len(dataset)):

        # only predict on every args.skip-th example, and stop after args.max examples
        if i % args.skip != 0:
            continue
        if i == args.max:
            break

        (x, label) = dataset[i]
        x = x.cuda()

        before_time = time()
        # make the prediction
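        # Smooth.predict draws args.N noisy copies of x in batches of args.batch,
        # takes the class with the highest count, and (in the reference
        # locuslab/smoothing implementation) returns Smooth.ABSTAIN (-1) when a
        # two-sided binomial test on the top two counts is not significant at level args.alpha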
        prediction = smoothed_classifier.predict(x, args.N, args.alpha, args.batch)
        after_time = time()
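        # an abstention never equals the true label, so it is counted as incorrect here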
        correct = int(prediction == label)
        time_elapsed = str(datetime.timedelta(seconds=(after_time - before_time)))

        # log the prediction and whether it was correct
        print("{}\t{}\t{}\t{}\t{}".format(i, label, prediction, correct, time_elapsed), file=f, flush=True)

    f.close()