-
Notifications
You must be signed in to change notification settings - Fork 0
/
Copy pathExpProcedure.py
118 lines (102 loc) · 4.68 KB
/
ExpProcedure.py
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Sat Jul 23 18:29:36 2022
@author: mickey
"""
import numpy as np
from scipy.stats import norm
from GapEstimator import BagProcedure, BatchProcedure
# import logging
# logger = logging.getLogger("gapEstimator")
def computeOptVal(gapProblem, gapEstimator, computeObjVal, getOptSol, getSample, alpha, n, numTrial, m=None, k=None, B=None, rng=None):
    """Evaluate a lower-bound estimator for the true optimal value over repeated trials.

    For each of ``numTrial`` independent trials, draws a sample of size ``n`` and
    runs ``gapEstimator`` to obtain a level-``alpha`` lower confidence bound on the
    optimal value, together with a point estimate and an error-variance estimate.

    Parameters
    ----------
    gapProblem : problem object passed through to ``gapEstimator.run``.
    gapEstimator : estimator instance; extra arguments depend on its concrete type
        (``k``/``B`` for BagProcedure, ``m`` for BatchProcedure).
    computeObjVal : callable mapping a solution to its true objective value.
    getOptSol : callable returning the true optimal solution.
    getSample : callable ``(n, rng) -> data`` generating a sample of size ``n``.
    alpha : nominal significance level of the lower bound.
    n : sample size per trial.
    numTrial : number of independent trials.
    m, k, B : procedure-specific tuning parameters (see above).
    rng : optional numpy Generator; a fresh default_rng() is created if None.

    Returns
    -------
    tuple of six floats:
        coverage rate P(bound <= true optimum), mean and variance of the bound,
        mean and variance of the point estimate, and mean error-variance estimate.
    """
    if rng is None:
        rng = np.random.default_rng()
    trueSol = getOptSol()
    trueObj = computeObjVal(trueSol)
    bound = np.zeros(numTrial)
    pointEst = np.zeros(numTrial)
    errorVar = np.zeros(numTrial)
    for i in range(numTrial):
        # generate sample
        data = getSample(n, rng)
        # compute lower bound of optimal value; dispatch on estimator type to
        # forward the tuning parameters each procedure requires
        if isinstance(gapEstimator, BagProcedure):
            assert k is not None
            assert B is not None
            newBound, newPointEst, newErrorVar = gapEstimator.run(gapProblem, data, alpha, k, B)
        elif isinstance(gapEstimator, BatchProcedure):
            assert m is not None
            newBound, newPointEst, newErrorVar = gapEstimator.run(gapProblem, data, alpha, m)
        else:
            newBound, newPointEst, newErrorVar = gapEstimator.run(gapProblem, data, alpha)
        bound[i] = newBound
        pointEst[i] = newPointEst
        errorVar[i] = newErrorVar
    # BUG FIX: previously returned np.mean(newErrorVar) — the scalar from the
    # last trial only; average the accumulated per-trial errorVar array instead.
    return np.mean(bound <= trueObj), np.mean(bound), np.var(bound), np.mean(pointEst), np.var(pointEst), np.mean(errorVar)
def computeGapBC(gapProblem, gapEstimator, computeObjVal, getOptSol, getSample, alpha, n1, n2, numTrial, m=None, k=None, B=None, rng=None):
    """Evaluate an upper bound on the optimality gap via a Bonferroni-style split.

    Each trial draws ``n1 + n2`` observations: the first ``n1`` produce a candidate
    solution ``x0`` by SAA; the remaining ``n2`` give an upper confidence bound on
    the objective at ``x0`` (normal approximation at level ``alpha/2``), which is
    combined with a lower bound on the optimal value (also at ``alpha/2``) into a
    nonnegative gap bound ``max(upper - lower, 0)``.

    Parameters
    ----------
    gapProblem : problem object exposing ``computeSAA`` and ``getSAAObj``.
    gapEstimator : lower-bound estimator; ``k``/``B`` for BagProcedure,
        ``m`` for BatchProcedure.
    computeObjVal : callable mapping a solution to its true objective value.
    getOptSol : callable returning the true optimal solution.
    getSample : callable ``(n, rng) -> data`` generating a sample of size ``n``.
    alpha : total nominal significance level (split equally across both bounds).
    n1, n2 : sample sizes for the solution and evaluation stages.
    numTrial : number of independent trials.
    m, k, B : procedure-specific tuning parameters.
    rng : optional numpy Generator; a fresh default_rng() is created if None.

    Returns
    -------
    tuple of six floats:
        coverage rate P(bound >= true gap), mean and variance of the gap bound,
        mean and variance of the point estimate, and mean error-variance estimate.
    """
    if rng is None:
        rng = np.random.default_rng()
    trueSol = getOptSol()
    trueObj = computeObjVal(trueSol)
    bound = np.zeros(numTrial)
    pointEst = np.zeros(numTrial)
    errorVar = np.zeros(numTrial)
    gap = np.zeros(numTrial)
    n = n1 + n2
    for i in range(numTrial):
        # generate sample
        data = getSample(n, rng)
        # stage 1: candidate solution from the first n1 observations
        x0, _, _ = gapProblem.computeSAA(data[:n1])
        gap[i] = computeObjVal(x0) - trueObj
        # stage 2: upper confidence bound on the objective at x0 from the
        # remaining n2 observations (normal approximation, level alpha/2)
        mean, var = gapProblem.getSAAObj(data[n1:], x0)
        upper = mean + norm.ppf(1 - alpha/2) * np.sqrt(var / n2)
        # compute lower bound of optimal value at level alpha/2
        if isinstance(gapEstimator, BagProcedure):
            assert k is not None
            assert B is not None
            lower, newPointEst, newErrorVar = gapEstimator.run(gapProblem, data, alpha/2, k, B)
        elif isinstance(gapEstimator, BatchProcedure):
            assert m is not None
            lower, newPointEst, newErrorVar = gapEstimator.run(gapProblem, data, alpha/2, m)
        else:
            lower, newPointEst, newErrorVar = gapEstimator.run(gapProblem, data, alpha/2)
        # gap bound is clipped at zero since the true gap is nonnegative
        bound[i] = max(upper - lower, 0)
        pointEst[i] = newPointEst
        errorVar[i] = newErrorVar
    # BUG FIX: previously returned np.mean(newErrorVar) — the scalar from the
    # last trial only; average the accumulated per-trial errorVar array instead.
    return np.mean(bound >= gap), np.mean(bound), np.var(bound), np.mean(pointEst), np.var(pointEst), np.mean(errorVar)
def computeGapCRN(gapProblem, gapEstimator, computeObjVal, getOptSol, getSample, alpha, n1, n2, numTrial, m=None, k=None, B=None, rng=None):
    """Evaluate an upper bound on the optimality gap using common random numbers.

    Each trial draws ``n1 + n2`` observations: the first ``n1`` produce a candidate
    solution ``x0`` by SAA; the remaining ``n2`` are passed to ``gapEstimator``
    together with ``x0`` so the gap is estimated directly on common random numbers
    (no separate upper/lower Bonferroni split as in ``computeGapBC``).

    Parameters
    ----------
    gapProblem : problem object exposing ``computeSAA``.
    gapEstimator : gap estimator accepting ``x0=``; ``k``/``B`` for BagProcedure,
        ``m`` for BatchProcedure.
    computeObjVal : callable mapping a solution to its true objective value.
    getOptSol : callable returning the true optimal solution.
    getSample : callable ``(n, rng) -> data`` generating a sample of size ``n``.
    alpha : nominal significance level of the gap bound.
    n1, n2 : sample sizes for the solution and estimation stages.
    numTrial : number of independent trials.
    m, k, B : procedure-specific tuning parameters.
    rng : optional numpy Generator; a fresh default_rng() is created if None.

    Returns
    -------
    tuple of six floats:
        coverage rate P(bound >= true gap), mean and variance of the gap bound,
        mean and variance of the point estimate, and mean error-variance estimate.
    """
    if rng is None:
        rng = np.random.default_rng()
    trueSol = getOptSol()
    trueObj = computeObjVal(trueSol)
    bound = np.zeros(numTrial)
    pointEst = np.zeros(numTrial)
    errorVar = np.zeros(numTrial)
    gap = np.zeros(numTrial)
    n = n1 + n2
    for i in range(numTrial):
        # generate sample
        data = getSample(n, rng)
        # stage 1: candidate solution from the first n1 observations
        x0, _, _ = gapProblem.computeSAA(data[:n1])
        gap[i] = computeObjVal(x0) - trueObj
        # stage 2: gap estimate on the remaining n2 observations, with x0 passed
        # in so the estimator shares the same random numbers (CRN)
        if isinstance(gapEstimator, BagProcedure):
            assert k is not None
            assert B is not None
            lower, newPointEst, newErrorVar = gapEstimator.run(gapProblem, data[n1:], alpha, k, B, x0=x0)
        elif isinstance(gapEstimator, BatchProcedure):
            assert m is not None
            lower, newPointEst, newErrorVar = gapEstimator.run(gapProblem, data[n1:], alpha, m, x0=x0)
        else:
            lower, newPointEst, newErrorVar = gapEstimator.run(gapProblem, data[n1:], alpha, x0=x0)
        # gap bound is clipped at zero since the true gap is nonnegative
        bound[i] = max(lower, 0)
        pointEst[i] = newPointEst
        errorVar[i] = newErrorVar
    # BUG FIX: previously returned np.mean(newErrorVar) — the scalar from the
    # last trial only; average the accumulated per-trial errorVar array instead.
    return np.mean(bound >= gap), np.mean(bound), np.var(bound), np.mean(pointEst), np.var(pointEst), np.mean(errorVar)