added notes on where the code needs to change
kylajones committed May 31, 2024
1 parent 19b8136 commit 02ee2ff
Showing 2 changed files with 17 additions and 16 deletions.
7 changes: 6 additions & 1 deletion linfa/models/discrepancy_models.py
@@ -19,7 +19,9 @@ def __init__(self, var_inputs):
self.stdRatio = 0.05 # standard deviation ratio
self.defOut = self.solve_t(self.defParams)

def solve_t(self, cal_inputs, noise = None):
def solve_t(self, cal_inputs):

# TODO: update cal_inputs to pass other variables for inference

num_batch = len(cal_inputs)
num_vars = len(self.var_in)
@@ -43,6 +45,7 @@ def solve_t(self, cal_inputs, noise = None):
# Return coverages
return cov_frac

# TODO: update cal_inputs to include variable number of parameters not called by the model
def solve_true(self, cal_inputs):

num_batch = len(cal_inputs)
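
One way to read the cal_inputs TODOs above (for both solve_t and solve_true) is to append the inference-only quantities, such as the observation-noise standard deviation, as trailing columns of cal_inputs and strip them off before the physical solve. The following is only a standalone sketch of that column split; the two-parameter Langmuir layout and the column order are assumptions, not the repository's API.

```python
import torch

# Hypothetical layout: each row of cal_inputs holds the two physical
# Langmuir parameters followed by one inference-only column (noise std).
cal_inputs = torch.tensor([[1.0e3, -21.0e3, 0.05],
                           [1.1e3, -20.5e3, 0.04]])

num_phys = 2  # assumed number of parameters the solver actually consumes
phys_params = cal_inputs[:, :num_phys]    # would go into solve_t / solve_true
extra_params = cal_inputs[:, num_phys:]   # e.g. noise std, used only in the likelihood

print(phys_params.shape, extra_params.shape)  # torch.Size([2, 2]) torch.Size([2, 1])
```
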
@@ -80,8 +83,10 @@ def genDataFile(self, dataFileNamePrefix='observations', use_true_model=True, st

# solve model
if(use_true_model):
# TODO: update call to solve_true to include additional parameters
def_out = self.solve_true(self.defParams)
else:
# TODO: update call to solve_t to include additional parameters
def_out = self.solve_t(self.defParams)

# get standard deviation
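
For the two calls flagged in genDataFile, one option, assuming the extended layout sketched above, is to append a default noise standard deviation to defParams so that solve_t and solve_true keep receiving a single tensor. A standalone sketch of that extension (the 0.05 default mirrors stdRatio and is an assumption):

```python
import torch

# Hypothetical default parameters (1 x 2), as in the Langmuir TP15 setup
def_params = torch.tensor([[1.0e3, -21.0e3]])

# Append a default noise standard deviation so the data-generation calls
# pass the same extended column layout used during inference
def_params_ext = torch.cat([def_params, torch.tensor([[0.05]])], dim=1)

print(def_params_ext)  # shape (1, 3): [1000.0, -21000.0, 0.05]
```
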
26 changes: 11 additions & 15 deletions linfa/tests/test_no_disc_TP15_error_estimation.py
@@ -52,9 +52,12 @@ def run_test():

exp.device = torch.device('cuda:0' if torch.cuda.is_available() and not exp.no_cuda else 'cpu')


## TODO: How to update this transformation to include variance of noise
# Define transformation
trsf_info = [['tanh', -30.0, 30.0, 500.0, 1500.0],
['tanh', -30.0, 30.0, -30000.0, -15000.0]]
['tanh', -30.0, 30.0, -30000.0, -15000.0],
['identity', ]]
trsf = Transformation(trsf_info)

# Apply the transformation
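
On the transformation TODO, one common choice, assumed here rather than decided by this commit, is to let the flow operate on an unconstrained variable z and recover a strictly positive noise standard deviation as exp(z); the associated log-Jacobian contribution is simply z. A self-contained sketch of that map:

```python
import torch

# Unconstrained inputs as they would come out of the normalizing flow
z = torch.tensor([-3.0, -2.5])

# Positivity-preserving map for the noise standard deviation
noise_std = torch.exp(z)

# log |d exp(z) / dz| = z, the term that would enter the log-Jacobian bookkeeping
log_jacobian = z

print(noise_std)      # tensor([0.0498, 0.0821])
print(log_jacobian)   # tensor([-3.0000, -2.5000])
```
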
@@ -102,6 +105,7 @@ def run_test():
# Define log density
def log_density(calib_inputs, model, surrogate, transform):

# TODO: need to update calib_inputs to include the variance of the noise
# Compute transformation by log Jacobian
adjust = transform.compute_log_jacob_func(calib_inputs)

@@ -111,6 +115,7 @@ def log_density(calib_inputs, model, surrogate, transform):
# Initialize total number of variable inputs
total_var_inputs = len(model.var_in)

# TODO: update solve_t to include an additional parameter in calib_inputs
# Evaluate model response - (num_var x num_batch)
modelOut = langmuir_model.solve_t(transform.forward(calib_inputs)).t()

@@ -122,7 +127,8 @@ def log_density(calib_inputs, model, surrogate, transform):
discrepancy = surrogate.forward(model.var_in)

# Get the absolute values of the standard deviation (num_var)
std_dev = torch.mean(langmuir_model.defOut) * langmuir_model.stdRatio
# TODO : remove this and put in the likelihood function
std_dev = calib_inputs[3] #torch.mean(langmuir_model.defOut) * langmuir_model.stdRatio

# Get data - (num_var x num_obs)
Data = torch.tensor(langmuir_model.data[:,2:]).to(exp.device)
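
These TODOs move the noise standard deviation from a fixed ratio of the default output to a quantity read out of calib_inputs and used directly in the likelihood. A minimal standalone sketch of the Gaussian log-likelihood with such a calibrated std, using made-up numbers and mirroring the l1/l2/l3 decomposition of this test:

```python
import numpy as np
import torch

# Toy quantities: one batch row, four observations
model_out = torch.tensor([[0.52, 0.61, 0.70, 0.78]])  # (num_batch x num_obs)
data      = torch.tensor([0.50, 0.63, 0.69, 0.80])    # observations
std_dev   = torch.tensor([[0.05]])                    # calibrated noise std per batch row

num_obs = data.numel()

# Gaussian log-likelihood: -N/2 log(2*pi) - N log(sigma) - 1/(2 sigma^2) sum(residual^2)
l1 = -0.5 * num_obs * np.log(2.0 * np.pi)
l2 = -num_obs * torch.log(std_dev)
l3 = -0.5 / std_dev ** 2 * torch.sum((model_out - data) ** 2, dim=1, keepdim=True)

log_lik = l1 + l2 + l3  # (num_batch x 1)
print(log_lik)
```
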
@@ -142,16 +148,6 @@ def log_density(calib_inputs, model, surrogate, transform):
# - 1 / (2 * sigma^2) sum_{i = 1} ^ N (eta_i + disc_i - y_i)^2
l3 = -0.5 / (std_dev ** 2) * torch.sum((modelOut + discrepancy.t() - Data[:,loopA].unsqueeze(0))**2, dim = 1)

if(False):
print('Compare')
print('%15s %15s %15s %15s' % ('lf out','discrep','lf+discr','obs'))
for loopB in range(discrepancy.size(0)):
test1 = modelOut[0,:]
test2 = discrepancy[:,0]
test3 = Data[:,loopA]
print('%15.3f %15.3f %15.3f %15.3f' % (modelOut[0,loopB],discrepancy[loopB,0],modelOut[0,loopB]+discrepancy[loopB,0],Data[loopB,loopA]))
print('')

# Compute negative ll (num_batch x 1)
negLL = -(l1 + l2 + l3) # sum contributions
res = -negLL.reshape(calib_inputs.size(0), 1) # reshape
@@ -166,15 +162,15 @@ def log_density(calib_inputs, model, surrogate, transform):
exp.model_logdensity = lambda x: log_density(x, exp.model, exp.surrogate, exp.transform)

# Define log prior
# TODO: can we use a half-normal or truncated normal prior? How to enforce bounds?
def log_prior(calib_inputs, transform):
# Compute transformation log Jacobian
adjust = transform.compute_log_jacob_func(calib_inputs)
# Compute the calibration inputs in the physical domain
phys_inputs = transform.forward(calib_inputs)
# Define prior moments
pr_avg = torch.tensor([[1E3, -21E3, 0.0]])
pr_std = torch.tensor([[1E2, 500, ^2]])
# TODO: put a Gamma prior on the variance of the error
pr_avg = torch.tensor([[1E3, -21E3]])
pr_std = torch.tensor([[1E2, 500]])

# Eval log prior
l1 = -0.5 * calib_inputs.size(1) * np.log(2.0 * np.pi)
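
For the prior TODOs above, both candidates are available in torch.distributions: a Gamma prior on the error variance and a half-normal prior on the standard deviation. The hyperparameters below are placeholders, and if the prior is placed on sigma^2 while the flow samples a different variable, the change-of-variables Jacobian still has to be added, as this function already does through compute_log_jacob_func. A sketch:

```python
import torch

# Candidate calibrated noise standard deviations (placeholder values)
noise_std = torch.tensor([0.05, 0.10])

# Option 1: Gamma prior on the variance sigma^2 (shape/rate are placeholders)
gamma = torch.distributions.Gamma(concentration=2.0, rate=100.0)
log_prior_var = gamma.log_prob(noise_std ** 2)

# Option 2: half-normal prior directly on the standard deviation
half_normal = torch.distributions.HalfNormal(scale=0.1)
log_prior_std = half_normal.log_prob(noise_std)

print(log_prior_var)
print(log_prior_std)
```
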
