Skip to content
New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

[WIP]: Enabling min max list #2100

Closed
wants to merge 36 commits into from
Closed
Show file tree
Hide file tree
Changes from 24 commits
Commits
Show all changes
36 commits
Select commit Hold shift + click to select a range
061058e
NSGA-II implementation with properly printing optimal solutions at th…
JunyungKim Feb 19, 2023
ab4315c
Unnecessary changes in DataSet.py have been removed.
JunyungKim Feb 19, 2023
8b7f5d3
Unnecessary changes in DataSet.py have been removed.
JunyungKim Feb 19, 2023
3fcde82
ZDT test is added.
JunyungKim Feb 22, 2023
15debe4
Optimizer.py and RavenSampled.py are updated after having regression …
JunyungKim Feb 24, 2023
64510df
minor update on Optimizer.py
JunyungKim Feb 24, 2023
b1f0c3f
temporary fix, not the way I want
Jimmy-INL Mar 11, 2023
52389c3
NSGA-II testing files (multiSum wConstraint and ZDT1) are added.
JunyungKim Mar 13, 2023
391b9c3
moving models, xmls, and trying to resolve GD after converting object…
Jimmy-INL Mar 14, 2023
da9e0dd
fixing simulated annealing to accept a list of objectives
Jimmy-INL Mar 21, 2023
1fd2175
fixing rook to compare infs
Jimmy-INL Mar 22, 2023
7cedf83
Merge branch 'junyung-Mohammad-NSGAII' into JunyungKim-junyung-Mohamm…
Jimmy-INL Mar 22, 2023
305c2ac
making one mod in RAVENSampled
Jimmy-INL Apr 1, 2023
c820eea
making self._minMax a list
Jimmy-INL Apr 3, 2023
21bf42d
erroring out if type is not in ['min', 'max']
Jimmy-INL Apr 3, 2023
e639803
updating HERON to b316024
Jimmy-INL Apr 3, 2023
12e11f0
Merge branch 'devel' into enablingMinMaxList
Jimmy-INL Apr 3, 2023
be64a4d
updating dependencies
Jimmy-INL Apr 4, 2023
ccde4d9
Merge branch 'enablingMinMaxList' of github.com:Jimmy-INL/raven into …
Jimmy-INL Apr 4, 2023
95682a1
removing a trailing space
Jimmy-INL Apr 4, 2023
c3688e2
removing windows line endings
Jimmy-INL Apr 4, 2023
e25cc37
change to unix ending
Jimmy-INL Apr 5, 2023
f0d1412
adding the zdt_model.py
Jimmy-INL Apr 5, 2023
c2ca46e
converting zdt to unix line endings
Jimmy-INL Apr 5, 2023
1f1b969
Juan's change to simulateData for the interface
Jimmy-INL Apr 6, 2023
c7aebf3
resolving diff based on different batch Size, thanks @wangcj05
Jimmy-INL Apr 6, 2023
64e97a9
converting SimulateData.py to unix line endings
Jimmy-INL Apr 8, 2023
b29661b
regolding to print all batches in MOO
Jimmy-INL Apr 11, 2023
9626956
slight mods
Jimmy-INL Apr 12, 2023
34d5cb2
regolding and reverting inf in fitness
Jimmy-INL Apr 12, 2023
e0df314
trying to add all outputs to the rlz
Jimmy-INL Apr 12, 2023
c0476f7
adding everything to bestPoint
Jimmy-INL Apr 13, 2023
81dc580
changing type==str to len(self._objectVar) == 1
Jimmy-INL Apr 13, 2023
3f27965
removing unnecessary if statement, this needs revisiting
Jimmy-INL Apr 18, 2023
facf74e
modifying reverting cycle length to its value not the inverse
Jimmy-INL Apr 20, 2023
a92049c
simulateData updating cost model.
Jun 12, 2023
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
2 changes: 1 addition & 1 deletion dependencies.xml
Original file line number Diff line number Diff line change
Expand Up @@ -96,4 +96,4 @@ Note all install methods after "main" take
<nomkl>remove</nomkl>
<numexpr>remove</numexpr>
</alternate>
</dependencies>
</dependencies>
2 changes: 1 addition & 1 deletion plugins/TEAL
Copy link
Collaborator

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

remove this update

534 changes: 411 additions & 123 deletions ravenframework/Optimizers/GeneticAlgorithm.py

Large diffs are not rendered by default.

17 changes: 11 additions & 6 deletions ravenframework/Optimizers/GradientDescent.py
Original file line number Diff line number Diff line change
Expand Up @@ -212,6 +212,7 @@ def __init__(self):
self._followerProximity = 1e-2 # distance at which annihilation can start occurring, in ?normalized? space
self._trajectoryFollowers = defaultdict(list) # map of trajectories to the trajectories following them
self._functionalConstraintExplorationLimit = 500 # number of input-space explorations allowable for functional constraints
self._canHandleMultiObjective = False # Currently Gradient Descent cannot handle multiobjective optimization
# __private
# additional methods
# register adaptive sample identification criteria
Expand Down Expand Up @@ -338,7 +339,11 @@ def _useRealization(self, info, rlz):
@ Out, None
"""
traj = info['traj']
optVal = rlz[self._objectiveVar]
# if not self._canHandleMultiObjective and len(self._objectiveVar) == 1:
# self._objectiveVar = self._objectiveVar[0]
if len(self._objectiveVar) > 1 and type(self._objectiveVar)==list:
self.raiseAnError(IOError, 'Gradient Descent does not support multiObjective optimization yet! objective variable must be a single variable for now!')
optVal = rlz[self._objectiveVar[0]]
info['optVal'] = optVal
purpose = info['purpose']
if purpose.startswith('opt'):
Expand All @@ -353,13 +358,13 @@ def _useRealization(self, info, rlz):
gradMag, gradVersor, _ = self._gradientInstance.evaluate(opt,
grads,
gradInfos,
self._objectiveVar)
self._objectiveVar[0])
self.raiseADebug(' ... gradient calculated ...')
self._gradHistory[traj].append((gradMag, gradVersor))
# get new step information
try:
newOpt, stepSize, stepInfo = self._stepInstance.step(opt,
objVar=self._objectiveVar,
objVar=self._objectiveVar[0],
optHist=self._optPointHistory[traj],
gradientHist=self._gradHistory[traj],
prevStepSize=self._stepHistory[traj],
Expand All @@ -378,7 +383,7 @@ def _useRealization(self, info, rlz):
except NoConstraintResolutionFound:
# we've tried everything, but we just can't hack it
self.raiseAMessage(f'Optimizer "{self.name}" trajectory {traj} was unable to continue due to functional or boundary constraints.')
self._closeTrajectory(traj, 'converge', 'no constraint resolution', opt[self._objectiveVar])
self._closeTrajectory(traj, 'converge', 'no constraint resolution', opt[self._objectiveVar[0]])
return

# update values if modified by constraint handling
Expand Down Expand Up @@ -598,7 +603,7 @@ def _checkAcceptability(self, traj, opt, optVal, info):
# Check acceptability
if self._optPointHistory[traj]:
old, _ = self._optPointHistory[traj][-1]
oldVal = old[self._objectiveVar]
oldVal = old[self._objectiveVar[0]]
# check if following another trajectory
if self._terminateFollowers:
following = self._stepInstance.trajIsFollowing(traj, self.denormalizeData(opt), info,
Expand Down Expand Up @@ -815,7 +820,7 @@ def _checkConvObjective(self, traj):
return False
o1, _ = self._optPointHistory[traj][-1]
o2, _ = self._optPointHistory[traj][-2]
delta = mathUtils.relativeDiff(o2[self._objectiveVar], o1[self._objectiveVar])
delta = mathUtils.relativeDiff(o2[self._objectiveVar[0]], o1[self._objectiveVar[0]])
converged = abs(delta) < self._convergenceCriteria['objective']
self.raiseADebug(self.convFormat.format(name='objective',
conv=str(converged),
Expand Down
14 changes: 9 additions & 5 deletions ravenframework/Optimizers/Optimizer.py
Original file line number Diff line number Diff line change
Expand Up @@ -78,9 +78,9 @@ def getInputSpecification(cls):
specs.description = 'Optimizers'

# objective variable
specs.addSub(InputData.parameterInputFactory('objective', contentType=InputTypes.StringType, strictMode=True,
specs.addSub(InputData.parameterInputFactory('objective', contentType=InputTypes.StringListType, strictMode=True,
printPriority=90, # more important than <variable>
descr=r"""Name of the response variable (or ``objective function'') that should be optimized
descr=r"""Name of the objective variable (or ``objective function'') that should be optimized
(minimized or maximized)."""))

# modify Sampler variable nodes
Expand All @@ -103,7 +103,8 @@ def getInputSpecification(cls):
descr=r"""seed for random number generation. Note that by default RAVEN uses an internal seed,
so this seed must be changed to observe changed behavior. \default{RAVEN-determined}""")
minMaxEnum = InputTypes.makeEnumType('MinMax', 'MinMaxType', ['min', 'max'])
minMax = InputData.parameterInputFactory('type', contentType=minMaxEnum,
minMaxList = InputTypes.StringListType()
minMax = InputData.parameterInputFactory('type', contentType=minMaxList,
descr=r"""the type of optimization to perform. \xmlString{min} will search for the lowest
\xmlNode{objective} value, while \xmlString{max} will search for the highest value.""")
init.addSub(seed)
Expand Down Expand Up @@ -161,7 +162,7 @@ def __init__(self):
# public
# _protected
self._seed = None # random seed to apply
self._minMax = 'min' # maximization or minimization?
self._minMax = ['min'] # maximization or minimization?
self._activeTraj = [] # tracks live trajectories
self._cancelledTraj = {} # tracks cancelled trajectories, and reasons
self._convergedTraj = {} # tracks converged trajectories, and values obtained
Expand Down Expand Up @@ -249,7 +250,6 @@ def handleInput(self, paramInput):
@ Out, None
"""
# the reading of variables (dist or func) and constants already happened in _readMoreXMLbase in Sampler
# objective var
self._objectiveVar = paramInput.findFirst('objective').value

# sampler init
Expand All @@ -264,6 +264,10 @@ def handleInput(self, paramInput):
minMax = init.findFirst('type')
if minMax is not None:
self._minMax = minMax.value
if len(self._minMax) != len(self._objectiveVar):
self.raiseAnError(IOError, 'type and objective must be of the same length!')
if list(set(self._minMax)-set(['min','max'])) != []:
self.raiseAnError(IOError, "type must be a list of 'min' and/or 'max'")

# variables additional reading
for varNode in paramInput.findAll('variable'):
Expand Down
209 changes: 150 additions & 59 deletions ravenframework/Optimizers/RavenSampled.py
Original file line number Diff line number Diff line change
Expand Up @@ -300,8 +300,16 @@ def localFinalizeActualSampling(self, jobObject, model, myInput):
# # testing suggests no big deal on smaller problem
# the sign of the objective function is flipped in case we do maximization
# so get the correct-signed value into the realization
if self._minMax == 'max':
rlz[self._objectiveVar] *= -1

if 'max' in self._minMax:
if not self._canHandleMultiObjective and len(self._objectiveVar) == 1:
rlz[self._objectiveVar[0]] *= -1
elif type(self._objectiveVar) == list:
for i in range(len(self._objectiveVar)):
if self._minMax[i] == 'max':
rlz[self._objectiveVar[i]] *= -1
else:
rlz[self._objectiveVar] *= -1
# TODO FIXME let normalizeData work on an xr.DataSet (batch) not just a dictionary!
rlz = self.normalizeData(rlz)
self._useRealization(info, rlz)
Expand All @@ -312,57 +320,127 @@ def finalizeSampler(self, failedRuns):
@ In, failedRuns, list, runs that failed as part of this sampling
@ Out, None
"""
# get and print the best trajectory obtained
bestValue = None
bestTraj = None
bestPoint = None
s = -1 if self._minMax == 'max' else 1
# check converged trajectories
self.raiseAMessage('*' * 80)
self.raiseAMessage('Optimizer Final Results:')
self.raiseADebug('')
self.raiseADebug(' - Trajectory Results:')
self.raiseADebug(' TRAJ STATUS VALUE')
statusTemplate = ' {traj:2d} {status:^11s} {val: 1.3e}'
# print cancelled traj
for traj, info in self._cancelledTraj.items():
val = info['value']
status = info['reason']
self.raiseADebug(statusTemplate.format(status=status, traj=traj, val=s * val))
# check converged traj
for traj, info in self._convergedTraj.items():
if not self._canHandleMultiObjective or len(self._objectiveVar) == 1:
# get and print the best trajectory obtained
bestValue = None
bestTraj = None
bestPoint = None
s = -1 if 'max' in self._minMax else 1
# check converged trajectories
self.raiseAMessage('*' * 80)
self.raiseAMessage('Optimizer Final Results:')
self.raiseADebug('')
self.raiseADebug(' - Trajectory Results:')
self.raiseADebug(' TRAJ STATUS VALUE')
statusTemplate = ' {traj:2d} {status:^11s} {val: 1.3e}'
# print cancelled traj
for traj, info in self._cancelledTraj.items():
val = info['value']
status = info['reason']
self.raiseADebug(statusTemplate.format(status=status, traj=traj, val=s * val))
# check converged traj
for traj, info in self._convergedTraj.items():
opt = self._optPointHistory[traj][-1][0]
val = info['value']
self.raiseADebug(statusTemplate.format(status='converged', traj=traj, val=s * val))
if bestValue is None or val < bestValue:
bestTraj = traj
bestValue = val
# further check active unfinished trajectories
# FIXME why should there be any active, unfinished trajectories when we're cleaning up sampler?
traj = 0 # FIXME why only 0?? what if it's other trajectories that are active and unfinished?
# sanity check: if there's no history (we never got any answers) then report rather than crash
if len(self._optPointHistory[traj]) == 0:
self.raiseAnError(RuntimeError, f'There is no optimization history for traj {traj}! ' +
'Perhaps the Model failed?')
opt = self._optPointHistory[traj][-1][0]
val = info['value']
self.raiseADebug(statusTemplate.format(status='converged', traj=traj, val=s * val))
val = opt[self._objectiveVar[0]]
self.raiseADebug(statusTemplate.format(status='active', traj=traj, val=s * val))
if bestValue is None or val < bestValue:
bestTraj = traj
bestValue = val
# further check active unfinished trajectories
# FIXME why should there be any active, unfinished trajectories when we're cleaning up sampler?
traj = 0 # FIXME why only 0?? what if it's other trajectories that are active and unfinished?
      # sanity check: if there's no history (we never got any answers) then report rather than crash
if len(self._optPointHistory[traj]) == 0:
self.raiseAnError(RuntimeError, f'There is no optimization history for traj {traj}! ' +
'Perhaps the Model failed?')
opt = self._optPointHistory[traj][-1][0]
val = opt[self._objectiveVar]
self.raiseADebug(statusTemplate.format(status='active', traj=traj, val=s * val))
if bestValue is None or val < bestValue:
bestValue = val
bestTraj = traj
bestOpt = self.denormalizeData(self._optPointHistory[bestTraj][-1][0])
bestPoint = dict((var, bestOpt[var]) for var in self.toBeSampled)
self.raiseADebug('')
self.raiseAMessage(' - Final Optimal Point:')
finalTemplate = ' {name:^20s} {value: 1.3e}'
finalTemplateInt = ' {name:^20s} {value: 3d}'
self.raiseAMessage(finalTemplate.format(name=self._objectiveVar, value=s * bestValue))
self.raiseAMessage(finalTemplateInt.format(name='trajID', value=bestTraj))
for var, val in bestPoint.items():
self.raiseAMessage(finalTemplate.format(name=var, value=val))
self.raiseAMessage('*' * 80)
# write final best solution to soln export
self._updateSolutionExport(bestTraj, self.normalizeData(bestOpt), 'final', 'None')
bestTraj = traj
bestOpt = self.denormalizeData(self._optPointHistory[bestTraj][-1][0])
bestPoint = dict((var, bestOpt[var]) for var in self.toBeSampled)
self.raiseADebug('')
self.raiseAMessage(' - Final Optimal Point:')
finalTemplate = ' {name:^20s} {value: 1.3e}'
finalTemplateInt = ' {name:^20s} {value: 3d}'
self.raiseAMessage(finalTemplate.format(name=self._objectiveVar[0], value=s * bestValue))
self.raiseAMessage(finalTemplateInt.format(name='trajID', value=bestTraj))
for var, val in bestPoint.items():
self.raiseAMessage(finalTemplate.format(name=var, value=val))
self.raiseAMessage('*' * 80)
# write final best solution to soln export
self._updateSolutionExport(bestTraj, self.normalizeData(bestOpt), 'final', 'None')
else:
# get and print the best trajectory obtained
bestValue = None
bestTraj = None
bestPoint = None
s = -1 if self._minMax == 'max' else 1
# check converged trajectories
self.raiseAMessage('*' * 80)
self.raiseAMessage('Optimizer Final Results:')
self.raiseADebug('')
self.raiseADebug(' - Trajectory Results:')
self.raiseADebug(' TRAJ STATUS VALUE')
statusTemplate = ' {traj:2d} {status:^11s} {val: 1.3e}'
statusTemplate_multi = ' {traj:2d} {status:^11s} {val1: ^11s} {val2: ^11s}'

# print cancelled traj
for traj, info in self._cancelledTraj.items():
val = info['value']
status = info['reason']
self.raiseADebug(statusTemplate.format(status=status, traj=traj, val=s * val))
# check converged traj
for traj, info in self._convergedTraj.items():
opt = self._optPointHistory[traj][-1][0]
val = info['value']
self.raiseADebug(statusTemplate.format(status='converged', traj=traj, val=s * val))
if bestValue is None or val < bestValue:
bestTraj = traj
bestValue = val
# further check active unfinished trajectories
# FIXME why should there be any active, unfinished trajectories when we're cleaning up sampler?
traj = 0 # FIXME why only 0?? what if it's other trajectories that are active and unfinished?
# sanity check: if there's no history (we never got any answers) then report rather than crash
if len(self._optPointHistory[traj]) == 0:
self.raiseAnError(RuntimeError, f'There is no optimization history for traj {traj}! ' +
'Perhaps the Model failed?')

if type(self._objectiveVar) == str:
opt = self._optPointHistory[traj][-1][0]
val = opt[self._objectiveVar]
self.raiseADebug(statusTemplate.format(status='active', traj=traj, val=s * val))
if bestValue is None or val < bestValue:
bestValue = val
bestTraj = traj
bestOpt = self.denormalizeData(self._optPointHistory[bestTraj][-1][0])
bestPoint = dict((var, bestOpt[var]) for var in self.toBeSampled)
self.raiseADebug('')
self.raiseAMessage(' - Final Optimal Point:')
finalTemplate = ' {name:^20s} {value: 1.3e}'
finalTemplateInt = ' {name:^20s} {value: 3d}'
# self.raiseAMessage(finalTemplate.format(name=self._objectiveVar, value=s * bestValue))
self.raiseAMessage(finalTemplateInt.format(name='trajID', value=bestTraj))
for var, val in bestPoint.items():
self.raiseAMessage(finalTemplate.format(name=var, value=val))
self.raiseAMessage('*' * 80)
# write final best solution to soln export
self._updateSolutionExport(bestTraj, self.normalizeData(bestOpt), 'final', 'None')
else:
for i in range(len(self._optPointHistory[traj][-1][0][self._objectiveVar[0]])):
opt = self._optPointHistory[traj][-1][0]
key = list(opt.keys())
val = [item[i] for item in opt.values()]
optElm = {key[a]: val[a] for a in range(len(key))}
optVal = [(-1*(self._minMax[b]=='max')+(self._minMax[b]=='min'))*optElm[self._objectiveVar[b]] for b in range(len(self._objectiveVar))]

bestTraj = traj
bestOpt = self.denormalizeData(optElm)
bestPoint = dict((var, bestOpt[var]) for var in self.toBeSampled)

self._updateSolutionExport(bestTraj, self.normalizeData(bestOpt), 'final', 'None')

def flush(self):
"""
Expand Down Expand Up @@ -498,10 +576,13 @@ def _handleImplicitConstraints(self, previous):
@ Out, accept, bool, whether point was satisfied implicit constraints
"""
normed = copy.deepcopy(previous)
oldVal = normed[self._objectiveVar]
normed.pop(self._objectiveVar, oldVal)
if type(self._objectiveVar) == str:
oldVal = normed[self._objectiveVar]
else:
oldVal = normed[self._objectiveVar[0]]
normed.pop(self._objectiveVar[0], oldVal)
denormed = self.denormalizeData(normed)
denormed[self._objectiveVar] = oldVal
denormed[self._objectiveVar[0]] = oldVal
accept = self._checkImpFunctionalConstraints(denormed)

return accept
Expand Down Expand Up @@ -569,9 +650,12 @@ def _resolveNewOptPoint(self, traj, rlz, optVal, info):
# TODO could we ever use old rerun gradients to inform the gradient direction as well?
self._rerunsSinceAccept[traj] += 1
N = self._rerunsSinceAccept[traj] + 1
oldVal = self._optPointHistory[traj][-1][0][self._objectiveVar]
if type(self._objectiveVar) == str:
oldVal = self._optPointHistory[traj][-1][0][self._objectiveVar]
else:
oldVal = self._optPointHistory[traj][-1][0][self._objectiveVar[0]]
newAvg = ((N-1)*oldVal + optVal) / N
self._optPointHistory[traj][-1][0][self._objectiveVar] = newAvg
self._optPointHistory[traj][-1][0][self._objectiveVar[0]] = newAvg
else:
self.raiseAnError(f'Unrecognized acceptability: "{acceptable}"')

Expand Down Expand Up @@ -635,10 +719,17 @@ def _updateSolutionExport(self, traj, rlz, acceptable, rejectReason):
'rejectReason': rejectReason
})
# optimal point input and output spaces
objValue = rlz[self._objectiveVar]
if self._minMax == 'max':
objValue *= -1
toExport[self._objectiveVar] = objValue
if len(self._objectiveVar) == 1: # Single Objective Optimization
objValue = rlz[self._objectiveVar[0]]
if 'max' in self._minMax:
objValue *= -1
toExport[self._objectiveVar[0]] = objValue
else: # Multi Objective Optimization
for i in range(len(self._objectiveVar)):
objValue = rlz[self._objectiveVar[i]]
if self._minMax[i] == 'max':
objValue *= -1
toExport[self._objectiveVar[i]] = objValue
toExport.update(self.denormalizeData(dict((var, rlz[var]) for var in self.toBeSampled)))
# constants and functions
toExport.update(self.constants)
Expand Down
Loading