import time
import numpy as np
from pypop7.optimizers.es.lmcma import LMCMA # for upper-level optimization
from pypop7.optimizers.es.cmaes import CMAES # for lower-level optimization
from pypop7.optimizers.cc import CC
class HCC(CC):
"""Hierarchical Cooperative Co-evolution (HCC).
Parameters
----------
problem : dict
problem arguments with the following common settings (`keys`):
* 'fitness_function' - objective function to be **minimized** (`func`),
* 'ndim_problem' - dimensionality of the problem to be optimized (`int`),
* 'upper_boundary' - upper boundary of search range (`array_like`),
* 'lower_boundary' - lower boundary of search range (`array_like`).
options : dict
optimizer options with the following common settings (`keys`):
* 'max_function_evaluations' - maximum number of function evaluations (`int`, default: `np.inf`),
* 'max_runtime' - maximal runtime to be allowed (`float`, default: `np.inf`),
* 'seed_rng' - seed for random number generation, which needs to be *explicitly* set (`int`);
and with the following particular settings (`keys`):
* 'n_individuals' - number of individuals/samples, aka population size (`int`, default: `100`),
* 'sigma' - initial global step-size (`float`, default:
`np.min((problem['upper_boundary'] - problem['lower_boundary'])/3.0)`),
* 'ndim_subproblem' - dimensionality of each subproblem used for decomposition (`int`, default: `30`).
Examples
--------
Use the optimizer `HCC` to minimize the well-known test function
`Rosenbrock <http://en.wikipedia.org/wiki/Rosenbrock_function>`_:
.. code-block:: python
:linenos:
>>> import numpy
>>> from pypop7.benchmarks.base_functions import rosenbrock # function to be minimized
>>> from pypop7.optimizers.cc.hcc import HCC
>>> problem = {'fitness_function': rosenbrock, # define problem arguments
... 'ndim_problem': 2,
... 'lower_boundary': -5.0*numpy.ones((2,)),
... 'upper_boundary': 5.0*numpy.ones((2,))}
>>> options = {'max_function_evaluations': 5000, # set optimizer options
... 'seed_rng': 2022}
>>> hcc = HCC(problem, options) # initialize the optimizer class
>>> results = hcc.optimize() # run the optimization process
>>> # return the number of function evaluations and best-so-far fitness
>>> print(f"HCC: {results['n_function_evaluations']}, {results['best_so_far_y']}")
HCC: 5000, 0.0057391910865252
Regarding the correctness of this implementation, we cannot provide a code-based repeatability report, since it
combines two different papers. To our knowledge, little well-designed open-source code of `CC` is available for
non-separable black-box optimization.
Attributes
----------
n_individuals : `int`
number of individuals/samples, aka population size.
sigma : `float`
initial global step-size.
ndim_subproblem : `int`
dimensionality of subproblem for decomposition.
References
----------
Mei, Y., Omidvar, M.N., Li, X. and Yao, X., 2016.
A competitive divide-and-conquer algorithm for unconstrained large-scale black-box optimization.
ACM Transactions on Mathematical Software, 42(2), pp.1-24.
https://dl.acm.org/doi/10.1145/2791291
Gomez, F.J. and Schmidhuber, J., 2005, June.
Co-evolving recurrent neurons learn deep memory POMDPs.
In Proceedings of Annual Conference on Genetic and Evolutionary Computation (pp. 491-498). ACM.
https://dl.acm.org/doi/10.1145/1068009.1068092
"""
def __init__(self, problem, options):
CC.__init__(self, problem, options)
self.sigma = options.get('sigma') # global step size
assert self.sigma is None or self.sigma > 0.0
self.ndim_subproblem = int(options.get('ndim_subproblem', 30))
assert self.ndim_subproblem > 0
def initialize(self, arg=None):
# for lower-level CMA-ES
self.best_so_far_x = self.rng_initialization.uniform(self.initial_lower_boundary, self.initial_upper_boundary)
self.best_so_far_y = self._evaluate_fitness(self.best_so_far_x, arg)
sub_optimizers = []
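# cyclic decomposition: split the search space into contiguous blocks of at most `ndim_subproblem`
# dimensions, each optimized by its own lower-level CMA-ES instance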
for i in range(int(np.ceil(self.ndim_problem/self.ndim_subproblem))):
ii = range(i*self.ndim_subproblem, np.minimum((i + 1)*self.ndim_subproblem, self.ndim_problem))
problem = {'ndim_problem': len(ii), # cyclic decomposition
'lower_boundary': self.lower_boundary[ii],
'upper_boundary': self.upper_boundary[ii]}
if self.sigma is None:
sigma = np.min((self.upper_boundary[ii] - self.lower_boundary[ii])/3.0)
else:
sigma = self.sigma
options = {'seed_rng': self.rng_initialization.integers(np.iinfo(np.int64).max),
'sigma': sigma,
'max_runtime': self.max_runtime,
'fitness_threshold': self.fitness_threshold,
'verbose': False}
cma = CMAES(problem, options)
cma.start_time = time.time()
sub_optimizers.append(cma)
# for upper-level LM-CMA
problem = {'ndim_problem': self.ndim_problem,
'lower_boundary': self.lower_boundary,
'upper_boundary': self.upper_boundary}
if self.sigma is None:
sigma = np.min((self.upper_boundary - self.lower_boundary)/3.0)
else:
sigma = self.sigma
options = {'seed_rng': self.rng_initialization.integers(np.iinfo(np.int64).max),
'sigma': sigma,
'max_runtime': self.max_runtime,
'fitness_threshold': self.fitness_threshold,
'verbose': False}
lmcma = LMCMA(problem, options)
return sub_optimizers, self.best_so_far_y, lmcma
def optimize(self, fitness_function=None, args=None):
fitness, is_initialization = CC.optimize(self, fitness_function), True
sub_optimizers, y, lmcma = self.initialize(args)
# run upper-level LM-CMA
lm_mean, lm_x, lm_p_c, lm_s, lm_vm, lm_pm, lm_b, lm_d, lm_y = lmcma.initialize() # for upper-level LM-CMA
lmcma.start_time = time.time()
lmcma.fitness_function = self._evaluate_fitness
# run lower-level CMA-ES
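# per-subproblem CMA-ES states (one list entry per sub-optimizer): sampled solutions, distribution
# means, evolution paths, covariance matrices, their eigendecompositions, fitness values, and
# auxiliary sampling data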
x_s, mean_s, ps_s, pc_s, cm_s, ee_s, ea_s, y_s, d_s = [], [], [], [], [], [], [], [], []
while not self._check_terminations():
self._print_verbose_info(fitness, y)
y = []
# run upper-level LM-CMA
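# keep a copy of the previous generation's fitness values, needed by LM-CMA's distribution update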
lm_y_bak = np.copy(lm_y)
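# cap the upper-level budget by the number of function evaluations still remaining globally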
lmcma.max_function_evaluations = (lmcma.n_function_evaluations +
self.max_function_evaluations - self.n_function_evaluations)
lm_x, lm_y = lmcma.iterate(lm_mean, lm_x, lm_pm, lm_vm, lm_y, lm_b, args)
lm_mean, lm_p_c, lm_s, lm_vm, lm_pm, lm_b, lm_d = lmcma.update_distribution(
lm_mean, lm_x, lm_p_c, lm_s, lm_vm, lm_pm, lm_b, lm_d, lm_y, lm_y_bak)
y.extend(lm_y)
# run lower-level CMA-ES
if is_initialization:
is_initialization = False
for i, opt in enumerate(sub_optimizers):
if self._check_terminations():
break
x, mean, p_s, p_c, cm, e_ve, e_va, yy, d = opt.initialize()
x_s.append(x)
mean_s.append(mean)
ps_s.append(p_s)
pc_s.append(p_c)
cm_s.append(cm)
ee_s.append(e_ve)
ea_s.append(e_va)
y_s.append(yy)
d_s.append(d)
else:
for i, opt in enumerate(sub_optimizers):
ii = range(i*self.ndim_subproblem, np.minimum((i + 1)*self.ndim_subproblem, self.ndim_problem))
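# each sub-optimizer minimizes the fitness only over its own coordinate block `ii`, keeping all
# other coordinates fixed at the current best-so-far solution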
def sub_function(sub_x): # to define sub-function for each sub-optimizer
best_so_far_x = np.copy(self.best_so_far_x)
best_so_far_x[ii] = sub_x
return self._evaluate_fitness(best_so_far_x, args)
opt.fitness_function = sub_function
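# cap this sub-optimizer's budget by the number of function evaluations still remaining globally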
opt.max_function_evaluations = (opt.n_function_evaluations +
self.max_function_evaluations - self.n_function_evaluations)
x_s[i], y_s[i], d_s[i] = opt.iterate(x_s[i], mean_s[i], ee_s[i], ea_s[i], y_s[i], d_s[i], args)
y.extend(y_s[i])
if self._check_terminations():
break
opt._n_generations += 1
mean_s[i], ps_s[i], pc_s[i], cm_s[i], ee_s[i], ea_s[i] = opt.update_distribution(
x_s[i], ps_s[i], pc_s[i], cm_s[i], ee_s[i], ea_s[i], y_s[i], d_s[i])
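# if the overall best-so-far solution (e.g., improved by a lower-level CMA-ES) beats the upper-level
# LM-CMA's own best, re-center the LM-CMA search distribution (its mean) on it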
if self.best_so_far_y < lmcma.best_so_far_y: # to communicate between upper and lower levels
lm_mean = np.copy(self.best_so_far_x)
self._n_generations += 1
return self._collect(fitness, y)