# -*- coding: utf-8 -*-
"""CMA-ES (evolution strategy), the main sub-module of `cma` implementing
in particular `CMAEvolutionStrategy`, `fmin2` and further ``fmin_*``
functions.
"""
# TODO (mainly done): remove separable CMA within the code (keep as sampler only)
# TODO (low): implement a (deep enough) copy-constructor for class
# CMAEvolutionStrategy to repeat the same step in different
# configurations for online-adaptation of meta parameters
# TODO (complex): reconsider geno-pheno transformation. Can it be a
#       separate module that operates in between optimizer and objective?
# Can we still propagate a repair of solutions to the optimizer?
# A repair-only internal geno-pheno transformation is less
# problematic, given the repair is idempotent. In any case, consider
# passing a repair function in the interface instead.
# How about gradients (should be fine)?
#       Must be *thoroughly* explored before switching, in particular the
# order of application of repair and other transformations, as the
# internal repair can only operate on the internal representation.
# TODO: split tell into a variable transformation part and the "pure"
# functionality
# usecase: es.tell_geno(X, [func(es.pheno(x)) for x in X])
# genotypic repair is not part of tell_geno
# TODO: self.opts['mindx'] is checked without sigma_vec, which is a little
# imprecise. Cheap solution: project sigma_vec on smallest eigenvector?
# TODO: class _CMAStopDict implementation looks way too complicated,
# design generically from scratch?
# TODO: separate display and logging options, those CMAEvolutionStrategy
# instances don't use themselves (probably all?)
# TODO: check scitools.easyviz and how big the adaptation would be
# TODO: separate initialize==reset_state from __init__
# TODO: keep best ten solutions
# TODO: implement constraints handling
# TODO: eigh(): thorough testing would not hurt
# TODO: (partly done) apply style guide
# WON'T FIX ANYTIME SOON (done within fmin): implement bipop in a separate
# algorithm as meta portfolio algorithm of IPOP and a local restart
# option to be implemented
# in fmin (e.g. option restart_mode in [IPOP, local])
# DONE: extend function unitdoctest, or use unittest?
# DONE: copy_always optional parameter does not make much sense,
# as one can always copy the input argument first. Similar,
# copy_if_changed should be keep_arg_unchanged or just copy
# DONE: expand/generalize to dynamically read "signals" from a file
# see import ConfigParser, DictFromTagsInString,
# function read_properties, or myproperties.py (to be called after
# tell()), signals_filename, if given, is parsed in stop()
# DONE: switch to np.loadtxt
#
# typical parameters in scipy.optimize: disp, xtol, ftol, maxiter, maxfun,
# callback=None
#             maxfev, diag (A sequence of N positive entries that serve as
# scale factors for the variables.)
# full_output -- non-zero to return all optional outputs.
# If xtol < 0.0, xtol is set to sqrt(machine_precision)
#           'infodict' -- a dictionary of optional outputs with the keys:
# 'nfev': the number of function calls...
#
# see eg fmin_powell
# typical returns
# x, f, dictionary d
# (xopt, {fopt, gopt, Hopt, func_calls, grad_calls, warnflag},
# <allvecs>)
#
# changes:
# 20/04/xx: no negative weights for injected solutions
# 16/10/xx: versatile options are read from signals_filename
# RecombinationWeights refined and work without numpy
# new options: recombination_weights, timeout,
# integer_variable with basic integer handling
# step size parameters removed from CMAEvolutionStrategy class
# ComposedFunction class implements function composition
# 16/10/02: copy_always parameter is gone everywhere, use
# np.array(., copy=True)
# 16/xx/xx: revised doctests with doctest: +ELLIPSIS option, test call(s)
# moved all test related to test.py, is quite clean now
# "python -m cma.test" is how it works now
# 16/xx/xx: cleaning up, all kind of larger changes.
# 16/xx/xx: single file cma.py broken into pieces such that cma has now
# become a package.
# 15/02/xx: (v1.2) sampling from the distribution refactored
# in class Sampler which also does the (natural gradient)
# update. New AdaptiveDecoding class for sigma_vec.
# 15/01/26: bug fix in multiplyC with sep/diagonal option
# 15/01/20: larger condition numbers for C realized by using tf_pheno
# of GenoPheno attribute gp.
# 15/01/19: injection method, first implementation, short injections
# and long injections with good fitness need to be addressed yet
# 15/01/xx: _prepare_injection_directions to simplify/centralize injected
# solutions from mirroring and TPA
# 14/12/26: bug fix in correlation_matrix computation if np.diag is a view
# 14/12/06: meta_parameters now only as annotations in ## comments
# 14/12/03: unified use of base class constructor call, now always
# super(ThisClass, self).__init__(args_for_base_class_constructor)
# 14/11/29: termination via "stop now" in file cmaes_signals.par
# 14/11/28: bug fix initialization of C took place before setting the
# seed. Now in some dimensions (e.g. 10) results are (still) not
#            deterministic due to np.linalg.eigh, in some dimensions (<9, 12)
# they seem to be deterministic.
# 14/11/23: bipop option integration, contributed by Petr Baudis
# 14/09/30: initial_elitism option added to fmin
# 14/08/1x: developing fitness wrappers in FFWrappers class
# 14/08/xx: return value of OOOptimizer.optimize changed to self.
# CMAOptions now need to uniquely match an *initial substring*
# only (via method corrected_key).
# Bug fix in CMAEvolutionStrategy.stop: termination conditions
# are now recomputed iff check and self.countiter > 0.
# Doc corrected that self.gp.geno _is_ applied to x0
#            Vast reorganization/modularization/improvements of plotting
# 14/08/01: bug fix to guaranty pos. def. in active CMA
# 14/06/04: gradient of f can now be used with fmin and/or ask
# 14/05/11: global rcParams['font.size'] not permanently changed anymore,
# a little nicer annotations for the plots
# 14/05/07: added method result_pretty to pretty print optimization result
# 14/05/06: associated show() everywhere with ion() which should solve the
# blocked terminal problem
# 14/05/05: all instances of "unicode" removed (was incompatible to 3.x)
# 14/05/05: replaced type(x) == y with isinstance(x, y), reorganized the
# comments before the code starts
# 14/05/xx: change the order of kwargs of OOOptimizer.optimize,
# remove prepare method in AdaptSigma classes, various changes/cleaning
# 14/03/01: bug fix BoundaryHandlerBase.has_bounds didn't check lower bounds correctly
# bug fix in BoundPenalty.repair len(bounds[0]) was used instead of len(bounds[1])
# bug fix in GenoPheno.pheno, where x was not copied when only boundary-repair was applied
# 14/02/27: bug fixed when BoundPenalty was combined with fixed variables.
# 13/xx/xx: step-size adaptation becomes a class derived from CMAAdaptSigmaBase,
# to make testing different adaptation rules (much) easier
# 12/12/14: separated CMAOptions and arguments to fmin
# 12/10/25: removed useless check_points from fmin interface
# 12/10/17: bug fix printing number of infeasible samples, moved not-in-use methods
# timesCroot and divCroot to the right class
# 12/10/16 (0.92.00): various changes commit: bug bound[0] -> bounds[0], more_to_write fixed,
# sigma_vec introduced, restart from elitist, trace normalization, max(mu,popsize/2)
# is used for weight calculation.
# 12/07/23: (bug:) BoundPenalty.update respects now genotype-phenotype transformation
# 12/07/21: convert value True for noisehandling into 1 making the output compatible
# 12/01/30: class Solution and more old stuff removed r3101
# 12/01/29: class Solution is deprecated, GenoPheno and SolutionDict do the job (v0.91.00, r3100)
# 12/01/06: CMA_eigenmethod option now takes a function (integer still works)
# 11/09/30: flat fitness termination checks also history length
# 11/09/30: elitist option (using method clip_or_fit_solutions)
# 11/09/xx: method clip_or_fit_solutions for check_points option for all sorts of
# injected or modified solutions and even reliable adaptive encoding
# 11/08/19: fixed: scaling and typical_x type clashes 1 vs array(1) vs ones(dim) vs dim * [1]
# 11/07/25: fixed: fmin wrote first and last line even with verb_log==0
# fixed: method settableOptionsList, also renamed to versatileOptions
# default seed depends on time now
# 11/07/xx (0.9.92): added: active CMA, selective mirrored sampling, noise/uncertainty handling
# fixed: output argument ordering in fmin, print now only used as function
# removed: parallel option in fmin
# 11/07/01: another try to get rid of the memory leak by replacing self.unrepaired = self[:]
# 11/07/01: major clean-up and reworking of abstract base classes and of the documentation,
# also the return value of fmin changed and attribute stop is now a method.
# 11/04/22: bug-fix: option fixed_variables in combination with scaling
# 11/04/21: stopdict is not a copy anymore
# 11/04/15: option fixed_variables implemented
# 11/03/23: bug-fix boundary update was computed even without boundaries
# 11/03/12: bug-fix of variable annotation in plots
# 11/02/05: work around a memory leak in numpy
# 11/02/05: plotting routines improved
# 10/10/17: cleaning up, now version 0.9.30
# 10/10/17: bug-fix: return values of fmin now use phenotype (relevant
# if input scaling_of_variables is given)
# 08/10/01: option evalparallel introduced,
# bug-fix for scaling being a vector
# 08/09/26: option CMAseparable becomes CMA_diagonal
# 08/10/18: some names change, test functions go into a class
# 08/10/24: more refactoring
# 10/03/09: upper bound np.exp(min(1,...)) for step-size control
from __future__ import (absolute_import, division, print_function,
) # unicode_literals, with_statement)
import collections # deque since Python 2.4, defaultdict since 2.5, namedtuple() since 2.6
# from builtins import ...
from .utilities.python3for2 import range # redefine range in Python 2
import sys
import os
import time # not really essential
import warnings # catch numpy warnings
import ast # for literal_eval
import numpy as np
# arange, cos, size, eye, inf, dot, floor, outer, zeros, linalg.eigh,
# sort, argsort, random, ones,...
from numpy import inf, array
# to access the built-in sum fct: ``__builtins__.sum`` or ``del sum``
# removes the imported sum and recovers the shadowed built-in
# import logging
# logging.basicConfig(level=logging.INFO) # only works before logging is used
# logging.info('message') # prints INFO:root:message on red background
# logger = logging.getLogger(__name__) # should not be done during import
# logger.info('message') # prints INFO:cma...:message on red background
# see https://fangpenlin.com/posts/2012/08/26/good-logging-practice-in-python/
from . import interfaces
from . import options_parameters
from . import transformations
from . import optimization_tools as ot
from . import sampler
from .utilities import utils
from .options_parameters import CMAOptions, cma_default_options
from . import constraints_handler as _constraints_handler
from cma import fitness_models as _fitness_models
from .constraints_handler import BoundNone, BoundPenalty, BoundTransform, AugmentedLagrangian
from .integer_centering import IntegerCentering
from .logger import CMADataLogger # , disp, plot
from .utilities.utils import BlancClass as _BlancClass
from .utilities.utils import rglen #, global_verbosity
from .utilities.utils import seval as eval
from .utilities.utils import SolutionDict as _SolutionDict
from .utilities.math import Mh
from .sigma_adaptation import *
from . import restricted_gaussian_sampler as _rgs
_where = np.nonzero # to make pypy work, this is how where is used here anyway
del division, print_function, absolute_import #, unicode_literals, with_statement
class InjectionWarning(UserWarning):
"""Injected solutions are not passed to tell as expected"""
def _callable_to_list(c):
"""A `callable` is wrapped and `None` defaults to the empty list.
All other values, in particular `False`, remain unmodified on return.
"""
if c is None:
return []
elif callable(c):
return [c]
return c
def _pass(*args, **kwargs):
"""a callable that does nothing and return args[0] in case"""
return args[0] if args else None
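
# Illustrative sketch, not part of the library API: the helper below only
# demonstrates the behavior of `_callable_to_list` and `_pass` as defined
# above. The name `_demo_callable_helpers` is invented for this example and
# the function is never called by the module itself.
def _demo_callable_helpers():
    assert _callable_to_list(None) == []                 # None defaults to the empty list
    assert _callable_to_list(len) == [len]               # a callable is wrapped in a list
    assert _callable_to_list([len, max]) == [len, max]   # other values pass through unchanged
    assert _callable_to_list(False) is False             # in particular `False` stays `False`
    assert _pass(3, 4, key=5) == 3                       # returns the first positional argument
    assert _pass() is None                               # or None if none was given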
# use_archives uses collections
use_archives = sys.version_info[0] >= 3 or sys.version_info[1] >= 6
# use_archives = False # on False some unit tests fail
"""speed up for very large population size. `use_archives` prevents the
need for an inverse gp-transformation, relies on collections module,
not sure what happens if set to ``False``. """
class _CMASolutionDict_functional(_SolutionDict):
def __init__(self, *args, **kwargs):
# _SolutionDict.__init__(self, *args, **kwargs)
super(_CMASolutionDict_functional, self).__init__(*args, **kwargs)
self.last_solution_index = 0
# TODO: insert takes 30% of the overall CPU time, mostly in def key()
# with about 15% of the overall CPU time
def insert(self, key, geno=None, iteration=None, fitness=None,
value=None, cma_norm=None):
"""insert an entry with key ``key`` and value
``value if value is not None else {'geno':key}`` and
``self[key]['kwarg'] = kwarg if kwarg is not None`` for the further kwargs.
"""
# archive returned solutions, first clean up archive
if iteration is not None and iteration > self.last_iteration and (iteration % 10) < 1:
self.truncate(300, iteration - 3)
elif value is not None and value.get('iteration'):
iteration = value['iteration']
if (iteration % 10) < 1:
self.truncate(300, iteration - 3)
self.last_solution_index += 1
if value is not None:
try:
iteration = value['iteration']
except Exception:
pass
if iteration is not None:
if iteration > self.last_iteration:
self.last_solution_index = 0
self.last_iteration = iteration
else:
iteration = self.last_iteration + 0.5 # a hack to get a somewhat reasonable value
if value is not None:
self[key] = value
else:
self[key] = {'pheno': key}
if geno is not None:
self[key]['geno'] = geno
if iteration is not None:
self[key]['iteration'] = iteration
if fitness is not None:
self[key]['fitness'] = fitness
if cma_norm is not None:
self[key]['cma_norm'] = cma_norm
return self[key]
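# Example entry created by `insert` (illustrative values): calling
# ``archive.insert(x, geno=g, iteration=7, fitness=0.3)`` with no ``value``
# stores ``{'pheno': x, 'geno': g, 'iteration': 7, 'fitness': 0.3}`` under
# the key ``x`` and returns that dictionary.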
class _CMASolutionDict_empty(dict):
"""a hack to get most code examples running"""
def insert(self, *args, **kwargs):
pass
def get(self, key):
return None
def __getitem__(self, key):
return None
def __setitem__(self, key, value):
pass
_CMASolutionDict = _CMASolutionDict_functional if use_archives else _CMASolutionDict_empty
# _CMASolutionDict = _CMASolutionDict_empty
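
# Minimal sketch, not part of the library API: when archives are disabled,
# the empty variant silently drops inserts and returns `None` for any lookup,
# so code written against the functional archive still runs. The name
# `_demo_empty_solution_dict` is invented for illustration only.
def _demo_empty_solution_dict():
    archive = _CMASolutionDict_empty()
    archive.insert([1.0, 2.0], fitness=3.0)   # silently ignored
    assert archive.get([1.0, 2.0]) is None
    assert archive[[1.0, 2.0]] is None        # __getitem__ always returns None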
# ____________________________________________________________
# ____________________________________________________________
# check out built-in package abc: class ABCMeta, abstractmethod, abstractproperty...
# see http://docs.python.org/whatsnew/2.6.html PEP 3119 abstract base classes
#
_debugging = False # not in use
_new_injections = True
_assertions_quadratic = True # issue warnings
_assertions_cubic = True
_depreciated = True
class CMAEvolutionStrategyResult(collections.namedtuple(
'CMAEvolutionStrategyResult', [
'xbest',
'fbest',
'evals_best',
'evaluations',
'iterations',
'xfavorite',
'stds',
'stop',
])):
"""A results tuple from `CMAEvolutionStrategy` property ``result``.
This tuple contains in the given position and as attribute
- 0 ``xbest`` best solution evaluated
- 1 ``fbest`` objective function value of best solution
- 2 ``evals_best`` evaluation count when ``xbest`` was evaluated
- 3 ``evaluations`` evaluations overall done
- 4 ``iterations``
- 5 ``xfavorite`` distribution mean in "phenotype" space, to be
considered as current best estimate of the optimum
- 6 ``stds`` effective standard deviations, can be used to
compute a lower bound on the expected coordinate-wise distance
to the true optimum, which is (very) approximately stds[i] *
dimension**0.5 / min(mueff, dimension) / 1.5 / 5 ~ std_i *
dimension**0.5 / min(popsize / 2, dimension) / 5, where
dimension = CMAEvolutionStrategy.N and mueff =
CMAEvolutionStrategy.sp.weights.mueff ~ 0.3 * popsize.
- 7 ``stop`` termination conditions in a dictionary
The penalized best solution of the last completed iteration can be
accessed via attribute ``pop_sorted[0]`` of `CMAEvolutionStrategy`
and the respective objective function value via ``fit.fit[0]``.
Details:
- This class is of purely declarative nature and for providing
this docstring. It does not provide any further functionality.
- ``list(fit.fit).find(0)`` is the index of the first sampled
solution of the last completed iteration in ``pop_sorted``.
"""
class _CMAEvolutionStrategyResult(tuple):
"""A results tuple from `CMAEvolutionStrategy` property ``result``.
This tuple contains in the given position
- 0 best solution evaluated, ``xbest``
- 1 objective function value of best solution, ``f(xbest)``
- 2 evaluation count when ``xbest`` was evaluated
- 3 evaluations overall done
- 4 iterations
- 5 distribution mean in "phenotype" space, to be considered as
current best estimate of the optimum
- 6 effective standard deviations, give a lower bound on the expected
coordinate-wise distance to the true optimum of (very) approximately
std_i * dimension**0.5 / min(mueff, dimension) / 1.2 / 5
~ std_i * dimension**0.5 / min(popsize / 0.4, dimension) / 5, where
mueff = CMAEvolutionStrategy.sp.weights.mueff ~ 0.3 * popsize.
The penalized best solution of the last completed iteration can be
accessed via attribute ``pop_sorted[0]`` of `CMAEvolutionStrategy`
and the respective objective function value via ``fit.fit[0]``.
Details:
- This class is of purely declarative nature and for providing this
docstring. It does not provide any further functionality.
- ``list(fit.fit).find(0)`` is the index of the first sampled solution
of the last completed iteration in ``pop_sorted``.
""" # here starts the code: (beating the code folding glitch)
# remark: a tuple is immutable, hence we cannot change it anymore
# in __init__. This would work if we inherited from a `list`.
@staticmethod
def _generate(self):
"""return a results tuple of type `CMAEvolutionStrategyResult`.
`_generate` is a surrogate for the ``__init__`` method, which
cannot be used to initialize the immutable `tuple` super class.
"""
return _CMAEvolutionStrategyResult(
self.best.get() + ( # (x, f, evals) triple
self.countevals,
self.countiter,
self.gp.pheno(self.mean[:], into_bounds=self.boundary_handler.repair),
self.stds)) #
class CMAEvolutionStrategy(interfaces.OOOptimizer):
"""CMA-ES stochastic optimizer class with ask-and-tell interface.
Calling Sequences
=================
- ``es = CMAEvolutionStrategy(x0, sigma0)``
- ``es = CMAEvolutionStrategy(x0, sigma0, opts)``
- ``es = CMAEvolutionStrategy(x0, sigma0).optimize(objective_fct)``
- ::
res = CMAEvolutionStrategy(x0, sigma0,
opts).optimize(objective_fct).result
Arguments
=========
`x0`
initial solution, starting point. `x0` is given as "phenotype"
which means, if::
opts = {'transformation': [transform, inverse]}
is given and ``inverse is None``, the initial mean is not
consistent with `x0` in that ``transform(mean)`` does not
equal `x0` unless ``transform(mean)`` equals ``mean``.
`sigma0`
initial standard deviation. The problem variables should
have been scaled, such that a single standard deviation
on all variables is useful and the optimum is expected to
lie within about `x0` +- ``3*sigma0``. Often one wants to
check for solutions close to the initial point. This allows,
for example, for an easier check of consistency of the
objective function and its interfacing with the optimizer.
In this case, a much smaller `sigma0` is advisable.
`opts`
options, a dictionary with optional settings,
see class `CMAOptions`.
Main interface / usage
======================
The interface is inherited from the generic `OOOptimizer`
class (see also there). An object instance is generated from::
es = cma.CMAEvolutionStrategy(8 * [0.5], 0.2)
The least verbose interface is via the optimize method::
es.optimize(objective_func)
res = es.result
More verbosely, the optimization is done using the
methods `stop`, `ask`, and `tell`::
while not es.stop():
solutions = es.ask()
es.tell(solutions, [cma.ff.rosen(s) for s in solutions])
es.disp()
es.result_pretty()
where `ask` delivers new candidate solutions and `tell` updates
the `optim` instance by passing the respective function values
(the objective function `cma.ff.rosen` can be replaced by any
properly defined objective function, see `cma.ff` for more
examples).
To change an option, for example a termination condition to
continue the optimization, call::
es.opts.set({'tolfacupx': 1e4})
The class `CMAEvolutionStrategy` also provides::
(solutions, func_values) = es.ask_and_eval(objective_func)
and an entire optimization can also be written like::
while not es.stop():
es.tell(*es.ask_and_eval(objective_func))
Apart from termination criteria, in CMA-ES only the ranks of the
`func_values` are relevant.
Attributes and Properties
=========================
- `inputargs`: passed input arguments
- `inopts`: passed options
- `opts`: actually used options, some of them can be changed any
time via ``opts.set``, see class `CMAOptions`
- `popsize`: population size lambda, number of candidate
solutions returned by `ask` ()
- `logger`: a `CMADataLogger` instance utilized by `optimize`
Examples
========
Super-short example, with output shown:
>>> import cma
>>> # construct an object instance in 4-D, sigma0=1:
>>> es = cma.CMAEvolutionStrategy(4 * [1], 1, {'seed':234})
... # doctest: +ELLIPSIS
(4_w,8)-aCMA-ES (mu_w=2.6,w_1=52%) in dimension 4 (seed=234...)
and optimize the ellipsoid function
>>> es.optimize(cma.ff.elli, verb_disp=1) # doctest: +ELLIPSIS
Iterat #Fevals function value axis ratio sigma min&max std t[m:s]
1 8 2.09...
>>> assert len(es.result) == 8, es.result
>>> assert es.result[1] < 1e-9, es.result
The optimization loop can also be written explicitly:
>>> es = cma.CMAEvolutionStrategy(4 * [1], 1) # doctest: +ELLIPSIS
(4_w,8)-aCMA-ES (mu_w=2.6,w_1=52%) in dimension 4 (seed=...
>>> while not es.stop():
... X = es.ask()
... es.tell(X, [cma.ff.elli(x) for x in X])
... es.disp() # doctest: +ELLIPSIS
Iterat #Fevals function value axis ratio sigma min&max std t[m:s]
1 8 ...
achieving the same result as above.
An example with lower bounds (at zero) and handling infeasible
solutions:
>>> import numpy as np
>>> es = cma.CMAEvolutionStrategy(10 * [0.2], 0.5,
... {'bounds': [0, np.inf]}) #doctest: +ELLIPSIS
(5_w,...
>>> while not es.stop():
... fit, X = [], []
... while len(X) < es.popsize:
... curr_fit = None
... while curr_fit in (None, np.nan):
... x = es.ask(1)[0]
... curr_fit = cma.ff.somenan(x, cma.ff.elli) # might return np.nan
... X.append(x)
... fit.append(curr_fit)
... es.tell(X, fit)
... es.logger.add()
... es.disp() #doctest: +ELLIPSIS
Itera...
>>>
>>> assert es.result[1] < 1e-9, es.result
>>> assert es.result[2] < 9000, es.result # by internal termination
>>> # es.logger.plot() # will plot data
>>> # cma.s.figshow() # display plot window
An example with user-defined transformation, in this case to realize
a lower bound of 2.
>>> import warnings
>>> with warnings.catch_warnings(record=True) as warns:
... es = cma.CMAEvolutionStrategy(5 * [3], 0.1,
... {"transformation": [lambda x: x**2+1.2, None],
... "ftarget": 1e-7 + 5.54781521192,
... "verbose": -2,})
>>> warns[0].message # doctest:+ELLIPSIS
UserWarning('in class GenoPheno: user defined transformations have not been tested thoroughly (...
>>> warns[1].message # doctest:+ELLIPSIS
UserWarning('computed initial point...
>>> es.optimize(cma.ff.rosen, verb_disp=0) #doctest: +ELLIPSIS
<cma...
>>> assert cma.ff.rosen(es.result[0]) < 1e-7 + 5.54781521192, es.result
>>> assert es.result[2] < 3300, es.result
The inverse transformation is (only) necessary if the `BoundPenalty`
boundary handler is used at the same time.
The `CMAEvolutionStrategy` class also provides a default logger
(cave: files are overwritten when the logger is used with the same
filename prefix):
>>> es = cma.CMAEvolutionStrategy(4 * [0.2], 0.5, {'verb_disp': 0})
>>> es.logger.disp_header() # annotate the print of disp
Iterat Nfevals function value axis ratio maxstd minstd
>>> while not es.stop():
... X = es.ask()
... es.tell(X, [cma.ff.sphere(x) for x in X])
... es.logger.add() # log current iteration
... es.logger.disp([-1]) # display info for last iteration #doctest: +ELLIPSIS
1 ...
>>> es.logger.disp_header()
Iterat Nfevals function value axis ratio maxstd minstd
>>> # es.logger.plot() # will make a plot
Example implementing restarts with increasing popsize (IPOP):
>>> bestever = cma.optimization_tools.BestSolution()
>>> for lam in 10 * 2**np.arange(8): # 10, 20, 40, 80, ..., 10 * 2**7
... es = cma.CMAEvolutionStrategy(6 - 8 * np.random.rand(4), # 4-D
... 5, # initial std sigma0
... {'popsize': lam, # options
... 'verb_append': bestever.evalsall})
... # logger = cma.CMADataLogger().register(es, append=bestever.evalsall)
... while not es.stop():
... X = es.ask() # get list of new solutions
... fit = [cma.ff.rastrigin(x) for x in X] # evaluate each solution
... es.tell(X, fit) # besides for termination only the ranking in fit is used
...
... # display some output
... # logger.add() # add a "data point" to the log, writing in files
... es.disp() # uses option verb_disp with default 100
...
... print('termination:', es.stop())
... cma.s.pprint(es.best.__dict__)
...
... bestever.update(es.best)
...
... # show a plot
... # logger.plot();
... if bestever.f < 1e-8: # global optimum was hit
... break #doctest: +ELLIPSIS
(5_w,...
>>> assert es.result[1] < 1e-8, es.result
On the Rastrigin function, usually after five restarts the global
optimum is located.
Using the `multiprocessing` module, we can evaluate the function in
parallel with a simple modification of the example (however
multiprocessing seems not always reliable):
>>> from cma.fitness_functions import elli # cannot be an instance method
>>> from cma.optimization_tools import EvalParallel2
>>> es = cma.CMAEvolutionStrategy(22 * [0.0], 1.0, {'maxiter':10}) # doctest:+ELLIPSIS
(6_w,13)-aCMA-ES (mu_w=...
>>> with EvalParallel2(elli, es.popsize + 1) as eval_all:
... while not es.stop():
... X = es.ask()
... es.tell(X, eval_all(X))
... es.disp()
... # es.logger.add() # doctest:+ELLIPSIS
Iterat...
The final example shows how to resume:
>>> import pickle
>>>
>>> es0 = cma.CMAEvolutionStrategy(12 * [0.1], # a new instance, 12-D
... 0.12) # initial std sigma0
... #doctest: +ELLIPSIS
(5_w,...
>>> es0.optimize(cma.ff.rosen, iterations=100) #doctest: +ELLIPSIS
I...
>>> s = es0.pickle_dumps() # return pickle.dumps(es) with safeguards
>>> # save string s to file like open(filename, 'wb').write(s)
>>> del es0 # let's start fresh
>>> # s = open(filename, 'rb').read() # load string s from file
>>> es = pickle.loads(s) # read back es instance from string
>>> # resuming
>>> es.optimize(cma.ff.rosen, verb_disp=200) #doctest: +ELLIPSIS
200 ...
>>> assert es.result[2] < 15000, es.result
>>> assert cma.s.Mh.vequals_approximately(es.result[0], 12 * [1], 1e-5), es.result
>>> assert len(es.result) == 8, es.result
Details
=======
The following two enhancements are implemented, the latter is only
turned on by default for very small population sizes.
*Active CMA* is implemented with option ``CMA_active`` and
conducts an update of the covariance matrix with negative weights.
The negative update is implemented such that positive definiteness
is guaranteed. A typical speed-up factor (in number of f-evaluations)
is between 1.1 and two.
References: Jastrebski and Arnold, Improving evolution strategies
through active covariance matrix adaptation, CEC 2006.
Hansen, The CMA evolution strategy: a tutorial, arXiv 2016.
*Selective mirroring* is implemented with option ``CMA_mirrors``
in the method `get_mirror` and `get_selective_mirrors`.
The method `ask_and_eval` (used by `fmin`) will then sample
selectively mirrored vectors within the iteration
(``CMA_mirrormethod==1``). Otherwise, or if ``CMA_mirrormethod==2``,
selective mirrors are injected for the next iteration.
In selective mirroring, only the worst solutions are mirrored. With
the default small number of mirrors, *pairwise selection* (where at
most one of the two mirrors contributes to the update of the
distribution mean) is implicitly guaranteed under selective
mirroring and therefore not explicitly implemented.
Update: pairwise selection for injected mirrors is also applied in the
covariance matrix update: for all injected solutions, as for those from
TPA, this is now implemented in that the recombination weights are
constrained to be nonnegative for injected solutions in the covariance
matrix (otherwise recombination weights are anyway nonnegative). This
is a precaution to prevent failure when injected solutions are
systematically bad (see e.g. https://github.com/CMA-ES/pycma/issues/124),
but may not be "optimal" for mirrors.
References: Brockhoff et al, PPSN 2010, Auger et al, GECCO 2011.
:See also: `fmin` (), `OOOptimizer`, `CMAOptions`, `plot` (), `ask` (),
`tell` (), `ask_and_eval` ()
""" # here starts the code: (beating the code folding glitch)
@property # read only attribute decorator for a method
def popsize(self):
"""number of samples by default returned by `ask` ()
"""
return self.sp.popsize
# this is not compatible with python2.5:
# @popsize.setter
# def popsize(self, p):
# """popsize cannot be set (this might change in future)
# """
# raise RuntimeError("popsize cannot be changed")
def stop(self, check=True, ignore_list=(), check_in_same_iteration=False,
get_value=None):
"""return the termination status as dictionary.
With ``check == False``, the termination conditions are not checked
and the status might not reflect the current situation.
``check_in_same_iteration == False`` (new) does not re-check during
the same iteration. When termination options are manually changed,
it must be set to `True` to advance afterwards.
``stop().clear()`` removes the currently active termination
conditions.
As a convenience feature, keywords in `ignore_list` are removed from
the conditions.
If `get_value` is set to a condition name (not the empty string),
`stop` does not update the termination dictionary but returns the
measured value that would be compared to the threshold. This only
works for some conditions, like 'tolx'. If the condition name is
not known or cannot be computed, `None` is returned and no warning
is issued.
Testing `get_value` functionality:
>>> import cma
>>> es = cma.CMAEvolutionStrategy(2 * [1], 1e4, {'verbose': -9})
>>> with warnings.catch_warnings(record=True) as w:
... es.stop(get_value='tolx') # triggers zero iteration warning
... assert len(w) == 1, [str(wi) for wi in w]
>>> es = es.optimize(cma.ff.sphere, iterations=4)
>>> assert 1e3 < es.stop(get_value='tolx') < 1e4, es.stop(get_value='tolx')
>>> assert es.stop() == {}
>>> assert es.stop(get_value='catch 22') is None
"""
if (check and self.countiter > 0 and self.opts['termination_callback'] and
self.opts['termination_callback'] != str(self.opts['termination_callback'])):
self.callbackstop = utils.ListOfCallables(self.opts['termination_callback'])(self)
self._stopdict._get_value = get_value # a hack to avoid passing arguments down to _add_stop and back
# check_on_same_iteration == False makes como code much faster
res = self._stopdict(self, check_in_same_iteration or get_value or ( # update the stopdict and return a Dict (self)
check and self.countiter != self._stopdict.lastiter))
if ignore_list:
for key in ignore_list:
res.pop(key, None)
if get_value: # deliver _value and reset
res, self._stopdict._value = self._stopdict._value, None
return res
def __init__(self, x0, sigma0, inopts=None, options=None):
"""see class `CMAEvolutionStrategy`
`options` is for consistency with `fmin2` options and is only
in effect if ``inopts is None``.
"""
if options and inopts is None:
inopts = options
del options
self.inputargs = dict(locals()) # for the record
del self.inputargs['self'] # otherwise the instance self has a cyclic reference
if inopts is None:
inopts = {}
self.inopts = inopts
opts = CMAOptions(inopts).complement() # CMAOptions() == fmin([],[]) == defaultOptions()
if opts.eval('verbose') is None:
opts['verbose'] = CMAOptions()['verbose']
utils.global_verbosity = global_verbosity = opts.eval('verbose')
if global_verbosity < -8:
opts['verb_disp'] = 0
opts['verb_log'] = 0
opts['verb_plot'] = 0
if 'noise_handling' in opts and opts.eval('noise_handling'):
raise ValueError('noise_handling not available with class CMAEvolutionStrategy, use function fmin')
if 'restarts' in opts and opts.eval('restarts'):
raise ValueError('restarts not available with class CMAEvolutionStrategy, use function fmin')
self._set_x0(x0) # manage weird shapes, set self.x0
self.N_pheno = len(self.x0)
self.sigma0 = sigma0
if utils.is_str(sigma0):
raise ValueError("sigma0 must be a scalar, a string is no longer permitted")
# self.sigma0 = eval(sigma0) # like '1./N' or 'np.random.rand(1)[0]+1e-2'
if np.size(self.sigma0) != 1 or np.shape(self.sigma0):
raise ValueError('input argument sigma0 must be (or evaluate to) a scalar,'
' use `cma.ScaleCoordinates` or option `"CMA_stds"` when'
' different sigmas in each coordinate are in order.')
self.sigma = self.sigma0 # goes to initialize
# extract/expand options
N = self.N_pheno
if utils.is_str(opts['fixed_variables']):
opts['fixed_variables'] = ast.literal_eval(
opts['fixed_variables'])
assert (isinstance(opts['fixed_variables'], dict)
or opts['fixed_variables'] is None)
if isinstance(opts['fixed_variables'], dict):
N = self.N_pheno - len(opts['fixed_variables'])
opts.evalall(locals()) # using only N
if np.isinf(opts['CMA_diagonal']):
opts['CMA_diagonal'] = True
self.opts = opts
if not utils.is_nan(opts['seed']):
if self.opts['randn'] is np.random.randn:
if not opts['seed'] or opts['seed'] is time or str(opts['seed']).startswith('time'):
np.random.seed()
six_decimals = (time.time() - 1e6 * (time.time() // 1e6))
opts['seed'] = int(1e5 * np.random.rand() + six_decimals
+ 1e5 * (time.time() % 1))
np.random.seed(opts['seed']) # a printable seed
elif opts['seed'] not in (None, time):
utils.print_warning("seed=%s will never be used (seed is only used if option 'randn' is np.random.randn)"
% str(opts['seed']))
self.gp = transformations.GenoPheno(self.N_pheno,
opts['scaling_of_variables'],
opts['typical_x'],
opts['fixed_variables'],
opts['transformation'])
if not self.gp.isidentity:
warnings.warn("genotype-phenotype transformations induced by {0}\n may not"
" be compatible with more recently introduced code features (like integer handling) and are deprecated."
"\nRather use an objective function wrapper instead, see e.g."
"\n`ScaleCoordinates` or `FixVariables` in `cma.fitness_transformations`."
.format("""
opts['scaling_of_variables'],
opts['typical_x'],
opts['fixed_variables'],
opts['transformation'])"""),
DeprecationWarning
)
self.boundary_handler = opts['BoundaryHandler']
if isinstance(self.boundary_handler, type):
self.boundary_handler = self.boundary_handler(opts['bounds'])
elif opts['bounds'] not in (None, False, [], ()) and not (
opts['bounds'][0] is None and opts['bounds'][1] is None):
warnings.warn("""
Option 'bounds' ignored because a BoundaryHandler *instance* was found.
Consider passing only the desired BoundaryHandler class. """)
if not self.boundary_handler.has_bounds():
self.boundary_handler = BoundNone() # just a little faster and well defined
else:
# check that x0 is in bounds
if not self.boundary_handler.is_in_bounds(self.x0):
if opts['verbose'] >= 0:
idxs = self.boundary_handler.idx_out_of_bounds(self.x0)
warnings.warn("""
Initial solution is out of the domain boundaries
in ind%s %s:
x0 = %s
ldom = %s
udom = %s
THIS MIGHT LEAD TO AN EXCEPTION RAISED LATER ON.
""" % ('ices' if len(idxs) > 1 else 'ex',
str(idxs),
str(self.gp.pheno(self.x0)),
str(self.boundary_handler.bounds[0]),
str(self.boundary_handler.bounds[1])))
# set maxstd "in bounds" unless given explicitly
if opts['maxstd'] is None:
# set maxstd according to boundary range
opts['maxstd'] = (self.boundary_handler.get_bounds('upper', self.N_pheno) -
self.boundary_handler.get_bounds('lower', self.N_pheno)
) * opts['maxstd_boundrange']
# fix corner case with integer variables and bounds mod 1 at 0.5
self.boundary_handler.amend_bounds_for_integer_variables(
opts['integer_variables'])
# set self.mean to geno(x0)
tf_geno_backup = self.gp.tf_geno
if self.gp.tf_pheno and self.gp.tf_geno is None:
def identity(x):
return x
self.gp.tf_geno = identity # a hack to avoid an exception
warnings.warn(
"computed initial point may well be wrong, because no\n"
"inverse for the user provided phenotype transformation "
"was given")
self.mean = self.gp.geno(np.array(self.x0, copy=True),
from_bounds=self.boundary_handler.inverse,
copy=False)
self.mean_after_tell = np.array(self.mean, copy=True) # to separate any after-iteration change
self.mean0 = array(self.mean, copy=True) # relevant for initial injection
self.gp.tf_geno = tf_geno_backup
# without copy_always interface:
# self.mean = self.gp.geno(array(self.x0, copy=True), copy_if_changed=False)
self.N = len(self.mean)
assert N == self.N
# self.fmean = np.nan # TODO name should change? prints nan in output files (OK with matlab&octave)
# self.fmean_noise_free = 0. # for output only
opts.amend_integer_options(N, inopts)
self.sp = options_parameters.CMAParameters(N, opts, verbose=opts['verbose'] > 0)
self.sp0 = self.sp # looks useless, as it is not a copy
def instantiate_adapt_sigma(adapt_sigma, self):
"""return instantiated sigma adaptation object"""
if adapt_sigma is None:
utils.print_warning(
"Value `None` for option 'AdaptSigma' is ambiguous and\n"
"hence deprecated. AdaptSigma can be set to `True` or\n"
"`False` or a class or class instance which inherited from\n"
"`cma.sigma_adaptation.CMAAdaptSigmaBase`")
adapt_sigma = CMAAdaptSigmaCSA
elif adapt_sigma is True:
if self.opts['CMA_diagonal'] is True and self.N > 299:
adapt_sigma = CMAAdaptSigmaTPA
else:
adapt_sigma = CMAAdaptSigmaCSA
elif adapt_sigma is False:
adapt_sigma = CMAAdaptSigmaNone()
if isinstance(adapt_sigma, type): # is a class?
# then we want the instance
adapt_sigma = adapt_sigma(dimension=self.N, popsize=self.sp.popsize)
return adapt_sigma
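# Illustrative summary of the mapping implemented above: None -> CMAAdaptSigmaCSA
# (with a deprecation warning), True -> CMAAdaptSigmaTPA only for diagonal CMA in
# dimension > 299, else CMAAdaptSigmaCSA, False -> CMAAdaptSigmaNone(), and a
# class is replaced by an instance constructed with the current dimension and popsize.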
self.adapt_sigma = instantiate_adapt_sigma(opts['AdaptSigma'], self)
self.mean_shift_samples = True if (isinstance(self.adapt_sigma, CMAAdaptSigmaTPA) or
opts['mean_shift_line_samples']) else False
def eval_vector(in_, opts, N, default_value=1.0):
"""return `default_value` as scalar or `in_` after removing
fixed variables if ``len(in_) == N``
"""
if in_ is None:
return default_value
if np.size(in_) == 1: # return scalar value
try:
res = float(in_[0])
except TypeError:
res = float(in_)
elif opts['fixed_variables'] and np.size(in_) > N:
res = array([in_[i] for i in range(len(in_))
if i not in opts['fixed_variables']],
dtype=float)
if len(res) != N:
utils.print_warning(
"resulting len %d != N = %d" % (len(res), N),
'eval_vector', iteration=self.countiter)
elif N and len(in_) < N: # recycle last entry
res = np.concatenate((in_, (N - len(in_)) * [in_[-1]]),
dtype=float)
else:
res = array(in_, dtype=float)
if np.size(res) not in (1, N):
raise ValueError(
"vector (like CMA_stds or minstd) must have "
"dimension %d instead of %d" % (N, np.size(res)))
return res
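# Illustrative calls (hypothetical values): with N = 5 and no fixed variables,
# eval_vector(None, opts, 5, 0) returns the scalar default 0,
# eval_vector([0.3], opts, 5) returns the scalar 0.3, and
# eval_vector([1, 2], opts, 5) recycles the last entry to array([1., 2., 2., 2., 2.]).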
opts['minstd'] = eval_vector(opts['minstd'], opts, N, 0)
opts['maxstd'] = eval_vector(opts['maxstd'], opts, N, np.inf)
# integer handling, currently as LB-IC, see cma.integer_centering:
if len(opts['integer_variables']):
opts.set_integer_min_std(N, self.sp.weights.mueff)
self.integer_centering = IntegerCentering(self) # read opts['integer_variables'] and use boundary handler
else:
self.integer_centering = _pass # do nothing by default
if 11 < 3 and len(opts['integer_variables']):
try:
from . import integer
s = utils.format_message(
"Option 'integer_variables' is discouraged. "
"Use class `cma.integer.CMAIntMixed` or function "
"`cma.integer.fmin_int` instead.")
warnings.warn(s, category=DeprecationWarning) # TODO: doesn't show up
except ImportError:
pass