Harris Hawks Optimization integration #280

Merged · 11 commits · Nov 16, 2020
4 changes: 3 additions & 1 deletion NiaPy/algorithms/basic/__init__.py
@@ -25,6 +25,7 @@
from NiaPy.algorithms.basic.foa import ForestOptimizationAlgorithm
from NiaPy.algorithms.basic.mbo import MonarchButterflyOptimization
from NiaPy.algorithms.basic.bea import BeesAlgorithm
from NiaPy.algorithms.basic.hho import HarrisHawksOptimization
__all__ = [
'BatAlgorithm',
'FireflyAlgorithm',
@@ -82,5 +83,6 @@
'MutatedCenterParticleSwarmOptimization',
'OppositionVelocityClampingParticleSwarmOptimization',
'ComprehensiveLearningParticleSwarmOptimizer',
'CenterParticleSwarmOptimization'
'CenterParticleSwarmOptimization',
'HarrisHawksOptimization'
]
212 changes: 212 additions & 0 deletions NiaPy/algorithms/basic/hho.py
@@ -0,0 +1,212 @@
# encoding=utf8
import logging

from numpy import random as rand, sin, pi, argmin, abs, mean
from scipy.special import gamma

from NiaPy.algorithms.algorithm import Algorithm

logging.basicConfig()
logger = logging.getLogger('NiaPy.algorithms.basic')
logger.setLevel('INFO')

__all__ = ['HarrisHawksOptimization']


class HarrisHawksOptimization(Algorithm):
r"""Implementation of Harris Hawks Optimization algorithm.

Algorithm:
Harris Hawks Optimization

Date:
2020

Authors:
Francisco Jose Solis-Munoz

License:
MIT

Reference paper:
Heidari et al. "Harris hawks optimization: Algorithm and applications". Future Generation Computer Systems. 2019. Vol. 97. 849-872.

Attributes:
Name (List[str]): List of strings representing algorithm name.
levy (float): Levy factor.

See Also:
* :class:`NiaPy.algorithms.Algorithm`
"""
Name = ['HarrisHawksOptimization', 'HHO']

def __init__(self, **kwargs):
super(HarrisHawksOptimization, self).__init__(**kwargs)

@staticmethod
def algorithmInfo():
r"""Get algorithms information.

Returns:
str: Algorithm information.

See Also:
* :func:`NiaPy.algorithms.Algorithm.algorithmInfo`
"""
return r"""Heidari et al. "Harris hawks optimization: Algorithm and applications". Future Generation Computer Systems. 2019. Vol. 97. 849-872."""

@staticmethod
def typeParameters():
r"""Return dict with where key of dict represents parameter name and values represent checking functions for selected parameter.

Returns:
Dict[str, Callable]:
* levy (Callable[[Union[float, int]], bool]): Levy factor.

See Also:
* :func:`NiaPy.algorithms.Algorithm.typeParameters`
"""
d = Algorithm.typeParameters()
d.update({
'levy': lambda x: isinstance(x, (float, int)) and x > 0,
})
return d

def setParameters(self, NP=40, levy=0.01, **ukwargs):
r"""Set the parameters of the algorithm.

Args:
NP (Optional[int]): Population size.
levy (Optional[float]): Levy factor.

See Also:
* :func:`NiaPy.algorithms.Algorithm.setParameters`
"""
Algorithm.setParameters(self, NP=NP, **ukwargs)
self.levy = levy

def getParameters(self):
r"""Get parameters of the algorithm.

Returns:
Dict[str, Any]: Algorithm parameters.
"""
d = Algorithm.getParameters(self)
d.update({
'levy': self.levy
})
return d

def initPopulation(self, task, rnd=rand):
r"""Initialize the starting population.

Parameters:
task (Task): Optimization task

Returns:
Tuple[numpy.ndarray, numpy.ndarray[float], Dict[str, Any]]:
1. New population.
2. New population fitness/function values.
3. Additional arguments.

See Also:
* :func:`NiaPy.algorithms.Algorithm.initPopulation`
"""
Sol, Fitness, d = Algorithm.initPopulation(self, task)
return Sol, Fitness, d

def levy_function(self, dims, step=0.01, rnd=rand):
r"""Calculate levy function.

Parameters:
dim (int): Number of dimensions
step (float): Step of the Levy function

Returns:
float: The Levy function evaluation
"""
beta = 1.5
sigma = (gamma(1 + beta) * sin(pi * beta / 2) / (gamma((1 + beta / 2) * beta * 2.0 ** ((beta - 1) / 2)))) ** (1 / beta)
normal_1 = rnd.normal(0, sigma, size=dims)
normal_2 = rnd.normal(0, 1, size=dims)
result = step * normal_1 / (abs(normal_2) ** (1 / beta))
return result

def runIteration(self, task, Sol, Fitness, xb, fxb, **dparams):
r"""Core function of Harris Hawks Optimization.

Parameters:
task (Task): Optimization task.
Sol (numpy.ndarray): Current population.
Fitness (numpy.ndarray[float]): Current population fitness/function values.
xb (numpy.ndarray): Current best individual
fxb (float): Current best individual function/fitness value
dparams (Dict[str, Any]): Additional algorithm arguments

Returns:
Tuple[numpy.ndarray, numpy.ndarray, numpy.ndarray, float, Dict[str, Any]]:
1. New population.
2. New population fitness/function values.
3. New global best solution.
4. New global best fitness/objective value.
5. Additional arguments.
"""
# Decreasing energy factor
decreasing_energy_factor = 2 * (1 - task.iters() / task.nGEN)
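# Escaping energy per the reference paper: E = 2 * E0 * (1 - t / T), where
# E0 is drawn uniformly from (-1, 1) for each hawk below; |E| >= 1 drives
# exploration, |E| < 1 the exploitation (besiege) phases.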
mean_sol = mean(Sol, axis=0)  # Mean position per dimension, X_m in the paper
# Update population
for i in range(self.NP):
jumping_energy = self.Rand.uniform(0, 2)
decreasing_energy_random = self.Rand.uniform(-1, 1)
escaping_energy = decreasing_energy_factor * decreasing_energy_random
escaping_energy_abs = abs(escaping_energy)
random_number = self.Rand.rand()
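# Phase selection follows the paper: |E| >= 1 selects an exploration strategy
# (perch choice by random_number); 0.5 <= |E| < 1 means soft besiege and
# |E| < 0.5 hard besiege, switching to the progressive-rapid-dive variants
# when random_number < 0.5 (the prey still has a chance to escape).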
if escaping_energy_abs >= 1 and random_number >= 0.5:
# 0. Exploration: Random tall tree
rhi = self.Rand.randint(0, self.NP)
random_agent = Sol[rhi]
Sol[i] = random_agent - self.Rand.rand() * abs(random_agent - 2 * self.Rand.rand() * Sol[i])
elif escaping_energy_abs >= 1 and random_number < 0.5:
# 1. Exploration: Family members mean
Sol[i] = (xb - mean_sol) - self.Rand.rand() * self.Rand.uniform(task.Lower, task.Upper)
elif escaping_energy_abs >= 0.5 and random_number >= 0.5:
# 2. Exploitation: Soft besiege
Sol[i] = \
(xb - Sol[i]) - \
escaping_energy * \
abs(jumping_energy * xb - Sol[i])
elif escaping_energy_abs < 0.5 and random_number >= 0.5:
# 3. Exploitation: Hard besiege
Sol[i] = \
xb - \
escaping_energy * \
abs(xb - Sol[i])
elif escaping_energy_abs >= 0.5 and random_number < 0.5:
# 4. Exploitation: Soft besiege with progressive rapid dives
cand1 = task.repair(xb - escaping_energy * abs(jumping_energy * xb - Sol[i]), rnd=self.Rand)
random_vector = self.Rand.rand(task.D)
cand2 = task.repair(cand1 + random_vector * self.levy_function(task.D, self.levy, rnd=self.Rand), rnd=self.Rand)
if task.eval(cand1) < Fitness[i]:
Sol[i] = cand1
elif task.eval(cand2) < Fitness[i]:
Sol[i] = cand2
elif escaping_energy_abs < 0.5 and random_number < 0.5:
# 5. Exploitation: Hard besiege with progressive rapid dives
cand1 = task.repair(xb - escaping_energy * abs(jumping_energy * xb - mean_sol), rnd=self.Rand)
random_vector = self.Rand.rand(task.D)
cand2 = task.repair(cand1 + random_vector * self.levy_function(task.D, self.levy, rnd=self.Rand), rnd=self.Rand)
if task.eval(cand1) < Fitness[i]:
Sol[i] = cand1
elif task.eval(cand2) < Fitness[i]:
Sol[i] = cand2
# Repair agent (from population) values
Sol[i] = task.repair(Sol[i], rnd=self.Rand)
# Eval population
Fitness[i] = task.eval(Sol[i])
# Get best of population
best_index = argmin(Fitness)
xb_cand = Sol[best_index].copy()
fxb_cand = Fitness[best_index].copy()
if fxb_cand < fxb:
fxb = fxb_cand
xb = xb_cand.copy()
return Sol, Fitness, xb, fxb, {}

# vim: tabstop=3 noexpandtab shiftwidth=3 softtabstop=3
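
For reviewers who want to try the new algorithm end to end, here is a minimal usage sketch on a ten-dimensional sphere. It assumes the StoppingTask and Sphere APIs from NiaPy.task and NiaPy.benchmarks as they exist at the time of this PR; nGEN is passed explicitly because runIteration uses task.nGEN for the energy decay.

# encoding=utf8
# Minimal usage sketch (assumes NiaPy.task.StoppingTask and
# NiaPy.benchmarks.Sphere from the NiaPy release this PR targets).
from NiaPy.task import StoppingTask
from NiaPy.benchmarks import Sphere
from NiaPy.algorithms.basic import HarrisHawksOptimization

task = StoppingTask(D=10, nFES=10000, nGEN=250, benchmark=Sphere())
algo = HarrisHawksOptimization(NP=40, levy=0.01, seed=1234)
best_x, best_fx = algo.run(task)  # returns the best solution and its fitness
print(best_x, best_fx)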
3 changes: 2 additions & 1 deletion NiaPy/tests/test_algorithm.py
@@ -297,6 +297,7 @@ def setUpTasks(self, D, bech='griewank', nFES=None, nGEN=None):
Returns:
Tuple[Task, Task]: Two testing tasks.
"""
# TODO: Rename bech to bench.
task1, task2 = TestingTask(D=D, nFES=self.nFES if nFES is None else nFES, nGEN=self.nGEN if nGEN is None else nGEN, benchmark=bech), TestingTask(D=D, nFES=self.nFES if nFES is None else nFES, nGEN=self.nGEN if nGEN is None else nGEN, benchmark=bech)
return task1, task2

@@ -312,7 +313,7 @@ def test_algorithm_run(self, a=None, b=None, benc='griewank', nFES=None, nGEN=None):
"""
if a is None or b is None: return
for D in self.D:
task1, task2 = self.setUpTasks(D, benc, nFES=nFES)
task1, task2 = self.setUpTasks(D, benc, nGEN=nGEN, nFES=nFES)
# x = a.run(task1) # For debugging purposes
# y = b.run(task2) # For debugging purposes
q = Queue(maxsize=2)
29 changes: 29 additions & 0 deletions NiaPy/tests/test_hho.py
@@ -0,0 +1,29 @@
# encoding=utf8

from NiaPy.tests.test_algorithm import AlgorithmTestCase, MyBenchmark
from NiaPy.algorithms.basic import HarrisHawksOptimization

class HHOTestCase(AlgorithmTestCase):
def setUp(self):
AlgorithmTestCase.setUp(self)
self.algo = HarrisHawksOptimization

def test_parameter_type(self):
d = self.algo.typeParameters()
self.assertTrue(d['levy'](0.01))
self.assertFalse(d['levy'](-0.01))
self.assertTrue(d['NP'](10))
self.assertFalse(d['NP'](-10))
self.assertFalse(d['NP'](0))

def test_custom_works_fine(self):
hho_custom = self.algo(NP=20, levy=0.01, seed=self.seed)
hho_customc = self.algo(NP=20, levy=0.01, seed=self.seed)
AlgorithmTestCase.test_algorithm_run(self, hho_custom, hho_customc, MyBenchmark())

def test_griewank_works_fine(self):
hho_griewank = self.algo(NP=20, nFES=4000, nGEN=200, levy=0.01, seed=self.seed)
hho_griewankc = self.algo(NP=20, nFES=4000, nGEN=200, levy=0.01, seed=self.seed)
AlgorithmTestCase.test_algorithm_run(self, hho_griewank, hho_griewankc)

# vim: tabstop=3 noexpandtab shiftwidth=3 softtabstop=3
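
Since these cases build on the unittest-style AlgorithmTestCase, they can also be exercised in isolation with a standard runner, e.g. python -m pytest NiaPy/tests/test_hho.py (assuming pytest is available in the dev environment).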
2 changes: 2 additions & 0 deletions NiaPy/util/utility.py
@@ -45,6 +45,8 @@ def limit_repair(x, Lower, Upper, **kwargs):

"""

# TODO: Add one-liner np.clip approach

ir = where(x < Lower)
x[ir] = Lower[ir]
ir = where(x > Upper)
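
As the TODO above suggests, the where-based clamping can collapse into a single vectorized call; a minimal sketch, assuming Lower and Upper are numpy arrays broadcastable to x:

from numpy import clip

def limit_repair(x, Lower, Upper, **kwargs):
    # Clamp every component of x into [Lower[i], Upper[i]] in one call.
    return clip(x, Lower, Upper)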