jade.py
import numpy as np  # engine for numerical computing
from scipy.stats import cauchy  # Cauchy continuous random variable
from pypop7.optimizers.de.de import DE  # abstract class of all differential evolution (DE) classes


class JADE(DE):
"""Adaptive Differential Evolution (JADE).
Parameters
----------
problem : `dict`
problem arguments with the following common settings (`keys`):
* 'fitness_function' - objective function to be **minimized** (`func`),
* 'ndim_problem' - number of dimensionality (`int`),
* 'upper_boundary' - upper boundary of search range (`array_like`),
* 'lower_boundary' - lower boundary of search range (`array_like`).
options : `dict`
optimizer options with the following common settings (`keys`):
* 'max_function_evaluations' - maximum of function evaluations (`int`, default: `np.inf`),
* 'max_runtime' - maximal runtime to be allowed (`float`, default: `np.inf`),
* 'seed_rng' - seed for random number generation needed to be *explicitly* set (`int`);
and with the following particular settings (`keys`):
* 'n_individuals' - number of offspring, aka offspring population size (`int`, default: `100`),
* 'mu' - mean of normal distribution for adaptation of crossover probability (`float`,
default: `0.5`),
* 'median' - median of Cauchy distribution for adaptation of mutation factor (`float`,
default: `0.5`),
* 'p' - level of greediness of mutation strategy (`float`, default: `0.05`),
* 'c' - life span (`float`, default: `0.1`),
* 'is_bound' - flag to limit all samplings inside the search range (`boolean`, default: `False`).
Examples
--------
Use the optimizer to minimize the well-known test function
`Rosenbrock <http://en.wikipedia.org/wiki/Rosenbrock_function>`_:
.. code-block:: python
:linenos:
>>> import numpy
>>> from pypop7.benchmarks.base_functions import rosenbrock # function to be minimized
>>> from pypop7.optimizers.de.jade import JADE
>>> problem = {'fitness_function': rosenbrock, # define problem arguments
... 'ndim_problem': 2,
... 'lower_boundary': -5*numpy.ones((2,)),
... 'upper_boundary': 5*numpy.ones((2,))}
>>> options = {'max_function_evaluations': 5000, # set optimizer options
... 'seed_rng': 0}
>>> jade = JADE(problem, options) # initialize the optimizer class
>>> results = jade.optimize() # run the optimization process
>>> # return the number of function evaluations and best-so-far fitness
>>> print(f"JADE: {results['n_function_evaluations']}, {results['best_so_far_y']}")
JADE: 5000, 4.844728910084905e-05
For its correctness checking of coding, refer to `this code-based repeatability report
<https://tinyurl.com/bddsba5s>`_ for more details.
Attributes
----------
c : `float`
life span.
is_bound : `boolean`
flag to limit all samplings inside the search range.
median : `float`
median of Cauchy distribution for adaptation of mutation factor.
mu : `float`
mean of normal distribution for adaptation of crossover probability.
n_individuals : `int`
number of offspring, offspring population size.
p : `float`
level of greediness of mutation strategy.
References
----------
Zhang, J., and Sanderson, A. C. 2009.
`JADE: Adaptive differential evolution with optional external archive.
<https://ieeexplore.ieee.org/document/5208221/>`_
IEEE Transactions on Evolutionary Computation, 13(5), pp.945–958.
"""

    def __init__(self, problem, options):
        DE.__init__(self, problem, options)
        self.mu = options.get('mu', 0.5)  # mean of normal distribution for adaptation of crossover probabilities
        self.median = options.get('median', 0.5)  # location of Cauchy distribution for adaptation of mutation factor
        self.p = options.get('p', 0.05)  # level of greediness of the mutation strategy
        assert 0.0 <= self.p <= 1.0
        self.c = options.get('c', 0.1)  # adaptation rate of `mu` and `median` (aka life span)
        assert 0.0 <= self.c <= 1.0
        self.is_bound = options.get('is_bound', False)

    def initialize(self, args=None):
        x = self.rng_initialization.uniform(self.initial_lower_boundary, self.initial_upper_boundary,
                                            size=(self.n_individuals, self.ndim_problem))  # population
        y = np.empty((self.n_individuals,))  # fitness
        for i in range(self.n_individuals):
            if self._check_terminations():
                break
            y[i] = self._evaluate_fitness(x[i], args)
        a = np.empty((0, self.ndim_problem))  # set of archived inferior solutions
        return x, y, a

    def bound(self, x=None, xx=None):
        if not self.is_bound:
            return x
        for k in range(self.n_individuals):
            # reset each out-of-bounds value to the midpoint between the violated boundary and its parent value
            idx = np.array(x[k] < self.lower_boundary)
            if idx.any():
                x[k][idx] = (self.lower_boundary + xx[k])[idx]/2.0
            idx = np.array(x[k] > self.upper_boundary)
            if idx.any():
                x[k][idx] = (self.upper_boundary + xx[k])[idx]/2.0
        return x

    def mutate(self, x=None, y=None, a=None):  # DE/current-to-pbest/1 mutation with optional external archive
        x_mu = np.empty((self.n_individuals, self.ndim_problem))  # mutated population
        f_mu = np.empty((self.n_individuals,))  # mutated mutation factors
        order = np.argsort(y)[:int(np.ceil(self.p*self.n_individuals))]  # indexes of the top 100*p% best individuals
        x_p = x[self.rng_optimization.choice(order, (self.n_individuals,))]
        x_un = np.vstack((np.copy(x), a))  # union of current population and archive
        for k in range(self.n_individuals):
            # sample mutation factor from a Cauchy distribution, regenerating non-positive values
            # and truncating values above 1.0
            f_mu[k] = cauchy.rvs(loc=self.median, scale=0.1, random_state=self.rng_optimization)
            while f_mu[k] <= 0.0:
                f_mu[k] = cauchy.rvs(loc=self.median, scale=0.1, random_state=self.rng_optimization)
            if f_mu[k] > 1.0:
                f_mu[k] = 1.0
            r1 = self.rng_optimization.choice([i for i in range(self.n_individuals) if i != k])
            r2 = self.rng_optimization.choice([i for i in range(len(x_un)) if i != k and i != r1])
            x_mu[k] = x[k] + f_mu[k]*(x_p[k] - x[k]) + f_mu[k]*(x[r1] - x_un[r2])
        return x_mu, f_mu

    def crossover(self, x_mu=None, x=None):  # binomial crossover
        x_cr = np.copy(x)
        p_cr = self.rng_optimization.normal(self.mu, 0.1, (self.n_individuals,))  # crossover probabilities
        p_cr = np.minimum(np.maximum(p_cr, 0.0), 1.0)  # truncate to [0.0, 1.0]
        for k in range(self.n_individuals):
            i_rand = self.rng_optimization.integers(self.ndim_problem)  # ensure at least one mutated dimension
            for i in range(self.ndim_problem):
                if (i == i_rand) or (self.rng_optimization.random() < p_cr[k]):
                    x_cr[k, i] = x_mu[k, i]
        return x_cr, p_cr

    def select(self, args=None, x=None, y=None, x_cr=None, a=None, f_mu=None, p_cr=None):
        f = np.empty((0,))  # set of all successful mutation factors
        p = np.empty((0,))  # set of all successful crossover probabilities
        for k in range(self.n_individuals):
            if self._check_terminations():
                break
            yy = self._evaluate_fitness(x_cr[k], args)
            if yy < y[k]:  # greedy one-to-one selection
                a = np.vstack((a, x[k]))  # archive the replaced (inferior) solution
                f = np.hstack((f, f_mu[k]))  # archive the successful mutation factor
                p = np.hstack((p, p_cr[k]))  # archive the successful crossover probability
                x[k] = x_cr[k]
                y[k] = yy
        if len(p) != 0:  # arithmetic-mean update of the normal distribution's mean
            self.mu = (1.0 - self.c)*self.mu + self.c*np.mean(p)
        if len(f) != 0:  # Lehmer-mean update of the Cauchy distribution's location
            self.median = (1.0 - self.c)*self.median + self.c*np.sum(np.power(f, 2))/np.sum(f)
        return x, y, a

    def iterate(self, x=None, y=None, a=None, args=None):
        x_mu, f_mu = self.mutate(x, y, a)
        x_cr, p_cr = self.crossover(x_mu, x)
        x_cr = self.bound(x_cr, x)
        x, y, a = self.select(args, x, y, x_cr, a, f_mu, p_cr)
        if len(a) > self.n_individuals:  # randomly remove solutions to keep the archive size fixed
            a = np.delete(a, self.rng_optimization.choice(len(a), (len(a) - self.n_individuals,), False), 0)
        self._n_generations += 1
        return x, y, a

    def optimize(self, fitness_function=None, args=None):
        fitness = DE.optimize(self, fitness_function)
        x, y, a = self.initialize(args)
        while not self._check_terminations():
            self._print_verbose_info(fitness, y)
            x, y, a = self.iterate(x, y, a, args)
        return self._collect(fitness, y)
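

# A minimal usage sketch (not part of the original module): it mirrors the docstring example above
# but also exercises the JADE-specific options ('n_individuals', 'p', 'c', 'is_bound') documented in
# the class. The `__main__` guard and the chosen option values are illustrative assumptions, not
# settings prescribed by the library; the defaults shown match those stated in the docstring.
if __name__ == '__main__':
    from pypop7.benchmarks.base_functions import rosenbrock  # function to be minimized

    problem = {'fitness_function': rosenbrock,
               'ndim_problem': 2,
               'lower_boundary': -5*np.ones((2,)),
               'upper_boundary': 5*np.ones((2,))}
    options = {'max_function_evaluations': 5000,
               'seed_rng': 0,
               'n_individuals': 100,  # offspring population size (default)
               'p': 0.05,  # level of greediness of mutation strategy (default)
               'c': 0.1,  # adaptation rate of `mu` and `median` (default)
               'is_bound': True}  # keep all samplings inside the search range
    results = JADE(problem, options).optimize()
    print(results['n_function_evaluations'], results['best_so_far_y'])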