Skip to content

Commit b97ced7

Browse files
authored
Merge pull request #33 from wniec/NLP_SHADE_RSP_FIX
fix NL_SHADE_RSP discrepancies
2 parents 012bcc4 + bbf0433 commit b97ced7

4 files changed

Lines changed: 93 additions & 72 deletions

File tree

dynamicalgorithmselection/agents/RLDAS_agent.py

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -112,7 +112,7 @@ def _save_context(self, optimizer, alg_name):
112112
elif "MadDE" in alg_name:
113113
specific_attrs = ["pm", "pbest", "PqBX"]
114114
elif "NL_SHADE" in alg_name:
115-
specific_attrs = ["NA", "pa"]
115+
specific_attrs = ["NA"]
116116

117117
for attr in specific_attrs:
118118
if hasattr(optimizer, attr):

dynamicalgorithmselection/agents/RLDAS_random_agent.py

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -63,7 +63,7 @@ def _save_context(self, optimizer, alg_name):
6363
elif "MadDE" in alg_name:
6464
specific_attrs = ["pm", "pbest", "PqBX"]
6565
elif "NL_SHADE" in alg_name:
66-
specific_attrs = ["NA", "pa"]
66+
specific_attrs = ["NA"]
6767

6868
for attr in specific_attrs:
6969
if hasattr(optimizer, attr):

dynamicalgorithmselection/optimizers/DE/MADDE.py

Lines changed: 2 additions & 4 deletions
Original file line numberDiff line numberDiff line change
@@ -161,7 +161,7 @@ def iterate(self, x=None, y=None, args=None):
161161
# Archive update: one-by-one to match RL-DAS semantics
162162
for i in np.where(optim)[0]:
163163
if len(self.archive) < self.NA:
164-
self.archive = np.vstack([self.archive, x[i:i+1]])
164+
self.archive = np.vstack([self.archive, x[i : i + 1]])
165165
else:
166166
ri = self.rng_optimization.integers(len(self.archive))
167167
self.archive[ri] = x[i]
@@ -257,9 +257,7 @@ def _ctb_w_arc(self, x, best, archive, F):
257257
xb = best[rb]
258258
x1 = x[r1]
259259
x2 = combined[r2]
260-
return (
261-
x + F[:, np.newaxis] * (xb - x) + F[:, np.newaxis] * (x1 - x2)
262-
)
260+
return x + F[:, np.newaxis] * (xb - x) + F[:, np.newaxis] * (x1 - x2)
263261

264262
def _ctr_w_arc(self, x, archive, F):
265263
NP = x.shape[0]

dynamicalgorithmselection/optimizers/DE/NL_SHADE_RSP.py

Lines changed: 89 additions & 66 deletions
Original file line numberDiff line numberDiff line change
@@ -7,11 +7,10 @@ class NL_SHADE_RSP(DE):
77
and to its implementation in RL-DAS project.
88
In case of any difference, it follows RL-DAS approach"""
99

10-
start_condition_parameters = ["x", "y", "archive", "MF", "MCr", "k_idx", "pa"]
10+
start_condition_parameters = ["x", "y", "archive", "MF", "MCr", "k_idx"]
1111

1212
def __init__(self, problem, options):
1313
super().__init__(problem, options)
14-
self.Nmin = options.get("Nmin", 4)
1514
self.n_individuals = self.Nmax
1615

1716
self.pa = 0.5
@@ -67,12 +66,25 @@ def _choose_F_Cr(self, NP):
6766
def _update_memory(self, SF, SCr, df):
6867
if len(SF) > 0:
6968
w = df / np.sum(df)
70-
mean_wL_F = np.sum(w * (SF**2)) / (np.sum(w * SF) + 1e-15)
71-
mean_wL_Cr = np.sum(w * (SCr**2)) / (np.sum(w * SCr) + 1e-15)
69+
70+
sum_w_SF = np.sum(w * SF)
71+
if sum_w_SF > 0.000001:
72+
mean_wL_F = np.sum(w * (SF**2)) / sum_w_SF
73+
else:
74+
mean_wL_F = 0.5
75+
76+
sum_w_SCr = np.sum(w * SCr)
77+
if sum_w_SCr > 0.000001:
78+
mean_wL_Cr = np.sum(w * (SCr**2)) / sum_w_SCr
79+
else:
80+
mean_wL_Cr = 0.5
7281

7382
self.MF[self.k_idx] = mean_wL_F
7483
self.MCr[self.k_idx] = mean_wL_Cr
7584
self.k_idx = (self.k_idx + 1) % self.memory_size
85+
else:
86+
self.MF[self.k_idx] = 0.5
87+
self.MCr[self.k_idx] = 0.5
7688

7789
def iterate(self, x=None, y=None, args=None):
7890
if x is None or y is None:
@@ -99,46 +111,49 @@ def iterate(self, x=None, y=None, args=None):
99111
Cr_b = 2.0 * (nfe_ratio - 0.5) if nfe_ratio < 0.5 else 0.0
100112

101113
# Rank-based probabilities for r2 (RSP)
102-
ranks = np.exp(-np.arange(NP) / NP)
114+
ranks = np.exp(-(np.arange(NP) + 1) / NP)
103115
pr = ranks / np.sum(ranks)
104116

105117
x2 = np.zeros_like(x)
106118
use_arc = self.rng_optimization.random(NP) < self.pa
107119

120+
# Logical Change 3: Archive indices canceled if archive is too small
121+
if len(self.archive) < 25:
122+
use_arc[:] = False
123+
108124
r1 = np.zeros(NP, dtype=int)
109125
r2 = np.zeros(NP, dtype=int)
110126
pbest_idx = np.zeros(NP, dtype=int)
111127

112128
for i in range(NP):
113-
# pbest index
114-
valid_pbest = [j for j in range(pb_upper) if j != i]
115-
pb_i = int(self.rng_optimization.choice(valid_pbest)) if valid_pbest else i
129+
# Logical Change 3: pbest index with bounded retries
130+
pb_i = self.rng_optimization.integers(0, pb_upper)
131+
count = 0
132+
while pb_i == i and count < 1:
133+
pb_i = self.rng_optimization.integers(0, NP)
134+
count += 1
116135
pbest_idx[i] = pb_i
117136

118-
# r1 index (uniform)
119-
valid_r1 = [j for j in range(NP) if j not in (i, pb_i)]
120-
r1_i = (
121-
int(self.rng_optimization.choice(valid_r1))
122-
if valid_r1
123-
else self.rng_optimization.integers(0, NP)
124-
)
137+
# Logical Change 3: r1 index with bounded retries
138+
r1_i = self.rng_optimization.integers(0, NP)
139+
count = 0
140+
while (r1_i == i or r1_i == pb_i) and count < 25:
141+
r1_i = self.rng_optimization.integers(0, NP)
142+
count += 1
125143
r1[i] = r1_i
126144

127-
# r2 index (archive or RSP)
145+
# Logical Change 3: r2 index (archive or RSP) with bounded retries
128146
if use_arc[i] and len(self.archive) > 0:
129-
r2[i] = self.rng_optimization.integers(0, len(self.archive))
147+
r2[i] = self.rng_optimization.integers(
148+
0, min(len(self.archive), self.NA)
149+
)
130150
x2[i] = self.archive[r2[i]]
131151
else:
132-
use_arc[i] = False
133-
valid_r2 = [j for j in range(NP) if j not in (i, pb_i, r1_i)]
134-
135-
if valid_r2:
136-
# Re-normalize RSP probabilities for the remaining valid choices
137-
valid_pr = pr[valid_r2] / np.sum(pr[valid_r2])
138-
r2_i = int(self.rng_optimization.choice(valid_r2, p=valid_pr))
139-
else:
140-
r2_i = self.rng_optimization.integers(0, NP)
141-
152+
r2_i = int(self.rng_optimization.choice(np.arange(NP), p=pr))
153+
count = 0
154+
while (r2_i == i or r2_i == pb_i or r2_i == r1_i) and count < 25:
155+
r2_i = int(self.rng_optimization.choice(np.arange(NP), p=pr))
156+
count += 1
142157
r2[i] = r2_i
143158
x2[i] = x[r2_i]
144159

@@ -150,26 +165,26 @@ def iterate(self, x=None, y=None, args=None):
150165

151166
us = np.copy(x)
152167

153-
CrossExponential = self.rng_optimization.random() < 0.5
168+
perform_binomial_crossover = self.rng_optimization.random() < 0.5
154169

155-
# ^ Bug copied from RL-DAS implementation
156-
if CrossExponential:
170+
if perform_binomial_crossover:
157171
# Executes Binomial logic with Cr_b when CrossExponential is True -> RL-DAS bug compatibility
158172
for i in range(NP):
159173
jrand = self.rng_optimization.integers(self.ndim_problem)
160174
for j in range(self.ndim_problem):
161175
if self.rng_optimization.random() < Cr_b or j == jrand:
162176
us[i, j] = vs[i, j]
163177
else:
164-
# Executes Exponential logic with Cr when CrossExponential is False -> RL-DAS bug compatibility
165-
for i in range(NP):
166-
n1 = self.rng_optimization.integers(self.ndim_problem)
167-
n2 = 1
168-
while self.rng_optimization.random() < Cr[i] and n2 < self.ndim_problem:
169-
n2 += 1
170-
for j in range(n2):
171-
idx = (n1 + j) % self.ndim_problem
172-
us[i, idx] = vs[i, idx]
178+
# Executes RL-DAS vectorized "Exponential_" logic with Cr:
179+
# - pick random start per individual, mark all dims >= start
180+
# - independently copy from vs where random > Cr (inverted probability, no wrapping)
181+
dim = self.ndim_problem
182+
L = self.rng_optimization.integers(dim, size=NP)
183+
L = L.repeat(dim).reshape(NP, dim) <= np.arange(dim)
184+
rvs = self.rng_optimization.random((NP, dim))
185+
Cr_2d = Cr.repeat(dim).reshape(NP, dim)
186+
mask = np.where(rvs > Cr_2d, L, False)
187+
us = np.where(mask, vs, us)
173188

174189
# BUG 5: Hardcoded [-100, 100] bounds
175190
out_of_bounds = (us < -100) | (us > 100)
@@ -193,41 +208,45 @@ def iterate(self, x=None, y=None, args=None):
193208
)
194209
better_idx = np.where(new_y < y)[0]
195210

196-
if len(better_idx) > 0:
197-
# Update Archive Probability (pa)
198-
df = y[better_idx] - new_y[better_idx]
199-
arc_used_better = use_arc[better_idx]
200-
201-
# BUG Swapped Archive metrics -> from RL-DAS compatibility
202-
df_P = np.sum(df[arc_used_better]) # Population gets archive improvements
203-
df_A = np.sum(df[~arc_used_better]) # Archive gets population improvements
211+
SF = np.array([])
212+
SCr = np.array([])
213+
df = np.array([])
204214

205-
n_A_total = np.sum(use_arc)
206-
n_P_total = NP - n_A_total
215+
if len(better_idx) > 0:
216+
# Logical Change 4: Normalized df calculation
217+
df = (y[better_idx] - new_y[better_idx]) / (y[better_idx] + 1e-9)
207218

208-
mean_A = df_A / n_A_total if n_A_total > 0 else 0
209-
mean_P = df_P / n_P_total if n_P_total > 0 else 0
219+
arc_used_better = use_arc[better_idx]
210220

211-
if mean_A + mean_P > 0:
212-
self.pa = mean_A / (mean_A + mean_P)
213-
self.pa = np.clip(self.pa, 0.1, 0.9) # Clipping rule applied
221+
fp = np.sum(df[arc_used_better])
222+
fa = np.sum(df[~arc_used_better])
223+
na = np.sum(arc_used_better)
214224

215-
# Update Archive
216-
success_x = x[better_idx]
217-
self.archive = np.vstack([self.archive, success_x])
218-
if len(self.archive) > self.NA:
219-
# Remove random individuals
220-
remove_idx = self.rng_optimization.choice(
221-
len(self.archive), len(self.archive) - self.NA, replace=False
225+
if na == 0 or fa == 0:
226+
self.pa = 0.5
227+
else:
228+
self.pa = (fa / (na + 1e-15)) / (
229+
(fa / (na + 1e-15)) + (fp / (NP - na + 1e-15))
222230
)
223-
self.archive = np.delete(self.archive, remove_idx, axis=0)
231+
self.pa = np.clip(self.pa, 0.1, 0.9)
232+
233+
# Logical Change 6: One-by-one Archive updating/trimming
234+
for i in better_idx:
235+
if len(self.archive) < self.NA:
236+
self.archive = np.vstack([self.archive, x[i]])
237+
else:
238+
replace_idx = self.rng_optimization.integers(0, len(self.archive))
239+
self.archive[replace_idx] = x[i]
224240

225-
# Record successes for memory update
226-
self._update_memory(F[better_idx], Cr[better_idx], df)
241+
SF = F[better_idx]
242+
SCr = Cr[better_idx]
227243

228244
x[better_idx] = us[better_idx]
229245
y[better_idx] = new_y[better_idx]
230246

247+
        # Update memory every generation (even when no improvements; pa is only updated when improvements occur)
248+
self._update_memory(SF, SCr, df)
249+
231250
# NLPSR (Non-Linear Population Size Reduction)
232251
FEs = self.n_function_evaluations
233252
MaxFEs = self.max_function_evaluations
@@ -247,6 +266,9 @@ def iterate(self, x=None, y=None, args=None):
247266
y = y[sort_idx_final][:new_NP]
248267
self.n_individuals = new_NP
249268
self.NA = int(max(new_NP * 2.1, self.Nmin))
269+
# Slice archive if it exceeds the new reduced NA
270+
if len(self.archive) > self.NA:
271+
self.archive = self.archive[: self.NA]
250272

251273
self._n_generations += 1
252274
return x, y
@@ -258,6 +280,8 @@ def optimize(self, fitness_function=None, args=None):
258280
y = self.start_conditions.get("y", None)
259281

260282
x, y = self.initialize(args, x, y)
283+
# RL-DAS resets archive-use probability once per scheduled optimizer run.
284+
self.pa = 0.5
261285

262286
while True:
263287
old_evals = self.n_function_evaluations
@@ -271,7 +295,6 @@ def optimize(self, fitness_function=None, args=None):
271295
"MF": self.MF[:],
272296
"MCr": self.MCr[:],
273297
"k_idx": self.k_idx,
274-
"pa": self.pa,
275298
}
276299
)
277300
if self._check_terminations() or self.n_function_evaluations == old_evals:
@@ -295,7 +318,7 @@ def set_data(
295318
start_conditions = {}
296319
start_conditions.update({"x": x[indices], "y": y[indices]})
297320
self.start_conditions = start_conditions
298-
for var in ["archive", "MF", "MCr", "k_idx", "pa"]:
321+
for var in ["archive", "MF", "MCr", "k_idx"]:
299322
if var in kwargs:
300323
setattr(self, var, kwargs[var])
301324
self.best_so_far_x = kwargs.get("best_x", None)

0 commit comments

Comments
 (0)