Skip to content

Commit 0924aeb

Browse files
author
larshertel
committed
Fixed get_best_result for agg=True; fixed the GPyOpt allows_repetition flag; added an easy GPyOpt repeat test
1 parent e76d75e commit 0924aeb

File tree

4 files changed

+57
-41
lines changed

4 files changed

+57
-41
lines changed

sherpa/algorithms/bayesian_optimization.py

+1-2
Original file line numberDiff line numberDiff line change
@@ -49,7 +49,7 @@ class GPyOpt(Algorithm):
4949
verbosity (bool): Print models and other options during the optimization.
5050
max_num_trials (int): maximum number of trials to run for.
5151
"""
52-
allows_repetition = False
52+
allows_repetition = True
5353

5454
def __init__(self, model_type='GP', num_initial_data_points='infer',
5555
initial_data_points=[], acquisition_type='EI',
@@ -131,7 +131,6 @@ def _num_completed_trials(cls, results):
131131

132132
def _generate_bayesopt_batch(self, X, y, lower_is_better, domain):
133133
y_adjusted = y * (-1)**(not lower_is_better)
134-
135134
bo_step = gpyopt_package.methods.BayesianOptimization(f=None,
136135
domain=domain,
137136
X=X, Y=y_adjusted,

sherpa/algorithms/core.py

+2-1
Original file line numberDiff line numberDiff line change
@@ -154,7 +154,8 @@ def get_best_result(self, parameters, results, lower_is_better):
154154
parameters=parameters,
155155
min_count=self.num_times)
156156

157-
print(agg_results)
157+
if agg_results.empty:
158+
return {}
158159

159160
# Get best result so far
160161
best_idx = (agg_results.loc[:, 'Objective'].idxmin()

tests/test_algorithms.py

+18
Original file line numberDiff line numberDiff line change
@@ -473,6 +473,24 @@ def test_repeat_get_best_result():
473473
assert study.get_best_result()['a'] == 1 # not 3
474474

475475

476+
def test_repeat_get_best_result_called_midway():
477+
parameters = [sherpa.Choice('a', [1,2,3])]
478+
gs = sherpa.algorithms.GridSearch()
479+
gs = sherpa.algorithms.Repeat(algorithm=gs, num_times=3)
480+
study = sherpa.Study(parameters=parameters, algorithm=gs,
481+
lower_is_better=True,
482+
disable_dashboard=True)
483+
484+
objectives = [2.1,2.2,2.3, 9., 0.1, 9.1, 1.1,1.2,1.3]
485+
expected = [None, None, 1, 1, 1, 1, 1, 1, 3]
486+
487+
for exp, obj, trial in zip(expected, objectives, study):
488+
study.add_observation(trial, objective=obj)
489+
study.finalize(trial)
490+
assert study.get_best_result().get('a') == exp
491+
492+
493+
476494
def test_repeat_results_aggregation():
477495
parameters = [sherpa.Continuous('myparam', [0, 1])]
478496

tests/test_gpyopt.py

+36-38
Original file line numberDiff line numberDiff line change
@@ -368,41 +368,39 @@ def obj_func(x, y, z):
368368
assert rval['z'] == 5
369369

370370

371-
# def test_noisy_parabola():
372-
# def f(x, sd=1):
373-
# y = (x - 3) ** 2 + 10.
374-
# if sd == 0:
375-
# return y
376-
# else:
377-
# return y + numpy.random.normal(loc=0., scale=sd,
378-
# size=numpy.array(x).shape)
379-
#
380-
# parameters = [sherpa.Continuous('x1', [0., 7.])]
381-
#
382-
# bayesian_optimization = GPyOpt(max_concurrent=1,
383-
# max_num_trials=20,
384-
# model_type='GP',
385-
# acquisition_type='EI')
386-
# rep = Repeat(algorithm=bayesian_optimization,
387-
# num_times=5)
388-
# study = sherpa.Study(algorithm=rep,
389-
# parameters=parameters,
390-
# lower_is_better=True,
391-
# disable_dashboard=True)
392-
#
393-
# for trial in study:
394-
# print("Trial {}:\t{}".format(trial.id, trial.parameters))
395-
#
396-
# fval = f(trial.parameters['x1'], sd=1)
397-
# print("Function Value: {}".format(fval))
398-
# study.add_observation(trial=trial,
399-
# iteration=1,
400-
# objective=fval)
401-
# study.finalize(trial, status='COMPLETED')
402-
# rval = study.get_best_result()
403-
# print(rval)
404-
# # assert numpy.sqrt((rval['Objective'] - 3.)**2) < 0.2
405-
406-
#
407-
# if __name__ == '__main__':
408-
# test_noisy_parabola()
371+
def test_noisy_parabola():
372+
def f(x, sd=1):
373+
y = (x - 3) ** 2 + 10.
374+
if sd == 0:
375+
return y
376+
else:
377+
return y + numpy.random.normal(loc=0., scale=sd,
378+
size=numpy.array(x).shape)
379+
380+
parameters = [sherpa.Continuous('x1', [0., 7.])]
381+
382+
bayesian_optimization = GPyOpt(max_concurrent=1,
383+
max_num_trials=5,
384+
model_type='GP',
385+
acquisition_type='EI')
386+
rep = Repeat(algorithm=bayesian_optimization,
387+
num_times=3,
388+
agg=True)
389+
study = sherpa.Study(algorithm=rep,
390+
parameters=parameters,
391+
lower_is_better=True,
392+
disable_dashboard=True)
393+
394+
for trial in study:
395+
# print("Trial {}:\t{}".format(trial.id, trial.parameters))
396+
397+
fval = f(trial.parameters['x1'], sd=1)
398+
# print("Function Value: {}".format(fval))
399+
study.add_observation(trial=trial,
400+
iteration=1,
401+
objective=fval)
402+
study.finalize(trial, status='COMPLETED')
403+
# rval = study.get_best_result()
404+
# print(rval)
405+
print(study.results.query("Status=='COMPLETED'"))
406+
# assert numpy.sqrt((rval['Objective'] - 3.)**2) < 0.2

0 commit comments

Comments (0)