23
23
from botorch .acquisition import analytic , monte_carlo , multi_objective
24
24
from botorch .acquisition .acquisition import AcquisitionFunction
25
25
from botorch .acquisition .fixed_feature import FixedFeatureAcquisitionFunction
26
+ from botorch .acquisition .joint_entropy_search import qJointEntropySearch
26
27
from botorch .acquisition .knowledge_gradient import (
27
28
_get_value_function ,
28
29
qKnowledgeGradient ,
@@ -468,6 +469,90 @@ def gen_batch_initial_conditions(
468
469
return batch_initial_conditions
469
470
470
471
472
def gen_optimal_input_initial_conditions(
    acq_function: AcquisitionFunction,
    bounds: Tensor,
    q: int,
    num_restarts: int,
    raw_samples: int,
    fixed_features: dict[int, float] | None = None,
    options: dict[str, bool | float | int] | None = None,
    inequality_constraints: list[tuple[Tensor, Tensor, float]] | None = None,
    equality_constraints: list[tuple[Tensor, Tensor, float]] | None = None,
) -> Tensor:
    r"""Generate initial conditions by perturbing the sampled optima stored on
    the acquisition function.

    A fraction ``frac_random`` of the ``raw_samples`` candidate q-batches is
    drawn from the (optionally constrained) search space; the remainder is
    generated by Gaussian perturbation of ``acq_function.optimal_inputs``
    (the sampled posterior optimizers used by entropy-search-style
    acquisition functions). The final ``num_restarts`` initial conditions
    are selected from the candidate set by Boltzmann sampling on the
    acquisition values.

    Args:
        acq_function: The acquisition function to be optimized. Must have an
            ``optimal_inputs`` attribute (e.g. ``qJointEntropySearch``).
        bounds: A `2 x d` tensor of lower and upper bounds for each column.
        q: The number of candidates in each q-batch.
        num_restarts: The number of starting points for multistart
            acquisition function optimization.
        raw_samples: The number of raw candidate points to consider in the
            initialization heuristic.
        fixed_features: A map `{feature_index: value}` for features that
            should be fixed to a particular value during generation.
            NOTE(review): currently unused by this strategy — confirm intent.
        options: Options for initial condition generation. Recognized keys:
            ``frac_random`` (fraction of purely random candidates, default
            0.0), ``batch_limit`` (chunk size for acquisition evaluation),
            ``n_burnin`` / ``n_thinning`` (polytope sampler settings),
            ``sample_around_best`` / ``sample_around_best_sigma`` and
            ``eta`` (Boltzmann temperature, default 2.0).
        inequality_constraints: A list of tuples (indices, coefficients, rhs),
            with each tuple encoding an inequality constraint of the form
            `\sum_i (X[indices[i]] * coefficients[i]) >= rhs`.
        equality_constraints: A list of tuples (indices, coefficients, rhs),
            with each tuple encoding an equality constraint of the form
            `\sum_i (X[indices[i]] * coefficients[i]) = rhs`.

    Returns:
        A `num_restarts x q x d` tensor of initial conditions.

    Raises:
        AttributeError: If ``acq_function`` has no ``optimal_inputs``.
        ValueError: If ``frac_random`` is outside ``[0, 1]``.
    """
    # Guard against `options=None` — all lookups below go through .get().
    options = options or {}
    device = bounds.device
    if not hasattr(acq_function, "optimal_inputs"):
        raise AttributeError(
            "gen_optimal_input_initial_conditions can only be used with "
            "an AcquisitionFunction that has an optimal_inputs attribute."
        )
    frac_random: float = options.get("frac_random", 0.0)
    if not 0 <= frac_random <= 1:
        # The check above is inclusive, so the message advertises [0, 1].
        raise ValueError(
            f"frac_random must take on values in [0, 1]. Value: {frac_random}"
        )

    batch_limit = options.get("batch_limit")
    # Flatten any batch dimensions of the sampled optima into a single
    # `num_optima x d` suggestion matrix.
    num_optima = acq_function.optimal_inputs.shape[:-1].numel()
    suggestions = acq_function.optimal_inputs.reshape(num_optima, -1)
    X = torch.empty(0, q, bounds.shape[1], dtype=bounds.dtype)
    num_random = round(raw_samples * frac_random)
    if num_random > 0:
        # Purely random candidates, drawn from the constrained polytope.
        X_rnd = sample_q_batches_from_polytope(
            n=num_random,
            q=q,
            bounds=bounds,
            n_burnin=options.get("n_burnin", 10000),
            n_thinning=options.get("n_thinning", 32),
            equality_constraints=equality_constraints,
            inequality_constraints=inequality_constraints,
        )
        X = torch.cat((X, X_rnd))

    if num_random < raw_samples:
        # Remaining candidates: Gaussian perturbations of the sampled optima.
        X_perturbed = sample_points_around_best(
            acq_function=acq_function,
            n_discrete_points=q * (raw_samples - num_random),
            sigma=options.get("sample_around_best_sigma", 1e-2),
            bounds=bounds,
            best_X=suggestions,
        )
        X_perturbed = X_perturbed.view(
            raw_samples - num_random, q, bounds.shape[-1]
        ).cpu()
        X = torch.cat((X, X_perturbed))

    if options.get("sample_around_best", False):
        # Optionally also perturb the incumbent best observed points.
        X_best = sample_points_around_best(
            acq_function=acq_function,
            n_discrete_points=q * raw_samples,
            sigma=options.get("sample_around_best_sigma", 1e-2),
            bounds=bounds,
        )
        X_best = X_best.view(raw_samples, q, bounds.shape[-1]).cpu()
        X = torch.cat((X, X_best))

    with torch.no_grad():
        if batch_limit is None:
            batch_limit = X.shape[0]
        # Evaluate the acquisition function on `X` using `batch_limit`-sized
        # chunks to bound peak memory usage.
        acq_vals = torch.cat(
            [
                acq_function(x_.to(device=device)).cpu()
                for x_ in X.split(split_size=batch_limit, dim=0)
            ],
            dim=0,
        )
    idx = boltzmann_sample(
        function_values=acq_vals,
        num_samples=num_restarts,
        eta=options.get("eta", 2.0),
    )
    # set the respective initial conditions to the sampled optimizers
    return X[idx]
554
+
555
+
471
556
def gen_one_shot_kg_initial_conditions (
472
557
acq_function : qKnowledgeGradient ,
473
558
bounds : Tensor ,
@@ -602,59 +687,59 @@ def gen_one_shot_hvkg_initial_conditions(
602
687
) -> Tensor | None :
603
688
r"""Generate a batch of smart initializations for qHypervolumeKnowledgeGradient.
604
689
605
- This function generates initial conditions for optimizing one-shot HVKG using
606
- the hypervolume maximizing set (of fixed size) under the posterior mean.
607
- Intutively, the hypervolume maximizing set of the fantasized posterior mean
608
- will often be close to a hypervolume maximizing set under the current posterior
609
- mean. This function uses that fact to generate the initial conditions
610
- for the fantasy points. Specifically, a fraction of `1 - frac_random` (see
611
- options) of the restarts are generated by learning the hypervolume maximizing sets
612
- under the current posterior mean, where each hypervolume maximizing set is
613
- obtained from maximizing the hypervolume from a different starting point. Given
614
- a hypervolume maximizing set, the `q` candidate points are selected using to the
615
- standard initialization strategy in `gen_batch_initial_conditions`, with the fixed
616
- hypervolume maximizing set. The remaining `frac_random` restarts fantasy points
617
- as well as all `q` candidate points are chosen according to the standard
618
- initialization strategy in `gen_batch_initial_conditions`.
619
-
620
- Args:
621
- acq_function: The qKnowledgeGradient instance to be optimized.
622
- bounds: A `2 x d` tensor of lower and upper bounds for each column of
623
- task features.
624
- q: The number of candidates to consider.
625
- num_restarts: The number of starting points for multistart acquisition
626
- function optimization.
627
- raw_samples: The number of raw samples to consider in the initialization
628
- heuristic.
629
- fixed_features: A map `{feature_index: value}` for features that
630
- should be fixed to a particular value during generation.
631
- options: Options for initial condition generation. These contain all
632
- settings for the standard heuristic initialization from
633
- `gen_batch_initial_conditions`. In addition, they contain
634
- `frac_random` (the fraction of fully random fantasy points),
635
- `num_inner_restarts` and `raw_inner_samples` (the number of random
636
- restarts and raw samples for solving the posterior objective
637
- maximization problem, respectively) and `eta` (temperature parameter
638
- for sampling heuristic from posterior objective maximizers).
639
- inequality constraints: A list of tuples (indices, coefficients, rhs),
640
- with each tuple encoding an inequality constraint of the form
641
- `\sum_i (X[indices[i]] * coefficients[i]) >= rhs`.
642
- equality constraints: A list of tuples (indices, coefficients, rhs),
643
- with each tuple encoding an inequality constraint of the form
644
- `\sum_i (X[indices[i]] * coefficients[i]) = rhs`.
645
-
646
- Returns:
647
- A `num_restarts x q' x d` tensor that can be used as initial conditions
648
- for `optimize_acqf()`. Here `q' = q + num_fantasies` is the total number
649
- of points (candidate points plus fantasy points).
650
-
651
- Example:
652
- >>> qHVKG = qHypervolumeKnowledgeGradient(model, ref_point)
653
- >>> bounds = torch.tensor([[0., 0.], [1., 1.]])
654
- >>> Xinit = gen_one_shot_hvkg_initial_conditions(
655
- >>> qHVKG, bounds, q=3, num_restarts=10, raw_samples=512,
656
- >>> options={"frac_random": 0.25},
657
- >>> )
690
+ This function generates initial conditions for optimizing one-shot HVKG using
691
+ the hypervolume maximizing set (of fixed size) under the posterior mean.
692
+ Intuitively, the hypervolume maximizing set of the fantasized posterior mean
693
+ will often be close to a hypervolume maximizing set under the current posterior
694
+ mean. This function uses that fact to generate the initial conditions
695
+ for the fantasy points. Specifically, a fraction of `1 - frac_random` (see
696
+ options) of the restarts are generated by learning the hypervolume maximizing sets
697
+ under the current posterior mean, where each hypervolume maximizing set is
698
+ obtained from maximizing the hypervolume from a different starting point. Given
699
+ a hypervolume maximizing set, the `q` candidate points are selected according to the
700
+ standard initialization strategy in `gen_batch_initial_conditions`, with the fixed
701
+ hypervolume maximizing set. The remaining `frac_random` restarts fantasy points
702
+ as well as all `q` candidate points are chosen according to the standard
703
+ initialization strategy in `gen_batch_initial_conditions`.
704
+
705
+ Args:
706
+ acq_function: The qKnowledgeGradient instance to be optimized.
707
+ bounds: A `2 x d` tensor of lower and upper bounds for each column of
708
+ task features.
709
+ q: The number of candidates to consider.
710
+ num_restarts: The number of starting points for multistart acquisition
711
+ function optimization.
712
+ raw_samples: The number of raw samples to consider in the initialization
713
+ heuristic.
714
+ fixed_features: A map `{feature_index: value}` for features that
715
+ should be fixed to a particular value during generation.
716
+ options: Options for initial condition generation. These contain all
717
+ settings for the standard heuristic initialization from
718
+ `gen_batch_initial_conditions`. In addition, they contain
719
+ `frac_random` (the fraction of fully random fantasy points),
720
+ `num_inner_restarts` and `raw_inner_samples` (the number of random
721
+ restarts and raw samples for solving the posterior objective
722
+ maximization problem, respectively) and `eta` (temperature parameter
723
+ for sampling heuristic from posterior objective maximizers).
724
+ inequality constraints: A list of tuples (indices, coefficients, rhs),
725
+ with each tuple encoding an inequality constraint of the form
726
+ `\sum_i (X[indices[i]] * coefficients[i]) >= rhs`.
727
+ equality constraints: A list of tuples (indices, coefficients, rhs),
728
+ with each tuple encoding an equality constraint of the form
729
+ `\sum_i (X[indices[i]] * coefficients[i]) = rhs`.
730
+
731
+ Returns:
732
+ A `num_restarts x q' x d` tensor that can be used as initial conditions
733
+ for `optimize_acqf()`. Here `q' = q + num_fantasies` is the total number
734
+ of points (candidate points plus fantasy points).
735
+
736
+ Example:
737
+ >>> qHVKG = qHypervolumeKnowledgeGradient(model, ref_point)
738
+ >>> bounds = torch.tensor([[0., 0.], [1., 1.]])
739
+ >>> Xinit = gen_one_shot_hvkg_initial_conditions(
740
+ >>> qHVKG, bounds, q=3, num_restarts=10, raw_samples=512,
741
+ >>> options={"frac_random": 0.25},
742
+ >>> )
658
743
"""
659
744
from botorch .optim .optimize import optimize_acqf
660
745
@@ -1136,6 +1221,7 @@ def sample_points_around_best(
1136
1221
best_pct : float = 5.0 ,
1137
1222
subset_sigma : float = 1e-1 ,
1138
1223
prob_perturb : float | None = None ,
1224
+ best_X : Tensor | None = None ,
1139
1225
) -> Tensor | None :
1140
1226
r"""Find best points and sample nearby points.
1141
1227
@@ -1154,60 +1240,62 @@ def sample_points_around_best(
1154
1240
An optional `n_discrete_points x d`-dim tensor containing the
1155
1241
sampled points. This is None if no baseline points are found.
1156
1242
"""
1157
- X = get_X_baseline (acq_function = acq_function )
1158
- if X is None :
1159
- return
1160
- with torch .no_grad ():
1161
- try :
1162
- posterior = acq_function .model .posterior (X )
1163
- except AttributeError :
1164
- warnings .warn (
1165
- "Failed to sample around previous best points." ,
1166
- BotorchWarning ,
1167
- stacklevel = 3 ,
1168
- )
1243
+ if best_X is None :
1244
+ X = get_X_baseline (acq_function = acq_function )
1245
+ if X is None :
1169
1246
return
1170
- mean = posterior .mean
1171
- while mean .ndim > 2 :
1172
- # take average over batch dims
1173
- mean = mean .mean (dim = 0 )
1174
- try :
1175
- f_pred = acq_function .objective (mean )
1176
- # Some acquisition functions do not have an objective
1177
- # and for some acquisition functions the objective is None
1178
- except (AttributeError , TypeError ):
1179
- f_pred = mean
1180
- if hasattr (acq_function , "maximize" ):
1181
- # make sure that the optimiztaion direction is set properly
1182
- if not acq_function .maximize :
1183
- f_pred = - f_pred
1184
- try :
1185
- # handle constraints for EHVI-based acquisition functions
1186
- constraints = acq_function .constraints
1187
- if constraints is not None :
1188
- neg_violation = - torch .stack (
1189
- [c (mean ).clamp_min (0.0 ) for c in constraints ], dim = - 1
1190
- ).sum (dim = - 1 )
1191
- feas = neg_violation == 0
1192
- if feas .any ():
1193
- f_pred [~ feas ] = float ("-inf" )
1194
- else :
1195
- # set objective equal to negative violation
1196
- f_pred = neg_violation
1197
- except AttributeError :
1198
- pass
1199
- if f_pred .ndim == mean .ndim and f_pred .shape [- 1 ] > 1 :
1200
- # multi-objective
1201
- # find pareto set
1202
- is_pareto = is_non_dominated (f_pred )
1203
- best_X = X [is_pareto ]
1204
- else :
1205
- if f_pred .shape [- 1 ] == 1 :
1206
- f_pred = f_pred .squeeze (- 1 )
1207
- n_best = max (1 , round (X .shape [0 ] * best_pct / 100 ))
1208
- # the view() is to ensure that best_idcs is not a scalar tensor
1209
- best_idcs = torch .topk (f_pred , n_best ).indices .view (- 1 )
1210
- best_X = X [best_idcs ]
1247
+ with torch .no_grad ():
1248
+ try :
1249
+ posterior = acq_function .model .posterior (X )
1250
+ except AttributeError :
1251
+ warnings .warn (
1252
+ "Failed to sample around previous best points." ,
1253
+ BotorchWarning ,
1254
+ stacklevel = 3 ,
1255
+ )
1256
+ return
1257
+ mean = posterior .mean
1258
+ while mean .ndim > 2 :
1259
+ # take average over batch dims
1260
+ mean = mean .mean (dim = 0 )
1261
+ try :
1262
+ f_pred = acq_function .objective (mean )
1263
+ # Some acquisition functions do not have an objective
1264
+ # and for some acquisition functions the objective is None
1265
+ except (AttributeError , TypeError ):
1266
+ f_pred = mean
1267
+ if hasattr (acq_function , "maximize" ):
1268
+ # make sure that the optimization direction is set properly
1269
+ if not acq_function .maximize :
1270
+ f_pred = - f_pred
1271
+ try :
1272
+ # handle constraints for EHVI-based acquisition functions
1273
+ constraints = acq_function .constraints
1274
+ if constraints is not None :
1275
+ neg_violation = - torch .stack (
1276
+ [c (mean ).clamp_min (0.0 ) for c in constraints ], dim = - 1
1277
+ ).sum (dim = - 1 )
1278
+ feas = neg_violation == 0
1279
+ if feas .any ():
1280
+ f_pred [~ feas ] = float ("-inf" )
1281
+ else :
1282
+ # set objective equal to negative violation
1283
+ f_pred = neg_violation
1284
+ except AttributeError :
1285
+ pass
1286
+ if f_pred .ndim == mean .ndim and f_pred .shape [- 1 ] > 1 :
1287
+ # multi-objective
1288
+ # find pareto set
1289
+ is_pareto = is_non_dominated (f_pred )
1290
+ best_X = X [is_pareto ]
1291
+ else :
1292
+ if f_pred .shape [- 1 ] == 1 :
1293
+ f_pred = f_pred .squeeze (- 1 )
1294
+ n_best = max (1 , round (X .shape [0 ] * best_pct / 100 ))
1295
+ # the view() is to ensure that best_idcs is not a scalar tensor
1296
+ best_idcs = torch .topk (f_pred , n_best ).indices .view (- 1 )
1297
+ best_X = X [best_idcs ]
1298
+
1211
1299
use_perturbed_sampling = best_X .shape [- 1 ] >= 20 or prob_perturb is not None
1212
1300
n_trunc_normal_points = (
1213
1301
n_discrete_points // 2 if use_perturbed_sampling else n_discrete_points
0 commit comments