Commit fcb9584

Merge branch 'main' into main

2 parents aa6f5cf + eaa6fb2

File tree: 12 files changed, +2407 -4324 lines


botorch/acquisition/input_constructors.py (+1)

@@ -843,6 +843,7 @@ def construct_inputs_qNEHVI(
         "cache_pending": kwargs.get("cache_pending", True),
         "max_iep": kwargs.get("max_iep", 0),
         "incremental_nehvi": kwargs.get("incremental_nehvi", True),
+        "cache_root": kwargs.get("cache_root", True),
     }
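This exposes cache_root through the qNEHVI input constructor instead of silently dropping it. A minimal usage sketch (hypothetical `model`, `training_data`, and `objective_thresholds` built elsewhere; only the kwarg shown in the diff is assumed):

    from botorch.acquisition.input_constructors import construct_inputs_qNEHVI

    # With this change, cache_root is forwarded to the acquisition function
    # (defaulting to True) rather than being ignored.
    kwargs = construct_inputs_qNEHVI(
        model=model,
        training_data=training_data,
        objective_thresholds=objective_thresholds,
        cache_root=False,  # disable caching of the root decomposition
    )
    assert kwargs["cache_root"] is False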

botorch/acquisition/multi_objective/objective.py (-1)

@@ -82,7 +82,6 @@ def __init__(
         r"""Initialize Objective.

         Args:
-            weights: `m'`-dim tensor of outcome weights.
             outcomes: A list of the `m'` indices that the weights should be
                 applied to.
             num_outcomes: The total number of outcomes `m`

botorch/exceptions/warnings.py (+6)

@@ -49,3 +49,9 @@ class BotorchTensorDimensionWarning(BotorchWarning):
     r"""Warning raised when a tensor possibly violates a botorch convention."""

     pass
+
+
+class UserInputWarning(BotorchWarning):
+    r"""Warning raised when a potential issue is detected with user provided inputs."""
+
+    pass
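The new class slots into the existing warning hierarchy, so filters that target BotorchWarning also catch it. A minimal sketch using only what the diff adds:

    import warnings

    from botorch.exceptions.warnings import BotorchWarning, UserInputWarning

    with warnings.catch_warnings(record=True) as ws:
        warnings.simplefilter("always")
        warnings.warn("potential issue with user inputs", UserInputWarning)

    # UserInputWarning subclasses BotorchWarning, so both checks hold.
    assert issubclass(ws[-1].category, UserInputWarning)
    assert issubclass(ws[-1].category, BotorchWarning)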

botorch/models/transforms/input.py (+41 -1)

@@ -22,6 +22,7 @@

 import torch
 from botorch.exceptions.errors import BotorchTensorDimensionError
+from botorch.exceptions.warnings import UserInputWarning
 from botorch.models.transforms.utils import subset_transform
 from botorch.models.utils import fantasize
 from botorch.utils.rounding import approximate_round, OneHotArgmaxSTE, RoundSTE

@@ -505,6 +506,7 @@ def __init__(
         transform_on_fantasize: bool = True,
         reverse: bool = False,
         min_range: float = 1e-8,
+        learn_bounds: Optional[bool] = None,
     ) -> None:
         r"""Normalize the inputs to the unit cube.

@@ -527,7 +529,13 @@ def __init__(
                 the inputs.
             min_range: Amount of noise to add to the range to ensure no division by
                 zero errors.
+            learn_bounds: Whether to learn the bounds in train mode. Defaults
+                to False if bounds are provided, otherwise defaults to True.
         """
+        if learn_bounds is not None:
+            self.learn_coefficients = learn_bounds
+        else:
+            self.learn_coefficients = bounds is None
         transform_dimension = d if indices is None else len(indices)
         if bounds is not None:
             if indices is not None and bounds.size(-1) == d:

@@ -544,7 +552,12 @@ def __init__(
         else:
             coefficient = torch.ones(*batch_shape, 1, transform_dimension)
             offset = torch.zeros(*batch_shape, 1, transform_dimension)
-            self.learn_coefficients = True
+            if self.learn_coefficients is False:
+                warn(
+                    "learn_bounds is False and no bounds were provided. The bounds "
+                    "will not be updated and the transform will be a no-op.",
+                    UserInputWarning,
+                )
         super().__init__(
             d=d,
             coefficient=coefficient,

@@ -586,6 +599,21 @@ def _update_coefficients(self, X) -> None:
         self._coefficient = torch.amax(X, dim=reduce_dims).unsqueeze(-2) - self.offset
         self._coefficient.clamp_(min=self.min_range)

+    def get_init_args(self) -> Dict[str, Any]:
+        r"""Get the arguments necessary to construct an exact copy of the transform."""
+        return {
+            "d": self._d,
+            "indices": getattr(self, "indices", None),
+            "bounds": self.bounds,
+            "batch_shape": self.batch_shape,
+            "transform_on_train": self.transform_on_train,
+            "transform_on_eval": self.transform_on_eval,
+            "transform_on_fantasize": self.transform_on_fantasize,
+            "reverse": self.reverse,
+            "min_range": self.min_range,
+            "learn_bounds": self.learn_bounds,
+        }
+

 class InputStandardize(AffineInputTransform):
     r"""Standardize inputs (zero mean, unit variance).

@@ -796,6 +824,18 @@ def equals(self, other: InputTransform) -> bool:
             and self.tau == other.tau
         )

+    def get_init_args(self) -> Dict[str, Any]:
+        r"""Get the arguments necessary to construct an exact copy of the transform."""
+        return {
+            "integer_indices": self.integer_indices,
+            "categorical_features": self.categorical_features,
+            "transform_on_train": self.transform_on_train,
+            "transform_on_eval": self.transform_on_eval,
+            "transform_on_fantasize": self.transform_on_fantasize,
+            "approximate": self.approximate,
+            "tau": self.tau,
+        }
+

 class Log10(ReversibleInputTransform, Module):
     r"""A base-10 log transformation."""

scripts/run_tutorials.py (+3 -23)

@@ -10,16 +10,13 @@
 import datetime
 import os
 import subprocess
-import tempfile
 import time
 from pathlib import Path
 from subprocess import CalledProcessError
 from typing import Any, Dict, Optional, Tuple

-import nbformat
 import pandas as pd
 from memory_profiler import memory_usage
-from nbconvert import PythonExporter


 IGNORE_ALWAYS = {  # ignored in smoke tests and full runs

@@ -30,7 +27,6 @@
 RUN_IF_SMOKE_TEST_IGNORE_IF_STANDARD = {  # only used in smoke tests
     "thompson_sampling.ipynb",  # very slow without KeOps + GPU
     "composite_mtbo.ipynb",  # TODO: very slow, figure out if we can make it faster
-    "Multi_objective_multi_fidelity_BO.ipynb",  # TODO: very slow, speed up
     # Causing the tutorials to crash when run without smoke test. Likely OOM.
     # Fix planned.
     "constraint_active_search.ipynb",

@@ -65,33 +61,18 @@ def get_output_file_path(smoke_test: bool) -> str:
     return fname


-def parse_ipynb(file: Path) -> str:
-    with open(file, "r") as nb_file:
-        nb_str = nb_file.read()
-    nb = nbformat.reads(nb_str, nbformat.NO_CONVERT)
-    exporter = PythonExporter()
-    script, _ = exporter.from_notebook_node(nb)
-    return script
-
-
 def run_script(
-    script: str, timeout_minutes: int, env: Optional[Dict[str, str]] = None
+    tutorial: Path, timeout_minutes: int, env: Optional[Dict[str, str]] = None
 ) -> None:
-    # need to keep the file around & close it so subprocess does not run into I/O issues
-    with tempfile.NamedTemporaryFile(delete=False) as tf:
-        tf_name = tf.name
-    with open(tf_name, "w") as tmp_script:
-        tmp_script.write(script)
     if env is not None:
         env = {**os.environ, **env}
     run_out = subprocess.run(
-        ["ipython", tf_name],
+        ["papermill", tutorial, "|"],
         capture_output=True,
         text=True,
         env=env,
         timeout=timeout_minutes * 60,
     )
-    os.remove(tf_name)
     return run_out


@@ -103,13 +84,12 @@ def run_tutorial(
     them as a string, and returns runtime and memory information as a dict.
     """
     timeout_minutes = 5 if smoke_test else 30
-    script = parse_ipynb(tutorial)
     tic = time.monotonic()
     print(f"Running tutorial {tutorial.name}.")
     env = {"SMOKE_TEST": "True"} if smoke_test else None
     try:
         mem_usage, run_out = memory_usage(
-            (run_script, (script, timeout_minutes), {"env": env}),
+            (run_script, (tutorial, timeout_minutes), {"env": env}),
             retval=True,
             include_children=True,
         )
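The rewrite drops the nbformat/nbconvert round-trip through a temp file and hands the notebook to papermill directly, with "|" passed as papermill's output path as in the call above. A rough standalone equivalent of the new run_script body (example notebook name borrowed from the ignore list; exact papermill output-path semantics are as used in this commit, not verified here):

    import subprocess

    # Execute the notebook in a subprocess, mirroring run_script above.
    run_out = subprocess.run(
        ["papermill", "thompson_sampling.ipynb", "|"],
        capture_output=True,
        text=True,
        timeout=30 * 60,  # the non-smoke-test timeout used above
    )
    print(run_out.returncode)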

setup.py (+1)

@@ -23,6 +23,7 @@
     "kaleido",
     "matplotlib",
     "memory_profiler",
+    "papermill",
     "pykeops",
     "torchvision",
 ]

test/acquisition/test_input_constructors.py (+3)

@@ -715,6 +715,7 @@ def test_construct_inputs_qNEHVI(self):
         self.assertTrue(kwargs["cache_pending"])
         self.assertEqual(kwargs["max_iep"], 0)
         self.assertTrue(kwargs["incremental_nehvi"])
+        self.assertTrue(kwargs["cache_root"])

         # Test check for block designs
         mock_model = mock.Mock()

@@ -748,6 +749,7 @@ def test_construct_inputs_qNEHVI(self):
             cache_pending=False,
             max_iep=1,
             incremental_nehvi=False,
+            cache_root=False,
         )
         ref_point_expected = objective(objective_thresholds)
         self.assertTrue(torch.equal(kwargs["ref_point"], ref_point_expected))

@@ -768,6 +770,7 @@ def test_construct_inputs_qNEHVI(self):
         self.assertFalse(kwargs["cache_pending"])
         self.assertEqual(kwargs["max_iep"], 1)
         self.assertFalse(kwargs["incremental_nehvi"])
+        self.assertFalse(kwargs["cache_root"])

         # Test with risk measures.
         with self.assertRaisesRegex(UnsupportedError, "feasibility-weighted"):

test/exceptions/test_warnings.py (+3)

@@ -15,6 +15,7 @@
     InputDataWarning,
     OptimizationWarning,
     SamplingWarning,
+    UserInputWarning,
 )
 from botorch.utils.testing import BotorchTestCase

@@ -28,6 +29,7 @@ def test_botorch_warnings_hierarchy(self):
         self.assertIsInstance(OptimizationWarning(), BotorchWarning)
         self.assertIsInstance(SamplingWarning(), BotorchWarning)
         self.assertIsInstance(BotorchTensorDimensionWarning(), BotorchWarning)
+        self.assertIsInstance(UserInputWarning(), BotorchWarning)

     def test_botorch_warnings(self):
         for WarningClass in (

@@ -38,6 +40,7 @@ def test_botorch_warnings(self):
             InputDataWarning,
             OptimizationWarning,
             SamplingWarning,
+            UserInputWarning,
         ):
             with warnings.catch_warnings(record=True) as ws, settings.debug(True):
                 warnings.warn("message", WarningClass)

test/models/test_deterministic.py (+2 -2)

@@ -96,7 +96,7 @@ def test_AffineDeterministicModel(self):
             X = torch.rand(*shape)
             p = model.posterior(X)
             mean_exp = model.b + (X.unsqueeze(-1) * a).sum(dim=-2)
-            self.assertTrue(torch.equal(p.mean, mean_exp))
+            self.assertAllClose(p.mean, mean_exp)
         # # test two-dim output
         a = torch.rand(3, 2)
         model = AffineDeterministicModel(a)

@@ -105,7 +105,7 @@ def test_AffineDeterministicModel(self):
             X = torch.rand(*shape)
             p = model.posterior(X)
             mean_exp = model.b + (X.unsqueeze(-1) * a).sum(dim=-2)
-            self.assertTrue(torch.equal(p.mean, mean_exp))
+            self.assertAllClose(p.mean, mean_exp)
         # test subset output
         X = torch.rand(4, 3)
         subset_model = model.subset_output([0])
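Context for the assertion change: torch.equal demands bitwise-identical tensors, which is fragile when the expected value is recomputed along a different floating-point path. A small illustration (plain PyTorch, not from this commit):

    import torch

    x = torch.rand(1000)
    a = x.sum()
    b = x.flip(0).sum()  # same values, different summation order

    # Floating-point addition is not associative, so bitwise equality can
    # fail even though the results agree within normal tolerances.
    print(torch.equal(a, b))     # may be False
    print(torch.allclose(a, b))  # True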

test/models/transforms/test_input.py (+20 -2)

@@ -11,6 +11,7 @@
 import torch
 from botorch import settings
 from botorch.exceptions.errors import BotorchTensorDimensionError
+from botorch.exceptions.warnings import UserInputWarning
 from botorch.models.transforms.input import (
     AffineInputTransform,
     AppendFeatures,

@@ -155,16 +156,29 @@ def test_normalize(self):
             self.assertEqual(nlz._d, 2)
             self.assertEqual(nlz.mins.shape, torch.Size([3, 1, 2]))
             self.assertEqual(nlz.ranges.shape, torch.Size([3, 1, 2]))
+            self.assertTrue(nlz.equals(Normalize(**nlz.get_init_args())))

-            # basic init, fixed bounds
+            # learn_bounds=False with no bounds.
+            with self.assertWarnsRegex(UserInputWarning, "learn_bounds"):
+                Normalize(d=2, learn_bounds=False)
+
+            # learn_bounds=True with bounds provided.
             bounds = torch.zeros(2, 2, device=self.device, dtype=dtype)
+            nlz = Normalize(d=2, bounds=bounds, learn_bounds=True)
+            self.assertTrue(nlz.learn_bounds)
+            self.assertTrue(torch.equal(nlz.mins, bounds[..., 0:1, :]))
+            self.assertTrue(
+                torch.equal(nlz.ranges, bounds[..., 1:2, :] - bounds[..., 0:1, :])
+            )
+
+            # basic init, fixed bounds
             nlz = Normalize(d=2, bounds=bounds)
             self.assertFalse(nlz.learn_bounds)
             self.assertTrue(nlz.training)
             self.assertEqual(nlz._d, 2)
             self.assertTrue(torch.equal(nlz.mins, bounds[..., 0:1, :]))
             self.assertTrue(
-                torch.equal(nlz.mins, bounds[..., 1:2, :] - bounds[..., 0:1, :])
+                torch.equal(nlz.ranges, bounds[..., 1:2, :] - bounds[..., 0:1, :])
             )
             # with grad
             bounds.requires_grad = True

@@ -180,6 +194,7 @@ def test_normalize(self):
             nlz.eval()
             self.assertIsNone(nlz.coefficient.grad_fn)
             self.assertIsNone(nlz.offset.grad_fn)
+            self.assertTrue(nlz.equals(Normalize(**nlz.get_init_args())))

             # basic init, provided indices
             with self.assertRaises(ValueError):

@@ -204,6 +219,7 @@ def test_normalize(self):
                     == torch.tensor([0], dtype=torch.long, device=self.device)
                 ).all()
             )
+            self.assertTrue(nlz.equals(Normalize(**nlz.get_init_args())))

             # test .to
             other_dtype = torch.float if dtype == torch.double else torch.double

@@ -594,13 +610,15 @@ def test_round_transform(self):
         self.assertTrue(round_tf.training)
         self.assertFalse(round_tf.approximate)
         self.assertEqual(round_tf.tau, 1e-3)
+        self.assertTrue(round_tf.equals(Round(**round_tf.get_init_args())))

         # With tensor indices.
         round_tf = Round(
             integer_indices=torch.tensor(int_idcs, dtype=dtype, device=self.device),
             categorical_features=categorical_feats,
         )
         self.assertEqual(round_tf.integer_indices.tolist(), int_idcs)
+        self.assertTrue(round_tf.equals(Round(**round_tf.get_init_args())))

         # basic usage
         for batch_shape, approx, categorical_features in itertools.product(
