Refactor to Remove Lower and Upper Bounds from Models #459

Open. Wants to merge 17 commits into main.
23 changes: 19 additions & 4 deletions aepsych/acquisition/lookahead.py
@@ -204,6 +204,8 @@ def construct_inputs_local_lookahead(
 class GlobalLookaheadAcquisitionFunction(LookaheadAcquisitionFunction):
     def __init__(
         self,
+        lb: Tensor,
+        ub: Tensor,
         model: GPyTorchModel,
         lookahead_type: str = "levelset",
         target: Optional[float] = None,
@@ -215,9 +217,14 @@ def __init__(
         A global look-ahead acquisition function.

         Args:
-            model: The gpytorch model.
-            target: Threshold value to target in p-space.
-            Xq: (m x d) global reference set.
+            lb (Tensor): Lower bounds of the input space.
+            ub (Tensor): Upper bounds of the input space.
+            model (GPyTorchModel): The gpytorch model.
+            lookahead_type (str, optional): Type of lookahead to use. Defaults to "levelset".
+            target (float, optional): Threshold value to target in p-space.
+            posterior_transform (PosteriorTransform, optional): Posterior transform to use. Defaults to None.
+            query_set_size (int, optional): Size of the query set. Defaults to 256.
+            Xq (Tensor, optional): (m x d) global reference set. Defaults to None.
         """
         super().__init__(model=model, target=target, lookahead_type=lookahead_type)
         self.posterior_transform = posterior_transform
@@ -236,7 +243,7 @@ def __init__(
             assert int(query_set_size) == query_set_size  # make sure casting is safe
             # if the asserts above pass and Xq is None, query_set_size is not None so this is safe
             query_set_size = int(query_set_size)  # cast
-            Xq = make_scaled_sobol(model.lb, model.ub, query_set_size)
+            Xq = make_scaled_sobol(lb, ub, query_set_size)
         self.register_buffer("Xq", Xq)

     @t_batch_mode_transform(expected_q=1)
@@ -282,6 +289,8 @@ def _compute_acqf(self, Px: Tensor, P1: Tensor, P0: Tensor, py1: Tensor) -> Tensor:
 class ApproxGlobalSUR(GlobalSUR):
     def __init__(
         self,
+        lb: Tensor,
+        ub: Tensor,
         model: GPyTorchModel,
         lookahead_type="levelset",
         target: Optional[float] = None,
@@ -292,6 +301,8 @@ def __init__(
             lookahead_type == "levelset"
         ), f"ApproxGlobalSUR only supports lookahead on level set, got {lookahead_type}!"
         super().__init__(
+            lb=lb,
+            ub=ub,
             model=model,
             target=target,
             lookahead_type=lookahead_type,
@@ -345,6 +356,8 @@ class SMOCU(GlobalLookaheadAcquisitionFunction):

     def __init__(
         self,
+        lb: Tensor,
+        ub: Tensor,
         model: GPyTorchModel,
         lookahead_type="posterior",
         target: Optional[float] = None,
@@ -353,6 +366,8 @@ def __init__(
         k: Optional[float] = 20.0,
     ) -> None:
         super().__init__(
+            lb=lb,
+            ub=ub,
             model=model,
             target=target,
             lookahead_type=lookahead_type,
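Reviewer note: after this change, lookahead acquisition functions take the search-space bounds directly instead of reading them off the model. A minimal construction sketch (the fitted `model` and the bounds values here are illustrative, not part of this diff):

    import torch
    from aepsych.acquisition.lookahead import GlobalSUR

    # `model` is assumed to be an already-fitted GPyTorchModel
    # (e.g. a GPClassificationModel); the bounds are illustrative.
    lb = torch.tensor([0.0, 0.0])
    ub = torch.tensor([1.0, 1.0])

    # Bounds now come from the caller; when Xq is omitted, the query set
    # defaults to a scaled Sobol sample drawn over [lb, ub].
    acqf = GlobalSUR(lb=lb, ub=ub, model=model, target=0.75)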
1 change: 1 addition & 0 deletions aepsych/benchmark/benchmark.py
@@ -155,6 +155,7 @@ def run_experiment(
         np.random.seed(seed)
         config_dict["common"]["lb"] = str(problem.lb.tolist())
         config_dict["common"]["ub"] = str(problem.ub.tolist())
+        config_dict["common"]["dim"] = str(problem.lb.shape[0])
         config_dict["common"]["parnames"] = str(
             [f"par{i}" for i in range(len(problem.ub.tolist()))]
         )
13 changes: 9 additions & 4 deletions aepsych/benchmark/example_problems.py
@@ -11,6 +11,8 @@
     novel_discrimination_testfun,
 )
 from aepsych.models import GPClassificationModel
+from aepsych.models.inducing_point_allocators import KMeansAllocator, SobolAllocator
+from aepsych.models.utils import select_inducing_points

 """The DiscrimLowDim, DiscrimHighDim, ContrastSensitivity6d, and Hartmann6Binary classes
 are copied from bernoulli_lse github repository (https://github.com/facebookresearch/bernoulli_lse)
@@ -103,13 +105,16 @@ def __init__(
         )
         y = torch.LongTensor(self.data[:, 0])
         x = torch.Tensor(self.data[:, 1:])
+        inducing_size = 100
+        inducing_points = select_inducing_points(
+            inducing_size=inducing_size, allocator=SobolAllocator(bounds=self.bounds)
+        )

         # Fit a model, with a large number of inducing points
         self.m = GPClassificationModel(
-            lb=self.bounds[0],
-            ub=self.bounds[1],
-            inducing_size=100,
-            inducing_point_method="kmeans++",
+            inducing_points=inducing_points,
+            inducing_size=inducing_size,
+            inducing_point_method=KMeansAllocator(),
         )

         self.m.fit(
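Reviewer note: the refactor swaps the string-based inducing-point option ("kmeans++") for allocator objects plus an explicit selection step. A sketch of the new flow, mirroring the calls above (dimension and bounds values are illustrative):

    import torch
    from aepsych.models import GPClassificationModel
    from aepsych.models.inducing_point_allocators import KMeansAllocator, SobolAllocator
    from aepsych.models.utils import select_inducing_points

    bounds = torch.stack((torch.zeros(2), torch.ones(2)))  # (2 x d) lb/ub stack

    # Seed the inducing points from a Sobol allocator over the bounds...
    inducing_points = select_inducing_points(
        inducing_size=100, allocator=SobolAllocator(bounds=bounds)
    )

    # ...then let the model reallocate them with k-means as data come in.
    model = GPClassificationModel(
        inducing_points=inducing_points,
        inducing_size=100,
        inducing_point_method=KMeansAllocator(),
    )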
12 changes: 11 additions & 1 deletion aepsych/generators/acqf_thompson_sampler_generator.py
@@ -41,13 +41,17 @@ class AcqfThompsonSamplerGenerator(AEPsychGenerator):

     def __init__(
         self,
+        lb: torch.Tensor,
+        ub: torch.Tensor,
         acqf: AcquisitionFunction,
         acqf_kwargs: Optional[Dict[str, Any]] = None,
         samps: int = 1000,
         stimuli_per_trial: int = 1,
     ) -> None:
         """Initialize OptimizeAcqfGenerator.
         Args:
+            lb (torch.Tensor): Lower bounds for the optimization.
+            ub (torch.Tensor): Upper bounds for the optimization.
             acqf (AcquisitionFunction): Acquisition function to use.
             acqf_kwargs (Dict[str, object], optional): Extra arguments to
                 pass to acquisition function. Defaults to no arguments.
@@ -60,6 +64,8 @@ def __init__(
         self.acqf_kwargs = acqf_kwargs
         self.samps = samps
         self.stimuli_per_trial = stimuli_per_trial
+        self.lb = lb
+        self.ub = ub

     def _instantiate_acquisition_fn(self, model: ModelProtocol) -> AcquisitionFunction:
         if self.acqf == AnalyticExpectedUtilityOfBestOption:
@@ -102,7 +108,7 @@ def _gen(
         starttime = time.time()

         seed = gen_options.get("seed")
-        bounds = torch.tensor(np.c_[model.lb, model.ub]).T.cpu()
+        bounds = torch.tensor(np.c_[self.lb, self.ub]).T.cpu()
         bounds_cpu = bounds.cpu()
         effective_dim = bounds.shape[-1] * num_points
         if effective_dim <= SobolEngine.MAXDIM:
@@ -130,12 +136,16 @@
     @classmethod
     def from_config(cls, config: Config) -> AcqfThompsonSamplerGenerator:
         classname = cls.__name__
+        lb = config.gettensor(classname, "lb")
+        ub = config.gettensor(classname, "ub")
         acqf = config.getobj(classname, "acqf", fallback=None)
         extra_acqf_args = cls._get_acqf_options(acqf, config)
         stimuli_per_trial = config.getint(classname, "stimuli_per_trial")
         samps = config.getint(classname, "samps", fallback=1000)

         return cls(
+            lb=lb,
+            ub=ub,
             acqf=acqf,
             acqf_kwargs=extra_acqf_args,
             samps=samps,
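Reviewer note: since from_config now pulls lb/ub itself, the config drives the generator's bounds rather than the model's. A hypothetical snippet (it assumes the usual AEPsych convention that per-class option lookups fall back to [common]; all parameter values are made up):

    from aepsych.config import Config
    from aepsych.generators import AcqfThompsonSamplerGenerator

    config_str = """
    [common]
    parnames = [x1, x2]
    lb = [0, 0]
    ub = [1, 1]
    stimuli_per_trial = 1

    [AcqfThompsonSamplerGenerator]
    acqf = MCLevelSetEstimation
    samps = 1000
    """

    # gettensor(classname, "lb") is expected to resolve from [common] when
    # the option is absent from the class section, so the bounds above
    # reach the generator.
    gen = AcqfThompsonSamplerGenerator.from_config(Config(config_str=config_str))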
16 changes: 13 additions & 3 deletions aepsych/generators/epsilon_greedy_generator.py
@@ -15,25 +15,35 @@


 class EpsilonGreedyGenerator(AEPsychGenerator):
-    def __init__(self, subgenerator: AEPsychGenerator, epsilon: float = 0.1) -> None:
+    def __init__(
+        self,
+        lb: torch.Tensor,
+        ub: torch.Tensor,
+        subgenerator: AEPsychGenerator,
+        epsilon: float = 0.1,
+    ) -> None:
         self.subgenerator = subgenerator
         self.epsilon = epsilon
+        self.lb = lb
+        self.ub = ub

     @classmethod
     def from_config(cls, config: Config) -> "EpsilonGreedyGenerator":
         classname = cls.__name__
+        lb = torch.tensor(config.getlist(classname, "lb"))
+        ub = torch.tensor(config.getlist(classname, "ub"))
         subgen_cls = config.getobj(
             classname, "subgenerator", fallback=OptimizeAcqfGenerator
         )
         subgen = subgen_cls.from_config(config)
         epsilon = config.getfloat(classname, "epsilon", fallback=0.1)
-        return cls(subgenerator=subgen, epsilon=epsilon)
+        return cls(lb=lb, ub=ub, subgenerator=subgen, epsilon=epsilon)

     def gen(self, num_points: int, model: ModelProtocol) -> torch.Tensor:
         if num_points > 1:
             raise NotImplementedError("Epsilon-greedy batched gen is not implemented!")
         if np.random.uniform() < self.epsilon:
-            sample = np.random.uniform(low=model.lb, high=model.ub)
+            sample = np.random.uniform(low=self.lb, high=self.ub)
             return torch.tensor(sample).reshape(1, -1)
         else:
             return self.subgenerator.gen(num_points, model)
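Reviewer note: a usage sketch of the widened constructor. With probability epsilon the generator samples uniformly from [lb, ub]; otherwise it defers to the subgenerator (bounds, acquisition function, and target below are illustrative):

    import torch
    from aepsych.acquisition import MCLevelSetEstimation
    from aepsych.generators import EpsilonGreedyGenerator, OptimizeAcqfGenerator

    lb = torch.zeros(2)
    ub = torch.ones(2)

    gen = EpsilonGreedyGenerator(
        lb=lb,
        ub=ub,
        subgenerator=OptimizeAcqfGenerator(
            lb=lb, ub=ub, acqf=MCLevelSetEstimation, acqf_kwargs={"target": 0.75}
        ),
        epsilon=0.1,
    )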
11 changes: 10 additions & 1 deletion aepsych/generators/monotonic_rejection_generator.py
@@ -43,13 +43,17 @@ class MonotonicRejectionGenerator(AEPsychGenerator[MonotonicRejectionGP]):
     def __init__(
         self,
         acqf: MonotonicMCAcquisition,
+        lb: torch.Tensor,
+        ub: torch.Tensor,
         acqf_kwargs: Optional[Dict[str, Any]] = None,
         model_gen_options: Optional[Dict[str, Any]] = None,
         explore_features: Optional[Sequence[int]] = None,
     ) -> None:
         """Initialize MonotonicRejectionGenerator.
         Args:
             acqf (AcquisitionFunction): Acquisition function to use.
+            lb (torch.Tensor): Lower bounds for the optimization.
+            ub (torch.Tensor): Upper bounds for the optimization.
             acqf_kwargs (Dict[str, object], optional): Extra arguments to
                 pass to acquisition function. Defaults to no arguments.
             model_gen_options: Dictionary with options for generating candidate, such as
@@ -63,6 +67,7 @@ def __init__(
         self.acqf_kwargs = acqf_kwargs
         self.model_gen_options = model_gen_options
         self.explore_features = explore_features
+        self.bounds = torch.stack((lb, ub))

     def _instantiate_acquisition_fn(
         self, model: MonotonicRejectionGP
@@ -101,7 +106,7 @@ def gen(
         )

         # Augment bounds with deriv indicator
-        bounds = torch.cat((model.bounds_, torch.zeros(2, 1)), dim=1)
+        bounds = torch.cat((self.bounds, torch.zeros(2, 1)), dim=1)
         # Fix deriv indicator to 0 during optimization
         fixed_features = {(bounds.shape[1] - 1): 0.0}
         # Fix explore features to random values
@@ -173,6 +178,8 @@ def from_config(cls, config: Config) -> "MonotonicRejectionGenerator":
         classname = cls.__name__
         acqf = config.getobj("common", "acqf", fallback=None)
         extra_acqf_args = cls._get_acqf_options(acqf, config)
+        lb = torch.tensor(config.getlist(classname, "lb"))
+        ub = torch.tensor(config.getlist(classname, "ub"))

         options = {}
         options["num_restarts"] = config.getint(classname, "restarts", fallback=10)
@@ -198,6 +205,8 @@ def from_config(cls, config: Config) -> "MonotonicRejectionGenerator":

         return cls(
             acqf=acqf,
+            lb=lb,
+            ub=ub,
             acqf_kwargs=extra_acqf_args,
             model_gen_options=options,
             explore_features=explore_features,
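Reviewer note: the generator now carries its own (2 x d) bounds stack, and gen() appends a zero column as the derivative indicator before optimizing. A standalone sketch of that tensor manipulation (dimensions illustrative):

    import torch

    lb = torch.zeros(3)
    ub = torch.ones(3)
    bounds = torch.stack((lb, ub))  # shape (2, 3); replaces model.bounds_

    # Append a zero column: the derivative-indicator dimension is pinned to 0.
    bounds_aug = torch.cat((bounds, torch.zeros(2, 1)), dim=1)  # shape (2, 4)
    fixed_features = {bounds_aug.shape[1] - 1: 0.0}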
35 changes: 32 additions & 3 deletions aepsych/generators/optimize_acqf_generator.py
@@ -5,6 +5,7 @@
 # LICENSE file in the root directory of this source tree.
 from __future__ import annotations

+import inspect
 import time
 from typing import Any, Dict, Optional

@@ -39,6 +40,8 @@ class OptimizeAcqfGenerator(AEPsychGenerator):

     def __init__(
         self,
+        lb: torch.Tensor,
+        ub: torch.Tensor,
         acqf: AcquisitionFunction,
         acqf_kwargs: Optional[Dict[str, Any]] = None,
         restarts: int = 10,
@@ -48,6 +51,8 @@ def __init__(
     ) -> None:
         """Initialize OptimizeAcqfGenerator.
         Args:
+            lb (torch.Tensor): Lower bounds for the optimization.
+            ub (torch.Tensor): Upper bounds for the optimization.
             acqf (AcquisitionFunction): Acquisition function to use.
             acqf_kwargs (Dict[str, object], optional): Extra arguments to
                 pass to acquisition function. Defaults to no arguments.
@@ -64,15 +69,35 @@ def __init__(
         self.samps = samps
         self.max_gen_time = max_gen_time
         self.stimuli_per_trial = stimuli_per_trial
+        self.lb = lb
+        self.ub = ub

     def _instantiate_acquisition_fn(self, model: ModelProtocol) -> AcquisitionFunction:
+        if (
+            "lb" in inspect.signature(self.acqf).parameters
+            and "ub" in inspect.signature(self.acqf).parameters
+        ):
+            if self.acqf == AnalyticExpectedUtilityOfBestOption:
+                return self.acqf(pref_model=model, lb=self.lb, ub=self.ub)
+
+            if self.acqf in self.baseline_requiring_acqfs:
+                return self.acqf(
+                    model,
+                    model.train_inputs[0],
+                    lb=self.lb,
+                    ub=self.ub,
+                    **self.acqf_kwargs,
+                )
+
+            return self.acqf(model=model, lb=self.lb, ub=self.ub, **self.acqf_kwargs)
+
         if self.acqf == AnalyticExpectedUtilityOfBestOption:
             return self.acqf(pref_model=model)

         if self.acqf in self.baseline_requiring_acqfs:
             return self.acqf(model, model.train_inputs[0], **self.acqf_kwargs)
-        else:
-            return self.acqf(model=model, **self.acqf_kwargs)
+
+        return self.acqf(model=model, **self.acqf_kwargs)

     def gen(self, num_points: int, model: ModelProtocol, **gen_options) -> torch.Tensor:
         """Query next point(s) to run by optimizing the acquisition function.
@@ -107,7 +132,7 @@ def _gen(

         new_candidate, _ = optimize_acqf(
             acq_function=acqf,
-            bounds=torch.stack([model.lb, model.ub]),
+            bounds=torch.stack([self.lb, self.ub]),
             q=num_points,
             num_restarts=self.restarts,
             raw_samples=self.samps,
@@ -121,6 +146,8 @@
     @classmethod
     def from_config(cls, config: Config) -> "OptimizeAcqfGenerator":
         classname = cls.__name__
+        lb = config.gettensor(classname, "lb")
+        ub = config.gettensor(classname, "ub")
         acqf = config.getobj(classname, "acqf", fallback=None)
         extra_acqf_args = cls._get_acqf_options(acqf, config)
         stimuli_per_trial = config.getint(classname, "stimuli_per_trial")
@@ -129,6 +156,8 @@ def from_config(cls, config: Config) -> "OptimizeAcqfGenerator":
         max_gen_time = config.getfloat(classname, "max_gen_time", fallback=None)

         return cls(
+            lb=lb,
+            ub=ub,
             acqf=acqf,
             acqf_kwargs=extra_acqf_args,
             restarts=restarts,
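Reviewer note: the inspect.signature gate in _instantiate_acquisition_fn keeps OptimizeAcqfGenerator compatible with acquisition functions that never grew lb/ub parameters. A toy demonstration of the dispatch test (dummy classes, not AEPsych code):

    import inspect

    class BoundsAwareAcqf:
        def __init__(self, model, lb, ub):
            self.model, self.lb, self.ub = model, lb, ub

    class PlainAcqf:
        def __init__(self, model):
            self.model = model

    for acqf_cls in (BoundsAwareAcqf, PlainAcqf):
        # signature() of a class reflects its __init__ (minus self).
        params = inspect.signature(acqf_cls).parameters
        takes_bounds = "lb" in params and "ub" in params
        print(acqf_cls.__name__, "receives bounds:", takes_bounds)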
12 changes: 6 additions & 6 deletions aepsych/kernels/pairwisekernel.py
@@ -16,12 +16,12 @@ class PairwiseKernel(Kernel):
     """

     def __init__(
-        self, latent_kernel: Kernel, is_partial_obs: bool=False, **kwargs
+        self, latent_kernel: Kernel, is_partial_obs: bool = False, **kwargs
     ) -> None:
         """
-            Args:
-                latent_kernel (Kernel): The underlying kernel used to compute the covariance for the GP.
-                is_partial_obs (bool): If the kernel should handle partial observations. Defaults to False.
+        Args:
+            latent_kernel (Kernel): The underlying kernel used to compute the covariance for the GP.
+            is_partial_obs (bool): If the kernel should handle partial observations. Defaults to False.
         """
         super(PairwiseKernel, self).__init__(**kwargs)

@@ -40,11 +40,11 @@ def forward(
             x1 (torch.Tensor): A `b x n x d` or `n x d` tensor, where `d = 2k` and `k` is the dimension of the latent space.
             x2 (torch.Tensor): A `b x m x d` or `m x d` tensor, where `d = 2k` and `k` is the dimension of the latent space.
             diag (bool): Should the Kernel compute the whole covariance matrix or just the diagonal? Defaults to False.
         Returns:
             torch.Tensor (or :class:`gpytorch.lazy.LazyTensor`) : A `b x n x m` or `n x m` tensor representing
-                the covariance matrix between `x1` and `x2`.
+            the covariance matrix between `x1` and `x2`.
         The exact size depends on the kernel's evaluation mode:
             * `full_covar`: `n x m` or `b x n x m`
             * `diag`: `n` or `b x n`
6 changes: 3 additions & 3 deletions aepsych/kernels/rbf_partial_grad.py
@@ -31,14 +31,14 @@ def forward(
         self, x1: torch.Tensor, x2: torch.Tensor, diag: bool = False, **params: Any
     ) -> torch.Tensor:
         """Computes the covariance matrix between x1 and x2 based on the RBF
         Args:
             x1 (torch.Tensor): A `b x n x d` or `n x d` tensor, where `d = 2k` and `k` is the dimension of the latent space.
             x2 (torch.Tensor): A `b x m x d` or `m x d` tensor, where `d = 2k` and `k` is the dimension of the latent space.
             diag (bool): Should the Kernel compute the whole covariance matrix (False) or just the diagonal (True)? Defaults to False.
         Returns:
             torch.Tensor: A `b x n x m` or `n x m` tensor representing the covariance matrix between `x1` and `x2`.
                 The exact size depends on the kernel's evaluation mode: