diff --git a/api/_modules/aepsych/models/base.html b/api/_modules/aepsych/models/base.html
index 36b1b115a..9c4550e35 100644
--- a/api/_modules/aepsych/models/base.html
+++ b/api/_modules/aepsych/models/base.html
@@ -382,6 +382,9 @@
         optimizer_kwargs = {} if optimizer_kwargs is None else optimizer_kwargs.copy()
         max_fit_time = kwargs.pop("max_fit_time", self.max_fit_time)
         if max_fit_time is not None:
+            if "options" not in optimizer_kwargs:
+                optimizer_kwargs["options"] = {}
+
             # figure out how long evaluating a single samp
             starttime = time.time()
             _ = mll(self(train_x), train_y)
@@ -389,7 +392,8 @@
                 time.time() - starttime + 1e-6
             )  # add an epsilon to avoid divide by zero
             n_eval = int(max_fit_time / single_eval_time)
-            optimizer_kwargs["options"] = {"maxfun": n_eval}
+
+            optimizer_kwargs["options"]["maxfun"] = n_eval
             logger.info(f"fit maxfun is {n_eval}")
 
         starttime = time.time()
diff --git a/api/_modules/aepsych/models/base/index.html b/api/_modules/aepsych/models/base/index.html
index 36b1b115a..9c4550e35 100644
--- a/api/_modules/aepsych/models/base/index.html
+++ b/api/_modules/aepsych/models/base/index.html
@@ -382,6 +382,9 @@
         optimizer_kwargs = {} if optimizer_kwargs is None else optimizer_kwargs.copy()
         max_fit_time = kwargs.pop("max_fit_time", self.max_fit_time)
         if max_fit_time is not None:
+            if "options" not in optimizer_kwargs:
+                optimizer_kwargs["options"] = {}
+
             # figure out how long evaluating a single samp
             starttime = time.time()
             _ = mll(self(train_x), train_y)
@@ -389,7 +392,8 @@
                 time.time() - starttime + 1e-6
             )  # add an epsilon to avoid divide by zero
             n_eval = int(max_fit_time / single_eval_time)
-            optimizer_kwargs["options"] = {"maxfun": n_eval}
+
+            optimizer_kwargs["options"]["maxfun"] = n_eval
             logger.info(f"fit maxfun is {n_eval}")
 
         starttime = time.time()
diff --git a/api/_modules/aepsych/models/gp_classification.html b/api/_modules/aepsych/models/gp_classification.html
index b2db98c53..bb9511863 100644
--- a/api/_modules/aepsych/models/gp_classification.html
+++ b/api/_modules/aepsych/models/gp_classification.html
@@ -26,7 +26,7 @@
 import warnings
 from copy import deepcopy
-from typing import Optional, Tuple, Union
+from typing import Any, Dict, Optional, Tuple
 
 import gpytorch
 import numpy as np
@@ -35,7 +35,7 @@
 from aepsych.factory.default import default_mean_covar_factory
 from aepsych.models.base import AEPsychModelDeviceMixin
 from aepsych.models.utils import select_inducing_points
-from aepsych.utils import _process_bounds, promote_0d
+from aepsych.utils import _process_bounds, get_optimizer_options, promote_0d
 from aepsych.utils_logging import getLogger
 from gpytorch.likelihoods import BernoulliLikelihood, BetaLikelihood, Likelihood
 from gpytorch.models import ApproximateGP
@@ -75,6 +75,7 @@
         inducing_size: Optional[int] = None,
         max_fit_time: Optional[float] = None,
         inducing_point_method: str = "auto",
+        optimizer_options: Optional[Dict[str, Any]] = None,
     ) -> None:
         """Initialize the GP Classification model
 
@@ -96,12 +97,17 @@
                 If "pivoted_chol", selects points based on the pivoted Cholesky heuristic.
                 If "kmeans++", selects points by performing kmeans++ clustering on the training data.
                 If "auto", tries to determine the best method automatically.
+            optimizer_options (Dict[str, Any], optional): Optimizer options to pass to the SciPy optimizer during
+                fitting. Assumes we are using L-BFGS-B.
         """
         lb, ub, self.dim = _process_bounds(lb, ub, dim)
-
         self.max_fit_time = max_fit_time
         self.inducing_size = inducing_size or 99
 
+        self.optimizer_options = (
+            {"options": optimizer_options} if optimizer_options else {"options": {}}
+        )
+
         if self.inducing_size >= 100:
             logger.warning(
                 (
@@ -192,6 +198,8 @@
         else:
             likelihood = None  # fall back to __init__ default
 
+        optimizer_options = get_optimizer_options(config, classname)
+
         return cls(
             lb=lb,
             ub=ub,
@@ -202,6 +210,7 @@
             max_fit_time=max_fit_time,
             inducing_point_method=inducing_point_method,
             likelihood=likelihood,
+            optimizer_options=optimizer_options,
         )
 
     def _reset_hyperparameters(self) -> None:
@@ -269,7 +278,10 @@
         n = train_y.shape[0]
         mll = gpytorch.mlls.VariationalELBO(self.likelihood, self, n)
-        self._fit_mll(mll, **kwargs)
+        if "optimizer_kwargs" in kwargs:
+            self._fit_mll(mll, **kwargs)
+        else:
+            self._fit_mll(mll, optimizer_kwargs=self.optimizer_options, **kwargs)
 
     def sample(self, x: torch.Tensor, num_samples: int) -> torch.Tensor:
         """Sample from underlying model.
@@ -353,6 +365,7 @@
         inducing_size: Optional[int] = None,
         max_fit_time: Optional[float] = None,
         inducing_point_method: str = "auto",
+        optimizer_options: Optional[Dict[str, Any]] = None,
     ) -> None:
         if likelihood is None:
             likelihood = BetaLikelihood()
@@ -366,6 +379,7 @@
             inducing_size=inducing_size,
             max_fit_time=max_fit_time,
             inducing_point_method=inducing_point_method,
+            optimizer_options=optimizer_options,
         )
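Note on the GPClassificationModel.fit() change above: the config-derived self.optimizer_options are used only when the caller does not pass optimizer_kwargs explicitly. A minimal usage sketch of the new constructor argument follows; the bounds, data, and option values are hypothetical, not part of the diff.

    # Hypothetical usage of the optimizer_options argument added above.
    import torch
    from aepsych.models import GPClassificationModel

    model = GPClassificationModel(
        lb=torch.tensor([0.0]),
        ub=torch.tensor([1.0]),
        optimizer_options={"maxfun": 500, "gtol": 1e-6},
    )
    train_x = torch.rand(20, 1)
    train_y = torch.bernoulli(torch.full((20,), 0.5))
    model.fit(train_x, train_y)  # no optimizer_kwargs, so self.optimizer_options is used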
diff --git a/api/_modules/aepsych/models/gp_classification/index.html b/api/_modules/aepsych/models/gp_classification/index.html
index b2db98c53..bb9511863 100644
--- a/api/_modules/aepsych/models/gp_classification/index.html
+++ b/api/_modules/aepsych/models/gp_classification/index.html
@@ -26,7 +26,7 @@
 import warnings
 from copy import deepcopy
-from typing import Optional, Tuple, Union
+from typing import Any, Dict, Optional, Tuple
 
 import gpytorch
 import numpy as np
@@ -35,7 +35,7 @@
 from aepsych.factory.default import default_mean_covar_factory
 from aepsych.models.base import AEPsychModelDeviceMixin
 from aepsych.models.utils import select_inducing_points
-from aepsych.utils import _process_bounds, promote_0d
+from aepsych.utils import _process_bounds, get_optimizer_options, promote_0d
 from aepsych.utils_logging import getLogger
 from gpytorch.likelihoods import BernoulliLikelihood, BetaLikelihood, Likelihood
 from gpytorch.models import ApproximateGP
@@ -75,6 +75,7 @@
         inducing_size: Optional[int] = None,
         max_fit_time: Optional[float] = None,
         inducing_point_method: str = "auto",
+        optimizer_options: Optional[Dict[str, Any]] = None,
     ) -> None:
         """Initialize the GP Classification model
 
@@ -96,12 +97,17 @@
                 If "pivoted_chol", selects points based on the pivoted Cholesky heuristic.
                 If "kmeans++", selects points by performing kmeans++ clustering on the training data.
                 If "auto", tries to determine the best method automatically.
+            optimizer_options (Dict[str, Any], optional): Optimizer options to pass to the SciPy optimizer during
+                fitting. Assumes we are using L-BFGS-B.
         """
         lb, ub, self.dim = _process_bounds(lb, ub, dim)
-
         self.max_fit_time = max_fit_time
         self.inducing_size = inducing_size or 99
 
+        self.optimizer_options = (
+            {"options": optimizer_options} if optimizer_options else {"options": {}}
+        )
+
         if self.inducing_size >= 100:
             logger.warning(
                 (
@@ -192,6 +198,8 @@
         else:
             likelihood = None  # fall back to __init__ default
 
+        optimizer_options = get_optimizer_options(config, classname)
+
         return cls(
             lb=lb,
             ub=ub,
@@ -202,6 +210,7 @@
             max_fit_time=max_fit_time,
             inducing_point_method=inducing_point_method,
             likelihood=likelihood,
+            optimizer_options=optimizer_options,
         )
 
     def _reset_hyperparameters(self) -> None:
@@ -269,7 +278,10 @@
         n = train_y.shape[0]
         mll = gpytorch.mlls.VariationalELBO(self.likelihood, self, n)
-        self._fit_mll(mll, **kwargs)
+        if "optimizer_kwargs" in kwargs:
+            self._fit_mll(mll, **kwargs)
+        else:
+            self._fit_mll(mll, optimizer_kwargs=self.optimizer_options, **kwargs)
 
     def sample(self, x: torch.Tensor, num_samples: int) -> torch.Tensor:
         """Sample from underlying model.
@@ -353,6 +365,7 @@
         inducing_size: Optional[int] = None,
         max_fit_time: Optional[float] = None,
         inducing_point_method: str = "auto",
+        optimizer_options: Optional[Dict[str, Any]] = None,
     ) -> None:
         if likelihood is None:
             likelihood = BetaLikelihood()
@@ -366,6 +379,7 @@
             inducing_size=inducing_size,
             max_fit_time=max_fit_time,
             inducing_point_method=inducing_point_method,
+            optimizer_options=optimizer_options,
         )
diff --git a/api/_modules/aepsych/models/gp_regression.html b/api/_modules/aepsych/models/gp_regression.html
index bd316737b..c9f4c0953 100644
--- a/api/_modules/aepsych/models/gp_regression.html
+++ b/api/_modules/aepsych/models/gp_regression.html
@@ -25,7 +25,7 @@
from __future__ import annotations
 
 from copy import deepcopy
-from typing import Dict, Optional, Tuple, Union
+from typing import Any, Dict, Optional, Tuple, Union
 
 import gpytorch
 import numpy as np
@@ -33,7 +33,7 @@
from aepsych.config import Config
 from aepsych.factory.default import default_mean_covar_factory
 from aepsych.models.base import AEPsychModelDeviceMixin
-from aepsych.utils import _process_bounds, promote_0d
+from aepsych.utils import _process_bounds, get_optimizer_options, promote_0d
 from aepsych.utils_logging import getLogger
 from gpytorch.likelihoods import GaussianLikelihood, Likelihood
 from gpytorch.models import ExactGP
@@ -58,6 +58,7 @@
covar_module: Optional[gpytorch.kernels.Kernel] = None,
         likelihood: Optional[Likelihood] = None,
         max_fit_time: Optional[float] = None,
+        optimizer_options: Optional[Dict[str, Any]] = None,
     ) -> None:
         """Initialize the GP regression model
 
@@ -73,6 +74,8 @@
                Gaussian likelihood.
             max_fit_time (float, optional): The maximum amount of time, in seconds, to spend fitting the model. If None,
                 there is no limit to the fitting time.
+            optimizer_options (Dict[str, Any], optional): Optimizer options to pass to the SciPy optimizer during
+                fitting. Assumes we are using L-BFGS-B.
         """
         if likelihood is None:
             likelihood = GaussianLikelihood()
@@ -82,6 +85,10 @@
lb, ub, self.dim = _process_bounds(lb, ub, dim)
         self.max_fit_time = max_fit_time
 
+        self.optimizer_options = (
+            {"options": optimizer_options} if optimizer_options else {"options": {}}
+        )
+
         if mean_module is None or covar_module is None:
             default_mean, default_covar = default_mean_covar_factory(
                 dim=self.dim, stimuli_per_trial=self.stimuli_per_trial
@@ -123,6 +130,8 @@
max_fit_time = config.getfloat(classname, "max_fit_time", fallback=None)
 
+        optimizer_options = get_optimizer_options(config, classname)
+
         return {
             "lb": lb,
             "ub": ub,
@@ -131,6 +140,7 @@
"covar_module": covar,
             "likelihood": likelihood,
             "max_fit_time": max_fit_time,
+            "optimizer_options": optimizer_options,
         }
 
     @classmethod
@@ -160,7 +170,7 @@
        """
         self.set_train_data(train_x, train_y)
         mll = gpytorch.mlls.ExactMarginalLogLikelihood(self.likelihood, self)
-        return self._fit_mll(mll, **kwargs)
+        return self._fit_mll(mll, self.optimizer_options, **kwargs)
 
     def sample(self, x: torch.Tensor, num_samples: int) -> torch.Tensor:
         """Sample from underlying model.
diff --git a/api/_modules/aepsych/models/gp_regression/index.html b/api/_modules/aepsych/models/gp_regression/index.html
index bd316737b..c9f4c0953 100644
--- a/api/_modules/aepsych/models/gp_regression/index.html
+++ b/api/_modules/aepsych/models/gp_regression/index.html
@@ -25,7 +25,7 @@
from __future__ import annotations
 
 from copy import deepcopy
-from typing import Dict, Optional, Tuple, Union
+from typing import Any, Dict, Optional, Tuple, Union
 
 import gpytorch
 import numpy as np
@@ -33,7 +33,7 @@
from aepsych.config import Config
 from aepsych.factory.default import default_mean_covar_factory
 from aepsych.models.base import AEPsychModelDeviceMixin
-from aepsych.utils import _process_bounds, promote_0d
+from aepsych.utils import _process_bounds, get_optimizer_options, promote_0d
 from aepsych.utils_logging import getLogger
 from gpytorch.likelihoods import GaussianLikelihood, Likelihood
 from gpytorch.models import ExactGP
@@ -58,6 +58,7 @@
covar_module: Optional[gpytorch.kernels.Kernel] = None,
         likelihood: Optional[Likelihood] = None,
         max_fit_time: Optional[float] = None,
+        optimizer_options: Optional[Dict[str, Any]] = None,
     ) -> None:
         """Initialize the GP regression model
 
@@ -73,6 +74,8 @@
                Gaussian likelihood.
             max_fit_time (float, optional): The maximum amount of time, in seconds, to spend fitting the model. If None,
                 there is no limit to the fitting time.
+            optimizer_options (Dict[str, Any], optional): Optimizer options to pass to the SciPy optimizer during
+                fitting. Assumes we are using L-BFGS-B.
         """
         if likelihood is None:
             likelihood = GaussianLikelihood()
@@ -82,6 +85,10 @@
lb, ub, self.dim = _process_bounds(lb, ub, dim)
         self.max_fit_time = max_fit_time
 
+        self.optimizer_options = (
+            {"options": optimizer_options} if optimizer_options else {"options": {}}
+        )
+
         if mean_module is None or covar_module is None:
             default_mean, default_covar = default_mean_covar_factory(
                 dim=self.dim, stimuli_per_trial=self.stimuli_per_trial
@@ -123,6 +130,8 @@
max_fit_time = config.getfloat(classname, "max_fit_time", fallback=None)
 
+        optimizer_options = get_optimizer_options(config, classname)
+
         return {
             "lb": lb,
             "ub": ub,
@@ -131,6 +140,7 @@
"covar_module": covar,
             "likelihood": likelihood,
             "max_fit_time": max_fit_time,
+            "optimizer_options": optimizer_options,
         }
 
     @classmethod
@@ -160,7 +170,7 @@
        """
         self.set_train_data(train_x, train_y)
         mll = gpytorch.mlls.ExactMarginalLogLikelihood(self.likelihood, self)
-        return self._fit_mll(mll, **kwargs)
+        return self._fit_mll(mll, self.optimizer_options, **kwargs)
 
     def sample(self, x: torch.Tensor, num_samples: int) -> torch.Tensor:
         """Sample from underlying model.
diff --git a/api/_modules/aepsych/models/monotonic_projection_gp.html b/api/_modules/aepsych/models/monotonic_projection_gp.html
index 7fbe1636e..76c34f47f 100644
--- a/api/_modules/aepsych/models/monotonic_projection_gp.html
+++ b/api/_modules/aepsych/models/monotonic_projection_gp.html
@@ -25,7 +25,7 @@
 from __future__ import annotations
 
-from typing import Any, List, Optional, Union
+from typing import Any, Dict, List, Optional, Union
 
 import gpytorch
 import numpy as np
@@ -33,6 +33,7 @@
 from aepsych.config import Config
 from aepsych.factory.default import default_mean_covar_factory
 from aepsych.models.gp_classification import GPClassificationModel
+from aepsych.utils import get_optimizer_options
 from botorch.posteriors.gpytorch import GPyTorchPosterior
 from gpytorch.likelihoods import Likelihood
 from statsmodels.stats.moment_helpers import corr2cov, cov2corr
@@ -122,6 +123,7 @@
         inducing_size: Optional[int] = None,
         max_fit_time: Optional[float] = None,
         inducing_point_method: str = "auto",
+        optimizer_options: Optional[Dict[str, Any]] = None,
     ) -> None:
         assert len(monotonic_dims) > 0
         self.monotonic_dims = [int(d) for d in monotonic_dims]
@@ -137,6 +139,7 @@
             inducing_size=inducing_size,
             max_fit_time=max_fit_time,
             inducing_point_method=inducing_point_method,
+            optimizer_options=optimizer_options,
         )
 
     def posterior(
@@ -240,6 +243,8 @@
         )
         min_f_val = config.getfloat(classname, "min_f_val", fallback=None)
 
+        optimizer_options = get_optimizer_options(config, classname)
+
         return cls(
             lb=lb,
             ub=ub,
@@ -253,6 +258,7 @@
             monotonic_dims=monotonic_dims,
             monotonic_grid_size=monotonic_grid_size,
             min_f_val=min_f_val,
+            optimizer_options=optimizer_options,
         )
diff --git a/api/_modules/aepsych/models/monotonic_projection_gp/index.html b/api/_modules/aepsych/models/monotonic_projection_gp/index.html
index 7fbe1636e..76c34f47f 100644
--- a/api/_modules/aepsych/models/monotonic_projection_gp/index.html
+++ b/api/_modules/aepsych/models/monotonic_projection_gp/index.html
@@ -25,7 +25,7 @@
 from __future__ import annotations
 
-from typing import Any, List, Optional, Union
+from typing import Any, Dict, List, Optional, Union
 
 import gpytorch
 import numpy as np
@@ -33,6 +33,7 @@
 from aepsych.config import Config
 from aepsych.factory.default import default_mean_covar_factory
 from aepsych.models.gp_classification import GPClassificationModel
+from aepsych.utils import get_optimizer_options
 from botorch.posteriors.gpytorch import GPyTorchPosterior
 from gpytorch.likelihoods import Likelihood
 from statsmodels.stats.moment_helpers import corr2cov, cov2corr
@@ -122,6 +123,7 @@
         inducing_size: Optional[int] = None,
         max_fit_time: Optional[float] = None,
         inducing_point_method: str = "auto",
+        optimizer_options: Optional[Dict[str, Any]] = None,
     ) -> None:
         assert len(monotonic_dims) > 0
         self.monotonic_dims = [int(d) for d in monotonic_dims]
@@ -137,6 +139,7 @@
             inducing_size=inducing_size,
             max_fit_time=max_fit_time,
             inducing_point_method=inducing_point_method,
+            optimizer_options=optimizer_options,
         )
 
     def posterior(
@@ -240,6 +243,8 @@
         )
         min_f_val = config.getfloat(classname, "min_f_val", fallback=None)
 
+        optimizer_options = get_optimizer_options(config, classname)
+
         return cls(
             lb=lb,
             ub=ub,
@@ -253,6 +258,7 @@
             monotonic_dims=monotonic_dims,
             monotonic_grid_size=monotonic_grid_size,
             min_f_val=min_f_val,
+            optimizer_options=optimizer_options,
         )
diff --git a/api/_modules/aepsych/models/monotonic_rejection_gp.html b/api/_modules/aepsych/models/monotonic_rejection_gp.html
index c24e9424f..e7f9d7822 100644
--- a/api/_modules/aepsych/models/monotonic_rejection_gp.html
+++ b/api/_modules/aepsych/models/monotonic_rejection_gp.html
@@ -26,7 +26,7 @@
 from __future__ import annotations
 
 import warnings
-from typing import Dict, List, Optional, Sequence, Tuple, Union
+from typing import Any, Dict, List, Optional, Sequence, Tuple, Union
 
 import gpytorch
 import numpy as np
@@ -38,7 +38,7 @@
 from aepsych.means.constant_partial_grad import ConstantMeanPartialObsGrad
 from aepsych.models.base import AEPsychMixin
 from aepsych.models.utils import select_inducing_points
-from aepsych.utils import _process_bounds, promote_0d
+from aepsych.utils import _process_bounds, get_optimizer_options, promote_0d
 from botorch.fit import fit_gpytorch_mll
 from gpytorch.kernels import Kernel
 from gpytorch.likelihoods import BernoulliLikelihood, Likelihood
@@ -81,6 +81,7 @@
         num_samples: int = 250,
         num_rejection_samples: int = 5000,
         inducing_point_method: str = "auto",
+        optimizer_options: Optional[Dict[str, Any]] = None,
     ) -> None:
         """Initialize MonotonicRejectionGP.
 
@@ -100,6 +101,8 @@
             acqf (MonotonicMCAcquisition, optional): Acquisition function to use for querying points. Defaults to MonotonicMCLSE.
             objective (Optional[MCAcquisitionObjective], optional): Transformation of GP to apply before computing acquisition function. Defaults to identity transform for gaussian likelihood, probit transform for probit-bernoulli.
             extra_acqf_args (Optional[Dict[str, object]], optional): Additional arguments to pass into the acquisition function. Defaults to None.
+            optimizer_options (Dict[str, Any], optional): Optimizer options to pass to the SciPy optimizer during
+                fitting. Assumes we are using L-BFGS-B.
         """
         self.lb, self.ub, self.dim = _process_bounds(lb, ub, dim)
         if likelihood is None:
@@ -163,6 +166,9 @@
         self.num_rejection_samples = num_rejection_samples
         self.fixed_prior_mean = fixed_prior_mean
         self.inducing_points = inducing_points
+        self.optimizer_options = (
+            {"options": optimizer_options} if optimizer_options else {"options": {}}
+        )
 
     def fit(self, train_x: Tensor, train_y: Tensor, **kwargs) -> None:
         """Fit the model
@@ -201,7 +207,7 @@
         mll = VariationalELBO(
             likelihood=self.likelihood, model=self, num_data=train_y.numel()
         )
-        mll = fit_gpytorch_mll(mll)
+        mll = fit_gpytorch_mll(mll, optimizer_kwargs=self.optimizer_options)
 
     def update(self, train_x: Tensor, train_y: Tensor, warmstart: bool = True) -> None:
         """
@@ -337,6 +343,8 @@
             classname, "monotonic_idxs", fallback=[-1]
         )
 
+        optimizer_options = get_optimizer_options(config, classname)
+
         return cls(
             monotonic_idxs=monotonic_idxs,
             lb=lb,
@@ -347,6 +355,7 @@
             num_rejection_samples=num_rejection_samples,
             mean_module=mean,
             covar_module=covar,
+            optimizer_options=optimizer_options,
         )
 
     def forward(self, x: torch.Tensor) -> gpytorch.distributions.MultivariateNormal:
diff --git a/api/_modules/aepsych/models/monotonic_rejection_gp/index.html b/api/_modules/aepsych/models/monotonic_rejection_gp/index.html
index c24e9424f..e7f9d7822 100644
--- a/api/_modules/aepsych/models/monotonic_rejection_gp/index.html
+++ b/api/_modules/aepsych/models/monotonic_rejection_gp/index.html
@@ -26,7 +26,7 @@
 from __future__ import annotations
 
 import warnings
-from typing import Dict, List, Optional, Sequence, Tuple, Union
+from typing import Any, Dict, List, Optional, Sequence, Tuple, Union
 
 import gpytorch
 import numpy as np
@@ -38,7 +38,7 @@
 from aepsych.means.constant_partial_grad import ConstantMeanPartialObsGrad
 from aepsych.models.base import AEPsychMixin
 from aepsych.models.utils import select_inducing_points
-from aepsych.utils import _process_bounds, promote_0d
+from aepsych.utils import _process_bounds, get_optimizer_options, promote_0d
 from botorch.fit import fit_gpytorch_mll
 from gpytorch.kernels import Kernel
 from gpytorch.likelihoods import BernoulliLikelihood, Likelihood
@@ -81,6 +81,7 @@
         num_samples: int = 250,
         num_rejection_samples: int = 5000,
         inducing_point_method: str = "auto",
+        optimizer_options: Optional[Dict[str, Any]] = None,
     ) -> None:
         """Initialize MonotonicRejectionGP.
 
@@ -100,6 +101,8 @@
             acqf (MonotonicMCAcquisition, optional): Acquisition function to use for querying points. Defaults to MonotonicMCLSE.
             objective (Optional[MCAcquisitionObjective], optional): Transformation of GP to apply before computing acquisition function. Defaults to identity transform for gaussian likelihood, probit transform for probit-bernoulli.
             extra_acqf_args (Optional[Dict[str, object]], optional): Additional arguments to pass into the acquisition function. Defaults to None.
+            optimizer_options (Dict[str, Any], optional): Optimizer options to pass to the SciPy optimizer during
+                fitting. Assumes we are using L-BFGS-B.
         """
         self.lb, self.ub, self.dim = _process_bounds(lb, ub, dim)
         if likelihood is None:
@@ -163,6 +166,9 @@
         self.num_rejection_samples = num_rejection_samples
         self.fixed_prior_mean = fixed_prior_mean
         self.inducing_points = inducing_points
+        self.optimizer_options = (
+            {"options": optimizer_options} if optimizer_options else {"options": {}}
+        )
 
     def fit(self, train_x: Tensor, train_y: Tensor, **kwargs) -> None:
         """Fit the model
@@ -201,7 +207,7 @@
         mll = VariationalELBO(
             likelihood=self.likelihood, model=self, num_data=train_y.numel()
         )
-        mll = fit_gpytorch_mll(mll)
+        mll = fit_gpytorch_mll(mll, optimizer_kwargs=self.optimizer_options)
 
     def update(self, train_x: Tensor, train_y: Tensor, warmstart: bool = True) -> None:
         """
@@ -337,6 +343,8 @@
             classname, "monotonic_idxs", fallback=[-1]
         )
 
+        optimizer_options = get_optimizer_options(config, classname)
+
         return cls(
             monotonic_idxs=monotonic_idxs,
             lb=lb,
@@ -347,6 +355,7 @@
             num_rejection_samples=num_rejection_samples,
             mean_module=mean,
             covar_module=covar,
+            optimizer_options=optimizer_options,
         )
 
     def forward(self, x: torch.Tensor) -> gpytorch.distributions.MultivariateNormal:
diff --git a/api/_modules/aepsych/models/pairwise_probit.html b/api/_modules/aepsych/models/pairwise_probit.html
index 55fc5a483..0b3e5c081 100644
--- a/api/_modules/aepsych/models/pairwise_probit.html
+++ b/api/_modules/aepsych/models/pairwise_probit.html
@@ -23,15 +23,14 @@
 # This source code is licensed under the license found in the
 # LICENSE file in the root directory of this source tree.
 
 import time
-from typing import Any, Dict, Optional, Tuple, Union
+from typing import Any, Dict, Optional, Tuple
 
 import gpytorch
-import numpy as np
 import torch
 from aepsych.config import Config
 from aepsych.factory import default_mean_covar_factory
 from aepsych.models.base import AEPsychMixin
-from aepsych.utils import _process_bounds, promote_0d
+from aepsych.utils import _process_bounds, get_optimizer_options, promote_0d
 from aepsych.utils_logging import getLogger
 from botorch.fit import fit_gpytorch_mll
 from botorch.models import PairwiseGP, PairwiseLaplaceMarginalLogLikelihood
@@ -82,6 +81,7 @@
         dim: Optional[int] = None,
         covar_module: Optional[gpytorch.kernels.Kernel] = None,
         max_fit_time: Optional[float] = None,
+        optimizer_options: Optional[Dict[str, Any]] = None,
     ) -> None:
         self.lb, self.ub, dim = _process_bounds(lb, ub, dim)
 
@@ -111,6 +111,9 @@
         )
         self.dim = dim  # The Pairwise constructor sets self.dim = None.
 
+        self.optimizer_options = (
+            {"options": optimizer_options} if optimizer_options else {"options": {}}
+        )
 
     def fit(
         self,
@@ -119,6 +122,12 @@
         optimizer_kwargs: Optional[Dict[str, Any]] = None,
         **kwargs,
     ) -> None:
+        if optimizer_kwargs is not None:
+            if not "optimizer_kwargs" in optimizer_kwargs:
+                optimizer_kwargs = optimizer_kwargs.copy()
+                optimizer_kwargs.update(self.optimizer_options)
+        else:
+            optimizer_kwargs = {"options": self.optimizer_options}
         self.train()
         mll = PairwiseLaplaceMarginalLogLikelihood(self.likelihood, self)
         datapoints, comparisons = self._pairs_to_comparisons(train_x, train_y)
@@ -127,17 +136,21 @@
         optimizer_kwargs = {} if optimizer_kwargs is None else optimizer_kwargs.copy()
         max_fit_time = kwargs.pop("max_fit_time", self.max_fit_time)
         if max_fit_time is not None:
+            if "options" not in optimizer_kwargs:
+                optimizer_kwargs["options"] = {}
+
             # figure out how long evaluating a single samp
             starttime = time.time()
             _ = mll(self(datapoints), comparisons)
             single_eval_time = time.time() - starttime
             n_eval = int(max_fit_time / single_eval_time)
-            optimizer_kwargs["maxfun"] = n_eval
+
+            optimizer_kwargs["options"]["maxfun"] = n_eval
             logger.info(f"fit maxfun is {n_eval}")
 
         logger.info("Starting fit...")
         starttime = time.time()
-        fit_gpytorch_mll(mll, **kwargs, **optimizer_kwargs)
+        fit_gpytorch_mll(mll, optimizer_kwargs=optimizer_kwargs, **kwargs)
         logger.info(f"Fit done, time={time.time()-starttime}")
 
     def update(
@@ -227,7 +240,16 @@
         max_fit_time = config.getfloat(classname, "max_fit_time", fallback=None)
 
-        return cls(lb=lb, ub=ub, dim=dim, covar_module=covar, max_fit_time=max_fit_time)
+        optimizer_options = get_optimizer_options(config, classname)
+
+        return cls(
+            lb=lb,
+            ub=ub,
+            dim=dim,
+            covar_module=covar,
+            max_fit_time=max_fit_time,
+            optimizer_options=optimizer_options,
+        )
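To make the PairwiseProbitModel wiring above concrete: the options are stored pre-nested under an "options" key, so they can be handed straight to BoTorch's fit_gpytorch_mll as optimizer_kwargs, which the default SciPy-based fitter forwards to scipy.optimize.minimize. A small sketch of the dict shapes, with made-up values that are not part of the diff:

    # Illustration only; the option values are made up.
    optimizer_options = {"maxfun": 1000, "maxls": 30}  # e.g. what get_optimizer_options returns
    stored = {"options": optimizer_options} if optimizer_options else {"options": {}}
    assert stored == {"options": {"maxfun": 1000, "maxls": 30}}
    # fit_gpytorch_mll(mll, optimizer_kwargs=stored) then runs L-BFGS-B with
    # options={"maxfun": 1000, "maxls": 30}.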
diff --git a/api/_modules/aepsych/models/pairwise_probit/index.html b/api/_modules/aepsych/models/pairwise_probit/index.html
index 55fc5a483..0b3e5c081 100644
--- a/api/_modules/aepsych/models/pairwise_probit/index.html
+++ b/api/_modules/aepsych/models/pairwise_probit/index.html
@@ -23,15 +23,14 @@
 # This source code is licensed under the license found in the
 # LICENSE file in the root directory of this source tree.
 
 import time
-from typing import Any, Dict, Optional, Tuple, Union
+from typing import Any, Dict, Optional, Tuple
 
 import gpytorch
-import numpy as np
 import torch
 from aepsych.config import Config
 from aepsych.factory import default_mean_covar_factory
 from aepsych.models.base import AEPsychMixin
-from aepsych.utils import _process_bounds, promote_0d
+from aepsych.utils import _process_bounds, get_optimizer_options, promote_0d
 from aepsych.utils_logging import getLogger
 from botorch.fit import fit_gpytorch_mll
 from botorch.models import PairwiseGP, PairwiseLaplaceMarginalLogLikelihood
@@ -82,6 +81,7 @@
         dim: Optional[int] = None,
         covar_module: Optional[gpytorch.kernels.Kernel] = None,
         max_fit_time: Optional[float] = None,
+        optimizer_options: Optional[Dict[str, Any]] = None,
     ) -> None:
         self.lb, self.ub, dim = _process_bounds(lb, ub, dim)
 
@@ -111,6 +111,9 @@
         )
         self.dim = dim  # The Pairwise constructor sets self.dim = None.
 
+        self.optimizer_options = (
+            {"options": optimizer_options} if optimizer_options else {"options": {}}
+        )
 
     def fit(
         self,
@@ -119,6 +122,12 @@
         optimizer_kwargs: Optional[Dict[str, Any]] = None,
         **kwargs,
     ) -> None:
+        if optimizer_kwargs is not None:
+            if not "optimizer_kwargs" in optimizer_kwargs:
+                optimizer_kwargs = optimizer_kwargs.copy()
+                optimizer_kwargs.update(self.optimizer_options)
+        else:
+            optimizer_kwargs = {"options": self.optimizer_options}
         self.train()
         mll = PairwiseLaplaceMarginalLogLikelihood(self.likelihood, self)
         datapoints, comparisons = self._pairs_to_comparisons(train_x, train_y)
@@ -127,17 +136,21 @@
         optimizer_kwargs = {} if optimizer_kwargs is None else optimizer_kwargs.copy()
         max_fit_time = kwargs.pop("max_fit_time", self.max_fit_time)
         if max_fit_time is not None:
+            if "options" not in optimizer_kwargs:
+                optimizer_kwargs["options"] = {}
+
             # figure out how long evaluating a single samp
             starttime = time.time()
             _ = mll(self(datapoints), comparisons)
             single_eval_time = time.time() - starttime
             n_eval = int(max_fit_time / single_eval_time)
-            optimizer_kwargs["maxfun"] = n_eval
+
+            optimizer_kwargs["options"]["maxfun"] = n_eval
             logger.info(f"fit maxfun is {n_eval}")
 
         logger.info("Starting fit...")
         starttime = time.time()
-        fit_gpytorch_mll(mll, **kwargs, **optimizer_kwargs)
+        fit_gpytorch_mll(mll, optimizer_kwargs=optimizer_kwargs, **kwargs)
         logger.info(f"Fit done, time={time.time()-starttime}")
 
     def update(
@@ -227,7 +240,16 @@
         max_fit_time = config.getfloat(classname, "max_fit_time", fallback=None)
 
-        return cls(lb=lb, ub=ub, dim=dim, covar_module=covar, max_fit_time=max_fit_time)
+        optimizer_options = get_optimizer_options(config, classname)
+
+        return cls(
+            lb=lb,
+            ub=ub,
+            dim=dim,
+            covar_module=covar,
+            max_fit_time=max_fit_time,
+            optimizer_options=optimizer_options,
+        )
diff --git a/api/_modules/aepsych/models/semi_p.html b/api/_modules/aepsych/models/semi_p.html
index d8c538778..a9d551241 100644
--- a/api/_modules/aepsych/models/semi_p.html
+++ b/api/_modules/aepsych/models/semi_p.html
@@ -26,7 +26,7 @@
 from __future__ import annotations
 
 from copy import deepcopy
-from typing import Any, Optional, Tuple, Union
+from typing import Any, Dict, Optional, Tuple, Union
 
 import gpytorch
 import numpy as np
@@ -36,7 +36,7 @@
 from aepsych.config import Config
 from aepsych.likelihoods import BernoulliObjectiveLikelihood, LinearBernoulliLikelihood
 from aepsych.models import GPClassificationModel
-from aepsych.utils import _process_bounds, promote_0d
+from aepsych.utils import _process_bounds, get_optimizer_options, promote_0d
 from aepsych.utils_logging import getLogger
 from botorch.acquisition.objective import PosteriorTransform
 from botorch.optim.fit import fit_gpytorch_mll_scipy
@@ -207,6 +207,7 @@
         inducing_size: Optional[int] = None,
         max_fit_time: Optional[float] = None,
         inducing_point_method: str = "auto",
+        optimizer_options: Optional[Dict[str, Any]] = None,
     ) -> None:
         """
         Initialize SemiParametricGP.
@@ -230,6 +231,8 @@
                 If "pivoted_chol", selects points based on the pivoted Cholesky heuristic.
                 If "kmeans++", selects points by performing kmeans++ clustering on the training data.
                 If "auto", tries to determine the best method automatically.
+            optimizer_options (Dict[str, Any], optional): Optimizer options to pass to the SciPy optimizer during
+                fitting. Assumes we are using L-BFGS-B.
         """
 
         lb, ub, dim = _process_bounds(lb, ub, dim)
@@ -270,6 +273,7 @@
             inducing_size=inducing_size,
             max_fit_time=max_fit_time,
             inducing_point_method=inducing_point_method,
+            optimizer_options=optimizer_options,
         )
 
 
     @classmethod
@@ -312,6 +316,8 @@
 
         slope_mean = config.getfloat(classname, "slope_mean", fallback=2)
 
+        optimizer_options = get_optimizer_options(config, classname)
+
         return cls(
             lb=lb,
             ub=ub,
@@ -322,6 +328,7 @@
             inducing_size=inducing_size,
             max_fit_time=max_fit_time,
             inducing_point_method=inducing_point_method,
+            optimizer_options=optimizer_options,
         )
 
     def fit(
@@ -458,6 +465,7 @@
         inducing_size: Optional[int] = None,
         max_fit_time: Optional[float] = None,
         inducing_point_method: str = "auto",
+        optimizer_options: Optional[Dict[str, Any]] = None,
     ) -> None:
         """
         Initialize HadamardSemiPModel.
@@ -480,6 +488,8 @@
                 If "pivoted_chol", selects points based on the pivoted Cholesky heuristic.
                 If "kmeans++", selects points by performing kmeans++ clustering on the training data.
                 If "auto", tries to determine the best method automatically.
+            optimizer_options (Dict[str, Any], optional): Optimizer options to pass to the SciPy optimizer during
+                fitting. Assumes we are using L-BFGS-B.
         """
         super().__init__(
             lb=lb,
@@ -488,6 +498,7 @@ 

Source code for aepsych.models.semi_p

             inducing_size=inducing_size,
             max_fit_time=max_fit_time,
             inducing_point_method=inducing_point_method,
+            optimizer_options=optimizer_options,
         )
 
         self.stim_dim = stim_dim
@@ -613,6 +624,8 @@
 
         stim_dim = config.getint(classname, "stim_dim", fallback=0)
 
+        optimizer_options = get_optimizer_options(config, classname)
+
         return cls(
             lb=lb,
             ub=ub,
@@ -627,6 +640,7 @@
             inducing_size=inducing_size,
             max_fit_time=max_fit_time,
             inducing_point_method=inducing_point_method,
+            optimizer_options=optimizer_options,
         )
 
     def predict(
diff --git a/api/_modules/aepsych/models/semi_p/index.html b/api/_modules/aepsych/models/semi_p/index.html
index d8c538778..a9d551241 100644
--- a/api/_modules/aepsych/models/semi_p/index.html
+++ b/api/_modules/aepsych/models/semi_p/index.html
@@ -26,7 +26,7 @@
 from __future__ import annotations
 
 from copy import deepcopy
-from typing import Any, Optional, Tuple, Union
+from typing import Any, Dict, Optional, Tuple, Union
 
 import gpytorch
 import numpy as np
@@ -36,7 +36,7 @@
 from aepsych.config import Config
 from aepsych.likelihoods import BernoulliObjectiveLikelihood, LinearBernoulliLikelihood
 from aepsych.models import GPClassificationModel
-from aepsych.utils import _process_bounds, promote_0d
+from aepsych.utils import _process_bounds, get_optimizer_options, promote_0d
 from aepsych.utils_logging import getLogger
 from botorch.acquisition.objective import PosteriorTransform
 from botorch.optim.fit import fit_gpytorch_mll_scipy
@@ -207,6 +207,7 @@
         inducing_size: Optional[int] = None,
         max_fit_time: Optional[float] = None,
         inducing_point_method: str = "auto",
+        optimizer_options: Optional[Dict[str, Any]] = None,
     ) -> None:
         """
         Initialize SemiParametricGP.
@@ -230,6 +231,8 @@
                 If "pivoted_chol", selects points based on the pivoted Cholesky heuristic.
                 If "kmeans++", selects points by performing kmeans++ clustering on the training data.
                 If "auto", tries to determine the best method automatically.
+            optimizer_options (Dict[str, Any], optional): Optimizer options to pass to the SciPy optimizer during
+                fitting. Assumes we are using L-BFGS-B.
         """
 
         lb, ub, dim = _process_bounds(lb, ub, dim)
@@ -270,6 +273,7 @@
             inducing_size=inducing_size,
             max_fit_time=max_fit_time,
             inducing_point_method=inducing_point_method,
+            optimizer_options=optimizer_options,
         )
 
 
     @classmethod
@@ -312,6 +316,8 @@
 
         slope_mean = config.getfloat(classname, "slope_mean", fallback=2)
 
+        optimizer_options = get_optimizer_options(config, classname)
+
         return cls(
             lb=lb,
             ub=ub,
@@ -322,6 +328,7 @@
             inducing_size=inducing_size,
             max_fit_time=max_fit_time,
             inducing_point_method=inducing_point_method,
+            optimizer_options=optimizer_options,
         )
 
     def fit(
@@ -458,6 +465,7 @@
         inducing_size: Optional[int] = None,
         max_fit_time: Optional[float] = None,
         inducing_point_method: str = "auto",
+        optimizer_options: Optional[Dict[str, Any]] = None,
     ) -> None:
         """
         Initialize HadamardSemiPModel.
@@ -480,6 +488,8 @@
                 If "pivoted_chol", selects points based on the pivoted Cholesky heuristic.
                 If "kmeans++", selects points by performing kmeans++ clustering on the training data.
                 If "auto", tries to determine the best method automatically.
+            optimizer_options (Dict[str, Any], optional): Optimizer options to pass to the SciPy optimizer during
+                fitting. Assumes we are using L-BFGS-B.
         """
         super().__init__(
             lb=lb,
@@ -488,6 +498,7 @@ 

Source code for aepsych.models.semi_p

             inducing_size=inducing_size,
             max_fit_time=max_fit_time,
             inducing_point_method=inducing_point_method,
+            optimizer_options=optimizer_options,
         )
 
         self.stim_dim = stim_dim
@@ -613,6 +624,8 @@
 
         stim_dim = config.getint(classname, "stim_dim", fallback=0)
 
+        optimizer_options = get_optimizer_options(config, classname)
+
         return cls(
             lb=lb,
             ub=ub,
@@ -627,6 +640,7 @@
             inducing_size=inducing_size,
             max_fit_time=max_fit_time,
             inducing_point_method=inducing_point_method,
+            optimizer_options=optimizer_options,
         )
 
     def predict(
diff --git a/api/_modules/aepsych/utils.html b/api/_modules/aepsych/utils.html
index 82ac0c359..95f1d534c 100644
--- a/api/_modules/aepsych/utils.html
+++ b/api/_modules/aepsych/utils.html
@@ -364,6 +364,34 @@
             )  # Choice parameters with n_choices < 3 add n_choices - 1 dims
 
     return dim
+
+
+def get_optimizer_options(config: Config, name: str) -> Dict[str, Any]:
+    """Return the optimizer options for the model to pass to the SciPy L-BFGS-B
+    optimizer. Only the somewhat useful ones for AEPsych are searched for: maxcor,
+    ftol, gtol, maxfun, maxiter, maxls. See docs for details:
+    https://docs.scipy.org/doc/scipy/reference/optimize.minimize-lbfgsb.html#optimize-minimize-lbfgsb
+
+    Args:
+        config (Config): Config to search for options.
+        name (str): Model name to look for options for.
+
+    Return:
+        Dict[str, Any]: Dictionary of options to pass to SciPy's minimize, assuming the
+            method is L-BFGS-B.
+    """
+    options: Dict[str, Optional[Union[float, int]]] = {}
+
+    options["maxcor"] = config.getint(name, "maxcor", fallback=None)
+    options["ftol"] = config.getfloat(name, "ftol", fallback=None)
+    options["gtol"] = config.getfloat(name, "gtol", fallback=None)
+    options["maxfun"] = config.getint(name, "maxfun", fallback=None)
+    options["maxiter"] = config.getint(name, "maxiter", fallback=None)
+    options["maxls"] = config.getint(name, "maxls", fallback=None)
+
+    # Filter all the nones out, which could just come back as an empty dict
+    options = {key: value for key, value in options.items() if value is not None}
+    return options
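For context on the get_optimizer_options helper added above, a sketch of how it reads options from an AEPsych config; the [GPRegressionModel] section name and the values are hypothetical, not part of the diff.

    # Hypothetical config exercising the helper added above.
    from aepsych.config import Config
    from aepsych.utils import get_optimizer_options

    config = Config(config_str="[GPRegressionModel]\nmaxfun = 2000\nftol = 1e-9\n")
    options = get_optimizer_options(config, "GPRegressionModel")
    # Options that are not set fall back to None and are filtered out:
    # options == {"ftol": 1e-09, "maxfun": 2000}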
diff --git a/api/_modules/aepsych/utils/index.html b/api/_modules/aepsych/utils/index.html
index 82ac0c359..95f1d534c 100644
--- a/api/_modules/aepsych/utils/index.html
+++ b/api/_modules/aepsych/utils/index.html
@@ -364,6 +364,34 @@
             )  # Choice parameters with n_choices < 3 add n_choices - 1 dims
 
     return dim
+
+
+def get_optimizer_options(config: Config, name: str) -> Dict[str, Any]:
+    """Return the optimizer options for the model to pass to the SciPy L-BFGS-B
+    optimizer. Only the somewhat useful ones for AEPsych are searched for: maxcor,
+    ftol, gtol, maxfun, maxiter, maxls. See docs for details:
+    https://docs.scipy.org/doc/scipy/reference/optimize.minimize-lbfgsb.html#optimize-minimize-lbfgsb
+
+    Args:
+        config (Config): Config to search for options.
+        name (str): Model name to look for options for.
+
+    Return:
+        Dict[str, Any]: Dictionary of options to pass to SciPy's minimize, assuming the
+            method is L-BFGS-B.
+    """
+    options: Dict[str, Optional[Union[float, int]]] = {}
+
+    options["maxcor"] = config.getint(name, "maxcor", fallback=None)
+    options["ftol"] = config.getfloat(name, "ftol", fallback=None)
+    options["gtol"] = config.getfloat(name, "gtol", fallback=None)
+    options["maxfun"] = config.getint(name, "maxfun", fallback=None)
+    options["maxiter"] = config.getint(name, "maxiter", fallback=None)
+    options["maxls"] = config.getint(name, "maxls", fallback=None)
+
+    # Filter all the nones out, which could just come back as an empty dict
+    options = {key: value for key, value in options.items() if value is not None}
+    return options
diff --git a/api/genindex.html b/api/genindex.html
index 67a560b26..ab43fd5e6 100644
--- a/api/genindex.html
+++ b/api/genindex.html
@@ -1021,10 +1021,10 @@
 G
 
   • get_dim() (in module aepsych.utils)
   • get_jnd() (aepsych.models.base.AEPsychMixin method)
   • get_next_filename() (in module aepsych.server.server)
+  • get_optimizer_options() (in module aepsych.utils)
   • get_outcome_for() (aepsych.database.db.Database method)
   • get_param_for() (aepsych.database.db.Database method)
diff --git a/api/models.html b/api/models.html
index 3bc406479..94cc86ced 100644
--- a/api/models.html
+++ b/api/models.html
@@ -542,6 +542,8 @@

   • acqf (MonotonicMCAcquisition, optional) – Acquisition function to use for querying points. Defaults to MonotonicMCLSE.
   • objective (Optional[MCAcquisitionObjective], optional) – Transformation of GP to apply before computing acquisition function. Defaults to identity transform for gaussian likelihood, probit transform for probit-bernoulli.
   • extra_acqf_args (Optional[Dict[str, object]], optional) – Additional arguments to pass into the acquisition function. Defaults to None.
+  • optimizer_options (Dict[str, Any], optional) – Optimizer options to pass to the SciPy optimizer during
+    fitting. Assumes we are using L-BFGS-B.
   • lb (Union[np.ndarray, torch.Tensor]) –
   • ub (Union[np.ndarray, torch.Tensor]) –
   • dim (Optional[int]) –
@@ -894,6 +900,8 @@
   • acqf (MonotonicMCAcquisition, optional) – Acquisition function to use for querying points. Defaults to MonotonicMCLSE.
   • objective (Optional[MCAcquisitionObjective], optional) – Transformation of GP to apply before computing acquisition function. Defaults to identity transform for gaussian likelihood, probit transform for probit-bernoulli.
   • extra_acqf_args (Optional[Dict[str, object]], optional) – Additional arguments to pass into the acquisition function. Defaults to None.
+  • optimizer_options (Dict[str, Any], optional) – Optimizer options to pass to the SciPy optimizer during
+    fitting. Assumes we are using L-BFGS-B.
   • lb (Union[np.ndarray, torch.Tensor]) –
   • ub (Union[np.ndarray, torch.Tensor]) –
   • dim (Optional[int]) –
@@ -1217,6 +1227,8 @@
   • acqf (MonotonicMCAcquisition, optional) – Acquisition function to use for querying points. Defaults to MonotonicMCLSE.
   • objective (Optional[MCAcquisitionObjective], optional) – Transformation of GP to apply before computing acquisition function. Defaults to identity transform for gaussian likelihood, probit transform for probit-bernoulli.
   • extra_acqf_args (Optional[Dict[str, object]], optional) – Additional arguments to pass into the acquisition function. Defaults to None.
+  • optimizer_options (Dict[str, Any], optional) – Optimizer options to pass to the SciPy optimizer during
+    fitting. Assumes we are using L-BFGS-B.
   • lb (Union[np.ndarray, torch.Tensor]) –
   • ub (Union[np.ndarray, torch.Tensor]) –
   • dim (Optional[int]) –
@@ -894,6 +900,8 @@
   • acqf (MonotonicMCAcquisition, optional) – Acquisition function to use for querying points. Defaults to MonotonicMCLSE.
   • objective (Optional[MCAcquisitionObjective], optional) – Transformation of GP to apply before computing acquisition function. Defaults to identity transform for gaussian likelihood, probit transform for probit-bernoulli.
   • extra_acqf_args (Optional[Dict[str, object]], optional) – Additional arguments to pass into the acquisition function. Defaults to None.
+  • optimizer_options (Dict[str, Any], optional) – Optimizer options to pass to the SciPy optimizer during
+    fitting. Assumes we are using L-BFGS-B.
   • lb (Union[np.ndarray, torch.Tensor]) –
   • ub (Union[np.ndarray, torch.Tensor]) –
   • dim (Optional[int]) –
@@ -1217,6 +1227,8 @@

+aepsych.utils.get_optimizer_options(config, name)[source]
+    Return the optimizer options for the model to pass to the SciPy L-BFGS-B
+    optimizer. Only the somewhat useful ones for AEPsych are searched for: maxcor,
+    ftol, gtol, maxfun, maxiter, maxls. See docs for details:
+    https://docs.scipy.org/doc/scipy/reference/optimize.minimize-lbfgsb.html#optimize-minimize-lbfgsb
+
+    Parameters:
+        • config (Config) – Config to search for options.
+        • name (str) – Model name to look for options for.
+
+    Returns:
+        Dictionary of options to pass to SciPy's minimize, assuming the
+        method is L-BFGS-B.
+
+    Return type:
+        Dict[str, Any]
diff --git a/api/utils/index.html b/api/utils/index.html
index f12cbe105..9ed032e92 100644
--- a/api/utils/index.html
+++ b/api/utils/index.html
@@ -166,6 +166,32 @@
+aepsych.utils.get_optimizer_options(config, name)[source]
+    Return the optimizer options for the model to pass to the SciPy L-BFGS-B
+    optimizer. Only the somewhat useful ones for AEPsych are searched for: maxcor,
+    ftol, gtol, maxfun, maxiter, maxls. See docs for details:
+    https://docs.scipy.org/doc/scipy/reference/optimize.minimize-lbfgsb.html#optimize-minimize-lbfgsb
+
+    Parameters:
+        • config (Config) – Config to search for options.
+        • name (str) – Model name to look for options for.
+
+    Returns:
+        Dictionary of options to pass to SciPy's minimize, assuming the
+        method is L-BFGS-B.
+
+    Return type:
+        Dict[str, Any]
diff --git a/demos/ParticleEffectDemo.html b/demos/ParticleEffectDemo.html
index 336caa1a9..6b8fb8f14 100644
--- a/demos/ParticleEffectDemo.html
+++ b/demos/ParticleEffectDemo.html
@@ -64,7 +64,7 @@
-
+
 
 Particle Effect Demo