diff --git a/aepsych/config.py b/aepsych/config.py
index 857286289..3eb117d9d 100644
--- a/aepsych/config.py
+++ b/aepsych/config.py
@@ -182,8 +182,8 @@ def update(
# Validate the parameter-specific block
self._check_param_settings(par_name)
- lb[i] = self[par_name]["lower_bound"]
- ub[i] = self[par_name]["upper_bound"]
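+ # Binary parameters do not define bounds in their config block; fall back to [0, 1]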
+ lb[i] = self[par_name].get("lower_bound", fallback="0")
+ ub[i] = self[par_name].get("upper_bound", fallback="1")
self["common"]["lb"] = f"[{', '.join(lb)}]"
self["common"]["ub"] = f"[{', '.join(ub)}]"
@@ -276,6 +276,11 @@ def _check_param_settings(self, param_name: str) -> None:
and self.getint(param_name, "upper_bound") % 1 == 0
):
raise ValueError(f"Parameter {param_name} has non-discrete bounds.")
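+ # Binary parameters get implicit [0, 1] bounds, so explicit bounds are rejected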
+ elif param_block["par_type"] == "binary":
+ if "lower_bound" in param_block or "upper_bound" in param_block:
+ raise ValueError(
+ f"Parameter {param_name} is binary and shouldn't have bounds."
+ )
else:
raise ValueError(
f"Parameter {param_name} has an unsupported parameter type {param_block['par_type']}."
diff --git a/aepsych/transforms/parameters.py b/aepsych/transforms/parameters.py
index a31069648..a978925e0 100644
--- a/aepsych/transforms/parameters.py
+++ b/aepsych/transforms/parameters.py
@@ -172,8 +172,8 @@ def get_config_options(
except KeyError: # Probably because par doesn't have its own section
par_type = "continuous"
- # Discrete variable
- if par_type == "discrete":
+ # Discrete or binary variable
+ if par_type in ["discrete", "binary"]:
round = Discretize.from_config(
config=config, name=par, options=transform_options
)
@@ -197,7 +197,9 @@ def get_config_options(
transform_dict[f"{par}_Log10Plus"] = log10
# Normalize scale (defaults true)
- if config.getboolean(par, "normalize_scale", fallback=True):
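+ # Discrete and binary parameters skip normalization and stay on their raw scale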
+ if config.getboolean(
+ par, "normalize_scale", fallback=True
+ ) and par_type not in ["discrete", "binary"]:
normalize = NormalizeScale.from_config(
config=config, name=par, options=transform_options
)
diff --git a/docs/parameters.md b/docs/parameters.md
index f82b5931f..f3ea9b2ba 100644
--- a/docs/parameters.md
+++ b/docs/parameters.md
@@ -15,6 +15,7 @@ what parameter types are used and whatever transformations are used.
-Currently, we only support continuous parameters. More parameter types soon to come!
+We currently support continuous, discrete, and binary parameters, with more to come!
Continuous
+
```ini
[parameter]
par_type = continuous
@@ -28,6 +29,7 @@ include negative values (e.g., lower bound = -1, upper bound = 1) or have very l
ranges (e.g., lower bound = 0, upper bound = 1,000,000).
Discrete
+
```ini
[parameter]
par_type = discrete
@@ -40,6 +42,24 @@ and necessity of bounds. However, discrete parameters will use continuous relaxa
allow the models and generators to handle discrete input/outputs. This is necessarily
ordinal with equal spacing between each discrete point within the bounds.
+Binary
+
+```ini
+[parameter]
+par_type = binary
+```
+
+Binary parameters are implemented as a special case of discrete parameters. No bounds
+should be set; the parameter is treated as a discrete parameter that can only take the
+values 0 or 1. A binary parameter is therefore equivalent to this:
+
+```ini
+[parameter]
+par_type = discrete
+lower_bound = 0
+upper_bound = 1
+```
+
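+For example, a minimal full configuration that uses a single binary parameter together
+with a Sobol initialization strategy (parameter and strategy names here are just
+placeholders) might look like this:
+
+```ini
+[common]
+parnames = [signal1]
+stimuli_per_trial = 1
+outcome_types = [binary]
+strategy_names = [init_strat]
+
+[signal1]
+par_type = binary
+
+[init_strat]
+generator = SobolGenerator
+min_asks = 1
+```
+
+When this config is loaded, lower_bound and upper_bound for signal1 are filled in
+automatically as 0 and 1, and the parameter is discretized the same way a discrete
+parameter would be.
+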
Parameter Transformations
Currently, we only support a log scale transformation to parameters. More parameter
transformations to come! In general, you can define your parameters in the raw
diff --git a/tests/test_transforms.py b/tests/test_transforms.py
index dfdb1ae90..44024fcd3 100644
--- a/tests/test_transforms.py
+++ b/tests/test_transforms.py
@@ -17,7 +17,7 @@
ParameterTransformedModel,
ParameterTransforms,
)
-from aepsych.transforms.ops import Log10Plus, NormalizeScale
+from aepsych.transforms.ops import Discretize, Log10Plus, NormalizeScale
class TransformsConfigTest(unittest.TestCase):
@@ -364,7 +364,7 @@ def test_normalize_scale(self):
class TransformDiscrete(unittest.TestCase):
def test_discrete_bounds(self):
- config_str = f"""
+ config_str = """
[common]
parnames = [signal1, signal2]
stimuli_per_trial = 1
@@ -448,3 +448,55 @@ def test_discrete_model(self):
est_max = x[np.argmin((zhat - target) ** 2)]
diff = np.abs(est_max / 100 - target)
self.assertTrue(diff < 0.15, f"Diff = {diff}")
+
+ def test_binary(self):
+ config_str = """
+ [common]
+ parnames = [signal1]
+ stimuli_per_trial = 1
+ outcome_types = [binary]
+ strategy_names = [init_strat]
+
+ [signal1]
+ par_type = binary
+
+ [init_strat]
+ generator = SobolGenerator
+ min_asks = 1
+ """
+ config = Config()
+ config.update(config_str=config_str)
+
+ strat = SequentialStrategy.from_config(config)
+
+ transforms = strat.transforms
+
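+ # A binary parameter should produce a single Discretize transform and implicit [0, 1] bounds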
+ self.assertTrue(len(transforms) == 1)
+ self.assertTrue(isinstance(list(transforms.values())[0], Discretize))
+ self.assertTrue(
+ torch.all(config.gettensor("common", "lb") == torch.tensor([0]))
+ )
+ self.assertTrue(
+ torch.all(config.gettensor("common", "ub") == torch.tensor([1]))
+ )
+
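+ # Explicit bounds on a binary parameter are invalid and should raise when the config is parsed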
+ bad_config_str = """
+ [common]
+ parnames = [signal1]
+ stimuli_per_trial = 1
+ outcome_types = [binary]
+ strategy_names = [init_strat]
+
+ [signal1]
+ par_type = binary
+ lower_bound = 0
+ upper_bound = 1
+
+ [init_strat]
+ generator = SobolGenerator
+ min_asks = 1
+ """
+ config = Config()
+
+ with self.assertRaises(ValueError):
+ config.update(config_str=bad_config_str)