From edf7389e461b90e6418bb78ce358c32d19ddde32 Mon Sep 17 00:00:00 2001
From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com>
Date: Wed, 13 Nov 2024 10:35:19 -0500
Subject: [PATCH 01/34] requirements: update optuna requirement from <4.1.0 to
 <4.2.0 (#3110)

Updates the requirements on [optuna](https://github.com/optuna/optuna) to permit the latest version.
- [Release notes](https://github.com/optuna/optuna/releases)
- [Commits](https://github.com/optuna/optuna/compare/v4.0.0...v4.1.0)

---
updated-dependencies:
- dependency-name: optuna
  dependency-type: direct:production
...

Signed-off-by: dependabot[bot]
Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
---
 requirements.txt | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/requirements.txt b/requirements.txt
index 818c32c38d..644ccbcef3 100644
--- a/requirements.txt
+++ b/requirements.txt
@@ -10,7 +10,7 @@ matplotlib<3.7.6
 modeci_mdf<0.5, >=0.4.3; (platform_machine == 'AMD64' or platform_machine == 'x86_64' or platform_machine == 'arm64' or platform_machine == 'aarch64') and platform_python_implementation == 'CPython' and implementation_name == 'cpython'
 networkx<3.5
 numpy>=1.21.0, <1.26.5
-optuna<4.1.0
+optuna<4.2.0
 packaging<25.0
 pandas<2.2.4
 pillow<11.1.0

From c70ee2acc2e272a03af7391eba391b8812591e27 Mon Sep 17 00:00:00 2001
From: Jan Vesely
Date: Fri, 8 Nov 2024 22:57:51 -0500
Subject: [PATCH 02/34] test/EMComposition: Split softmax_choice test

Results, errors, and warnings are tested independently for each value
of the softmax_choice parameter.

Signed-off-by: Jan Vesely
---
 tests/composition/test_emcomposition.py | 59 +++++++++++++------------
 1 file changed, 31 insertions(+), 28 deletions(-)

diff --git a/tests/composition/test_emcomposition.py b/tests/composition/test_emcomposition.py
index 55c01ad7b5..be04be5907 100644
--- a/tests/composition/test_emcomposition.py
+++ b/tests/composition/test_emcomposition.py
@@ -221,34 +221,37 @@ def test_memory_fill(start, memory_fill):
         elif repeat and repeat < memory_capacity:  # Multi-entry specification and repeat = number entries; remainder
             test_memory_fill(start=repeat, memory_fill=memory_fill)
 
-    def test_softmax_choice(self):
-        for softmax_choice in [pnl.WEIGHTED_AVG, pnl.ARG_MAX, pnl.PROBABILISTIC]:
-            em = EMComposition(memory_template=[[[1,.1,.1]], [[1,.1,.1]], [[.1,.1,1]]],
-                               softmax_choice=softmax_choice,
-                               enable_learning=False)
-            result = em.run(inputs={em.query_input_nodes[0]:[[1,0,0]]})
-            if softmax_choice == pnl.WEIGHTED_AVG:
-                np.testing.assert_allclose(result, [[0.93016008, 0.1, 0.16983992]])
-            if softmax_choice == pnl.ARG_MAX:
-                np.testing.assert_allclose(result, [[1, .1, .1]])
-            if softmax_choice == pnl.PROBABILISTIC: # NOTE: actual stochasticity not tested here
-                np.testing.assert_allclose(result, [[1, .1, .1]])
-
-        em = EMComposition(memory_template=[[[1,.1,.1]], [[.1,1,.1]], [[.1,.1,1]]])
-        for softmax_choice in [pnl.ARG_MAX, pnl.PROBABILISTIC]:
-            with pytest.raises(EMCompositionError) as error_text:
-                em.parameters.softmax_choice.set(softmax_choice)
-                em.learn()
-            assert (f"The ARG_MAX and PROBABILISTIC options for the 'softmax_choice' arg "
-                    f"of '{em.name}' cannot be used during learning; change to WEIGHTED_AVG."
-                    in str(error_text.value))
-
-        for softmax_choice in [pnl.ARG_MAX, pnl.PROBABILISTIC]:
-            with pytest.warns(UserWarning) as warning:
-                em = EMComposition(softmax_choice=softmax_choice, enable_learning=True)
-                warning_msg = (f"The 'softmax_choice' arg of '{em.name}' is set to '{softmax_choice}' with "
-                               f"'enable_learning' set to True (or a list); this will generate an error if its "
-                               f"'learn' method is called. Set 'softmax_choice' to WEIGHTED_AVG before learning.")
-            assert warning_msg in str(warning[0].message)
+    @pytest.mark.parametrize("softmax_choice, expected",
+                             [(pnl.WEIGHTED_AVG, [[0.93016008, 0.1, 0.16983992]]),
+                              (pnl.ARG_MAX, [[1, .1, .1]]),
+                              (pnl.PROBABILISTIC, [[1, .1, .1]]), # NOTE: actual stochasticity not tested here
+                              ])
+    def test_softmax_choice(self, softmax_choice, expected):
+        em = EMComposition(memory_template=[[[1,.1,.1]], [[1,.1,.1]], [[.1,.1,1]]],
+                           softmax_choice=softmax_choice,
+                           enable_learning=False)
+        result = em.run(inputs={em.query_input_nodes[0]:[[1,0,0]]})
+
+        np.testing.assert_allclose(result, expected)
+
+    @pytest.mark.parametrize("softmax_choice", [pnl.ARG_MAX, pnl.PROBABILISTIC])
+    def test_softmax_choice_error(self, softmax_choice):
+        em = EMComposition(memory_template=[[[1, .1, .1]], [[.1, 1, .1]], [[.1, .1, 1]]])
+        msg = (f"The ARG_MAX and PROBABILISTIC options for the 'softmax_choice' arg "
+               f"of '{em.name}' cannot be used during learning; change to WEIGHTED_AVG.")
+
+        with pytest.raises(EMCompositionError, match=msg):
+            em.parameters.softmax_choice.set(softmax_choice)
+            em.learn()
+
+    @pytest.mark.parametrize("softmax_choice", [pnl.ARG_MAX, pnl.PROBABILISTIC])
+    def test_softmax_choice_warn(self, softmax_choice):
+        warning_msg = (f"The 'softmax_choice' arg of '.*' is set to '{softmax_choice}' with "
+                       f"'enable_learning' set to True \\(or a list\\); this will generate an error if its "
+                       f"'learn' method is called. Set 'softmax_choice' to WEIGHTED_AVG before learning.")
+
+        with pytest.warns(UserWarning, match=warning_msg):
+            EMComposition(softmax_choice=softmax_choice, enable_learning=True)
 
     @pytest.mark.pytorch

From ced79359c0aa1035ec04e1722e62f2a8c31ae3ef Mon Sep 17 00:00:00 2001
From: Jan Vesely
Date: Mon, 11 Nov 2024 13:58:17 -0500
Subject: [PATCH 03/34] Functions/OneHot: Fix "DETERMINISITC" typo

Signed-off-by: Jan Vesely
---
 .../components/functions/nonstateful/selectionfunctions.py | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

diff --git a/psyneulink/core/components/functions/nonstateful/selectionfunctions.py b/psyneulink/core/components/functions/nonstateful/selectionfunctions.py
index fb73085de6..07ebe97102 100644
--- a/psyneulink/core/components/functions/nonstateful/selectionfunctions.py
+++ b/psyneulink/core/components/functions/nonstateful/selectionfunctions.py
@@ -187,7 +187,7 @@ class OneHot(SelectionFunction):
         First (possibly only) item specifies a template for the array to be transformed;
         if `mode ` is *PROB* then a 2nd item must be included that is a probability
         distribution with same length as 1st item.
 
-    mode : DETERMINISITC, PROB, PROB_INDICATOR,
+    mode : DETERMINISTIC, PROB, PROB_INDICATOR,
         ARG_MAX, ARG_MAX_ABS, ARG_MAX_INDICATOR, ARG_MAX_ABS_INDICATOR,
         ARG_MIN, ARG_MIN_ABS, ARG_MIN_INDICATOR, ARG_MIN_ABS_INDICATOR,
         MAX_VAL, MAX_ABS_VAL, MAX_INDICATOR, MAX_ABS_INDICATOR,
@@ -237,7 +237,7 @@ class OneHot(SelectionFunction):
         distribution, each element of which specifies the probability for selecting the
         corresponding element of the 1st item.
 
-    mode : DETERMINISITC, PROB, PROB_INDICATOR,
+    mode : DETERMINISTIC, PROB, PROB_INDICATOR,
         ARG_MAX, ARG_MAX_ABS, ARG_MAX_INDICATOR, ARG_MAX_ABS_INDICATOR,
         ARG_MIN, ARG_MIN_ABS, ARG_MIN_INDICATOR, ARG_MIN_ABS_INDICATOR,
         MAX_VAL, MAX_ABS_VAL, MAX_INDICATOR, MAX_ABS_INDICATOR,

From 3053b0c28e8437feb443a51a58ca032d289ef027 Mon Sep 17 00:00:00 2001
From: Jan Vesely
Date: Sat, 9 Nov 2024 00:31:22 -0500
Subject: [PATCH 04/34] Functions/OneHot: Use local PRNG to randomly select
 extreme index

Simplify implementation.
Handle multidimensional arrays.

Signed-off-by: Jan Vesely
---
 .../nonstateful/selectionfunctions.py         | 82 +++++++++----------
 1 file changed, 41 insertions(+), 41 deletions(-)

diff --git a/psyneulink/core/components/functions/nonstateful/selectionfunctions.py b/psyneulink/core/components/functions/nonstateful/selectionfunctions.py
index 07ebe97102..a7767e505e 100644
--- a/psyneulink/core/components/functions/nonstateful/selectionfunctions.py
+++ b/psyneulink/core/components/functions/nonstateful/selectionfunctions.py
@@ -641,6 +641,9 @@ def _parse_mode(self, mode):
             indicator = True
             tie = ALL
 
+        else:
+            assert False, f"Unknown mode: {mode}"
+
         return direction, abs_val, indicator, tie
 
     def _function(self,
@@ -693,65 +696,62 @@ def _function(self,
             random_value = random_state.uniform()
             chosen_item = next(element for element in cum_sum if element > random_value)
             chosen_in_cum_sum = np.where(cum_sum == chosen_item, 1, 0)
-            if mode is PROB:
+            if mode == PROB:
                 result = v * chosen_in_cum_sum
             else:
                 result = np.ones_like(v) * chosen_in_cum_sum
+
             # chosen_item = np.random.choice(v, 1, p=prob_dist)
             # one_hot_indicator = np.where(v == chosen_item, 1, 0)
             # return v * one_hot_indicator
             return result
 
-        elif mode is not DETERMINISTIC:
+        elif mode != DETERMINISTIC:
             direction, abs_val, indicator, tie = self._parse_mode(mode)
 
-            # if np.array(variable).ndim != 1:
-            #     raise FunctionError(f"If {MODE} for {self.__class__.__name__} {Function.__name__} is not set to "
-            #                         f"'PROB' or 'PROB_INDICATOR', variable must be a 1d array: {variable}.")
-            array = variable
-            max = None
-            min = None
-            if abs_val is True:
-                array = np.absolute(array)
+            array = np.absolute(variable)
 
             if direction == MAX:
-                max = np.max(array)
-                if max == -np.inf:
-                    warnings.warn(f"Array passed to {self.name} of {self.owner.name} "
-                                  f"is all -inf.")
+                extreme_val = np.max(array)
+                if extreme_val == -np.inf:
+                    warnings.warn(f"Array passed to {self.name} of {self.owner.name} is all -inf.")
+
+            elif direction == MIN:
+                extreme_val = np.min(array)
+                if extreme_val == np.inf:
+                    warnings.warn(f"Array passed to {self.name} of {self.owner.name} is all inf.")
+
             else:
-                min = np.min(array)
-                if min == np.inf:
-                    warnings.warn(f"Array passed to {self.name} of {self.owner.name} "
-                                  f"is all inf.")
+                assert False, f"Unknown direction: '{direction}'."
 
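# --- Editor's sketch (not part of the patch): a minimal illustration of the
# tie-breaking scheme this hunk introduces, assuming a 2d input and the RANDOM
# tie mode; the array values and the seed are hypothetical.
import numpy as np

array = np.array([[1, 3], [3, 2]])                 # two tied maxima (value 3)
extreme_val = np.max(array)
extreme_indices = np.where(array == extreme_val)   # (array([0, 1]), array([1, 0]))
num_indices = len(extreme_indices[0])              # 2 tied locations

rng = np.random.RandomState(0)                     # local PRNG, as in the patch
selected_idx = rng.randint(num_indices)            # pick one tied location at random

set_indices = tuple(index[selected_idx] for index in extreme_indices)
result = np.zeros_like(array)
result[set_indices] = extreme_val                  # one-hot at the chosen extreme
# --- end editor's sketch ---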
- extreme_val = max if direction == MAX else min + extreme_indices = np.where(array == extreme_val) + + num_indices = len(extreme_indices[0]) + assert all(len(idx) == num_indices for idx in extreme_indices) + + if tie == FIRST: + selected_idx = 0 + + elif tie == LAST: + selected_idx = -1 + + elif tie == RANDOM: + random_state = self._get_current_parameter_value("random_state", context) + selected_idx = random_state.randint(num_indices) + + elif tie == ALL: + selected_idx = slice(num_indices) - if tie == ALL: - if direction == MAX: - result = np.where(array == max, max, -np.inf) - else: - result = np.where(array == min, min, np.inf) else: - if tie == FIRST: - index = np.min(np.where(array == extreme_val)) - elif tie == LAST: - index = np.max(np.where(array == extreme_val)) - elif tie == RANDOM: - index = np.random.choice(np.where(array == extreme_val)) - else: - assert False, f"PROGRAM ERROR: Unrecognized value for 'tie' in OneHot function: '{tie}'." - result = np.zeros_like(array) - result[index] = extreme_val - - if indicator is True: - result = np.where(result == extreme_val, 1, result) - if max is not None: - result = np.where(result == -np.inf, 0, result) - if min is not None: - result = np.where(result == np.inf, 0, result) + assert False, f"PROGRAM ERROR: Unrecognized value for 'tie' in OneHot function: '{tie}'." + + + set_indices = tuple(index[selected_idx] for index in extreme_indices) + + result = np.zeros_like(variable) + result[set_indices] = 1 if indicator else extreme_val return self.convert_output_type(result) From 59c9736bc171c055b10bab0b9cbbdd37415acdc3 Mon Sep 17 00:00:00 2001 From: Jan Vesely Date: Wed, 13 Nov 2024 19:48:35 -0500 Subject: [PATCH 05/34] llvm/Mechanism: Reinit integrator_function in Mechanism reset if present (#3112) Fixes: ticket #106903 Signed-off-by: Jan Vesely --- .../core/components/mechanisms/mechanism.py | 41 ++++++++++++++++--- tests/mechanisms/test_mechanisms.py | 19 +++++++++ 2 files changed, 55 insertions(+), 5 deletions(-) diff --git a/psyneulink/core/components/mechanisms/mechanism.py b/psyneulink/core/components/mechanisms/mechanism.py index e261ce5eb3..aae1969640 100644 --- a/psyneulink/core/components/mechanisms/mechanism.py +++ b/psyneulink/core/components/mechanisms/mechanism.py @@ -3197,12 +3197,41 @@ def _gen_llvm_function_reset(self, ctx, builder, m_base_params, m_state, m_arg_i reinit_in = builder.alloca(reinit_func.args[2].type.pointee, name="reinit_in") reinit_out = builder.alloca(reinit_func.args[3].type.pointee, name="reinit_out") - reinit_base_params, reinit_state = ctx.get_param_or_state_ptr(builder, self, "function", param_struct_ptr=m_base_params, state_struct_ptr=m_state) - reinit_params, builder = self._gen_llvm_param_ports_for_obj( - self.function, reinit_base_params, ctx, builder, m_base_params, m_state, m_arg_in) + reinit_base_params, reinit_state = ctx.get_param_or_state_ptr(builder, + self, + "function", + param_struct_ptr=m_base_params, + state_struct_ptr=m_state) + reinit_params, builder = self._gen_llvm_param_ports_for_obj(self.function, + reinit_base_params, + ctx, + builder, + m_base_params, + m_state, + m_arg_in) builder.call(reinit_func, [reinit_params, reinit_state, reinit_in, reinit_out]) + if hasattr(self, "integrator_function") and getattr(self, "integrator_mode", False): + reinit_func = ctx.import_llvm_function(self.integrator_function, tags=tags) + reinit_in = builder.alloca(reinit_func.args[2].type.pointee, name="integrator_reinit_in") + reinit_out = builder.alloca(reinit_func.args[3].type.pointee, 
+                                        name="integrator_reinit_out")
+
+            reinit_base_params, reinit_state = ctx.get_param_or_state_ptr(builder,
+                                                                          self,
+                                                                          "integrator_function",
+                                                                          param_struct_ptr=m_base_params,
+                                                                          state_struct_ptr=m_state)
+            reinit_params, builder = self._gen_llvm_param_ports_for_obj(self.integrator_function,
+                                                                        reinit_base_params,
+                                                                        ctx,
+                                                                        builder,
+                                                                        m_base_params,
+                                                                        m_state,
+                                                                        m_arg_in)
+
+            builder.call(reinit_func, [reinit_params, reinit_state, reinit_in, reinit_out])
+
         return builder
 
     def _gen_llvm_function(self, *, extra_args=[], ctx:pnlvm.LLVMBuilderContext, tags:frozenset):
@@ -3212,9 +3241,11 @@ def _gen_llvm_function(self, *, extra_args=[], ctx:pnlvm.LLVMBuilderContext, tag
         Mechanisms need to support "is_finished" execution variant
         (used by scheduling conditions) on top of the variants supported by Component.
         """
+
+        # Call parent "_gen_llvm_function", this should result in calling
+        # "_gen_llvm_function_body" below
         if "is_finished" not in tags:
-            return super()._gen_llvm_function(extra_args=extra_args, ctx=ctx,
-                                              tags=tags)
+            return super()._gen_llvm_function(extra_args=extra_args, ctx=ctx, tags=tags)
 
         # Keep all 4 standard arguments to ease invocation
         args = [ctx.get_param_struct_type(self).as_pointer(),
diff --git a/tests/mechanisms/test_mechanisms.py b/tests/mechanisms/test_mechanisms.py
index f07b2810aa..c326b5c1a3 100644
--- a/tests/mechanisms/test_mechanisms.py
+++ b/tests/mechanisms/test_mechanisms.py
@@ -280,3 +280,22 @@ def test_reset_state_transfer_mechanism(self):
         np.testing.assert_allclose(output_after_saving_state, output_after_reinitialization)
         np.testing.assert_allclose(original_output, [np.array([[0.5]]), np.array([[0.75]])])
         np.testing.assert_allclose(output_after_reinitialization, [np.array([[0.875]]), np.array([[0.9375]])])
+
+    @pytest.mark.usefixtures("comp_mode_no_llvm")
+    def test_reset_integrator_function(self, comp_mode):
+        """This test checks that the Mechanism.integrator_function is reset when the mechanism is reset"""
+
+        threshold_mech = pnl.TransferMechanism(input_shapes=1,
+                                               default_variable=0,
+                                               integrator_function=pnl.SimpleIntegrator(rate=1, offset=-0.001),
+                                               function=pnl.Linear(intercept=0.06, slope=1),
+                                               integrator_mode=True,
+                                               execute_until_finished=True,
+                                               termination_threshold=10,
+                                               reset_stateful_function_when=pnl.AtTrialStart(),
+                                               termination_measure=pnl.TimeScale.TRIAL)
+        comp = pnl.Composition()
+        comp.add_node(threshold_mech)
+
+        results = comp.run(inputs=[[0.0], [0.0]], execution_mode=comp_mode)
+        np.testing.assert_allclose(results, [[0.05]])

From 3fc73e1d0e5697b2740775a6c4b1ba8833ec8739 Mon Sep 17 00:00:00 2001
From: kmantel <1592123+kmantel@users.noreply.github.com>
Date: Thu, 14 Nov 2024 02:07:11 -0500
Subject: [PATCH 06/34] Parameter: correct .set post-initialization check
 (#3116)

Parameter.set checks for Component values assigned after its owning
Component's __init__ so that the assigned Components can be shaped
compatibly and set up for later context initialization.
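In outline (an editor's paraphrase of the one-line change below, not
additional API), the guard changes from

    if not owner.is_initializing:
        value._initialize_from_context(context)

to

    if owner.initialization_status == ContextFlags.INITIALIZED:
        value._initialize_from_context(context)

so that assignment-time initialization runs only once the owner's
__init__ has actually completed.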
Check for ContextFlags.INITIALIZED initialization_status instead of
is_initialized, because is_initialized also considers any initialization
context, not just the status of having completed Component.__init__
---
 psyneulink/core/globals/parameters.py | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/psyneulink/core/globals/parameters.py b/psyneulink/core/globals/parameters.py
index 1d2bfeae32..f79ce831aa 100644
--- a/psyneulink/core/globals/parameters.py
+++ b/psyneulink/core/globals/parameters.py
@@ -1516,7 +1516,7 @@ def set(self, value, context=None, override=False, skip_history=False, skip_log=
         if isinstance(value, Component):
             owner = self._owner._owner
             if value not in owner._parameter_components:
-                if not owner.is_initializing:
+                if owner.initialization_status == ContextFlags.INITIALIZED:
                     value._initialize_from_context(context)
                 owner._parameter_components.add(value)

From ee61d35dbb0a15766101c29685697a8f5994d634 Mon Sep 17 00:00:00 2001
From: jdcpni
Date: Thu, 14 Nov 2024 06:32:35 -0500
Subject: [PATCH 07/34] Fix/matrix transform l0 (#3113)
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

• transformfunctions.py
  - MatrixTransform: allow normalization for L0

• emcomposition.py
  - enforce normalize_memories for len(keys)==1

• test_emcomposition.py
  - test_simple_execution_without_learning():
    add tests for scalar keys & use of L0 in MatrixTransform
---
 docs/source/CombinationFunctions.rst          | 11 ---
 docs/source/Core.rst                          |  4 +-
 docs/source/NonStatefulFunctions.rst          |  5 +-
 docs/source/TransformFunctions.rst            | 11 +++
 .../nonstateful/transformfunctions.py         | 94 +++++++++----------
 .../library/compositions/emcomposition.py     |  7 +-
 tests/composition/test_emcomposition.py       | 64 +++++++------
 7 files changed, 101 insertions(+), 95 deletions(-)
 delete mode 100644 docs/source/CombinationFunctions.rst
 create mode 100644 docs/source/TransformFunctions.rst

diff --git a/docs/source/CombinationFunctions.rst b/docs/source/CombinationFunctions.rst
deleted file mode 100644
index 31a55947cc..0000000000
--- a/docs/source/CombinationFunctions.rst
+++ /dev/null
@@ -1,11 +0,0 @@
-CombinationFunctions
-====================
-
-.. toctree::
-   :maxdepth: 3
-
-.. automodule:: psyneulink.core.components.functions.combinationfunctions
-   :members: Concatenate, Rearrange, Reduce, LinearCombination, CombineMeans, PredictionErrorDeltaFunction
-   :private-members:
-   :exclude-members: Parameters
-
diff --git a/docs/source/Core.rst b/docs/source/Core.rst
index 292689884f..ea1f1b7105 100644
--- a/docs/source/Core.rst
+++ b/docs/source/Core.rst
@@ -57,8 +57,6 @@ Core
 
   - `NonStatefulFunctions`
 
-    - `CombinationFunctions`
-
     - `DistributionFunctions`
 
     - `LearningFunctions`
@@ -71,6 +69,8 @@ Core
 
     - `TransferFunctions`
 
+    - `TransformFunctions`
+
   - `StatefulFunctions`
 
     - `IntegratorFunctions`
diff --git a/docs/source/NonStatefulFunctions.rst b/docs/source/NonStatefulFunctions.rst
index 55ad922776..f69c780ad6 100644
--- a/docs/source/NonStatefulFunctions.rst
+++ b/docs/source/NonStatefulFunctions.rst
@@ -8,10 +8,11 @@ Functions that do *not* depend on a previous value.
 
 ..
toctree:: :maxdepth: 1 - CombinationFunctions + DistributionFunctions LearningFunctions ObjectiveFunctions OptimizationFunctions SelectionFunctions - TransferFunctions \ No newline at end of file + TransferFunctions + TransformFunctions \ No newline at end of file diff --git a/docs/source/TransformFunctions.rst b/docs/source/TransformFunctions.rst new file mode 100644 index 0000000000..70cd2194ad --- /dev/null +++ b/docs/source/TransformFunctions.rst @@ -0,0 +1,11 @@ +TransformFunctions +================== + +.. toctree:: + :maxdepth: 3 + +.. automodule:: psyneulink.core.components.functions.transformfunctions + :members: Concatenate, Rearrange, Reduce, LinearCombination, CombineMeans, MatrixTransform, PredictionErrorDeltaFunction + :private-members: + :exclude-members: Parameters + diff --git a/psyneulink/core/components/functions/nonstateful/transformfunctions.py b/psyneulink/core/components/functions/nonstateful/transformfunctions.py index bd0403bfcf..86c1db6b7b 100644 --- a/psyneulink/core/components/functions/nonstateful/transformfunctions.py +++ b/psyneulink/core/components/functions/nonstateful/transformfunctions.py @@ -1628,20 +1628,48 @@ class MatrixTransform(TransformFunction): # ----------------------------------- Matrix transform of `variable `. - `function ` returns dot product of variable with matrix: + `function ` returns a matrix transform of `variable ` + based on the **operation** argument. - .. math:: - variable \\bullet matrix + **operation** = *DOT_PRODUCT*: - If *DOT_PRODUCT* is specified as the **operation*, the result is the dot product of `variable - ` and `matrix `; if *L0* is specified, the result is the - difference between `variable ` and `matrix ` (see - `operation ` for additional details). + Returns the dot (inner) product of `variable ` and `matrix `: - If **normalize** is True, the result is normalized by the product of the norms of the variable and matrix: + .. math:: + {variable} \\bullet |matrix| + + If **normalize** =True, the result is normalized by the product of the norms of the variable and matrix: + + .. math:: + \\frac{variable \\bullet matrix}{\\|variable\\| \\cdot \\|matrix\\|} + + .. note:: + For **normalize** =True, the result is the same as the cosine of the angle between pairs of vectors. + + **operation** = *L0*: + + Returns the absolute value of the difference between `variable ` and `matrix + `: + + .. math:: + |variable - matrix| + + If **normalize** =True, the result is normalized by the norm of the sum of differences between the variable and + matrix, which is then subtracted from 1: + + .. math:: + 1 - \\frac{|variable - matrix|}{\\|variable - matrix\\|} + + .. note:: + For **normalize** =True, the result has the same effect as the normalized *DOT_PRODUCT* operation, + with more similar pairs of vectors producing larger values (closer to 1). + + .. warning:: + For **normalize** =False, the result is smaller (closer to 0) for more similar pairs of vectors, + which is **opposite** the effect of the *DOT_PRODUCT* and normalized *L0* operations. If the desired + result is that more similar pairs of vectors produce larger values, set **normalize** =True or + use the *DOT_PRODUCT* operation. - .. 
math:: - \\frac{variable \\bullet matrix}{\\|variable\\| \\cdot \\|matrix\\|} COMMENT: [CONVERT TO FIGURE] ---------------------------------------------------------------------------------------------------------- @@ -1679,7 +1707,7 @@ class MatrixTransform(TransformFunction): # ----------------------------------- specifies matrix used to transform `variable ` (see `matrix ` for specification details). - When MatrixTransform is the `function ` of a projection: + When MatrixTransform is the `function ` of a projection: - the matrix specification must be compatible with the variables of the `sender ` and `receiver ` @@ -1795,15 +1823,6 @@ class Parameters(TransformFunction.Parameters): normalize = Parameter(False) bounds = None - # def is_matrix_spec(m): - # if m is None: - # return True - # if m in MATRIX_KEYWORD_VALUES: - # return True - # if isinstance(m, (list, np.ndarray, types.FunctionType)): - # return True - # return False - @check_user_specified @beartype def __init__(self, @@ -1833,25 +1852,6 @@ def __init__(self, skip_log=True, ) - # def _validate_variable(self, variable, context=None): - # """Insure that variable passed to MatrixTransform is a max 2D array - # - # :param variable: (max 2D array) - # :param context: - # :return: - # """ - # variable = super()._validate_variable(variable, context) - # - # # Check that variable <= 2D - # try: - # if not variable.ndim <= 2: - # raise FunctionError("variable ({0}) for {1} must be a numpy.ndarray of dimension at most 2".format(variable, self.__class__.__name__)) - # except AttributeError: - # raise FunctionError("PROGRAM ERROR: variable ({0}) for {1} should be a numpy.ndarray". - # format(variable, self.__class__.__name__)) - # - # return variable - def _validate_params(self, request_set, target_set=None, context=None): """Validate params and assign to targets @@ -2013,15 +2013,6 @@ def _validate_params(self, request_set, target_set=None, context=None): self.name, self.owner_name, MATRIX_KEYWORD_NAMES)) - - # operation param - elif param_name == OPERATION: - if param_value == L0 and NORMALIZE in param_set and param_set[NORMALIZE]: - raise FunctionError(f"The 'operation' parameter for the {self.name} function of " - f"{self.owner_name} is set to 'L0', so the 'normalize' parameter " - f"should not be set to True " - f"(normalization is not needed, and can cause a divide by zero error). 
" - f"Set 'normalize' to False or change 'operation' to 'DOT_PRODUCT'.") else: continue @@ -2176,7 +2167,7 @@ def diff_with_normalization(vector, matrix): if normalize: return diff_with_normalization else: - return lambda x, y: torch.sum((1 - torch.abs(x - y)),axis=0) + return lambda x, y: torch.sum(torch.abs(x - y),axis=0) else: from psyneulink.library.compositions.autodiffcomposition import AutodiffCompositionError @@ -2224,10 +2215,11 @@ def _function(self, result = np.dot(vector, matrix) elif operation == L0: - normalization = 1 if normalize: normalization = np.sum(np.abs(vector - matrix)) - result = np.sum(((1 - np.abs(vector - matrix)) / normalization),axis=0) + result = np.sum((1 - (np.abs(vector - matrix)) / normalization),axis=0) + else: + result = np.sum((np.abs(vector - matrix)),axis=0) return self.convert_output_type(result) diff --git a/psyneulink/library/compositions/emcomposition.py b/psyneulink/library/compositions/emcomposition.py index 46acce0308..8992e053c9 100644 --- a/psyneulink/library/compositions/emcomposition.py +++ b/psyneulink/library/compositions/emcomposition.py @@ -2201,7 +2201,11 @@ def _construct_match_nodes(self, memory_template, memory_capacity, concatenate_q """ OPERATION = 0 NORMALIZE = 1 - args = [(L0,False) if len(key) == 1 else (DOT_PRODUCT,normalize_memories) for key in memory_template[0]] + # Enforce normalization of memories if key is a scalar + # (this is to allow 1-L0 distance to be used as similarity measure, so that better matches + # (more similar memories) have higher match values; see `MatrixTransform` for explanation) + args = [(L0,True) if len(key) == 1 else (DOT_PRODUCT,normalize_memories) + for key in memory_template[0]] if concatenate_queries: # Get fields of memory structure corresponding to the keys @@ -2238,7 +2242,6 @@ def _construct_match_nodes(self, memory_template, memory_capacity, concatenate_q for i in range(self.num_keys) ] - return match_nodes # FIX: CONVERT TO _construct_weight_control_nodes diff --git a/tests/composition/test_emcomposition.py b/tests/composition/test_emcomposition.py index 55c01ad7b5..024076d9ec 100644 --- a/tests/composition/test_emcomposition.py +++ b/tests/composition/test_emcomposition.py @@ -258,7 +258,7 @@ class TestExecution: # NOTE: None => use default value (i.e., don't specify in constructor, rather than forcing None as value of arg) # ---------------------------------------- SPECS ----------------------------------- ----- EXPECTED --------- # memory_template mem mem mem fld concat nlz sm str inputs expected_retrieval - # fill cap decay wts keys gain prob + # fill cap decay wts keys mem gain prob # ---------------------------------------------------------------------------------- ------------------------ (0, [[[1,2,3],[4,6]], [[1,2,5],[4,8]], @@ -266,26 +266,26 @@ class TestExecution: [4., 6.16540637]]), (1, [[[1,2,3],[4,6]], [[1,2,5],[4,8]], - [[1,2,10],[4,10]]], None, 3, 0, [1,0], None, None, 100, 0, [[[1, 2, 3]], - [[4, 6]]], [[1., 2., 3.16585899], + [[1,2,10],[4,10]]], None, 3, 0, [1,0], None, None, 100, 0, [[1, 2, 3], + [4, 6]], [[1., 2., 3.16585899], [4., 6.16540637]]), (2, [[[1,2,3],[4,6]], [[1,2,5],[4,8]], - [[1,2,10],[4,10]]], None, 3, 0, [1,0], None, None, 100, 0, [[[1, 2, 3]], - [[4, 8]]], [[1., 2., 3.16585899], + [[1,2,10],[4,10]]], None, 3, 0, [1,0], None, None, 100, 0, [[1, 2, 3], + [4, 8]], [[1., 2., 3.16585899], [4., 6.16540637]]), (3, [[[1,2,3],[4,6]], [[1,2,5],[4,8]], - [[1,2,10],[4,10]]], (0,.01), 4, 0, [1,0], None, None, 100, 0, [[[1, 2, 3]], - [[4, 8]]], [[0.99998628, + 
[[1,2,10],[4,10]]], (0,.01), 4, 0, [1,0], None, None, 100, 0, [[1, 2, 3], + [4, 8]], [[0.99998628, 1.99997247, 3.1658154 ], [3.99994492, 6.16532141]]), (4, [[[1,2,3],[4,6]], # Equal field_weights (but not concatenated) [[1,2,5],[4,6]], - [[1,2,10],[4,6]]], (0,.01), 4, 0, [1,1], None, None, 100, 0, [[[1, 2, 3]], - [[4, 6]]], [[0.99750462, + [[1,2,10],[4,6]]], (0,.01), 4, 0, [1,1], None, None, 100, 0, [[1, 2, 3], + [4, 6]], [[0.99750462, 1.99499376, 3.51623568], [3.98998465, @@ -293,62 +293,67 @@ class TestExecution: ), (5, [[[1,2,3],[4,6]], # Equal field_weights with concatenation [[1,2,5],[4,8]], - [[1,2,10],[4,10]]], (0,.01), 4, 0, [1,1], True, None, 100, 0, [[[1, 2, 4]], - [[4, 6]]], [[0.99898504, + [[1,2,10],[4,10]]], (0,.01), 4, 0, [1,1], True, None, 100, 0, [[1, 2, 4], + [4, 6]], [[0.99898504, 1.99796378, 4.00175037], [3.99592639, 6.97406456]]), (6, [[[1,2,3],[4,6]], # Unequal field_weights [[1,2,5],[4,8]], - [[1,2,10],[4,10]]], (0,.01), 4, 0, [9,1], None, None, 100, 0, [[[1, 2, 3]], - [[4, 6]]], [[0.99996025, + [[1,2,10],[4,10]]], (0,.01), 4, 0, [9,1], None, None, 100, 0, [[1, 2, 3], + [4, 6]], [[0.99996025, 1.99992024, 3.19317783], [3.99984044, 6.19219795]]), (7, [[[1,2,3],[4,6]], # Store + no decay [[1,2,5],[4,8]], - [[1,2,10],[4,10]]], (0,.01), 4, 0, [9,1], None, None, 100, 1, [[[1, 2, 3]], - [[4, 6]]], [[0.99996025, + [[1,2,10],[4,10]]], (0,.01), 4, 0, [9,1], None, None, 100, 1, [[1, 2, 3], + [4, 6]], [[0.99996025, 1.99992024, 3.19317783], [3.99984044, 6.19219795]]), (8, [[[1,2,3],[4,6]], # Store + default decay (should be AUTO) [[1,2,5],[4,8]], - [[1,2,10],[4,10]]], (0,.01), 4, None, [9,1], None, None, 100, 1, [[[1, 2, 3]], - [[4, 6]]], [[0.99996025, + [[1,2,10],[4,10]]], (0,.01), 4, None, [9,1], None, None, 100, 1,[[1, 2, 3], + [4, 6]], [[0.99996025, 1.99992024, 3.19317783], [3.99984044, 6.19219795]]), (9, [[[1,2,3],[4,6]], # Store + explicit AUTO decay [[1,2,5],[4,8]], - [[1,2,10],[4,10]]], (0,.01), 4, AUTO, [9,1], None, None, 100, 1, [[[1, 2, 3]], - [[4, 6]]], [[0.99996025, + [[1,2,10],[4,10]]], (0,.01), 4, AUTO, [9,1], None, None, 100, 1, [[1, 2, 3], + [4, 6]], [[0.99996025, 1.99992024, 3.19317783], [3.99984044, 6.19219795]]), (10, [[[1,2,3],[4,6]], # Store + numerical decay [[1,2,5],[4,8]], - [[1,2,10],[4,10]]], (0,.01), 4, .1, [9,1], None, None, 100, 1, [[[1, 2, 3]], - [[4, 6]]], [[0.99996025, + [[1,2,10],[4,10]]], (0,.01), 4, .1, [9,1], None, None, 100, 1, [[1, 2, 3], + [4, 6]], [[0.99996025, 1.99992024, 3.19317783], [3.99984044, 6.19219795]]), (11, [[[1,2,3],[4,6]], # Same as 10, but with equal weights and concatenate keys [[1,2,5],[4,8]], - [[1,2,10],[4,10]]], (0,.01), 4, .1, [1,1], True, None, 100, 1, [[[1, 2, 3]], - [[4, 6]]], [[0.99922544, + [[1,2,10],[4,10]]], (0,.01), 4, .1, [1,1], True, None, 100, 1, [[1, 2, 3], + [4, 6]], [[0.99922544, 1.99844608, 3.38989346], [3.99689126, 6.38682264]]), -# [3.99984044, -# 6.19219795]]), + + (12, [[[1],[2],[3]], # Scalar keys - exact match (this tests use of L0 for retreieval in MEMORY matrix) + [[10],[0],[100]]], (0,.01), 3, 0, [1,1,0], None, None, pnl.ARG_MAX, 1, [[10],[0],[100]], + [[10],[0],[100]]), + + (13, [[[1],[2],[3]], # Scalar keys - close match (this tests use of L0 for retreieval in MEMORY matrix + [[10],[0],[100]]], (0,.01), 3, 0, [1,1,0], None, None, pnl.ARG_MAX, 1, [[2],[3],[4]], [[1],[2],[3]]), ] args_names = "test_num, memory_template, memory_fill, memory_capacity, memory_decay_rate, field_weights, " \ @@ -401,7 +406,11 @@ def test_simple_execution_without_learning(self, if normalize_memories is not None: 
params.update({'normalize_memories': normalize_memories}) if softmax_gain is not None: - params.update({'softmax_gain': softmax_gain}) + if softmax_gain == pnl.ARG_MAX: + params.update({'softmax_choice': softmax_gain}) + params.update({'softmax_gain': 100}) + else: + params.update({'softmax_gain': softmax_gain}) if storage_prob is not None: params.update({'storage_prob': storage_prob}) params.update({'softmax_threshold': None}) @@ -432,7 +441,8 @@ def test_simple_execution_without_learning(self, # Validate storage if storage_prob: - for actual, expected in zip(em.memory[-1], [[1,2,3],[4,6]]): + # for actual, expected in zip(em.memory[-1], [[1,2,3],[4,6]]): + for actual, expected in zip(em.memory[-1], list(inputs.values())): np.testing.assert_array_equal(actual, expected) if memory_decay_rate in {None, AUTO}: From 7542b7565001118c2a9f67a2af8b83ee8e7b5cd7 Mon Sep 17 00:00:00 2001 From: Jan Vesely Date: Fri, 15 Nov 2024 08:43:43 -0500 Subject: [PATCH 08/34] broken_trans_deps: Block coverage==7.6.5 (#3118) https://github.com/nedbat/coveragepy/issues/1891 Signed-off-by: Jan Vesely --- broken_trans_deps.txt | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/broken_trans_deps.txt b/broken_trans_deps.txt index f7b44a2bbe..820cffb769 100644 --- a/broken_trans_deps.txt +++ b/broken_trans_deps.txt @@ -34,6 +34,10 @@ cattrs != 23.2.1, != 23.2.2 # https://github.com/beartype/beartype/issues/324 beartype != 0.17.1; python_version == '3.9' +# coverage 7.6.5 is broken +# https://github.com/nedbat/coveragepy/issues/1891 +coverage != 7.6.5 + # The following need at least sphinx-5 without indicating it in dependencies: # * sphinxcontrib-applehelp >=1.0.8, # * sphinxcontrib-devhelp >=1.0.6, From 67e6d0e473361e0c08502a5e7d959dff57897629 Mon Sep 17 00:00:00 2001 From: jdcpni Date: Fri, 15 Nov 2024 11:06:03 -0500 Subject: [PATCH 09/34] Feat/emcomposition/assign field weights (#3117) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit * • emcomposition.py - add field_weights setter and getter - field_weights_setter(): check that value node is not be assigned a weight • mechanism.py Mechanism_Base.execute(): add check whether any input_ports have default_input set in determining whether or not to to call _update_input_ports --- .../core/components/mechanisms/mechanism.py | 6 +- psyneulink/core/compositions/showgraph.py | 10 ++-- .../library/compositions/emcomposition.py | 52 +++++++++++++++++- tests/composition/test_emcomposition.py | 55 ++++++++++++++++++- 4 files changed, 113 insertions(+), 10 deletions(-) diff --git a/psyneulink/core/components/mechanisms/mechanism.py b/psyneulink/core/components/mechanisms/mechanism.py index aae1969640..a63f68da69 100644 --- a/psyneulink/core/components/mechanisms/mechanism.py +++ b/psyneulink/core/components/mechanisms/mechanism.py @@ -2520,15 +2520,17 @@ def execute(self, # Executing or simulating Composition, so get input by updating input_ports if (input is None and (context.execution_phase is not ContextFlags.IDLE) - and any(p.path_afferents for p in self.input_ports)): + and (any((p.path_afferents or p.default_input) for p in self.input_ports))): variable = self._update_input_ports(runtime_port_params[INPUT_PORT_PARAMS], context) - # Direct call to execute Mechanism with specified input, so assign input to Mechanism's input_ports else: + # Direct call to execute Mechanism with specified input, so assign input to Mechanism's input_ports if context.source & ContextFlags.COMMAND_LINE: context.execution_phase = ContextFlags.PROCESSING 
if input is not None: input = convert_all_elements_to_np_array(input) + + # No input was specified, so use Mechanism's default variable if input is None: input = self.defaults.variable # FIX: this input value is sent to input CIMs when compositions are nested diff --git a/psyneulink/core/compositions/showgraph.py b/psyneulink/core/compositions/showgraph.py index 1c766592f2..6d48d9e7be 100644 --- a/psyneulink/core/compositions/showgraph.py +++ b/psyneulink/core/compositions/showgraph.py @@ -2708,10 +2708,12 @@ def get_index_of_node_in_G_body(node, node_type: Literal['MECHANISM', 'Projectio raise ShowGraphError(f"Bad arg in call to {composition.name}.show_graph: '{output_fmt}'.") except ShowGraphError as e: - raise ShowGraphError(str(e.error_value)) - - except: - raise ShowGraphError(f"Problem displaying graph for {composition.name}") + # raise ShowGraphError(str(e.error_value)) + raise ShowGraphError(str(e.error_value)) from e + # except: + # raise ShowGraphError(f"Problem displaying graph for {composition.name}") + except Exception as e: + raise ShowGraphError(f"Problem displaying graph for {composition.name}: {e}") from e def _is_composition_controller(self, mech, context, enclosing_comp=None): # FIX 6/12/20: REPLACE WITH TEST FOR NodeRole.CONTROLLER ONCE THAT IS IMPLEMENTED diff --git a/psyneulink/library/compositions/emcomposition.py b/psyneulink/library/compositions/emcomposition.py index 8992e053c9..af84a5b768 100644 --- a/psyneulink/library/compositions/emcomposition.py +++ b/psyneulink/library/compositions/emcomposition.py @@ -13,6 +13,8 @@ # - SHOULD differential of SoftmaxGainControl Node be included in learning? # - SHOULD MEMORY DECAY OCCUR IF STORAGE DOES NOT? CURRENTLY IT DOES NOT (SEE EMStorage Function) +# - FIX: Refactor field_weights to use None instead of 0 to specify value fields, and allow inputs to field_nodes +# - FIX: ALLOW SOFTMAX SPEC TO BE A DICT WITH PARAMETERS FOR _get_softmax_gain() FUNCTION # - FIX: Concatenation: # - LLVM for function and derivative # - Add Concatenate to pytorchcreator_function @@ -28,6 +30,7 @@ # - list with number of entries > memory_capacity if specified # - input is added to the correct row of the matrix for each key and value for # for non-contiguous keys (e.g, field_weights = [1,0,1])) +# - illegal field weight assignment # - explicitly that storage occurs after retrieval # - FIX: WARNING NOT OCCURRING FOR Normalize ON ZEROS WITH MULTIPLE ENTRIES (HAPPENS IF *ANY* KEY IS EVER ALL ZEROS) # - FIX: IMPLEMENT LearningMechanism FOR RETRIEVAL WEIGHTS: @@ -58,7 +61,6 @@ # - FIX: ADD NOISE # - FIX: ?ADD add_memory() METHOD FOR STORING W/O RETRIEVAL, OR JUST ADD retrieval_prob AS modulable Parameter # - FIX: CONFIDENCE COMPUTATION (USING SIGMOID ON DOT PRODUCTS) AND REPORT THAT (EVEN ON FIRST CALL) -# - FIX: ALLOW SOFTMAX SPEC TO BE A DICT WITH PARAMETERS FOR _get_softmax_gain() FUNCTION # MISC: # - WRITE TESTS FOR INPUT_PORT and MATRIX SPECS CORRECT IN LATEST BRANCHs # - ACCESSIBILITY OF DISTANCES (SEE BELOW): MAKE IT A LOGGABLE PARAMETER (I.E., WITH APPROPRIATE SETTER) @@ -449,6 +451,21 @@ corresponding fields during retrieval (see `Weight fields `). In either case, the remaining fields (with zero weights) are treated as value fields. + _EMComposition_Field_Weights_Note: + .. note:: + The field_weights can be modified after the EMComposition has been constructed, by assigning a new set of weights + to its `field_weights ` `Parameter`. 
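        For example (an editor's sketch: the constructor arguments shown are
        abbreviated and illustrative, not taken from this patch)::

            em = EMComposition(memory_template=(2, 3), field_weights=[1, 1])
            em.field_weights = [3, 1]   # reweight the two key fields; with
                                        # normalize_field_weights=True (the default)
                                        # this is stored as [0.75, 0.25]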
However, only the field_weights associated with
    key fields (i.e., those initially assigned non-zero field_weights) can be modified; the weights for value fields
    (i.e., ones that were initially assigned a field_weight of 0) cannot be modified, and an attempt to do so will
    generate an error. If a field that is initially used as a value may later need to be used as a key, it should be
    assigned a non-zero field_weight when the EMComposition is constructed; it can then be assigned 0 just after
    construction, and later changed as needed.

    .. technical_note::
       The reason that only field_weights for keys can be modified is that only `field_weight_nodes
       ` for keys are constructed, since ones for values would have no effect on the
       retrieval process and thus are unnecessary.

.. _EMComposition_Normalize_Field_Weights:

* **normalize_field_weights**: specifies whether the `field_weights ` are normalized
@@ -1098,6 +1115,32 @@ def _memory_getter(owning_component=None, context=None)->list:
          for i in range(memory_capacity)
      ])
 
+def field_weights_setter(field_weights, owning_component=None, context=None):
+    # FIX: ALLOW DICTIONARY WITH FIELD NAME AND WEIGHT
+    if owning_component.field_weights is None:
+        return field_weights
+    elif len(field_weights) != len(owning_component.field_weights):
+        raise EMCompositionError(f"The number of field_weights ({len(field_weights)}) must match the number of fields "
+                                 f"{len(owning_component.field_weights)}")
+    if owning_component.normalize_field_weights:
+        field_weights = field_weights / np.sum(field_weights)
+    field_wt_node_idx = 0  # Needed since # of field_weight_nodes may be less than # of fields
+    for i, field_weight in enumerate(field_weights):
+        # Check if original value was 0 (i.e., a value node), in which case disallow change
+        if not owning_component.parameters.field_weights.default_value[i]:
+            if field_weight:
+                raise EMCompositionError(f"Field '{owning_component.field_names[i]}' of '{owning_component.name}' "
+                                         f"was originally assigned as a value node (i.e., with a field_weight = 0); "
+                                         f"this cannot be changed after construction. If you want to change it to a "
+                                         f"key field, you must re-construct the EMComposition using a non-zero value "
+                                         f"for its field in the `field_weights` arg, "
+                                         f"which can then be changed to 0 after construction.")
+            continue
+        owning_component.field_weight_nodes[field_wt_node_idx].input_port.defaults.variable = field_weights[i]
+        owning_component.field_weights[i] = field_weights[i]
+        field_wt_node_idx += 1
+    return field_weights
+
 def get_softmax_gain(v, scale=1, base=1, entropy_weighting=.1)->float:
     """Compute the softmax gain (inverse temperature) based on the entropy of the distribution of values.
     scale * (base + (entropy_weighting * log(entropy(logistic(v)))))
@@ -1252,7 +1295,10 @@ class EMComposition(AutodiffComposition):
         `memory ` for retrieval, and which are used as "values" (zero values) that are stored and
         retrieved from memory but not used in the match process (see `Match memories by field
         `;  also determines the relative contribution of each key field to the match process;
-        see `field_weights ` additional details.
+        see `field_weights ` for additional details. The field_weights can be changed by
+        assigning a new list of weights to the `field_weights ` attribute; however, only
+        the weights for fields used as `keys ` can be changed (see
+        `EMComposition_Field_Weights_Note` for additional details).
normalize_field_weights : bool : default True determines whether `fields_weights ` are normalized over the number of keys, or @@ -1518,7 +1564,7 @@ class Parameters(AutodiffComposition.Parameters): memory = Parameter(None, loggable=True, getter=_memory_getter, read_only=True) memory_template = Parameter([[0],[0]], structural=True, valid_types=(tuple, list, np.ndarray), read_only=True) memory_capacity = Parameter(1000, structural=True) - field_weights = Parameter(None) + field_weights = Parameter(None, setter=field_weights_setter) normalize_field_weights = Parameter(True) field_names = Parameter(None, structural=True) concatenate_queries = Parameter(False, structural=True) diff --git a/tests/composition/test_emcomposition.py b/tests/composition/test_emcomposition.py index 024076d9ec..a46411dea3 100644 --- a/tests/composition/test_emcomposition.py +++ b/tests/composition/test_emcomposition.py @@ -363,7 +363,8 @@ class TestExecution: ids=[x[0] for x in test_execution_data]) @pytest.mark.parametrize('enable_learning', [False, True], ids=['no_learning','learning']) @pytest.mark.composition - @pytest.mark.parametrize('exec_mode', [pnl.ExecutionMode.Python, pnl.ExecutionMode.PyTorch]) + @pytest.mark.parametrize('exec_mode', [pnl.ExecutionMode.Python, pnl.ExecutionMode.PyTorch], + ids=['Python','PyTorch']) def test_simple_execution_without_learning(self, exec_mode, enable_learning, @@ -466,6 +467,58 @@ def test_simple_execution_without_learning(self, memory_fill = memory_fill or 0 assert all(elem == memory_fill for elem in em.memory[-1]) + @pytest.mark.parametrize('data', + (([[[5], [0], [10]], # 1d template + [[0], [5], [10]], + [[0.1], [0.1], [10]], + [[0.1], [0.1], [10]]], + [[5], [5], [10]], # 1d query + pnl.L0 # 1d retrieval operation + ), + ([[[5,0], [0,5], [10]], # 2d template + [[0,5], [5,0], [10]], + [[0.1, 0.1], [0.1, 0.1], [0.1]], + [[0.1, 0.1], [0.1, 0.1], [0.1]]], + [[5,0], [5,0], [10]], # 2d query + pnl.DOT_PRODUCT)), # 2d retrieval operation + ids=['1d', '2d']) + @pytest.mark.composition + @pytest.mark.parametrize('exec_mode', [pnl.ExecutionMode.Python, pnl.ExecutionMode.PyTorch]) + def test_em_field_weights_assignment(self, exec_mode, data): + EM_assign_template = data[0] + em = pnl.EMComposition(memory_template=EM_assign_template, + memory_capacity=4, + memory_decay_rate= 0, + memory_fill=0.001, + enable_learning = False, + softmax_choice=pnl.ARG_MAX, + field_weights=(.75,.25,0), + field_names=['A','B','C']) + # Confirm initial weight assginments (that favor A) + assert em.nodes['A [WEIGHT]'].input_port.defaults.variable == [.75] + assert em.nodes['B [WEIGHT]'].input_port.defaults.variable == [.25] + # Confirm use of L0 for retrieval since keys for A and B are scalars + assert em.projections['MEMORY for A [KEY]'].function.operation == data[2] + assert em.projections['MEMORY for B [KEY]'].function.operation == data[2] + # Change fields weights to favor B + em.field_weights = [0,1,0] + # Ensure weights got changed + assert em.nodes['A [WEIGHT]'].input_port.defaults.variable == [0] + assert em.nodes['B [WEIGHT]'].input_port.defaults.variable == [1] + # Note: The input matches both fields A and B; + test_input = {em.nodes['A [QUERY]']: [data[1][0]], + em.nodes['B [QUERY]']: [data[1][1]], + em.nodes['C [VALUE]']: [data[1][2]]} + result = em.run(test_input, execution_mode=exec_mode) + # If the weights change DIDN'T get used, it should favor field A and return [5,0,10] as the best match + # If weights change DID get used, it should favor field B and return [0,5,10] as the best match + 
for i,j in zip(result, data[0][1]): + assert (i == j).all() + # Change weights back and confirm that it now favors A + em.field_weights = [1,0,0] + result = em.run(test_input, execution_mode=exec_mode) + for i,j in zip(result, data[0][0]): + assert (i == j).all() @pytest.mark.composition @pytest.mark.parametrize('exec_mode', [pnl.ExecutionMode.Python, pnl.ExecutionMode.PyTorch]) From 0e1dbaec2d82f1b3137470751c71481241a586d9 Mon Sep 17 00:00:00 2001 From: jdcpni Date: Fri, 15 Nov 2024 12:44:02 -0500 Subject: [PATCH 10/34] fix/standard_output_ports_calculate (#3114) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit * • recurrenttransfermechanism.py - _instantiate_attributes_after_function(): fix assignment of StabilityFunction, and force update of default_variable for output_port • _instantiate_attributes_after_function: revise call to _update_default_variable to use energy.variable instead of value * • test_recurrent_transfer_mechanism.py - TestStandardOutputPorts: test_rtn_energy() test_rtn_entropy() * • recurrenttransfermechanism.py _instantiate_attributes_after_function(): refactor to assign functions for ENERGY and ENTROPY standard_output_ports before call to super() --- .../nonstateful/objectivefunctions.py | 55 +++++++++++-------- .../transfer/recurrenttransfermechanism.py | 51 ++++++++--------- .../test_recurrent_transfer_mechanism.py | 35 ++++++++++++ 3 files changed, 90 insertions(+), 51 deletions(-) diff --git a/psyneulink/core/components/functions/nonstateful/objectivefunctions.py b/psyneulink/core/components/functions/nonstateful/objectivefunctions.py index 4a7d890028..d68512493b 100644 --- a/psyneulink/core/components/functions/nonstateful/objectivefunctions.py +++ b/psyneulink/core/components/functions/nonstateful/objectivefunctions.py @@ -143,7 +143,7 @@ class Stability(ObjectiveFunction): length of array for which stability is calculated. matrix : list, np.ndarray, function keyword, or MappingProjection : default HOLLOW_MATRIX - weight matrix from each element of `variable ` to each other; if a matrix other + weight matrix from each element of `variable ` to each other; if a matrix other than HOLLOW_MATRIX is assigned, it is convolved with HOLLOW_MATRIX to eliminate self-connections from the stability calculation. 
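# --- Editor's check (not part of the patch): the ENERGY reference value used
# in the new tests added below, assuming the metric reduces to -1/2 * v @ M @ v
# for the inverse-hollow matrix specified there; numbers copied from the test.
import numpy as np

v = np.array([0.5124973964842103, 0.5124973964842103])
M = np.array([[0, -1], [-1, 0]])
print(-0.5 * (v @ M @ v))   # 0.26265358140309386, the test's reference value
# --- end editor's check ---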
@@ -351,8 +351,8 @@ def _instantiate_attributes_before_function(self, function=None, context=None): if isinstance(matrix, MappingProjection): matrix = matrix._parameter_ports[MATRIX] - elif isinstance(matrix, ParameterPort): - pass + # elif isinstance(matrix, ParameterPort): + # pass else: matrix = get_matrix(matrix, size, size) @@ -364,9 +364,13 @@ def _instantiate_attributes_before_function(self, function=None, context=None): self.defaults.variable] if self.metric == ENTROPY: - self.metric_fct = Distance(default_variable=default_variable, metric=CROSS_ENTROPY, normalize=self.parameters.normalize.default_value) + self.metric_fct = Distance(default_variable=default_variable, + metric=CROSS_ENTROPY, + normalize=self.parameters.normalize.default_value) elif self.metric in DISTANCE_METRICS._set(): - self.metric_fct = Distance(default_variable=default_variable, metric=self.metric, normalize=self.parameters.normalize.default_value) + self.metric_fct = Distance(default_variable=default_variable, + metric=self.metric, + normalize=self.parameters.normalize.default_value) else: assert False, "Unknown metric" @@ -462,6 +466,8 @@ def _function(self, # MODIFIED 6/12/19 END matrix = self._get_current_parameter_value(MATRIX, context) + if matrix is None: + matrix = self.matrix current = variable @@ -538,7 +544,7 @@ class Energy(Stability): length of array for which energy is calculated. matrix : list, np.ndarray, or matrix keyword - weight matrix from each element of `variable ` to each other; if a matrix other + weight matrix from each element of `variable ` to each other; if a matrix other than INVERSE_HOLLOW_MATRIX is assigned, it is convolved with HOLLOW_MATRIX to eliminate self-connections from the energy calculation. @@ -566,7 +572,7 @@ def __init__(self, default_variable=None, input_shapes=None, normalize:bool=None, - # transfer_fct=None, + transfer_fct=None, matrix=None, params=None, owner=None, @@ -575,20 +581,20 @@ def __init__(self, super().__init__( default_variable=default_variable, input_shapes=input_shapes, - metric=ENERGY, - matrix=matrix, - # transfer_fct=transfer_fct, - normalize=normalize, - params=params, - owner=owner, - prefs=prefs) + metric=ENERGY, + matrix=matrix, + transfer_fct=transfer_fct, + normalize=normalize, + params=params, + owner=owner, + prefs=prefs) class Entropy(Stability): """ Entropy( \ default_variable=None, \ - input_shapes=None, \ + input_shapes=None, \ matrix=INVERSE_HOLLOW_MATRIX, \ transfer_fct=None \ normalize=False, \ @@ -648,7 +654,7 @@ class Entropy(Stability): length of array for which energy is calculated. matrix : list, np.ndarray, or matrix keyword - weight matrix from each element of `variable ` to each other; if a matrix other + weight matrix from each element of `variable ` to each other; if a matrix other than INVERSE_HOLLOW_MATRIX is assigned, it is convolved with HOLLOW_MATRIX to eliminate self-connections from the entropy calculation. 
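# --- Editor's check (not part of the patch): the ENTROPY reference value used
# in the new tests added below, assuming the metric reduces to the cross-entropy
# of a vector with itself, -sum(v * log(v)); numbers copied from the test.
import numpy as np

v = np.array([0.5124973964842103, 0.5124973964842103])
print(-np.sum(v * np.log(v)))   # 0.6851676585231217, the test's reference value
# --- end editor's check ---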
@@ -674,7 +680,9 @@ class Entropy(Stability): @check_user_specified def __init__(self, default_variable=None, + input_shapes=None, normalize:bool=None, + matrix=None, transfer_fct=None, params=None, owner=None, @@ -682,13 +690,14 @@ def __init__(self, super().__init__( default_variable=default_variable, - # matrix=matrix, - metric=ENTROPY, - transfer_fct=transfer_fct, - normalize=normalize, - params=params, - owner=owner, - prefs=prefs) + input_shapes=input_shapes, + metric=ENTROPY, + matrix=matrix, + transfer_fct=transfer_fct, + normalize=normalize, + params=params, + owner=owner, + prefs=prefs) class Distance(ObjectiveFunction): diff --git a/psyneulink/library/components/mechanisms/processing/transfer/recurrenttransfermechanism.py b/psyneulink/library/components/mechanisms/processing/transfer/recurrenttransfermechanism.py index ff58048f45..b05f2d7785 100644 --- a/psyneulink/library/components/mechanisms/processing/transfer/recurrenttransfermechanism.py +++ b/psyneulink/library/components/mechanisms/processing/transfer/recurrenttransfermechanism.py @@ -193,10 +193,11 @@ from psyneulink.core import llvm as pnlvm from psyneulink.core.components.component import _get_parametervalue_attr +from psyneulink.core.components.functions.nonstateful.transferfunctions import Linear from psyneulink.core.components.functions.nonstateful.transformfunctions import LinearCombination from psyneulink.core.components.functions.function import Function, get_matrix from psyneulink.core.components.functions.nonstateful.learningfunctions import Hebbian -from psyneulink.core.components.functions.nonstateful.objectivefunctions import Stability +from psyneulink.core.components.functions.nonstateful.objectivefunctions import Stability, Energy, Entropy from psyneulink.core.components.functions.stateful.integratorfunctions import AdaptiveIntegrator from psyneulink.core.components.functions.userdefinedfunction import UserDefinedFunction from psyneulink.core.components.mechanisms.mechanism import Mechanism_Base, MechanismError @@ -210,7 +211,8 @@ from psyneulink.core.components.projections.pathway.mappingprojection import MappingProjection from psyneulink.core.globals.context import handle_external_context from psyneulink.core.globals.keywords import \ - AUTO, ENERGY, ENTROPY, HETERO, HOLLOW_MATRIX, INPUT_PORT, MATRIX, NAME, RECURRENT_TRANSFER_MECHANISM, RESULT + (AUTO, ENERGY, ENTROPY, FUNCTION, HETERO, HOLLOW_MATRIX, INPUT_PORT, + MATRIX, NAME, RECURRENT_TRANSFER_MECHANISM, RESULT) from psyneulink.core.globals.parameters import Parameter, SharedParameter, check_user_specified, copy_parameter_value from psyneulink.core.globals.preferences.basepreferenceset import ValidPrefSet from psyneulink.core.globals.registry import register_instance, remove_instance_from_registry @@ -243,7 +245,6 @@ ENTROPY_OUTPUT_PORT_NAME=ENTROPY - class RecurrentTransferError(MechanismError): pass @@ -518,13 +519,13 @@ class RecurrentTransferMechanism(TransferMechanism): *ENERGY* : float the energy of the elements in the LCAMechanism's `value `, - calculated using the `Stability` Function using the `ENERGY` metric. + calculated using the `Stability` Function with the `ENERGY` metric. .. _LCAMechanism_ENTROPY: *ENTROPY* : float the entropy of the elements in the LCAMechanism's `value `, - calculated using the `Stability` Function using the `ENTROPY ` metric. + calculated using the `Stability` Function with the `ENTROPY ` metric. 
Returns ------- @@ -533,6 +534,11 @@ class RecurrentTransferMechanism(TransferMechanism): """ componentType = RECURRENT_TRANSFER_MECHANISM + standard_output_ports = TransferMechanism.standard_output_ports.copy() + standard_output_ports.extend([{NAME:ENERGY_OUTPUT_PORT_NAME}, {NAME:ENTROPY_OUTPUT_PORT_NAME}]) + standard_output_port_names = TransferMechanism.standard_output_port_names.copy() + standard_output_port_names.extend([ENERGY_OUTPUT_PORT_NAME, ENTROPY_OUTPUT_PORT_NAME]) + class Parameters(TransferMechanism.Parameters): """ Attributes @@ -637,11 +643,6 @@ class Parameters(TransferMechanism.Parameters): ) recurrent_projection = Parameter(None, stateful=False, loggable=False, structural=True) - standard_output_ports = TransferMechanism.standard_output_ports.copy() - standard_output_ports.extend([{NAME:ENERGY_OUTPUT_PORT_NAME}, {NAME:ENTROPY_OUTPUT_PORT_NAME}]) - standard_output_port_names = TransferMechanism.standard_output_port_names.copy() - standard_output_port_names.extend([ENERGY_OUTPUT_PORT_NAME, ENTROPY_OUTPUT_PORT_NAME]) - @check_user_specified @beartype def __init__(self, @@ -952,9 +953,20 @@ def _instantiate_attributes_after_function(self, context=None): """ from psyneulink.library.components.projections.pathway.autoassociativeprojection import AutoAssociativeProjection + matrix = self.parameters.matrix._get(context) + + # Now that matrix and default_variable size are known, + # instantiate functions for ENERGY and ENTROPY standard_output_ports + if ENERGY_OUTPUT_PORT_NAME in self.output_ports: + energy_idx = self.standard_output_port_names.index(ENERGY_OUTPUT_PORT_NAME) + self.standard_output_ports[energy_idx][FUNCTION] = Energy(self.defaults.variable, + matrix=matrix) + if ENTROPY_OUTPUT_PORT_NAME in self.output_ports: + energy_idx = self.standard_output_port_names.index(ENTROPY_OUTPUT_PORT_NAME) + self.standard_output_ports[energy_idx][FUNCTION] = Entropy(self.defaults.variable) + super()._instantiate_attributes_after_function(context=context) - matrix = self.parameters.matrix._get(context) # (7/19/17 CW) this line of code is now questionable, given the changes to matrix and the recurrent projection if isinstance(matrix, AutoAssociativeProjection): self.recurrent_projection = matrix @@ -974,23 +986,6 @@ def _instantiate_attributes_after_function(self, context=None): if self.learning_enabled: self.configure_learning(context=context) - if ENERGY_OUTPUT_PORT_NAME in self.output_ports.names: - energy = Stability(self.defaults.variable[0], - metric=ENERGY, - transfer_fct=self.function, - matrix=self.recurrent_projection._parameter_ports[MATRIX]) - self.output_ports[ENERGY_OUTPUT_PORT_NAME]._calculate = energy.function - - if ENTROPY_OUTPUT_PORT_NAME in self.output_ports.names: - if self.function.bounds == (0,1) or self.clip == (0,1): - entropy = Stability(self.defaults.variable[0], - metric=ENTROPY, - transfer_fct=self.function, - matrix=self.recurrent_projection._parameter_ports[MATRIX]) - self.output_ports[ENTROPY_OUTPUT_PORT_NAME]._calculate = entropy.function - else: - del self.output_ports[ENTROPY_OUTPUT_PORT_NAME] - def _update_parameter_ports(self, runtime_params=None, context=None): for port in self._parameter_ports: # (8/2/17 CW) because the auto and hetero params are solely used by the AutoAssociativeProjection diff --git a/tests/mechanisms/test_recurrent_transfer_mechanism.py b/tests/mechanisms/test_recurrent_transfer_mechanism.py index 3ec955f88a..72868dadd4 100644 --- a/tests/mechanisms/test_recurrent_transfer_mechanism.py +++ 
b/tests/mechanisms/test_recurrent_transfer_mechanism.py @@ -1008,6 +1008,41 @@ def test_clip_2d_array(self): np.testing.assert_allclose(R.execute([[-5.0, -1.0, 5.0], [5.0, -5.0, 1.0], [1.0, 5.0, 5.0]]), [[-2.0, -1.0, 2.0], [2.0, -2.0, 1.0], [1.0, 2.0, 2.0]]) + +class TestStandardOutputPorts: + def test_rtn_energy(self): + """Test use of ENERGY OutputPort""" + # Get reference value + e = pnl.Energy(input_shapes=2, matrix=[[0,-1],[-1,0]]) + reference = e((0.5124973964842103,0.5124973964842103)) + assert reference == 0.26265358140309386 + + lca_mech = pnl.LCAMechanism( input_shapes=2, output_ports=[pnl.RESULT, pnl.ENERGY]) + comp = pnl.Composition(lca_mech) + result = comp.run(inputs=[1,1]) + energy_matrix = lca_mech.output_ports[1].function.matrix + energy_value = lca_mech.output_ports[1].value + assert (energy_matrix == [[0,-1],[-1,0]]).all() + assert energy_value == reference + assert (result[0] == [[0.5124973964842103,0.5124973964842103]]).all() + assert result[1] == reference + + def test_rtn_entropy(self): + """Test use of ENTROPY OutputPort""" + # Get reference value + e = pnl.Entropy(input_shapes=2) + reference = e((0.5124973964842103,0.5124973964842103)) + assert reference == 0.6851676585231217 + + lca_mech = pnl.LCAMechanism( input_shapes=2, output_ports=[pnl.RESULT, pnl.ENTROPY]) + comp = pnl.Composition(lca_mech) + result = comp.run(inputs=[1,1]) + entropy_value = lca_mech.output_ports[1].value + assert entropy_value == reference + assert (result[0] == [[0.5124973964842103,0.5124973964842103]]).all() + assert result[1] == reference + + @pytest.mark.composition class TestRecurrentInputPort: From 0b085b5e2cc4619e860d4c41e4418c74573746bd Mon Sep 17 00:00:00 2001 From: Katherine Mantel Date: Fri, 15 Nov 2024 07:55:55 +0000 Subject: [PATCH 11/34] Mechanism: if any port has default_input, get variable from input_ports otherwise, calls to execute with no input may ignore port default_input in favor of execution with Mechanism.defaults.variable --- .../core/components/mechanisms/mechanism.py | 13 ++++-- tests/ports/test_input_ports.py | 46 +++++++++++++++++++ 2 files changed, 56 insertions(+), 3 deletions(-) diff --git a/psyneulink/core/components/mechanisms/mechanism.py b/psyneulink/core/components/mechanisms/mechanism.py index a63f68da69..5bc3beeede 100644 --- a/psyneulink/core/components/mechanisms/mechanism.py +++ b/psyneulink/core/components/mechanisms/mechanism.py @@ -2518,9 +2518,16 @@ def execute(self, # UPDATE VARIABLE and InputPort(s) # Executing or simulating Composition, so get input by updating input_ports - if (input is None - and (context.execution_phase is not ContextFlags.IDLE) - and (any((p.path_afferents or p.default_input) for p in self.input_ports))): + if ( + input is None + and ( + ( + context.execution_phase is not ContextFlags.IDLE + and any(p.path_afferents for p in self.input_ports) + ) + or any(p.default_input is not None for p in self.input_ports) + ) + ): variable = self._update_input_ports(runtime_port_params[INPUT_PORT_PARAMS], context) else: diff --git a/tests/ports/test_input_ports.py b/tests/ports/test_input_ports.py index 73a1df9f67..7519814e2f 100644 --- a/tests/ports/test_input_ports.py +++ b/tests/ports/test_input_ports.py @@ -170,3 +170,49 @@ def test_no_efferents(self): A.efferents = ['test'] assert 'InputPorts are not allowed to have \'efferents\' ' \ '(assignment attempted for Deferred Init InputPort).' 
in str(error.value) + + +class TestDefaultInput: + def test_default_input_standalone(self): + a = pnl.ProcessingMechanism( + input_ports=[ + {pnl.VARIABLE: [2], pnl.PARAMS: {pnl.DEFAULT_INPUT: pnl.DEFAULT_VARIABLE}} + ] + ) + np.testing.assert_array_equal(a.execute(), [[2]]) + + a.input_ports[0].defaults.variable = [3] + np.testing.assert_array_equal(a.execute(), [[3]]) + + def test_default_input_standalone_two_ports(self): + a = pnl.ProcessingMechanism(default_variable=[[1], [1]]) + a.input_ports[0].defaults.variable = [2] + a.input_ports[1].defaults.variable = [3] + + # no port default_input set, use mechanism.defaults.variable + np.testing.assert_array_equal(a.execute(), [[1], [1]]) + + # port has default_input set, so get variable from input ports. + # second port has no afferents, so it doesn't have a value + a.input_ports[0].parameters.default_input.set(pnl.DEFAULT_VARIABLE, override=True) + with pytest.raises( + pnl.FunctionError, match="may be due to missing afferent projection" + ): + a.execute() + + # both ports have default_input set, so use it + a.input_ports[1].parameters.default_input.set(pnl.DEFAULT_VARIABLE, override=True) + np.testing.assert_array_equal(a.execute(), [[2], [3]]) + + # as second check above. one port has default_input, other does + # not and has no afferents + a.input_ports[0].parameters.default_input.set(None, override=True) + with pytest.raises( + pnl.FunctionError, match="may be due to missing afferent projection" + ): + a.execute() + + # as first check above. no port default_input set, use + # mechanism.defaults.variable + a.input_ports[1].parameters.default_input.set(None, override=True) + np.testing.assert_array_equal(a.execute(), [[1], [1]]) From 06d9eee4ac421fa79e6cc9ecdd3401e77e5b0e64 Mon Sep 17 00:00:00 2001 From: Katherine Mantel Date: Tue, 12 Nov 2024 02:09:39 +0000 Subject: [PATCH 12/34] Port: use default_input before checking projections if a port has default_input, this value should be used instead of any values from incoming projections --- psyneulink/core/components/ports/port.py | 6 +-- tests/ports/test_input_ports.py | 59 ++++++++++++++++++++++++ 2 files changed, 62 insertions(+), 3 deletions(-) diff --git a/psyneulink/core/components/ports/port.py b/psyneulink/core/components/ports/port.py index 3388fb3417..d238a3db4f 100644 --- a/psyneulink/core/components/ports/port.py +++ b/psyneulink/core/components/ports/port.py @@ -2102,14 +2102,14 @@ def set_projection_value(projection, value, context): def _execute(self, variable=None, context=None, runtime_params=None): if variable is None: + if hasattr(self, DEFAULT_INPUT) and self.default_input == DEFAULT_VARIABLE: + return copy_parameter_value(self.defaults.variable) + variable = self._get_variable_from_projections(context) # if the fallback is also None # return None, so that this port is ignored - # KDM 8/2/19: double check the relevance of this branch if variable is None: - if hasattr(self, DEFAULT_INPUT) and self.default_input == DEFAULT_VARIABLE: - return copy_parameter_value(self.defaults.variable) return None return super()._execute( diff --git a/tests/ports/test_input_ports.py b/tests/ports/test_input_ports.py index 7519814e2f..204d27f4bc 100644 --- a/tests/ports/test_input_ports.py +++ b/tests/ports/test_input_ports.py @@ -216,3 +216,62 @@ def test_default_input_standalone_two_ports(self): # mechanism.defaults.variable a.input_ports[1].parameters.default_input.set(None, override=True) np.testing.assert_array_equal(a.execute(), [[1], [1]]) + + def 
test_default_input_with_projection(self): + a = pnl.ProcessingMechanism(default_variable=[[1]]) + b = pnl.ProcessingMechanism( + input_ports=[ + {pnl.VARIABLE: [2], pnl.PARAMS: {pnl.DEFAULT_INPUT: pnl.DEFAULT_VARIABLE}} + ] + ) + comp = pnl.Composition(pathways=[a, b]) + + inputs = {a: [[10]]} + + np.testing.assert_array_equal(comp.execute(), [[2]]) + np.testing.assert_array_equal(comp.run(inputs), [[2]]) + + b.input_ports[0].defaults.variable = [3] + np.testing.assert_array_equal(comp.execute(), [[3]]) + np.testing.assert_array_equal(comp.run(inputs), [[3]]) + + b.input_ports[0].parameters.default_input.set(None, override=True) + np.testing.assert_array_equal(comp.execute(), [[1]]) + np.testing.assert_array_equal(comp.run(inputs), [[10]]) + + def test_default_input_with_projections_two_ports(self): + a = pnl.ProcessingMechanism(default_variable=[[1]]) + b = pnl.ProcessingMechanism( + input_ports=[ + {pnl.VARIABLE: [2]}, + {pnl.VARIABLE: [3], pnl.PARAMS: {pnl.DEFAULT_INPUT: pnl.DEFAULT_VARIABLE}}, + ] + ) + comp = pnl.Composition() + comp.add_nodes([a, b]) + comp.add_projection(sender=a, receiver=b.input_ports[0]) + + inputs = {a: [[10]]} + + np.testing.assert_array_equal(comp.run(inputs), [[10], [3]]) + + b.input_ports[0].parameters.default_input.set(pnl.DEFAULT_VARIABLE, override=True) + np.testing.assert_array_equal(comp.run(inputs), [[2], [3]]) + + b.input_ports[0].defaults.variable = [4] + np.testing.assert_array_equal(comp.run(inputs), [[4], [3]]) + + b.input_ports[1].defaults.variable = [5] + np.testing.assert_array_equal(comp.run(inputs), [[4], [5]]) + + b.input_ports[0].parameters.default_input.set(None, override=True) + np.testing.assert_array_equal(comp.run(inputs), [[10], [5]]) + + b.input_ports[1].parameters.default_input.set(None, override=True) + with pytest.raises( + pnl.FunctionError, match="may be due to missing afferent projection" + ): + with pytest.warns( + UserWarning, match="'ProcessingMechanism-1' doesn't have any afferent Projections" + ): + comp.run(inputs) From 272f3d184a7ec6131ce726321a4606b3adc05149 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Tue, 19 Nov 2024 09:42:57 -0500 Subject: [PATCH 13/34] requirements: update grpcio requirement from <1.68.0 to <1.69.0 (#3120) Updates the requirements on [grpcio](https://github.com/grpc/grpc) to permit the latest version. - [Release notes](https://github.com/grpc/grpc/releases) - [Changelog](https://github.com/grpc/grpc/blob/master/doc/grpc_release_schedule.md) - [Commits](https://github.com/grpc/grpc/compare/v1.67.0...v1.68.0) --- updated-dependencies: - dependency-name: grpcio dependency-type: direct:production ... 
Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- requirements.txt | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/requirements.txt b/requirements.txt index 644ccbcef3..3c864dffe6 100644 --- a/requirements.txt +++ b/requirements.txt @@ -3,7 +3,7 @@ dill<0.3.10 fastkde>=2.0.0, <2.0.2 graph-scheduler>=1.2.1, <1.3.0 graphviz<0.21.0 -grpcio<1.68.0 +grpcio<1.69.0 leabra-psyneulink<0.3.3 llvmlite<0.44 matplotlib<3.7.6 From a60f94b239e662ac70cf9984f863d4c29cb777d3 Mon Sep 17 00:00:00 2001 From: Jan Vesely Date: Mon, 11 Nov 2024 16:26:43 -0500 Subject: [PATCH 14/34] tests/OneHot: Add mode=DETERMINISTIC tests Signed-off-by: Jan Vesely --- tests/functions/test_selection.py | 254 ++++++++++++++++++++++++++---- 1 file changed, 225 insertions(+), 29 deletions(-) diff --git a/tests/functions/test_selection.py b/tests/functions/test_selection.py index aa238af2bf..e033281afd 100644 --- a/tests/functions/test_selection.py +++ b/tests/functions/test_selection.py @@ -1,7 +1,7 @@ import numpy as np import pytest -import psyneulink.core.components.functions.nonstateful.selectionfunctions as Functions +import psyneulink as pnl import psyneulink.core.globals.keywords as kw from psyneulink.core.globals.utilities import _SeededPhilox @@ -23,34 +23,34 @@ llvm_res['fp32'][expected_philox_ind] = (1.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0) test_data = [ - pytest.param(Functions.OneHot, test_var, {'mode':kw.ARG_MAX}, (0., 0., 0., 0., 0., 0., 0., 0., 0.92732552, 0.), id="OneHot ARG_MAX"), - pytest.param(Functions.OneHot, test_var, {'mode':kw.ARG_MAX_ABS}, (0., 0., 0., 0., 0., 0., 0., 0., 0.92732552, 0.), id="OneHot ARG MAX_ABS"), - pytest.param(Functions.OneHot, -test_var, {'mode':kw.ARG_MAX_ABS}, (0., 0., 0., 0., 0., 0., 0., 0., 0.92732552, 0.), id="OneHot ARG MAX_ABS Neg"), - pytest.param(Functions.OneHot, test_var, {'mode':kw.ARG_MAX_INDICATOR}, (0., 0., 0., 0., 0., 0., 0., 0., 1., 0.), id="OneHot ARG_MAX_INDICATOR"), - pytest.param(Functions.OneHot, test_var, {'mode':kw.ARG_MAX_ABS_INDICATOR}, (0., 0., 0., 0., 0., 0., 0., 0., 1., 0.), id="OneHot ARG_MAX_ABS_INDICATOR"), - pytest.param(Functions.OneHot, -test_var, {'mode':kw.ARG_MAX_ABS_INDICATOR}, (0., 0., 0., 0., 0., 0., 0., 0., 1., 0.), id="OneHot ARG_MAX_ABS_INDICATOR Neg"), - pytest.param(Functions.OneHot, test_var, {'mode':kw.ARG_MIN}, (0., 0., 0., 0., 0., 0., 0., 0., 0, -0.23311696), id="OneHot ARG_MIN"), - pytest.param(Functions.OneHot, test_var, {'mode':kw.ARG_MIN_ABS}, (0., 0., 0., 0.08976637, 0., 0., 0., 0., 0., 0.), id="OneHot ARG_MIN_ABS"), - pytest.param(Functions.OneHot, -test_var, {'mode':kw.ARG_MIN_ABS}, (0., 0., 0., 0.08976637, 0., 0., 0., 0., 0., 0.), id="OneHot ARG_MIN_ABS Neg"), - pytest.param(Functions.OneHot, test_var, {'mode':kw.ARG_MIN_INDICATOR}, (0., 0., 0., 0., 0., 0., 0., 0., 0., 1.), id="OneHot ARG_MIN_INDICATOR"), - pytest.param(Functions.OneHot, test_var, {'mode':kw.ARG_MIN_ABS_INDICATOR}, (0., 0., 0., 1.,0., 0., 0., 0., 0., 0.), id="OneHot ARG_MIN_ABS_INDICATOR"), - pytest.param(Functions.OneHot, -test_var, {'mode':kw.ARG_MIN_ABS_INDICATOR}, (0., 0., 0., 1.,0., 0., 0., 0., 0., 0.), id="OneHot ARG_MIN_ABS_INDICATOR Neg"), - pytest.param(Functions.OneHot, test_var, {'mode':kw.MAX_VAL}, (0., 0., 0., 0., 0., 0., 0., 0., 0.92732552, 0.), marks=pytest.mark.llvm_not_implemented, id="OneHot MAX_VAL"), - pytest.param(Functions.OneHot, test_var, {'mode':kw.MAX_ABS_VAL}, (0., 0., 0., 0., 0., 0., 0., 0., 0.92732552, 0.), 
marks=pytest.mark.llvm_not_implemented, id="OneHot MAX_ABS_VAL"), - pytest.param(Functions.OneHot, -test_var, {'mode':kw.MAX_ABS_VAL}, (0., 0., 0., 0., 0., 0., 0., 0., 0.92732552, 0.), marks=pytest.mark.llvm_not_implemented, id="OneHot MAX_ABS_VAL Neg"), - pytest.param(Functions.OneHot, test_var, {'mode':kw.MAX_INDICATOR}, (0., 0., 0., 0., 0., 0., 0., 0., 1., 0.), marks=pytest.mark.llvm_not_implemented, id="OneHot MAX_INDICATOR"), - pytest.param(Functions.OneHot, test_var, {'mode':kw.MAX_ABS_INDICATOR}, (0., 0., 0., 0., 0., 0., 0., 0., 1., 0.), marks=pytest.mark.llvm_not_implemented, id="OneHot MAX_ABS_INDICATOR"), - pytest.param(Functions.OneHot, -test_var, {'mode':kw.MAX_ABS_INDICATOR}, (0., 0., 0., 0., 0., 0., 0., 0., 1., 0.), marks=pytest.mark.llvm_not_implemented, id="OneHot MAX_ABS_INDICATOR Neg"), - pytest.param(Functions.OneHot, test_var, {'mode':kw.MIN_VAL}, (0., 0., 0., 0., 0., 0., 0., 0., 0., -0.23311696), marks=pytest.mark.llvm_not_implemented, id="OneHot MIN_VAL"), - pytest.param(Functions.OneHot, test_var, {'mode':kw.MIN_ABS_VAL}, (0., 0., 0., 0.08976637, 0., 0., 0., 0., 0., 0.), marks=pytest.mark.llvm_not_implemented, id="OneHot MIN_ABS_VAL"), - pytest.param(Functions.OneHot, -test_var, {'mode':kw.MIN_ABS_VAL}, (0., 0., 0., 0.08976637, 0., 0., 0., 0., 0., 0.), marks=pytest.mark.llvm_not_implemented, id="OneHot MIN_ABS_VAL Neg"), - pytest.param(Functions.OneHot, test_var, {'mode':kw.MIN_INDICATOR}, (0., 0., 0., 0., 0., 0., 0., 0., 0., 1.), marks=pytest.mark.llvm_not_implemented, id="OneHot MIN_INDICATOR"), - pytest.param(Functions.OneHot, test_var, {'mode':kw.MIN_ABS_INDICATOR}, (0., 0., 0., 1.,0., 0., 0., 0., 0., 0.), marks=pytest.mark.llvm_not_implemented, id="OneHot MIN_ABS_INDICATOR"), - pytest.param(Functions.OneHot, -test_var, {'mode':kw.MIN_ABS_INDICATOR}, (0., 0., 0., 1.,0., 0., 0., 0., 0., 0.), marks=pytest.mark.llvm_not_implemented, id="OneHot MIN_ABS_INDICATOR Neg"), - pytest.param(Functions.OneHot, [test_var, test_prob], {'mode':kw.PROB}, (0., 0., 0., 0.08976636599379373, 0., 0., 0., 0., 0., 0.), id="OneHot PROB"), - pytest.param(Functions.OneHot, [test_var, test_prob], {'mode':kw.PROB_INDICATOR}, (0., 0., 0., 1., 0., 0., 0., 0., 0., 0.), id="OneHot PROB_INDICATOR"), - pytest.param(Functions.OneHot, [test_var, test_philox], {'mode':kw.PROB}, expected_philox_prob, id="OneHot PROB Philox"), - pytest.param(Functions.OneHot, [test_var, test_philox], {'mode':kw.PROB_INDICATOR}, expected_philox_ind, id="OneHot PROB_INDICATOR Philox"), + pytest.param(pnl.OneHot, test_var, {'mode':kw.ARG_MAX}, (0., 0., 0., 0., 0., 0., 0., 0., 0.92732552, 0.), id="OneHot ARG_MAX"), + pytest.param(pnl.OneHot, test_var, {'mode':kw.ARG_MAX_ABS}, (0., 0., 0., 0., 0., 0., 0., 0., 0.92732552, 0.), id="OneHot ARG MAX_ABS"), + pytest.param(pnl.OneHot, -test_var, {'mode':kw.ARG_MAX_ABS}, (0., 0., 0., 0., 0., 0., 0., 0., 0.92732552, 0.), id="OneHot ARG MAX_ABS Neg"), + pytest.param(pnl.OneHot, test_var, {'mode':kw.ARG_MAX_INDICATOR}, (0., 0., 0., 0., 0., 0., 0., 0., 1., 0.), id="OneHot ARG_MAX_INDICATOR"), + pytest.param(pnl.OneHot, test_var, {'mode':kw.ARG_MAX_ABS_INDICATOR}, (0., 0., 0., 0., 0., 0., 0., 0., 1., 0.), id="OneHot ARG_MAX_ABS_INDICATOR"), + pytest.param(pnl.OneHot, -test_var, {'mode':kw.ARG_MAX_ABS_INDICATOR}, (0., 0., 0., 0., 0., 0., 0., 0., 1., 0.), id="OneHot ARG_MAX_ABS_INDICATOR Neg"), + pytest.param(pnl.OneHot, test_var, {'mode':kw.ARG_MIN}, (0., 0., 0., 0., 0., 0., 0., 0., 0, -0.23311696), id="OneHot ARG_MIN"), + pytest.param(pnl.OneHot, test_var, {'mode':kw.ARG_MIN_ABS}, (0., 
0., 0., 0.08976637, 0., 0., 0., 0., 0., 0.), id="OneHot ARG_MIN_ABS"), + pytest.param(pnl.OneHot, -test_var, {'mode':kw.ARG_MIN_ABS}, (0., 0., 0., 0.08976637, 0., 0., 0., 0., 0., 0.), id="OneHot ARG_MIN_ABS Neg"), + pytest.param(pnl.OneHot, test_var, {'mode':kw.ARG_MIN_INDICATOR}, (0., 0., 0., 0., 0., 0., 0., 0., 0., 1.), id="OneHot ARG_MIN_INDICATOR"), + pytest.param(pnl.OneHot, test_var, {'mode':kw.ARG_MIN_ABS_INDICATOR}, (0., 0., 0., 1.,0., 0., 0., 0., 0., 0.), id="OneHot ARG_MIN_ABS_INDICATOR"), + pytest.param(pnl.OneHot, -test_var, {'mode':kw.ARG_MIN_ABS_INDICATOR}, (0., 0., 0., 1.,0., 0., 0., 0., 0., 0.), id="OneHot ARG_MIN_ABS_INDICATOR Neg"), + pytest.param(pnl.OneHot, test_var, {'mode':kw.MAX_VAL}, (0., 0., 0., 0., 0., 0., 0., 0., 0.92732552, 0.), marks=pytest.mark.llvm_not_implemented, id="OneHot MAX_VAL"), + pytest.param(pnl.OneHot, test_var, {'mode':kw.MAX_ABS_VAL}, (0., 0., 0., 0., 0., 0., 0., 0., 0.92732552, 0.), marks=pytest.mark.llvm_not_implemented, id="OneHot MAX_ABS_VAL"), + pytest.param(pnl.OneHot, -test_var, {'mode':kw.MAX_ABS_VAL}, (0., 0., 0., 0., 0., 0., 0., 0., 0.92732552, 0.), marks=pytest.mark.llvm_not_implemented, id="OneHot MAX_ABS_VAL Neg"), + pytest.param(pnl.OneHot, test_var, {'mode':kw.MAX_INDICATOR}, (0., 0., 0., 0., 0., 0., 0., 0., 1., 0.), marks=pytest.mark.llvm_not_implemented, id="OneHot MAX_INDICATOR"), + pytest.param(pnl.OneHot, test_var, {'mode':kw.MAX_ABS_INDICATOR}, (0., 0., 0., 0., 0., 0., 0., 0., 1., 0.), marks=pytest.mark.llvm_not_implemented, id="OneHot MAX_ABS_INDICATOR"), + pytest.param(pnl.OneHot, -test_var, {'mode':kw.MAX_ABS_INDICATOR}, (0., 0., 0., 0., 0., 0., 0., 0., 1., 0.), marks=pytest.mark.llvm_not_implemented, id="OneHot MAX_ABS_INDICATOR Neg"), + pytest.param(pnl.OneHot, test_var, {'mode':kw.MIN_VAL}, (0., 0., 0., 0., 0., 0., 0., 0., 0., -0.23311696), marks=pytest.mark.llvm_not_implemented, id="OneHot MIN_VAL"), + pytest.param(pnl.OneHot, test_var, {'mode':kw.MIN_ABS_VAL}, (0., 0., 0., 0.08976637, 0., 0., 0., 0., 0., 0.), marks=pytest.mark.llvm_not_implemented, id="OneHot MIN_ABS_VAL"), + pytest.param(pnl.OneHot, -test_var, {'mode':kw.MIN_ABS_VAL}, (0., 0., 0., 0.08976637, 0., 0., 0., 0., 0., 0.), marks=pytest.mark.llvm_not_implemented, id="OneHot MIN_ABS_VAL Neg"), + pytest.param(pnl.OneHot, test_var, {'mode':kw.MIN_INDICATOR}, (0., 0., 0., 0., 0., 0., 0., 0., 0., 1.), marks=pytest.mark.llvm_not_implemented, id="OneHot MIN_INDICATOR"), + pytest.param(pnl.OneHot, test_var, {'mode':kw.MIN_ABS_INDICATOR}, (0., 0., 0., 1.,0., 0., 0., 0., 0., 0.), marks=pytest.mark.llvm_not_implemented, id="OneHot MIN_ABS_INDICATOR"), + pytest.param(pnl.OneHot, -test_var, {'mode':kw.MIN_ABS_INDICATOR}, (0., 0., 0., 1.,0., 0., 0., 0., 0., 0.), marks=pytest.mark.llvm_not_implemented, id="OneHot MIN_ABS_INDICATOR Neg"), + pytest.param(pnl.OneHot, [test_var, test_prob], {'mode':kw.PROB}, (0., 0., 0., 0.08976636599379373, 0., 0., 0., 0., 0., 0.), id="OneHot PROB"), + pytest.param(pnl.OneHot, [test_var, test_prob], {'mode':kw.PROB_INDICATOR}, (0., 0., 0., 1., 0., 0., 0., 0., 0., 0.), id="OneHot PROB_INDICATOR"), + pytest.param(pnl.OneHot, [test_var, test_philox], {'mode':kw.PROB}, expected_philox_prob, id="OneHot PROB Philox"), + pytest.param(pnl.OneHot, [test_var, test_philox], {'mode':kw.PROB_INDICATOR}, expected_philox_ind, id="OneHot PROB_INDICATOR Philox"), ] @@ -77,3 +77,199 @@ def test_basic(func, variable, params, expected, benchmark, func_mode): res = benchmark(EX, variable) np.testing.assert_allclose(res, expected) + + +test_var3 = 
np.append(np.append(test_var, test_var), test_var) +test_var_2d = np.atleast_2d(test_var) +test_var3_2d = np.append(np.append(test_var_2d, test_var_2d, axis=0), test_var_2d, axis=0) + + +@pytest.mark.benchmark +@pytest.mark.llvm_not_implemented +@pytest.mark.parametrize("variable, direction, abs_val, tie, expected", +[ + # simple + *[(test_var, kw.MAX, "absolute", tie, [0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.9273255210020586, 0.0]) for tie in [kw.FIRST,kw.LAST,kw.RANDOM,kw.ALL]], + *[(test_var, kw.MAX, "original", tie, [0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.9273255210020586, 0.0]) for tie in [kw.FIRST,kw.LAST,kw.RANDOM,kw.ALL]], + *[(test_var, kw.MIN, "absolute", tie, [0.0, 0.0, 0.0, 0.08976636599379373, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0]) for tie in [kw.FIRST,kw.LAST,kw.RANDOM,kw.ALL]], + *[(test_var, kw.MIN, "original", tie, [0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, -0.2331169623484446]) for tie in [kw.FIRST,kw.LAST,kw.RANDOM,kw.ALL]], + + # negated + *[(-test_var, kw.MAX, "absolute", tie, [0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.9273255210020586, 0.0]) for tie in [kw.FIRST,kw.LAST,kw.RANDOM,kw.ALL]], + *[(-test_var, kw.MAX, "original", tie, [0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.2331169623484446]) for tie in [kw.FIRST,kw.LAST,kw.RANDOM,kw.ALL]], + *[(-test_var, kw.MIN, "absolute", tie, [0.0, 0.0, 0.0, 0.08976636599379373, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0]) for tie in [kw.FIRST,kw.LAST,kw.RANDOM,kw.ALL]], + *[(-test_var, kw.MIN, "original", tie, [0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, -0.9273255210020586, 0.0]) for tie in [kw.FIRST,kw.LAST,kw.RANDOM,kw.ALL]], + + # 2d + *[(test_var_2d, kw.MAX, "absolute", tie, [[0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.9273255210020586, 0.0]]) for tie in [kw.FIRST,kw.LAST,kw.RANDOM,kw.ALL]], + *[(test_var_2d, kw.MAX, "original", tie, [[0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.9273255210020586, 0.0]]) for tie in [kw.FIRST,kw.LAST,kw.RANDOM,kw.ALL]], + *[(test_var_2d, kw.MIN, "absolute", tie, [[0.0, 0.0, 0.0, 0.08976636599379373, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0]]) for tie in [kw.FIRST,kw.LAST,kw.RANDOM,kw.ALL]], + *[(test_var_2d, kw.MIN, "original", tie, [[0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, -0.2331169623484446]]) for tie in [kw.FIRST,kw.LAST,kw.RANDOM,kw.ALL]], + + # 2d negated + *[(-test_var_2d, kw.MAX, "absolute", tie, [[0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.9273255210020586, 0.0]]) for tie in [kw.FIRST,kw.LAST,kw.RANDOM,kw.ALL]], + *[(-test_var_2d, kw.MAX, "original", tie, [[0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.2331169623484446]]) for tie in [kw.FIRST,kw.LAST,kw.RANDOM,kw.ALL]], + *[(-test_var_2d, kw.MIN, "absolute", tie, [[0.0, 0.0, 0.0, 0.08976636599379373, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0]]) for tie in [kw.FIRST,kw.LAST,kw.RANDOM,kw.ALL]], + *[(-test_var_2d, kw.MIN, "original", tie, [[0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, -0.9273255210020586, 0.0]]) for tie in [kw.FIRST,kw.LAST,kw.RANDOM,kw.ALL]], + + # multiple extreme values + *[(test_var3, kw.MAX, abs_val, kw.FIRST, [0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.9273255210020586, 0.0, + 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, + 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0]) + for abs_val in ("absolute", "original")], + *[(test_var3, kw.MAX, abs_val, kw.LAST, [0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, + 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, + 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.9273255210020586, 0.0]) + for abs_val in ("absolute", "original")], + *[(test_var3, kw.MAX, abs_val, kw.RANDOM, [0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 
0.0, 0.0, + 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.9273255210020586, 0.0, + 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0]) + for abs_val in ("absolute", "original")], + *[(test_var3, kw.MAX, abs_val, kw.ALL, [0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.9273255210020586, 0.0, + 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.9273255210020586, 0.0, + 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.9273255210020586, 0.0]) + for abs_val in ("absolute", "original")], + + (test_var3, kw.MIN, "absolute", kw.FIRST, [0.0, 0.0, 0.0, 0.08976636599379373, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, + 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, + 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0]), + (test_var3, kw.MIN, "absolute", kw.LAST, [0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, + 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, + 0.0, 0.0, 0.0, 0.08976636599379373, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0]), + (test_var3, kw.MIN, "absolute", kw.RANDOM, [0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, + 0.0, 0.0, 0.0, 0.08976636599379373, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, + 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0]), + (test_var3, kw.MIN, "absolute", kw.ALL, [0.0, 0.0, 0.0, 0.08976636599379373, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, + 0.0, 0.0, 0.0, 0.08976636599379373, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, + 0.0, 0.0, 0.0, 0.08976636599379373, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0]), + + (test_var3, kw.MIN, "original", kw.FIRST, [0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, -0.2331169623484446, + 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, + 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0]), + (test_var3, kw.MIN, "original", kw.LAST, [0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, + 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, + 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, -0.2331169623484446]), + (test_var3, kw.MIN, "original", kw.RANDOM, [0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, + 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, -0.2331169623484446, + 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0]), + (test_var3, kw.MIN, "original", kw.ALL, [0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, -0.2331169623484446, + 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, -0.2331169623484446, + 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, -0.2331169623484446]), + + # multiple extreme values negated + (-test_var3, kw.MAX, "absolute", kw.FIRST, [0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.9273255210020586, 0.0, + 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, + 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0]), + (-test_var3, kw.MAX, "absolute", kw.LAST, [0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, + 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, + 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.9273255210020586, 0.0]), + (-test_var3, kw.MAX, "absolute", kw.RANDOM, [0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, + 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.9273255210020586, 0.0, + 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0]), + (-test_var3, kw.MAX, "absolute", kw.ALL, [0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.9273255210020586, 0.0, + 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.9273255210020586, 0.0, + 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.9273255210020586, 0.0]), + + (-test_var3, kw.MAX, "original", kw.FIRST, [0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.2331169623484446, + 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, + 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0]), + (-test_var3, kw.MAX, "original", kw.LAST, [0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, + 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 
0.0, + 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.2331169623484446]), + (-test_var3, kw.MAX, "original", kw.RANDOM, [0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, + 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.2331169623484446, + 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0]), + (-test_var3, kw.MAX, "original", kw.ALL, [0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.2331169623484446, + 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.2331169623484446, + 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.2331169623484446]), + + (-test_var3, kw.MIN, "absolute", kw.FIRST, [0.0, 0.0, 0.0, 0.08976636599379373, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, + 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, + 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0]), + (-test_var3, kw.MIN, "absolute", kw.LAST, [0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, + 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, + 0.0, 0.0, 0.0, 0.08976636599379373, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0]), + (-test_var3, kw.MIN, "absolute", kw.RANDOM, [0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, + 0.0, 0.0, 0.0, 0.08976636599379373, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, + 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0]), + (-test_var3, kw.MIN, "absolute", kw.ALL, [0.0, 0.0, 0.0, 0.08976636599379373, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, + 0.0, 0.0, 0.0, 0.08976636599379373, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, + 0.0, 0.0, 0.0, 0.08976636599379373, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0]), + + (-test_var3, kw.MIN, "original", kw.FIRST, [0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, -0.9273255210020586, 0.0, + 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, + 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0]), + (-test_var3, kw.MIN, "original", kw.LAST, [0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, + 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, + 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, -0.9273255210020586, 0.0]), + (-test_var3, kw.MIN, "original", kw.RANDOM, [0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, + 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, -0.9273255210020586, 0.0, + 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0]), + (-test_var3, kw.MIN, "original", kw.ALL, [0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, -0.9273255210020586, 0.0, + 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, -0.9273255210020586, 0.0, + 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, -0.9273255210020586, 0.0]), + + # multiple extreme values 2d + *[(test_var3_2d, kw.MAX, abs_val, kw.FIRST, [[0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.9273255210020586, 0.0], + [0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0], + [0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0]]) + for abs_val in ("absolute", "original")], + *[(test_var3_2d, kw.MAX, abs_val, kw.LAST, [[0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0], + [0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0], + [0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.9273255210020586, 0.0]]) + for abs_val in ("absolute", "original")], + *[(test_var3_2d, kw.MAX, abs_val, kw.RANDOM, [[0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0], + [0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.9273255210020586, 0.0], + [0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0]]) + for abs_val in ("absolute", "original")], + *[(test_var3_2d, kw.MAX, abs_val, kw.ALL, [[0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.9273255210020586, 0.0], + [0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.9273255210020586, 0.0], + [0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.9273255210020586, 0.0]]) + for abs_val in ("absolute", "original")], + + (test_var3_2d, kw.MIN, "absolute", kw.FIRST, [[0.0, 0.0, 0.0, 
0.08976636599379373, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0],
+                                                  [0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0],
+                                                  [0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0]]),
+    (test_var3_2d, kw.MIN, "absolute", kw.LAST, [[0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0],
+                                                 [0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0],
+                                                 [0.0, 0.0, 0.0, 0.08976636599379373, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0]]),
+    (test_var3_2d, kw.MIN, "absolute", kw.RANDOM, [[0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0],
+                                                   [0.0, 0.0, 0.0, 0.08976636599379373, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0],
+                                                   [0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0]]),
+    (test_var3_2d, kw.MIN, "absolute", kw.ALL, [[0.0, 0.0, 0.0, 0.08976636599379373, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0],
+                                                [0.0, 0.0, 0.0, 0.08976636599379373, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0],
+                                                [0.0, 0.0, 0.0, 0.08976636599379373, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0]]),
+
+    (test_var3_2d, kw.MIN, "original", kw.FIRST, [[0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, -0.2331169623484446],
+                                                  [0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0],
+                                                  [0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0]]),
+    (test_var3_2d, kw.MIN, "original", kw.LAST, [[0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0],
+                                                 [0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0],
+                                                 [0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, -0.2331169623484446]]),
+    (test_var3_2d, kw.MIN, "original", kw.RANDOM, [[0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0],
+                                                   [0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, -0.2331169623484446],
+                                                   [0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0]]),
+    (test_var3_2d, kw.MIN, "original", kw.ALL, [[0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, -0.2331169623484446],
+                                                [0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, -0.2331169623484446],
+                                                [0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, -0.2331169623484446]]),
+], ids=lambda x: x if isinstance(x, str) else str(getattr(x, 'shape', '')) )
+@pytest.mark.parametrize("indicator", ["indicator", "value"])
+def test_one_hot_mode_deterministic(benchmark, variable, tie, indicator, direction, abs_val, expected, func_mode):
+
+    f = pnl.OneHot(default_variable=np.zeros_like(variable),
+                   mode=kw.DETERMINISTIC,
+                   tie=tie,
+                   indicator=indicator=="indicator",
+                   abs_val=abs_val=="absolute",
+                   direction=direction,
+                   seed=5) # seed to select middle of the 3 ties
+
+    EX = pytest.helpers.get_func_execution(f, func_mode)
+
+    EX(variable)
+    res = benchmark(EX, variable)
+
+    if indicator == "indicator":
+        expected = np.where(np.asarray(expected) != 0, np.ones_like(expected), expected)
+
+    np.testing.assert_allclose(res, expected)

From ccd7f6728a769f39b1df5bccd715b0a16fccd11d Mon Sep 17 00:00:00 2001
From: Jan Vesely
Date: Tue, 12 Nov 2024 12:00:33 -0500
Subject: [PATCH 15/34] llvm/OneHot: Isolate handling of PROB and PROB_INDICATOR modes

The other modes will be updated to match generalized Python behaviour.
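For reference, the selection rule that the isolated branch implements can be
sketched in plain Python (a minimal sketch assuming 1d input; one_hot_prob is
an illustrative name, not part of the PsyNeuLink API):

    import numpy as np

    def one_hot_prob(values, probs, draw, indicator=False):
        # Walk a prefix sum of the probability distribution and keep the
        # element whose cumulative interval [sum_old, sum_new) contains
        # the random draw; every other element is zeroed.
        out = np.zeros_like(values, dtype=float)
        cumulative = 0.0
        for i, p in enumerate(probs):
            new_cumulative = cumulative + p
            if cumulative <= draw < new_cumulative:
                out[i] = 1.0 if indicator else values[i]
            cumulative = new_cumulative
        return out

    # one_hot_prob(np.arange(4.), [.1, .2, .3, .4], draw=0.25) -> [0., 1., 0., 0.]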
Signed-off-by: Jan Vesely --- .../nonstateful/selectionfunctions.py | 78 ++++++++++++------- 1 file changed, 52 insertions(+), 26 deletions(-) diff --git a/psyneulink/core/components/functions/nonstateful/selectionfunctions.py b/psyneulink/core/components/functions/nonstateful/selectionfunctions.py index a7767e505e..e1d0fdcf3c 100644 --- a/psyneulink/core/components/functions/nonstateful/selectionfunctions.py +++ b/psyneulink/core/components/functions/nonstateful/selectionfunctions.py @@ -421,10 +421,10 @@ def _validate_params(self, request_set, target_set=None, context=None): f"cannot be specified.") def _gen_llvm_function_body(self, ctx, builder, params, state, arg_in, arg_out, *, tags:frozenset): - best_idx_ptr = builder.alloca(ctx.int32_ty) - builder.store(best_idx_ptr.type.pointee(0), best_idx_ptr) - if self.mode in {PROB, PROB_INDICATOR}: + best_idx_ptr = builder.alloca(ctx.int32_ty) + builder.store(best_idx_ptr.type.pointee(0), best_idx_ptr) + sum_ptr = builder.alloca(ctx.float_ty) builder.store(sum_ptr.type.pointee(-0.0), sum_ptr) @@ -437,6 +437,51 @@ def _gen_llvm_function_body(self, ctx, builder, params, state, arg_in, arg_out, prob_in = builder.gep(arg_in, [ctx.int32_ty(0), ctx.int32_ty(1)]) arg_in = builder.gep(arg_in, [ctx.int32_ty(0), ctx.int32_ty(0)]) + with pnlvm.helpers.array_ptr_loop(builder, arg_in, "search") as (b1, idx): + best_idx = b1.load(best_idx_ptr) + best_ptr = b1.gep(arg_in, [ctx.int32_ty(0), best_idx]) + + current_ptr = b1.gep(arg_in, [ctx.int32_ty(0), idx]) + current = b1.load(current_ptr) + + # Update prefix sum + current_prob_ptr = b1.gep(prob_in, [ctx.int32_ty(0), idx]) + sum_old = b1.load(sum_ptr) + sum_new = b1.fadd(sum_old, b1.load(current_prob_ptr)) + b1.store(sum_new, sum_ptr) + + old_below = b1.fcmp_ordered("<=", sum_old, random_draw) + new_above = b1.fcmp_ordered("<", random_draw, sum_new) + cond = b1.and_(new_above, old_below) + + cmp_prev = current.type(1.0) + cmp_curr = b1.select(cond, cmp_prev, cmp_prev.type(0.0)) + cmp_op = "==" + if self.mode == PROB: + val = current + else: + val = current.type(1.0) + + prev_res_ptr = b1.gep(arg_out, [ctx.int32_ty(0), best_idx]) + cur_res_ptr = b1.gep(arg_out, [ctx.int32_ty(0), idx]) + + # Make sure other elements are zeroed + builder.store(cur_res_ptr.type.pointee(0), cur_res_ptr) + + cmp_res = builder.fcmp_unordered(cmp_op, cmp_curr, cmp_prev) + with builder.if_then(cmp_res): + builder.store(prev_res_ptr.type.pointee(0), prev_res_ptr) + builder.store(val, cur_res_ptr) + builder.store(idx, best_idx_ptr) + + return builder + + elif self.mode == DETERMINISTIC: + assert False, "DETERMINISTIC mode not supported" + + best_idx_ptr = builder.alloca(ctx.int32_ty) + builder.store(best_idx_ptr.type.pointee(0), best_idx_ptr) + with pnlvm.helpers.array_ptr_loop(builder, arg_in, "search") as (b1, idx): best_idx = b1.load(best_idx_ptr) best_ptr = b1.gep(arg_in, [ctx.int32_ty(0), best_idx]) @@ -444,13 +489,12 @@ def _gen_llvm_function_body(self, ctx, builder, params, state, arg_in, arg_out, current_ptr = b1.gep(arg_in, [ctx.int32_ty(0), idx]) current = b1.load(current_ptr) - if self.mode not in {PROB, PROB_INDICATOR}: - fabs = ctx.get_builtin("fabs", [current.type]) + fabs = ctx.get_builtin("fabs", [current.type]) - is_first = b1.icmp_unsigned("==", idx, idx.type(0)) + is_first = b1.icmp_unsigned("==", idx, idx.type(0)) - # Allow the first element to win the comparison - prev_best = b1.select(is_first, best_ptr.type.pointee(float("NaN")), b1.load(best_ptr)) + # Allow the first element to win the comparison + prev_best = 
b1.select(is_first, best_ptr.type.pointee(float("NaN")), b1.load(best_ptr)) if self.mode == ARG_MAX: cmp_op = ">" @@ -500,24 +544,6 @@ def _gen_llvm_function_body(self, ctx, builder, params, state, arg_in, arg_out, cmp_curr = b1.call(fabs, [current]) val = current.type(1.0) - elif self.mode in {PROB, PROB_INDICATOR}: - # Update prefix sum - current_prob_ptr = b1.gep(prob_in, [ctx.int32_ty(0), idx]) - sum_old = b1.load(sum_ptr) - sum_new = b1.fadd(sum_old, b1.load(current_prob_ptr)) - b1.store(sum_new, sum_ptr) - - old_below = b1.fcmp_ordered("<=", sum_old, random_draw) - new_above = b1.fcmp_ordered("<", random_draw, sum_new) - cond = b1.and_(new_above, old_below) - - cmp_prev = current.type(1.0) - cmp_curr = b1.select(cond, cmp_prev, cmp_prev.type(0.0)) - cmp_op = "==" - if self.mode == PROB: - val = current - else: - val = current.type(1.0) else: assert False, "Unsupported mode in LLVM: {} for OneHot Function".format(self.mode) From 09286c77f71efb789b2f40ad7c98e26427fc75ac Mon Sep 17 00:00:00 2001 From: Jan Vesely Date: Tue, 12 Nov 2024 13:29:07 -0500 Subject: [PATCH 16/34] llvm/OneHot: Refactor to match Python behaviour for modes != DETERMINISTIC Allow output of multiple extremes. Enable tests. Still TODO: * mode==DETERMINISTIC * tie==RANDOM (only used with mode==DETERMINISTIC) * 2d arguments Signed-off-by: Jan Vesely --- .../nonstateful/selectionfunctions.py | 158 +++++++++--------- tests/functions/test_selection.py | 24 +-- 2 files changed, 95 insertions(+), 87 deletions(-) diff --git a/psyneulink/core/components/functions/nonstateful/selectionfunctions.py b/psyneulink/core/components/functions/nonstateful/selectionfunctions.py index e1d0fdcf3c..54d3d023dd 100644 --- a/psyneulink/core/components/functions/nonstateful/selectionfunctions.py +++ b/psyneulink/core/components/functions/nonstateful/selectionfunctions.py @@ -479,85 +479,93 @@ def _gen_llvm_function_body(self, ctx, builder, params, state, arg_in, arg_out, elif self.mode == DETERMINISTIC: assert False, "DETERMINISTIC mode not supported" - best_idx_ptr = builder.alloca(ctx.int32_ty) - builder.store(best_idx_ptr.type.pointee(0), best_idx_ptr) - - with pnlvm.helpers.array_ptr_loop(builder, arg_in, "search") as (b1, idx): - best_idx = b1.load(best_idx_ptr) - best_ptr = b1.gep(arg_in, [ctx.int32_ty(0), best_idx]) - - current_ptr = b1.gep(arg_in, [ctx.int32_ty(0), idx]) - current = b1.load(current_ptr) - - fabs = ctx.get_builtin("fabs", [current.type]) - - is_first = b1.icmp_unsigned("==", idx, idx.type(0)) - - # Allow the first element to win the comparison - prev_best = b1.select(is_first, best_ptr.type.pointee(float("NaN")), b1.load(best_ptr)) - - if self.mode == ARG_MAX: - cmp_op = ">" - cmp_prev = prev_best - cmp_curr = current - val = current - - elif self.mode == ARG_MAX_ABS: - cmp_op = ">" - cmp_prev = b1.call(fabs, [prev_best]) - cmp_curr = b1.call(fabs, [current]) - val = b1.call(fabs, [current]) - - elif self.mode == ARG_MAX_INDICATOR: - cmp_op = ">" - cmp_prev = prev_best - cmp_curr = current - val = current.type(1.0) - - elif self.mode == ARG_MAX_ABS_INDICATOR: - cmp_op = ">" - cmp_prev = b1.call(fabs, [prev_best]) - cmp_curr = b1.call(fabs, [current]) - val = current.type(1.0) - - elif self.mode == ARG_MIN: - cmp_op = "<" - cmp_prev = prev_best - cmp_curr = current - val = current - - elif self.mode == ARG_MIN_ABS: - cmp_op = "<" - cmp_prev = b1.call(fabs, [prev_best]) - cmp_curr = b1.call(fabs, [current]) - val = b1.call(fabs, [current]) - - elif self.mode == ARG_MIN_INDICATOR: - cmp_op = "<" - cmp_prev = prev_best - 
cmp_curr = current - val = current.type(1.0) - - elif self.mode == ARG_MIN_ABS_INDICATOR: - cmp_op = "<" - cmp_prev = b1.call(fabs, [prev_best]) - cmp_curr = b1.call(fabs, [current]) - val = current.type(1.0) + else: + direction, abs_val, indicator, tie = self._parse_mode(self.mode) + is_abs_val = ctx.bool_ty(abs_val) + is_indicator = ctx.bool_ty(indicator) - else: - assert False, "Unsupported mode in LLVM: {} for OneHot Function".format(self.mode) + num_extremes_ptr = builder.alloca(ctx.int32_ty) + builder.store(num_extremes_ptr.type.pointee(0), num_extremes_ptr) + + extreme_val_ptr = builder.alloca(ctx.float_ty) + builder.store(extreme_val_ptr.type.pointee(float("NaN")), extreme_val_ptr) + + fabs_f = ctx.get_builtin("fabs", [extreme_val_ptr.type.pointee]) + + with pnlvm.helpers.array_ptr_loop(builder, arg_in, "count_extremes") as (loop_builder, idx): + + current_ptr = loop_builder.gep(arg_in, [ctx.int32_ty(0), idx]) + current = loop_builder.load(current_ptr) + current_abs = loop_builder.call(fabs_f, [current]) + current = builder.select(is_abs_val, current_abs, current) + + old_extreme = loop_builder.load(extreme_val_ptr) + cmp_op = ">" if direction == MAX else "<" + is_new_extreme = loop_builder.fcmp_unordered(cmp_op, current, old_extreme) + + with loop_builder.if_then(is_new_extreme): + loop_builder.store(current, extreme_val_ptr) + loop_builder.store(num_extremes_ptr.type.pointee(1), num_extremes_ptr) + + is_old_extreme = loop_builder.fcmp_ordered("==", current, old_extreme) + with loop_builder.if_then(is_old_extreme): + extreme_count = loop_builder.load(num_extremes_ptr) + extreme_count = loop_builder.add(extreme_count, extreme_count.type(1)) + loop_builder.store(extreme_count, num_extremes_ptr) + + + if tie == FIRST: + extreme_start = num_extremes_ptr.type.pointee(0) + extreme_stop = num_extremes_ptr.type.pointee(1) + + elif tie == LAST: + extreme_stop = builder.load(num_extremes_ptr) + extreme_start = builder.sub(extreme_stop, extreme_stop.type(1)) + + elif tie == ALL: + extreme_start = num_extremes_ptr.type.pointee(0) + extreme_stop = builder.load(num_extremes_ptr) + + else: + assert False + + + extreme_val = builder.load(extreme_val_ptr) + extreme_write_val = builder.select(is_indicator, extreme_val.type(1), extreme_val) + next_extreme_ptr = builder.alloca(num_extremes_ptr.type.pointee) + builder.store(next_extreme_ptr.type.pointee(0), next_extreme_ptr) + + pnlvm.helpers.printf(ctx, + builder, + "{} replacing extreme values of %e from <%u,%u) out of %u\n".format(self.name), + extreme_val, + extreme_start, + extreme_stop, + builder.load(num_extremes_ptr), + tags={"one_hot"}) + + with pnlvm.helpers.array_ptr_loop(builder, arg_in, "mark_extremes") as (loop_builder, idx): + current_ptr = loop_builder.gep(arg_in, [ctx.int32_ty(0), idx]) + current = loop_builder.load(current_ptr) + current_abs = loop_builder.call(fabs_f, [current]) + current = builder.select(is_abs_val, current_abs, current) + + is_extreme = loop_builder.fcmp_ordered("==", current, extreme_val) + current_extreme_idx = loop_builder.load(next_extreme_ptr) + + with loop_builder.if_then(is_extreme): + next_extreme_idx = loop_builder.add(current_extreme_idx, current_extreme_idx.type(1)) + loop_builder.store(next_extreme_idx, next_extreme_ptr) - prev_res_ptr = b1.gep(arg_out, [ctx.int32_ty(0), best_idx]) - cur_res_ptr = b1.gep(arg_out, [ctx.int32_ty(0), idx]) + is_after_start = loop_builder.icmp_unsigned(">=", current_extreme_idx, extreme_start) + is_before_stop = loop_builder.icmp_unsigned("<", current_extreme_idx, 
extreme_stop) - # Make sure other elements are zeroed - builder.store(cur_res_ptr.type.pointee(0), cur_res_ptr) + should_write_extreme = loop_builder.and_(is_extreme, is_after_start) + should_write_extreme = loop_builder.and_(should_write_extreme, is_before_stop) - cmp_res = builder.fcmp_unordered(cmp_op, cmp_curr, cmp_prev) - with builder.if_then(cmp_res): - builder.store(prev_res_ptr.type.pointee(0), prev_res_ptr) - builder.store(val, cur_res_ptr) - builder.store(idx, best_idx_ptr) + write_value = loop_builder.select(should_write_extreme, extreme_write_val, extreme_write_val.type(0)) + out_ptr = loop_builder.gep(arg_out, [ctx.int32_ty(0), idx]) + loop_builder.store(write_value, out_ptr) return builder diff --git a/tests/functions/test_selection.py b/tests/functions/test_selection.py index e033281afd..039773c7ff 100644 --- a/tests/functions/test_selection.py +++ b/tests/functions/test_selection.py @@ -35,18 +35,18 @@ pytest.param(pnl.OneHot, test_var, {'mode':kw.ARG_MIN_INDICATOR}, (0., 0., 0., 0., 0., 0., 0., 0., 0., 1.), id="OneHot ARG_MIN_INDICATOR"), pytest.param(pnl.OneHot, test_var, {'mode':kw.ARG_MIN_ABS_INDICATOR}, (0., 0., 0., 1.,0., 0., 0., 0., 0., 0.), id="OneHot ARG_MIN_ABS_INDICATOR"), pytest.param(pnl.OneHot, -test_var, {'mode':kw.ARG_MIN_ABS_INDICATOR}, (0., 0., 0., 1.,0., 0., 0., 0., 0., 0.), id="OneHot ARG_MIN_ABS_INDICATOR Neg"), - pytest.param(pnl.OneHot, test_var, {'mode':kw.MAX_VAL}, (0., 0., 0., 0., 0., 0., 0., 0., 0.92732552, 0.), marks=pytest.mark.llvm_not_implemented, id="OneHot MAX_VAL"), - pytest.param(pnl.OneHot, test_var, {'mode':kw.MAX_ABS_VAL}, (0., 0., 0., 0., 0., 0., 0., 0., 0.92732552, 0.), marks=pytest.mark.llvm_not_implemented, id="OneHot MAX_ABS_VAL"), - pytest.param(pnl.OneHot, -test_var, {'mode':kw.MAX_ABS_VAL}, (0., 0., 0., 0., 0., 0., 0., 0., 0.92732552, 0.), marks=pytest.mark.llvm_not_implemented, id="OneHot MAX_ABS_VAL Neg"), - pytest.param(pnl.OneHot, test_var, {'mode':kw.MAX_INDICATOR}, (0., 0., 0., 0., 0., 0., 0., 0., 1., 0.), marks=pytest.mark.llvm_not_implemented, id="OneHot MAX_INDICATOR"), - pytest.param(pnl.OneHot, test_var, {'mode':kw.MAX_ABS_INDICATOR}, (0., 0., 0., 0., 0., 0., 0., 0., 1., 0.), marks=pytest.mark.llvm_not_implemented, id="OneHot MAX_ABS_INDICATOR"), - pytest.param(pnl.OneHot, -test_var, {'mode':kw.MAX_ABS_INDICATOR}, (0., 0., 0., 0., 0., 0., 0., 0., 1., 0.), marks=pytest.mark.llvm_not_implemented, id="OneHot MAX_ABS_INDICATOR Neg"), - pytest.param(pnl.OneHot, test_var, {'mode':kw.MIN_VAL}, (0., 0., 0., 0., 0., 0., 0., 0., 0., -0.23311696), marks=pytest.mark.llvm_not_implemented, id="OneHot MIN_VAL"), - pytest.param(pnl.OneHot, test_var, {'mode':kw.MIN_ABS_VAL}, (0., 0., 0., 0.08976637, 0., 0., 0., 0., 0., 0.), marks=pytest.mark.llvm_not_implemented, id="OneHot MIN_ABS_VAL"), - pytest.param(pnl.OneHot, -test_var, {'mode':kw.MIN_ABS_VAL}, (0., 0., 0., 0.08976637, 0., 0., 0., 0., 0., 0.), marks=pytest.mark.llvm_not_implemented, id="OneHot MIN_ABS_VAL Neg"), - pytest.param(pnl.OneHot, test_var, {'mode':kw.MIN_INDICATOR}, (0., 0., 0., 0., 0., 0., 0., 0., 0., 1.), marks=pytest.mark.llvm_not_implemented, id="OneHot MIN_INDICATOR"), - pytest.param(pnl.OneHot, test_var, {'mode':kw.MIN_ABS_INDICATOR}, (0., 0., 0., 1.,0., 0., 0., 0., 0., 0.), marks=pytest.mark.llvm_not_implemented, id="OneHot MIN_ABS_INDICATOR"), - pytest.param(pnl.OneHot, -test_var, {'mode':kw.MIN_ABS_INDICATOR}, (0., 0., 0., 1.,0., 0., 0., 0., 0., 0.), marks=pytest.mark.llvm_not_implemented, id="OneHot MIN_ABS_INDICATOR Neg"), + pytest.param(pnl.OneHot, test_var, 
{'mode':kw.MAX_VAL}, (0., 0., 0., 0., 0., 0., 0., 0., 0.92732552, 0.), id="OneHot MAX_VAL"),
+    pytest.param(pnl.OneHot, test_var, {'mode':kw.MAX_ABS_VAL}, (0., 0., 0., 0., 0., 0., 0., 0., 0.92732552, 0.), id="OneHot MAX_ABS_VAL"),
+    pytest.param(pnl.OneHot, -test_var, {'mode':kw.MAX_ABS_VAL}, (0., 0., 0., 0., 0., 0., 0., 0., 0.92732552, 0.), id="OneHot MAX_ABS_VAL Neg"),
+    pytest.param(pnl.OneHot, test_var, {'mode':kw.MAX_INDICATOR}, (0., 0., 0., 0., 0., 0., 0., 0., 1., 0.), id="OneHot MAX_INDICATOR"),
+    pytest.param(pnl.OneHot, test_var, {'mode':kw.MAX_ABS_INDICATOR}, (0., 0., 0., 0., 0., 0., 0., 0., 1., 0.), id="OneHot MAX_ABS_INDICATOR"),
+    pytest.param(pnl.OneHot, -test_var, {'mode':kw.MAX_ABS_INDICATOR}, (0., 0., 0., 0., 0., 0., 0., 0., 1., 0.), id="OneHot MAX_ABS_INDICATOR Neg"),
+    pytest.param(pnl.OneHot, test_var, {'mode':kw.MIN_VAL}, (0., 0., 0., 0., 0., 0., 0., 0., 0., -0.23311696), id="OneHot MIN_VAL"),
+    pytest.param(pnl.OneHot, test_var, {'mode':kw.MIN_ABS_VAL}, (0., 0., 0., 0.08976637, 0., 0., 0., 0., 0., 0.), id="OneHot MIN_ABS_VAL"),
+    pytest.param(pnl.OneHot, -test_var, {'mode':kw.MIN_ABS_VAL}, (0., 0., 0., 0.08976637, 0., 0., 0., 0., 0., 0.), id="OneHot MIN_ABS_VAL Neg"),
+    pytest.param(pnl.OneHot, test_var, {'mode':kw.MIN_INDICATOR}, (0., 0., 0., 0., 0., 0., 0., 0., 0., 1.), id="OneHot MIN_INDICATOR"),
+    pytest.param(pnl.OneHot, test_var, {'mode':kw.MIN_ABS_INDICATOR}, (0., 0., 0., 1.,0., 0., 0., 0., 0., 0.), id="OneHot MIN_ABS_INDICATOR"),
+    pytest.param(pnl.OneHot, -test_var, {'mode':kw.MIN_ABS_INDICATOR}, (0., 0., 0., 1.,0., 0., 0., 0., 0., 0.), id="OneHot MIN_ABS_INDICATOR Neg"),
     pytest.param(pnl.OneHot, [test_var, test_prob], {'mode':kw.PROB}, (0., 0., 0., 0.08976636599379373, 0., 0., 0., 0., 0., 0.), id="OneHot PROB"),
     pytest.param(pnl.OneHot, [test_var, test_prob], {'mode':kw.PROB_INDICATOR}, (0., 0., 0., 1., 0., 0., 0., 0., 0., 0.), id="OneHot PROB_INDICATOR"),
     pytest.param(pnl.OneHot, [test_var, test_philox], {'mode':kw.PROB}, expected_philox_prob, id="OneHot PROB Philox"),
     pytest.param(pnl.OneHot, [test_var, test_philox], {'mode':kw.PROB_INDICATOR}, expected_philox_ind, id="OneHot PROB_INDICATOR Philox"),
 ]

From a8d679ac8db5e388d03e30aadde9d8ec7251636e Mon Sep 17 00:00:00 2001
From: Jan Vesely
Date: Sun, 17 Nov 2024 13:42:50 -0500
Subject: [PATCH 17/34] llvm/DictionaryMemory: Remove OneHot mode workarounds

Not needed since OneHot now supports all modes != DETERMINISTIC.

Clean up code style.
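The distinction the workaround papered over, sketched in plain Python
(illustrative names, not PsyNeuLink code): the ARG_* modes mark a single
winner, while the MIN_*/MAX_* modes mark every element tied for the extreme,
which the LLVM backend previously could not express.

    import numpy as np

    def arg_min(values):
        # single winner: first occurrence of the minimum
        out = np.zeros_like(values, dtype=float)
        out[np.argmin(values)] = np.min(values)
        return out

    def min_val(values):
        # all winners: every element tied for the minimum
        return np.where(values == np.min(values), values, 0.0)

    v = np.array([3., 1., 2., 1.])
    # arg_min(v) -> [0., 1., 0., 0.]    min_val(v) -> [0., 1., 0., 1.]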
Signed-off-by: Jan Vesely --- .../functions/stateful/memoryfunctions.py | 49 +++++++------------ 1 file changed, 19 insertions(+), 30 deletions(-) diff --git a/psyneulink/core/components/functions/stateful/memoryfunctions.py b/psyneulink/core/components/functions/stateful/memoryfunctions.py index cd97cdb418..7466f968ab 100644 --- a/psyneulink/core/components/functions/stateful/memoryfunctions.py +++ b/psyneulink/core/components/functions/stateful/memoryfunctions.py @@ -2372,6 +2372,7 @@ def _gen_llvm_function_body(self, ctx, builder, params, state, arg_in, arg_out, max_entries = len(vals_ptr.type.pointee) entries = builder.load(count_ptr) entries = pnlvm.helpers.uint_min(builder, entries, max_entries) + # The call to random function needs to be after check to match python with builder.if_then(retr_rand): rand_ptr = builder.alloca(ctx.float_ty) @@ -2385,53 +2386,41 @@ def _gen_llvm_function_body(self, ctx, builder, params, state, arg_in, arg_out, with builder.if_then(retr, likely=True): # Determine distances distance_f = ctx.import_llvm_function(self.distance_function) - distance_params, distance_state = ctx.get_param_or_state_ptr(builder, self, "distance_function", param_struct_ptr=params, state_struct_ptr=state) + distance_params, distance_state = ctx.get_param_or_state_ptr(builder, + self, + "distance_function", + param_struct_ptr=params, + state_struct_ptr=state) distance_arg_in = builder.alloca(distance_f.args[2].type.pointee) - builder.store(builder.load(var_key_ptr), - builder.gep(distance_arg_in, [ctx.int32_ty(0), - ctx.int32_ty(0)])) + builder.store(builder.load(var_key_ptr), builder.gep(distance_arg_in, [ctx.int32_ty(0), ctx.int32_ty(0)])) selection_arg_in = builder.alloca(pnlvm.ir.ArrayType(distance_f.args[3].type.pointee, max_entries)) with pnlvm.helpers.for_loop_zero_inc(builder, entries, "distance_loop") as (b, idx): compare_ptr = b.gep(keys_ptr, [ctx.int32_ty(0), idx]) - b.store(b.load(compare_ptr), - b.gep(distance_arg_in, [ctx.int32_ty(0), ctx.int32_ty(1)])) + b.store(b.load(compare_ptr), b.gep(distance_arg_in, [ctx.int32_ty(0), ctx.int32_ty(1)])) distance_arg_out = b.gep(selection_arg_in, [ctx.int32_ty(0), idx]) - b.call(distance_f, [distance_params, distance_state, - distance_arg_in, distance_arg_out]) - - # MODIFIED 10/13/24 NEW: - # IMPLEMENTATION NOTE: - # REPLACE MIN_VAL with ARG_MIN and MIN_INDICATOR with ARG_MIN_INDICATOR - # until the MIN_XXX args are implemented in LLVM - # since, at present, the tests don't seem to distinguish between these (i.e., return of multiple values; - # should add tests that do so once MIN_VAL and related args are implemented in LLVM) - if isinstance(self.selection_function, OneHot): - mode = self.selection_function.mode - if mode == MIN_VAL: - self.selection_function.mode = ARG_MIN - elif mode == MIN_INDICATOR: - self.selection_function.mode = ARG_MIN_INDICATOR - # MODIFIED 10/13/24 END + b.call(distance_f, [distance_params, distance_state, distance_arg_in, distance_arg_out]) + selection_f = ctx.import_llvm_function(self.selection_function) - selection_params, selection_state = ctx.get_param_or_state_ptr(builder, self, "selection_function", param_struct_ptr=params, state_struct_ptr=state) + selection_params, selection_state = ctx.get_param_or_state_ptr(builder, + self, + "selection_function", + param_struct_ptr=params, + state_struct_ptr=state) selection_arg_out = builder.alloca(selection_f.args[3].type.pointee) - builder.call(selection_f, [selection_params, selection_state, - selection_arg_in, selection_arg_out]) + 
builder.call(selection_f, [selection_params, selection_state, selection_arg_in, selection_arg_out]) # Find the selected index selected_idx_ptr = builder.alloca(ctx.int32_ty) builder.store(ctx.int32_ty(0), selected_idx_ptr) - with pnlvm.helpers.for_loop_zero_inc(builder, entries, "distance_loop") as (b,idx): + with pnlvm.helpers.for_loop_zero_inc(builder, entries, "selection_loop") as (b, idx): selection_val = b.load(b.gep(selection_arg_out, [ctx.int32_ty(0), idx])) non_zero = b.fcmp_ordered('!=', selection_val, selection_val.type(0)) with b.if_then(non_zero): b.store(idx, selected_idx_ptr) selected_idx = builder.load(selected_idx_ptr) - selected_key = builder.load(builder.gep(keys_ptr, [ctx.int32_ty(0), - selected_idx])) - selected_val = builder.load(builder.gep(vals_ptr, [ctx.int32_ty(0), - selected_idx])) + selected_key = builder.load(builder.gep(keys_ptr, [ctx.int32_ty(0), selected_idx])) + selected_val = builder.load(builder.gep(vals_ptr, [ctx.int32_ty(0), selected_idx])) builder.store(selected_key, out_key_ptr) builder.store(selected_val, out_val_ptr) From 0a4e94c15c3a5ddfd1cf78e128e51930f72e3988 Mon Sep 17 00:00:00 2001 From: Jan Vesely Date: Wed, 20 Nov 2024 10:48:29 -0500 Subject: [PATCH 18/34] llvm: Convert recursive array iterator into contextmanager Remove call_elementwise_operation helper. Signed-off-by: Jan Vesely --- psyneulink/core/llvm/helpers.py | 28 ++++++++--------- tests/llvm/test_helpers.py | 53 +++++---------------------------- 2 files changed, 21 insertions(+), 60 deletions(-) diff --git a/psyneulink/core/llvm/helpers.py b/psyneulink/core/llvm/helpers.py index c3dc3336bf..7d7b7df10a 100644 --- a/psyneulink/core/llvm/helpers.py +++ b/psyneulink/core/llvm/helpers.py @@ -377,23 +377,23 @@ def array_from_shape(shape, element_ty): array_ty = ir.ArrayType(array_ty, dim) return array_ty -def recursive_iterate_arrays(ctx, builder, u, *args): +@contextmanager +def recursive_iterate_arrays(ctx, builder, *args, loop_id="recursive_iteration"): """Recursively iterates over all elements in scalar arrays of the same shape""" - assert isinstance(u.type.pointee, ir.ArrayType), "Can only iterate over arrays!" + + assert len(args) > 0, "Need at least one array to iterate over!" + assert all(isinstance(arr.type.pointee, ir.ArrayType) for arr in args), "Can only iterate over arrays!" + + u = args[0] assert all(len(u.type.pointee) == len(v.type.pointee) for v in args), "Tried to iterate over differing lengths!" - with array_ptr_loop(builder, u, "recursive_iteration") as (b, idx): - u_ptr = b.gep(u, [ctx.int32_ty(0), idx]) - arg_ptrs = (b.gep(v, [ctx.int32_ty(0), idx]) for v in args) - if is_scalar(u_ptr): - yield (u_ptr, *arg_ptrs) - else: - yield from recursive_iterate_arrays(ctx, b, u_ptr, *arg_ptrs) -# TODO: Remove this function. Can be replaced by `recursive_iterate_arrays` -def call_elementwise_operation(ctx, builder, x, operation, output_ptr): - """Recurse through an array structure and call operation on each scalar element of the structure. 
Store result in output_ptr""" - for (inp_ptr, out_ptr) in recursive_iterate_arrays(ctx, builder, x, output_ptr): - builder.store(operation(ctx, builder, builder.load(inp_ptr)), out_ptr) + with array_ptr_loop(builder, u, loop_id) as (b, idx): + arg_ptrs = tuple(b.gep(arr, [ctx.int32_ty(0), idx]) for arr in args) + if is_scalar(arg_ptrs[0]): + yield (b, *arg_ptrs) + else: + with recursive_iterate_arrays(ctx, b, *arg_ptrs) as (b, *nested_args): + yield (b, *nested_args) def printf(ctx, builder, fmt, *args, tags:set): diff --git a/tests/llvm/test_helpers.py b/tests/llvm/test_helpers.py index fa43c7fd31..6653dfd408 100644 --- a/tests/llvm/test_helpers.py +++ b/tests/llvm/test_helpers.py @@ -447,54 +447,15 @@ def test_helper_numerical(mode, op, var, expected, fp_type): np.testing.assert_allclose(res, expected) -@pytest.mark.llvm -@pytest.mark.parametrize('mode', ['CPU', pytest.helpers.cuda_param('PTX')]) -@pytest.mark.parametrize('var,expected', [ - (np.asfarray([1,2,3]), np.asfarray([2,3,4])), - (np.asfarray([[1,2],[3,4]]), np.asfarray([[2,3],[4,5]])), -], ids=["vector", "matrix"]) -def test_helper_elementwise_op(mode, var, expected): - with pnlvm.LLVMBuilderContext.get_current() as ctx: - arr_ptr_ty = ctx.convert_python_struct_to_llvm_ir(var).as_pointer() - - func_ty = ir.FunctionType(ir.VoidType(), [arr_ptr_ty, arr_ptr_ty]) - - custom_name = ctx.get_unique_name("elementwise_op") - function = ir.Function(ctx.module, func_ty, name=custom_name) - inp, out = function.args - block = function.append_basic_block(name="entry") - builder = ir.IRBuilder(block) - - pnlvm.helpers.call_elementwise_operation(ctx, builder, inp, - lambda ctx, builder, x: builder.fadd(x.type(1.0), x), out) - builder.ret_void() - - bin_f = pnlvm.LLVMBinaryFunction.get(custom_name) - - vec = np.asfarray(var, dtype=bin_f.np_arg_dtypes[0].base) - res = bin_f.np_buffer_for_arg(1) - - if mode == 'CPU': - bin_f(vec, res) - else: - bin_f.cuda_wrap_call(vec, res) - - assert np.array_equal(res, expected) @pytest.mark.llvm @pytest.mark.parametrize('mode', ['CPU', pytest.helpers.cuda_param('PTX')]) @pytest.mark.parametrize('var1,var2,expected', [ (np.array([1.,2.,3.]), np.array([1.,2.,3.]), np.array([2.,4.,6.])), (np.array([1.,2.,3.]), np.array([0.,1.,2.]), np.array([1.,3.,5.])), - (np.array([[1.,2.,3.], - [4.,5.,6.], - [7.,8.,9.]]), - np.array([[10.,11.,12.], - [13.,14.,15.], - [16.,17.,18.]]), - np.array([[11.,13.,15.], - [17.,19.,21.], - [23.,25.,27.]])), + (np.array([[1.,2.,3.], [4.,5.,6.], [7.,8.,9.]]), + np.array([[10.,11.,12.], [13.,14.,15.], [16.,17.,18.]]), + np.array([[11.,13.,15.], [17.,19.,21.], [23.,25.,27.]])), ]) def test_helper_recursive_iterate_arrays(mode, var1, var2, expected): with pnlvm.LLVMBuilderContext.get_current() as ctx: @@ -508,10 +469,10 @@ def test_helper_recursive_iterate_arrays(mode, var1, var2, expected): block = function.append_basic_block(name="entry") builder = ir.IRBuilder(block) - for (a_ptr, b_ptr, o_ptr) in pnlvm.helpers.recursive_iterate_arrays(ctx, builder, u, v, out): - a = builder.load(a_ptr) - b = builder.load(b_ptr) - builder.store(builder.fadd(a,b), o_ptr) + with pnlvm.helpers.recursive_iterate_arrays(ctx, builder, u, v, out) as (loop_builder, a_ptr, b_ptr, o_ptr): + a = loop_builder.load(a_ptr) + b = loop_builder.load(b_ptr) + loop_builder.store(loop_builder.fadd(a, b), o_ptr) builder.ret_void() From 237af5f855c2559838ce6873a8eb68ca70138a95 Mon Sep 17 00:00:00 2001 From: Jan Vesely Date: Wed, 20 Nov 2024 09:51:35 -0500 Subject: [PATCH 19/34] llvm/Component: Allow 'indicator' and 'abs_val' 
parameters in OneHot DETERMINISTIC mode Drop random_state and seed if "tie" is not RANDOM Signed-off-by: Jan Vesely --- psyneulink/core/components/component.py | 18 ++++++++++++++---- 1 file changed, 14 insertions(+), 4 deletions(-) diff --git a/psyneulink/core/components/component.py b/psyneulink/core/components/component.py index fa00fbf1be..64e26e8b3d 100644 --- a/psyneulink/core/components/component.py +++ b/psyneulink/core/components/component.py @@ -528,14 +528,14 @@ Context, ContextError, ContextFlags, INITIALIZATION_STATUS_FLAGS, _get_time, handle_external_context from psyneulink.core.globals.mdf import MDFSerializable from psyneulink.core.globals.keywords import \ - CONTEXT, CONTROL_PROJECTION, DEFERRED_INITIALIZATION, EXECUTE_UNTIL_FINISHED, \ + CONTEXT, CONTROL_PROJECTION, DEFERRED_INITIALIZATION, DETERMINISTIC, EXECUTE_UNTIL_FINISHED, \ FUNCTION, FUNCTION_PARAMS, INIT_FULL_EXECUTE_METHOD, INPUT_PORTS, \ LEARNING, LEARNING_PROJECTION, MATRIX, MAX_EXECUTIONS_BEFORE_FINISHED, \ MODEL_SPEC_ID_PSYNEULINK, MODEL_SPEC_ID_METADATA, \ MODEL_SPEC_ID_INPUT_PORTS, MODEL_SPEC_ID_OUTPUT_PORTS, \ MODEL_SPEC_ID_MDF_VARIABLE, \ MODULATORY_SPEC_KEYWORDS, NAME, OUTPUT_PORTS, OWNER, PARAMS, PREFS_ARG, \ - RESET_STATEFUL_FUNCTION_WHEN, INPUT_SHAPES, VALUE, VARIABLE, SHARED_COMPONENT_TYPES + RANDOM, RESET_STATEFUL_FUNCTION_WHEN, INPUT_SHAPES, VALUE, VARIABLE, SHARED_COMPONENT_TYPES from psyneulink.core.globals.log import LogCondition from psyneulink.core.globals.parameters import \ Defaults, SharedParameter, Parameter, ParameterAlias, ParameterError, ParametersBase, check_user_specified, copy_parameter_value, is_array_like @@ -1388,6 +1388,9 @@ def _get_compilation_state(self): if cost_functions.DURATION not in cost_functions: blacklist.add('duration_cost_fct') + if getattr(self, "mode", None) == DETERMINISTIC and getattr(self, "tie", None) != RANDOM: + whitelist.remove('random_state') + # Drop previous_value from MemoryFunctions if hasattr(self.parameters, 'duplicate_keys'): blacklist.add("previous_value") @@ -1505,13 +1508,20 @@ def _get_compilation_params(self): "retain_torch_trained_outputs", "retain_torch_targets", "retain_torch_losses" "torch_trained_outputs", "torch_targets", "torch_losses", # should be added to relevant _gen_llvm_function... when aug: - # OneHot: - 'abs_val', 'indicator', # SoftMax: 'mask_threshold', 'adapt_scale', 'adapt_base', 'adapt_entropy_weighting', # LCAMechanism "mask" } + + # OneHot: + # * runtime abs_val and indicator are only used in deterministic mode. + # * random_state and seed are only used in RANDOM tie resolution. + if getattr(self, "mode", None) != DETERMINISTIC: + blacklist.update(['abs_val', 'indicator']) + elif getattr(self, "tie", None) != RANDOM: + blacklist.add("seed") + # Mechanism's need few extra entries: # * matrix -- is never used directly, and is flatened below # * integration_rate -- shape mismatch with param port input From cd9ecee004c47d8b6c8818e62f20dc0dec49e8b3 Mon Sep 17 00:00:00 2001 From: Jan Vesely Date: Wed, 20 Nov 2024 10:14:12 -0500 Subject: [PATCH 20/34] llvm/OneHot: Add basic implementation of DETERMINISTIC mode Use "recursive_iterate_arrays" to support 2d input. 
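For orientation, a rough numpy sketch of the semantics this compiled path mirrors (illustrative only: `one_hot_deterministic` is not a function in the codebase, the string arguments stand in for PNL's keywords, and only FIRST-style tie resolution is shown):

    import numpy as np

    def one_hot_deterministic(x, direction="max", abs_val=False, indicator=False):
        # Zero out everything except the first extreme element of x.
        x = np.asarray(x, dtype=float)
        vals = np.abs(x) if abs_val else x
        # argmax/argmin flatten the input, so 2d arrays are visited
        # element-wise, which is what recursive_iterate_arrays provides
        # on the LLVM side.
        idx = np.argmax(vals) if direction == "max" else np.argmin(vals)
        out = np.zeros_like(x)
        out.flat[idx] = 1.0 if indicator else x.flat[idx]
        return out

    print(one_hot_deterministic([[1., -3., 2.], [0.5, 3., -1.]], abs_val=True))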
Signed-off-by: Jan Vesely --- .../nonstateful/selectionfunctions.py | 18 ++++++++++++------ tests/functions/test_selection.py | 3 ++- 2 files changed, 14 insertions(+), 7 deletions(-) diff --git a/psyneulink/core/components/functions/nonstateful/selectionfunctions.py b/psyneulink/core/components/functions/nonstateful/selectionfunctions.py index 54d3d023dd..28b3830c03 100644 --- a/psyneulink/core/components/functions/nonstateful/selectionfunctions.py +++ b/psyneulink/core/components/functions/nonstateful/selectionfunctions.py @@ -477,7 +477,16 @@ def _gen_llvm_function_body(self, ctx, builder, params, state, arg_in, arg_out, return builder elif self.mode == DETERMINISTIC: - assert False, "DETERMINISTIC mode not supported" + direction = self.direction + tie = self.tie + abs_val_ptr = ctx.get_param_or_state_ptr(builder, self, self.parameters.abs_val, param_struct_ptr=params) + indicator_ptr = ctx.get_param_or_state_ptr(builder, self, self.parameters.indicator, param_struct_ptr=params) + + abs_val = builder.load(abs_val_ptr) + is_abs_val = builder.fcmp_unordered("!=", abs_val, abs_val.type(0)) + + indicator = builder.load(indicator_ptr) + is_indicator = builder.fcmp_unordered("!=", indicator, indicator.type(0)) else: direction, abs_val, indicator, tie = self._parse_mode(self.mode) @@ -492,9 +501,8 @@ def _gen_llvm_function_body(self, ctx, builder, params, state, arg_in, arg_out, fabs_f = ctx.get_builtin("fabs", [extreme_val_ptr.type.pointee]) - with pnlvm.helpers.array_ptr_loop(builder, arg_in, "count_extremes") as (loop_builder, idx): + with pnlvm.helpers.recursive_iterate_arrays(ctx, builder, arg_in, loop_id="count_extremes") as (loop_builder, current_ptr): - current_ptr = loop_builder.gep(arg_in, [ctx.int32_ty(0), idx]) current = loop_builder.load(current_ptr) current_abs = loop_builder.call(fabs_f, [current]) current = builder.select(is_abs_val, current_abs, current) @@ -544,8 +552,7 @@ def _gen_llvm_function_body(self, ctx, builder, params, state, arg_in, arg_out, builder.load(num_extremes_ptr), tags={"one_hot"}) - with pnlvm.helpers.array_ptr_loop(builder, arg_in, "mark_extremes") as (loop_builder, idx): - current_ptr = loop_builder.gep(arg_in, [ctx.int32_ty(0), idx]) + with pnlvm.helpers.recursive_iterate_arrays(ctx, builder, arg_in, arg_out, loop_id="mark_extremes") as (loop_builder, current_ptr, out_ptr): current = loop_builder.load(current_ptr) current_abs = loop_builder.call(fabs_f, [current]) current = builder.select(is_abs_val, current_abs, current) @@ -564,7 +571,6 @@ def _gen_llvm_function_body(self, ctx, builder, params, state, arg_in, arg_out, should_write_extreme = loop_builder.and_(should_write_extreme, is_before_stop) write_value = loop_builder.select(should_write_extreme, extreme_write_val, extreme_write_val.type(0)) - out_ptr = loop_builder.gep(arg_out, [ctx.int32_ty(0), idx]) loop_builder.store(write_value, out_ptr) return builder diff --git a/tests/functions/test_selection.py b/tests/functions/test_selection.py index 039773c7ff..5ee96801fc 100644 --- a/tests/functions/test_selection.py +++ b/tests/functions/test_selection.py @@ -85,7 +85,6 @@ def test_basic(func, variable, params, expected, benchmark, func_mode): @pytest.mark.benchmark -@pytest.mark.llvm_not_implemented @pytest.mark.parametrize("variable, direction, abs_val, tie, expected", [ # simple @@ -255,6 +254,8 @@ def test_basic(func, variable, params, expected, benchmark, func_mode): ], ids=lambda x: x if isinstance(x, str) else str(getattr(x, 'shape', '')) ) @pytest.mark.parametrize("indicator", ["indicator", 
"value"]) def test_one_hot_mode_deterministic(benchmark, variable, tie, indicator, direction, abs_val, expected, func_mode): + if func_mode != "Python" and tie == kw.RANDOM: + pytest.skip("not implemented") f = pnl.OneHot(default_variable=np.zeros_like(variable), mode=kw.DETERMINISTIC, From a2cb5ae89ca429dd8f516d26a3bc92aecb9f3be0 Mon Sep 17 00:00:00 2001 From: Jan Vesely Date: Wed, 13 Nov 2024 09:20:38 -0500 Subject: [PATCH 21/34] tests/llvm: Test more int32 samples in MT builtin test Change test name to include "int32" Use np.uint32 type for numpy PRNG Signed-off-by: Jan Vesely --- tests/llvm/test_builtins_mt_random.py | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/tests/llvm/test_builtins_mt_random.py b/tests/llvm/test_builtins_mt_random.py index f19f01e78e..b9484e4a60 100644 --- a/tests/llvm/test_builtins_mt_random.py +++ b/tests/llvm/test_builtins_mt_random.py @@ -10,7 +10,7 @@ @pytest.mark.parametrize('mode', ['Python', 'numpy', pytest.param('LLVM', marks=pytest.mark.llvm), pytest.helpers.cuda_param('PTX')]) -def test_random_int(benchmark, mode): +def test_random_int32(benchmark, mode): res = [] if mode == 'Python': state = random.Random(SEED) @@ -23,7 +23,7 @@ def f(): state = np.random.RandomState([SEED]) def f(): - return state.randint(0xffffffff, dtype=np.int64) + return state.randint(0xffffffff, dtype=np.uint32) elif mode == 'LLVM': init_fun = pnlvm.LLVMBinaryFunction.get('__pnl_builtin_mt_rand_init') @@ -57,8 +57,8 @@ def f(): else: assert False, "Unknown mode: {}".format(mode) - res = [f(), f()] - np.testing.assert_allclose(res, [3626764237, 1654615998]) + res = [f(), f(), f(), f(), f()] + np.testing.assert_allclose(res, [3626764237, 1654615998, 3255389356, 3823568514, 1806341205]) benchmark(f) From 014ca35c2b0d402080899b07259aabc0e88dde27 Mon Sep 17 00:00:00 2001 From: Jan Vesely Date: Wed, 13 Nov 2024 10:32:59 -0500 Subject: [PATCH 22/34] llvm: Implement range integer generation to match Numpy's Uses Lemire's algorithm [0]. Applies to Philox PRNG using "integers" API call, "randint" uses older, "masked rejection sampling" approach. [0] https://arxiv.org/abs/1805.10941 Signed-off-by: Jan Vesely --- psyneulink/core/llvm/builtins.py | 90 ++++++++++++++++++++++- tests/llvm/test_builtins_philox_random.py | 58 ++++++++++++++- 2 files changed, 141 insertions(+), 7 deletions(-) diff --git a/psyneulink/core/llvm/builtins.py b/psyneulink/core/llvm/builtins.py index 20920ccf59..8b7599ab5b 100644 --- a/psyneulink/core/llvm/builtins.py +++ b/psyneulink/core/llvm/builtins.py @@ -17,8 +17,10 @@ def _setup_builtin_func_builder(ctx, name, args, *, return_type=ir.VoidType()): - builder = ctx.create_llvm_function(args, None, _BUILTIN_PREFIX + name, - return_type=return_type) + if not name.startswith(_BUILTIN_PREFIX): + name = _BUILTIN_PREFIX + name + + builder = ctx.create_llvm_function(args, None, name, return_type=return_type) # Add noalias attribute for a in builder.function.args: @@ -757,7 +759,6 @@ def _setup_mt_rand_integer(ctx, state_ty): return builder.function - def _setup_mt_rand_float(ctx, state_ty, gen_int): """ Mersenne Twister double prcision random number generation. @@ -1138,6 +1139,88 @@ def _setup_philox_rand_int32(ctx, state_ty, gen_int64): return builder.function +def _setup_rand_lemire_int32(ctx, state_ty, gen_int32): + """ + Uses Lemire's algorithm - https://arxiv.org/abs/1805.10941 + As implemented in Numpy to match Numpy results. 
+ """ + + out_ty = gen_int32.args[1].type.pointee + builder = _setup_builtin_func_builder(ctx, gen_int32.name + "_bounded", (state_ty.as_pointer(), out_ty, out_ty, out_ty.as_pointer())) + state, lower, upper, out_ptr = builder.function.args + + rand_range_excl = builder.sub(upper, lower) + rand_range_excl_64 = builder.zext(rand_range_excl, ir.IntType(64)) + rand_range = builder.sub(rand_range_excl, rand_range_excl.type(1)) + + + builder.call(gen_int32, [state, out_ptr]) + val = builder.load(out_ptr) + + is_full_range = builder.icmp_unsigned("==", rand_range, rand_range.type(0xffffffff)) + with builder.if_then(is_full_range): + builder.ret_void() + + val64 = builder.zext(val, rand_range_excl_64.type) + m = builder.mul(val64, rand_range_excl_64) + + # Store current result as output. It will be overwritten below if needed. + out_val = builder.lshr(m, m.type(32)) + out_val = builder.trunc(out_val, out_ptr.type.pointee) + out_val = builder.add(out_val, lower) + builder.store(out_val, out_ptr) + + leftover = builder.and_(m, m.type(0xffffffff)) + + is_good = builder.icmp_unsigned(">=", leftover, rand_range_excl_64) + with builder.if_then(is_good): + builder.ret_void() + + # Apply rejection sampling + leftover_ptr = builder.alloca(leftover.type) + builder.store(leftover, leftover_ptr) + + rand_range_64 = builder.zext(rand_range, ir.IntType(64)) + threshold = builder.sub(rand_range_64, rand_range_64.type(0xffffffff)) + threshold = builder.urem(threshold, rand_range_excl_64) + + cond_block = builder.append_basic_block("bounded_cond_block") + loop_block = builder.append_basic_block("bounded_loop_block") + out_block = builder.append_basic_block("bounded_out_block") + + builder.branch(cond_block) + + # Condition: leftover < threshold + builder.position_at_end(cond_block) + leftover = builder.load(leftover_ptr) + do_next = builder.icmp_unsigned("<", leftover, threshold) + builder.cbranch(do_next, loop_block, out_block) + + # Loop block: + # m = ((uint64_t)next_uint32(bitgen_state)) * rng_excl; + # leftover = m & 0xffffffff + # result = m >> 32 + builder.position_at_end(loop_block) + builder.call(gen_int32, [state, out_ptr]) + + val = builder.load(out_ptr) + val64 = builder.zext(val, rand_range_excl_64.type) + m = builder.mul(val64, rand_range_excl_64) + + leftover = builder.and_(m, m.type(0xffffffff)) + builder.store(leftover, leftover_ptr) + + out_val = builder.lshr(m, m.type(32)) + out_val = builder.trunc(out_val, out_ptr.type.pointee) + out_val = builder.add(out_val, lower) + builder.store(out_val, out_ptr) + builder.branch(cond_block) + + + builder.position_at_end(out_block) + builder.ret_void() + + def _setup_philox_rand_double(ctx, state_ty, gen_int64): # Generate random float number generator function double_ty = ir.DoubleType() @@ -2087,6 +2170,7 @@ def setup_philox(ctx): _setup_rand_binomial(ctx, state_ty, gen_double, prefix="philox") gen_int32 = _setup_philox_rand_int32(ctx, state_ty, gen_int64) + _setup_rand_lemire_int32(ctx, state_ty, gen_int32) gen_float = _setup_philox_rand_float(ctx, state_ty, gen_int32) _setup_philox_rand_normal(ctx, state_ty, gen_float, gen_int32, _wi_float_data, _ki_i32_data, _fi_float_data) _setup_rand_binomial(ctx, state_ty, gen_float, prefix="philox") diff --git a/tests/llvm/test_builtins_philox_random.py b/tests/llvm/test_builtins_philox_random.py index 56b6485b75..764e3049f5 100644 --- a/tests/llvm/test_builtins_philox_random.py +++ b/tests/llvm/test_builtins_philox_random.py @@ -16,7 +16,6 @@ (0xfeedcafe, [14360762734736817955, 5188080951818105836, 
1417692977344505657, 15919241602363537044, 11006348070701344872, 12539562470140893435]), ]) def test_random_int64(benchmark, mode, seed, expected): - res = [] if mode == 'numpy': state = np.random.Philox([np.int64(seed).astype(np.uint64)]) prng = np.random.Generator(state) @@ -60,12 +59,65 @@ def f(): benchmark(f) +@pytest.mark.benchmark(group="Philox integer PRNG") +@pytest.mark.parametrize('mode', ['numpy', + pytest.param('LLVM', marks=pytest.mark.llvm), + pytest.helpers.cuda_param('PTX')]) +@pytest.mark.parametrize("bounds, expected", + [((0xffffffff,), [582496169, 60417458, 4027530181, 1107101889, 1659784452, 2025357889]), + ((15,), [2, 0, 14, 3, 5, 7]), + ((0,15), [2, 0, 14, 3, 5, 7]), + ((5,0xffff), [8892, 926, 61454, 16896, 25328, 30906]), + ], ids=lambda x: str(x) if len(x) != 6 else "") +def test_random_int32_bounded(benchmark, mode, bounds, expected): + if mode == 'numpy': + state = np.random.Philox([SEED]) + prng = np.random.Generator(state) + + def f(): + return prng.integers(*bounds, dtype=np.uint32, endpoint=False) + + elif mode == 'LLVM': + init_fun = pnlvm.LLVMBinaryFunction.get('__pnl_builtin_philox_rand_init') + state = init_fun.np_buffer_for_arg(0) + init_fun(state, SEED) + + gen_fun = pnlvm.LLVMBinaryFunction.get('__pnl_builtin_philox_rand_int32_bounded') + + def f(): + lower, upper = bounds if len(bounds) == 2 else (0, bounds[0]) + out = gen_fun.np_buffer_for_arg(3) + gen_fun(state, lower, upper, out) + return out + + elif mode == 'PTX': + init_fun = pnlvm.LLVMBinaryFunction.get('__pnl_builtin_philox_rand_init') + state_size = init_fun.np_buffer_for_arg(0).nbytes + gpu_state = pnlvm.jit_engine.pycuda.driver.mem_alloc(state_size) + init_fun.cuda_call(gpu_state, np.int64(SEED)) + + gen_fun = pnlvm.LLVMBinaryFunction.get('__pnl_builtin_philox_rand_int32_bounded') + out = gen_fun.np_buffer_for_arg(3) + gpu_out = pnlvm.jit_engine.pycuda.driver.Out(out) + + def f(): + lower, upper = bounds if len(bounds) == 2 else (0, bounds[0]) + gen_fun.cuda_call(gpu_state, np.uint32(lower), np.uint32(upper), gpu_out) + return out.copy() + + else: + assert False, "Unknown mode: {}".format(mode) + + # Get >4 samples to force regeneration of Philox buffer + res = [f(), f(), f(), f(), f(), f()] + np.testing.assert_allclose(res, expected) + benchmark(f) + @pytest.mark.benchmark(group="Philox integer PRNG") @pytest.mark.parametrize('mode', ['numpy', pytest.param('LLVM', marks=pytest.mark.llvm), pytest.helpers.cuda_param('PTX')]) def test_random_int32(benchmark, mode): - res = [] if mode == 'numpy': state = np.random.Philox([SEED]) prng = np.random.Generator(state)\ @@ -114,7 +166,6 @@ def f(): pytest.param('LLVM', marks=pytest.mark.llvm), pytest.helpers.cuda_param('PTX')]) def test_random_double(benchmark, mode): - res = [] if mode == 'numpy': state = np.random.Philox([SEED]) prng = np.random.Generator(state) @@ -161,7 +212,6 @@ def f(): pytest.param('LLVM', marks=pytest.mark.llvm), pytest.helpers.cuda_param('PTX')]) def test_random_float(benchmark, mode): - res = [] if mode == 'numpy': state = np.random.Philox([SEED]) prng = np.random.Generator(state) From 157d13984518c128a9db9dce96b62851e792e1ec Mon Sep 17 00:00:00 2001 From: Jan Vesely Date: Wed, 13 Nov 2024 14:47:52 -0500 Subject: [PATCH 23/34] llvm: Implement range integer generation to match old Numpy's Uses bit masked rejection sampling of lower bits. Matches to Numpy's Random.randint API call. 
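A minimal Python sketch of the scheme (a hypothetical helper, not part of the patch; `next_uint32` stands in for a raw 32-bit draw from the generator):

    import random

    def bounded_randint(next_uint32, lower, upper):
        # Draw from [lower, upper) by masking the low bits of each raw
        # sample and rejecting values outside the range.
        rng = upper - lower                 # exclusive range; assumes upper > lower
        mask = (1 << rng.bit_length()) - 1  # smallest all-ones mask covering rng
                                            # (the ctlz + lshr in the IR)
        while True:
            val = next_uint32() & mask      # keep only the low bits
            if val < rng:                   # otherwise reject and redraw
                return lower + val

    src = random.Random(0)
    print(bounded_randint(lambda: src.getrandbits(32), 5, 0xffff))

Unlike the Lemire multiply-and-shift method added for the Philox "integers" path in the previous patch, this masked rejection loop reproduces the sequence of the legacy RandomState.randint API.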
Signed-off-by: Jan Vesely --- psyneulink/core/llvm/builtins.py | 47 ++++++++++++++++++-- tests/llvm/test_builtins_mt_random.py | 63 ++++++++++++++++++++++++++- 2 files changed, 104 insertions(+), 6 deletions(-) diff --git a/psyneulink/core/llvm/builtins.py b/psyneulink/core/llvm/builtins.py index 8b7599ab5b..f965c819ad 100644 --- a/psyneulink/core/llvm/builtins.py +++ b/psyneulink/core/llvm/builtins.py @@ -658,10 +658,11 @@ def _setup_mt_rand_init(ctx, state_ty, init_scalar): return builder.function -def _setup_mt_rand_integer(ctx, state_ty): +def _setup_mt_rand_int32(ctx, state_ty): int64_ty = ir.IntType(64) + # Generate random number generator function. - # It produces random 32bit numberin a 64bit word + # It produces random 32bit number in a 64bit word builder = _setup_builtin_func_builder(ctx, "mt_rand_int32", (state_ty.as_pointer(), int64_ty.as_pointer())) state, out = builder.function.args @@ -759,6 +760,43 @@ def _setup_mt_rand_integer(ctx, state_ty): return builder.function + +def _setup_rand_bounded_int32(ctx, state_ty, gen_int32): + + out_ty = gen_int32.args[1].type.pointee + builder = _setup_builtin_func_builder(ctx, gen_int32.name + "_bounded", (state_ty.as_pointer(), ctx.int32_ty, ctx.int32_ty, out_ty.as_pointer())) + state, lower, upper, out_ptr = builder.function.args + + rand_range_excl = builder.sub(upper, lower) + rand_range_excl = builder.zext(rand_range_excl, out_ty) + + range_leading_zeros = builder.ctlz(rand_range_excl, ctx.bool_ty(1)) + mask = builder.lshr(range_leading_zeros.type(-1), range_leading_zeros) + + loop_block = builder.append_basic_block("bounded_loop_block") + out_block = builder.append_basic_block("bounded_out_block") + + builder.branch(loop_block) + + # Loop: + # do: + # r = random() & mask + # while r >= limit + builder.position_at_end(loop_block) + + builder.call(gen_int32, [state, out_ptr]) + val = builder.load(out_ptr) + val = builder.and_(val, mask) + + is_above_limit = builder.icmp_unsigned(">=", val, rand_range_excl) + builder.cbranch(is_above_limit, loop_block, out_block) + + builder.position_at_end(out_block) + offset = builder.zext(lower, val.type) + result = builder.add(val, offset) + builder.store(result, out_ptr) + builder.ret_void() + def _setup_mt_rand_float(ctx, state_ty, gen_int): """ Mersenne Twister double prcision random number generation. 
@@ -893,8 +931,9 @@ def setup_mersenne_twister(ctx): init_scalar = _setup_mt_rand_init_scalar(ctx, state_ty) _setup_mt_rand_init(ctx, state_ty, init_scalar) - gen_int = _setup_mt_rand_integer(ctx, state_ty) - gen_float = _setup_mt_rand_float(ctx, state_ty, gen_int) + gen_int32 = _setup_mt_rand_int32(ctx, state_ty) + _setup_rand_bounded_int32(ctx, state_ty, gen_int32) + gen_float = _setup_mt_rand_float(ctx, state_ty, gen_int32) _setup_mt_rand_normal(ctx, state_ty, gen_float) _setup_rand_binomial(ctx, state_ty, gen_float, prefix="mt") diff --git a/tests/llvm/test_builtins_mt_random.py b/tests/llvm/test_builtins_mt_random.py index b9484e4a60..d3e30fd507 100644 --- a/tests/llvm/test_builtins_mt_random.py +++ b/tests/llvm/test_builtins_mt_random.py @@ -6,12 +6,70 @@ SEED = 0 +@pytest.mark.benchmark(group="Mersenne Twister bounded integer PRNG") +@pytest.mark.parametrize('mode', ['numpy', + pytest.param('LLVM', marks=pytest.mark.llvm), + pytest.helpers.cuda_param('PTX')]) +@pytest.mark.parametrize("bounds, expected", + [((0xffffffff,), [3626764237, 1654615998, 3255389356, 3823568514, 1806341205]), + ((14,), [13, 12, 2, 5, 4]), + ((0,14), [13, 12, 2, 5, 4]), + ((5,0xffff), [2002, 28611, 19633, 1671, 37978]), + ], ids=lambda x: str(x) if len(x) != 5 else "") +# Python uses sampling of upper bits (vs. lower bits in Numpy). Skip it in this test. +def test_random_int32_bounded(benchmark, mode, bounds, expected): + + if mode == 'numpy': + # Numpy promotes elements to int64 + state = np.random.RandomState([SEED]) + + def f(): + return state.randint(*bounds, dtype=np.uint32) + + elif mode == 'LLVM': + init_fun = pnlvm.LLVMBinaryFunction.get('__pnl_builtin_mt_rand_init') + state = init_fun.np_buffer_for_arg(0) + + init_fun(state, SEED) + + gen_fun = pnlvm.LLVMBinaryFunction.get('__pnl_builtin_mt_rand_int32_bounded') + + def f(): + lower, upper = bounds if len(bounds) == 2 else (0, bounds[0]) + out = gen_fun.np_buffer_for_arg(3) + gen_fun(state, lower, upper, out) + return out + + elif mode == 'PTX': + init_fun = pnlvm.LLVMBinaryFunction.get('__pnl_builtin_mt_rand_init') + + state_size = init_fun.np_buffer_for_arg(0).nbytes + gpu_state = pnlvm.jit_engine.pycuda.driver.mem_alloc(state_size) + + init_fun.cuda_call(gpu_state, np.int32(SEED)) + + gen_fun = pnlvm.LLVMBinaryFunction.get('__pnl_builtin_mt_rand_int32_bounded') + out = gen_fun.np_buffer_for_arg(3) + gpu_out = pnlvm.jit_engine.pycuda.driver.Out(out) + + def f(): + lower, upper = bounds if len(bounds) == 2 else (0, bounds[0]) + gen_fun.cuda_call(gpu_state, np.uint32(lower), np.uint32(upper), gpu_out) + return out.copy() + + else: + assert False, "Unknown mode: {}".format(mode) + + res = [f(), f(), f(), f(), f()] + np.testing.assert_allclose(res, expected) + benchmark(f) + @pytest.mark.benchmark(group="Mersenne Twister integer PRNG") @pytest.mark.parametrize('mode', ['Python', 'numpy', pytest.param('LLVM', marks=pytest.mark.llvm), pytest.helpers.cuda_param('PTX')]) def test_random_int32(benchmark, mode): - res = [] + if mode == 'Python': state = random.Random(SEED) @@ -67,7 +125,7 @@ def f(): pytest.param('LLVM', marks=pytest.mark.llvm), pytest.helpers.cuda_param('PTX')]) def test_random_float(benchmark, mode): - res = [] + if mode == 'Python': # Python treats every seed as array state = random.Random(SEED) @@ -124,6 +182,7 @@ def f(): pytest.helpers.cuda_param('PTX')]) # Python uses different algorithm so skip it in this test def test_random_normal(benchmark, mode): + if mode == 'numpy': # numpy promotes elements to int64 state = 
np.random.RandomState([SEED]) From 443c6e1f79ee43c09bbea3beb95b8050a5045004 Mon Sep 17 00:00:00 2001 From: Jan Vesely Date: Wed, 20 Nov 2024 16:31:20 -0500 Subject: [PATCH 24/34] llvm/OneHot: Implement support for RANDOM tie resolution Signed-off-by: Jan Vesely --- .../functions/nonstateful/selectionfunctions.py | 16 ++++++++++++++-- psyneulink/core/llvm/builder_context.py | 17 +++++++++++++++++ tests/functions/test_selection.py | 2 -- 3 files changed, 31 insertions(+), 4 deletions(-) diff --git a/psyneulink/core/components/functions/nonstateful/selectionfunctions.py b/psyneulink/core/components/functions/nonstateful/selectionfunctions.py index 28b3830c03..dd652265a8 100644 --- a/psyneulink/core/components/functions/nonstateful/selectionfunctions.py +++ b/psyneulink/core/components/functions/nonstateful/selectionfunctions.py @@ -428,9 +428,9 @@ def _gen_llvm_function_body(self, ctx, builder, params, state, arg_in, arg_out, sum_ptr = builder.alloca(ctx.float_ty) builder.store(sum_ptr.type.pointee(-0.0), sum_ptr) - random_draw_ptr = builder.alloca(ctx.float_ty) rand_state_ptr = ctx.get_random_state_ptr(builder, self, state, params) rng_f = ctx.get_uniform_dist_function_by_state(rand_state_ptr) + random_draw_ptr = builder.alloca(rng_f.args[-1].type.pointee) builder.call(rng_f, [rand_state_ptr, random_draw_ptr]) random_draw = builder.load(random_draw_ptr) @@ -534,8 +534,20 @@ def _gen_llvm_function_body(self, ctx, builder, params, state, arg_in, arg_out, extreme_start = num_extremes_ptr.type.pointee(0) extreme_stop = builder.load(num_extremes_ptr) + elif tie == RANDOM: + rand_state_ptr = ctx.get_random_state_ptr(builder, self, state, params) + rand_f = ctx.get_rand_int_function_by_state(rand_state_ptr) + random_draw_ptr = builder.alloca(rand_f.args[-1].type.pointee) + num_extremes = builder.load(num_extremes_ptr) + + builder.call(rand_f, [rand_state_ptr, ctx.int32_ty(0), num_extremes, random_draw_ptr]) + + extreme_start = builder.load(random_draw_ptr) + extreme_start = builder.trunc(extreme_start, ctx.int32_ty) + extreme_stop = builder.add(extreme_start, extreme_start.type(1)) + else: - assert False + assert False, "Unknown tie resolution: {}".format(tie) extreme_val = builder.load(extreme_val_ptr) diff --git a/psyneulink/core/llvm/builder_context.py b/psyneulink/core/llvm/builder_context.py index 0dcb6bae85..7fcd4224cd 100644 --- a/psyneulink/core/llvm/builder_context.py +++ b/psyneulink/core/llvm/builder_context.py @@ -210,29 +210,46 @@ def init_builtins(self): if "time_stat" in debug_env: print("Time to setup PNL builtins: {}".format(finish - start)) + def get_rand_int_function_by_state(self, state): + if len(state.type.pointee) == 5: + return self.import_llvm_function("__pnl_builtin_mt_rand_int32_bounded") + + elif len(state.type.pointee) == 7: + # we have different versions based on selected FP precision + return self.import_llvm_function("__pnl_builtin_philox_rand_int32_bounded") + + else: + assert False, "Unknown PRNG type!" + def get_uniform_dist_function_by_state(self, state): if len(state.type.pointee) == 5: return self.import_llvm_function("__pnl_builtin_mt_rand_double") + elif len(state.type.pointee) == 7: # we have different versions based on selected FP precision return self.import_llvm_function("__pnl_builtin_philox_rand_{}".format(str(self.float_ty))) + else: assert False, "Unknown PRNG type!" 
def get_binomial_dist_function_by_state(self, state): if len(state.type.pointee) == 5: return self.import_llvm_function("__pnl_builtin_mt_rand_binomial") + elif len(state.type.pointee) == 7: return self.import_llvm_function("__pnl_builtin_philox_rand_binomial") + else: assert False, "Unknown PRNG type!" def get_normal_dist_function_by_state(self, state): if len(state.type.pointee) == 5: return self.import_llvm_function("__pnl_builtin_mt_rand_normal") + elif len(state.type.pointee) == 7: # Normal exists only for self.float_ty return self.import_llvm_function("__pnl_builtin_philox_rand_normal") + else: assert False, "Unknown PRNG type!" diff --git a/tests/functions/test_selection.py b/tests/functions/test_selection.py index 5ee96801fc..dea0ab9e06 100644 --- a/tests/functions/test_selection.py +++ b/tests/functions/test_selection.py @@ -254,8 +254,6 @@ def test_basic(func, variable, params, expected, benchmark, func_mode): ], ids=lambda x: x if isinstance(x, str) else str(getattr(x, 'shape', '')) ) @pytest.mark.parametrize("indicator", ["indicator", "value"]) def test_one_hot_mode_deterministic(benchmark, variable, tie, indicator, direction, abs_val, expected, func_mode): - if func_mode != "Python" and tie == kw.RANDOM: - pytest.skip("not implemented") f = pnl.OneHot(default_variable=np.zeros_like(variable), mode=kw.DETERMINISTIC, From 8822de05d78e6affb168e1bfa59ea4f953b0a6e3 Mon Sep 17 00:00:00 2001 From: kmantel <1592123+kmantel@users.noreply.github.com> Date: Wed, 20 Nov 2024 20:33:05 -0500 Subject: [PATCH 25/34] Component: make deprecated arg error via illegal args; deprecate 'size' (#3123) --- psyneulink/core/components/component.py | 19 ++++++++++++++++++ tests/components/test_component.py | 26 +++++++++++++++++++++++++ 2 files changed, 45 insertions(+) diff --git a/psyneulink/core/components/component.py b/psyneulink/core/components/component.py index fa00fbf1be..8beb4fc7c6 100644 --- a/psyneulink/core/components/component.py +++ b/psyneulink/core/components/component.py @@ -931,6 +931,9 @@ class Component(MDFSerializable, metaclass=ComponentsMeta): componentType = None standard_constructor_args = {EXECUTE_UNTIL_FINISHED, FUNCTION_PARAMS, MAX_EXECUTIONS_BEFORE_FINISHED, RESET_STATEFUL_FUNCTION_WHEN, INPUT_SHAPES} + deprecated_constructor_args = { + 'size': 'input_shapes', + } # helper attributes for MDF model spec _model_spec_id_parameters = 'parameters' @@ -2150,8 +2153,11 @@ def alias_conflicts(alias, passed_name): conflicting_aliases = [] unused_constructor_args = {} + deprecated_args = {} for p in self.parameters: if p.name in illegal_passed_args: + # p must have a constructor_argument, because otherwise + # p.name would not be in illegal_passed_args assert p.constructor_argument is not None unused_constructor_args[p.name] = p.constructor_argument @@ -2164,6 +2170,12 @@ def alias_conflicts(alias, passed_name): if alias_conflicts(p, passed_name): conflicting_aliases.append((p.source.name, passed_name, p.name)) + for arg in illegal_passed_args: + try: + deprecated_args[arg] = self.deprecated_constructor_args[arg] + except KeyError: + continue + # raise constructor arg errors if len(unused_constructor_args) > 0: raise create_illegal_argument_error([ @@ -2171,6 +2183,13 @@ def alias_conflicts(alias, passed_name): for arg, constr_arg in unused_constructor_args.items() ]) + # raise deprecated argument errors + if len(deprecated_args) > 0: + raise create_illegal_argument_error([ + f"'{arg}' is deprecated. 
Use '{new_arg}' instead"
+                for arg, new_arg in deprecated_args.items()
+            ])
+
         # raise generic illegal argument error
         unknown_args = illegal_passed_args.difference(unused_constructor_args)
         if len(unknown_args) > 0:

diff --git a/tests/components/test_component.py b/tests/components/test_component.py
index 08237bec3e..668e738c72 100644
--- a/tests/components/test_component.py
+++ b/tests/components/test_component.py
@@ -130,6 +130,13 @@ def test_execute_manual_context(self, component_type):
 
 class TestConstructorArguments:
     class NewTestMech(pnl.Mechanism_Base):
+        deprecated_constructor_args = {
+            **pnl.Mechanism_Base.deprecated_constructor_args,
+            **{
+                'deprecated_param': 'new_param',
+            }
+        }
+
         class Parameters(pnl.Mechanism_Base.Parameters):
             cca_param = pnl.Parameter('A', constructor_argument='cca_constr')
             param_with_alias = pnl.Parameter(None, constructor_argument='pwa_constr_arg', aliases=['pwa_alias'])
@@ -226,6 +233,25 @@ def test_invalid_argument(self, cls_, argument_name, param_value, params_dict_en
             constr_arg = getattr(cls_.parameters, argument_name).constructor_argument
             assert f"'{argument_name}': must use '{constr_arg}' instead" in str(err.value)
 
+    @pytest.mark.parametrize(
+        'cls_, argument_name, new_name',
+        [
+            (NewTestMech, 'deprecated_param', 'new_param'),
+            (NewTestMech, 'size', 'input_shapes'),
+            (pnl.TransferMechanism, 'size', 'input_shapes'),
+        ]
+    )
+    @pytest.mark.parametrize('params_dict_entry', [NotImplemented, 'params'])
+    def test_invalid_argument_deprecated(self, cls_, argument_name, new_name, params_dict_entry):
+        with pytest.raises(
+            pnl.ComponentError,
+            match=(
+                rf".*Illegal argument in constructor \(type: {cls_.__name__}\):"
+                f"\n\t'{argument_name}' is deprecated. Use '{new_name}' instead"
+            )
+        ):
+            cls_(**nest_dictionary({argument_name: new_name}, params_dict_entry))
+
     @pytest.mark.parametrize(
         'cls_, param_name, param_value, alias_name, alias_value',
         [

From 735890d60cb0a8b99204d8d89a08b2cff6c17b4a Mon Sep 17 00:00:00 2001
From: Jan Vesely
Date: Thu, 21 Nov 2024 00:15:12 -0500
Subject: [PATCH 26/34] llvm/OneHot: Simplify PROB/PROB_INDICATOR implementation

It no longer needs to fit the search for extreme values.
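The selection rule is plain cumulative-probability sampling; a minimal numpy sketch of the semantics (illustrative only, not the generated code):

    import numpy as np

    def prob_one_hot(values, probs, r, indicator=False):
        # Select the element whose cumulative-probability bin contains
        # the uniform draw r, i.e. cum[idx-1] <= r < cum[idx].
        cum = np.cumsum(probs)
        idx = int(np.searchsorted(cum, r, side="right"))
        out = np.zeros(len(values))
        out[idx] = 1.0 if indicator else values[idx]
        return out

    print(prob_one_hot([4., 2., 9.], [0.2, 0.5, 0.3], r=0.6))  # -> [0. 2. 0.]

Each output slot is written exactly once, with either the selected value (PROB), a 1 (PROB_INDICATOR), or zero, so a single pass over the input suffices and no "best so far" bookkeeping is needed.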
Signed-off-by: Jan Vesely --- .../nonstateful/selectionfunctions.py | 19 ++----------------- 1 file changed, 2 insertions(+), 17 deletions(-) diff --git a/psyneulink/core/components/functions/nonstateful/selectionfunctions.py b/psyneulink/core/components/functions/nonstateful/selectionfunctions.py index dd652265a8..defd01e050 100644 --- a/psyneulink/core/components/functions/nonstateful/selectionfunctions.py +++ b/psyneulink/core/components/functions/nonstateful/selectionfunctions.py @@ -422,8 +422,6 @@ def _validate_params(self, request_set, target_set=None, context=None): def _gen_llvm_function_body(self, ctx, builder, params, state, arg_in, arg_out, *, tags:frozenset): if self.mode in {PROB, PROB_INDICATOR}: - best_idx_ptr = builder.alloca(ctx.int32_ty) - builder.store(best_idx_ptr.type.pointee(0), best_idx_ptr) sum_ptr = builder.alloca(ctx.float_ty) builder.store(sum_ptr.type.pointee(-0.0), sum_ptr) @@ -438,8 +436,6 @@ def _gen_llvm_function_body(self, ctx, builder, params, state, arg_in, arg_out, arg_in = builder.gep(arg_in, [ctx.int32_ty(0), ctx.int32_ty(0)]) with pnlvm.helpers.array_ptr_loop(builder, arg_in, "search") as (b1, idx): - best_idx = b1.load(best_idx_ptr) - best_ptr = b1.gep(arg_in, [ctx.int32_ty(0), best_idx]) current_ptr = b1.gep(arg_in, [ctx.int32_ty(0), idx]) current = b1.load(current_ptr) @@ -454,25 +450,14 @@ def _gen_llvm_function_body(self, ctx, builder, params, state, arg_in, arg_out, new_above = b1.fcmp_ordered("<", random_draw, sum_new) cond = b1.and_(new_above, old_below) - cmp_prev = current.type(1.0) - cmp_curr = b1.select(cond, cmp_prev, cmp_prev.type(0.0)) - cmp_op = "==" if self.mode == PROB: val = current else: val = current.type(1.0) - prev_res_ptr = b1.gep(arg_out, [ctx.int32_ty(0), best_idx]) + write_val = b1.select(cond, val, val.type(0.0)) cur_res_ptr = b1.gep(arg_out, [ctx.int32_ty(0), idx]) - - # Make sure other elements are zeroed - builder.store(cur_res_ptr.type.pointee(0), cur_res_ptr) - - cmp_res = builder.fcmp_unordered(cmp_op, cmp_curr, cmp_prev) - with builder.if_then(cmp_res): - builder.store(prev_res_ptr.type.pointee(0), prev_res_ptr) - builder.store(val, cur_res_ptr) - builder.store(idx, best_idx_ptr) + builder.store(write_val, cur_res_ptr) return builder From 06bff2b3e11a930737c6802f49e7ff83253beb30 Mon Sep 17 00:00:00 2001 From: Jan Vesely Date: Thu, 21 Nov 2024 09:24:08 -0500 Subject: [PATCH 27/34] tests/llvm/random: Use array_equal to test integer results Signed-off-by: Jan Vesely --- tests/llvm/test_builtins_mt_random.py | 4 ++-- tests/llvm/test_builtins_philox_random.py | 11 +++++------ 2 files changed, 7 insertions(+), 8 deletions(-) diff --git a/tests/llvm/test_builtins_mt_random.py b/tests/llvm/test_builtins_mt_random.py index d3e30fd507..37a1730c9d 100644 --- a/tests/llvm/test_builtins_mt_random.py +++ b/tests/llvm/test_builtins_mt_random.py @@ -61,7 +61,7 @@ def f(): assert False, "Unknown mode: {}".format(mode) res = [f(), f(), f(), f(), f()] - np.testing.assert_allclose(res, expected) + np.testing.assert_array_equal(res, expected) benchmark(f) @pytest.mark.benchmark(group="Mersenne Twister integer PRNG") @@ -116,7 +116,7 @@ def f(): assert False, "Unknown mode: {}".format(mode) res = [f(), f(), f(), f(), f()] - np.testing.assert_allclose(res, [3626764237, 1654615998, 3255389356, 3823568514, 1806341205]) + np.testing.assert_array_equal(res, [3626764237, 1654615998, 3255389356, 3823568514, 1806341205]) benchmark(f) diff --git a/tests/llvm/test_builtins_philox_random.py b/tests/llvm/test_builtins_philox_random.py index 
764e3049f5..8553e6054e 100644
--- a/tests/llvm/test_builtins_philox_random.py
+++ b/tests/llvm/test_builtins_philox_random.py
@@ -55,7 +55,7 @@ def f():
 
     # Get >4 samples to force regeneration of Philox buffer
     res = [f(), f(), f(), f(), f(), f()]
-    np.testing.assert_allclose(res, expected)
+    np.testing.assert_array_equal(res, expected)
     benchmark(f)
 
 
@@ -64,7 +64,7 @@ def f():
                                   pytest.param('LLVM', marks=pytest.mark.llvm),
                                   pytest.helpers.cuda_param('PTX')])
 @pytest.mark.parametrize("bounds, expected",
-                         [((0xffffffff,), [582496169, 60417458, 4027530181, 1107101889, 1659784452, 2025357889]),
+                         [((0xffffffff,), [582496168, 60417457, 4027530180, 1107101888, 1659784451, 2025357888]),
                           ((15,), [2, 0, 14, 3, 5, 7]),
                           ((0,15), [2, 0, 14, 3, 5, 7]),
                           ((5,0xffff), [8892, 926, 61454, 16896, 25328, 30906]),
@@ -110,7 +110,7 @@ def f():
 
     # Get >4 samples to force regeneration of Philox buffer
     res = [f(), f(), f(), f(), f(), f()]
-    np.testing.assert_allclose(res, expected)
+    np.testing.assert_array_equal(res, expected)
     benchmark(f)
 
 @pytest.mark.benchmark(group="Philox integer PRNG")
@@ -157,7 +157,7 @@ def f():
 
     # Get >4 samples to force regeneration of Philox buffer
     res = [f(), f(), f(), f(), f(), f()]
-    np.testing.assert_allclose(res, [582496169, 60417458, 4027530181, 1107101889, 1659784452, 2025357889])
+    np.testing.assert_array_equal(res, [582496169, 60417458, 4027530181, 1107101889, 1659784452, 2025357889])
     benchmark(f)
 
 
@@ -257,8 +257,7 @@ def f():
 @pytest.mark.parametrize('mode', ['numpy',
                                   pytest.param('LLVM', marks=pytest.mark.llvm),
                                   pytest.helpers.cuda_param('PTX')])
-@pytest.mark.parametrize('fp_type', [pnlvm.ir.DoubleType(), pnlvm.ir.FloatType()],
-                         ids=str)
+@pytest.mark.parametrize('fp_type', [pnlvm.ir.DoubleType(), pnlvm.ir.FloatType()], ids=str)
 def test_random_normal(benchmark, mode, fp_type):
     if mode != 'numpy':
         # Instantiate builder context with the desired type

From ad1e74396ddd07368fd4b5509b2c74d69478a52b Mon Sep 17 00:00:00 2001
From: jdcpni
Date: Fri, 22 Nov 2024 07:39:41 -0500
Subject: [PATCH 28/34] refactor/emcomposition_field_handling (#3122)
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

* • emcomposition.py
  _parse_fields: clean up assignment of self.num_fields

• test_emcomposition.py
  add test_assign_field_weights_and_0_vs_None()
  add test_field_weights_all_None_and_or_0

• emcomposition.py
  - revamp docstring to document new mods
  - add fields arg to specify field_names, field_weights, learn_field_weights
  - implement fields arg to specify field_names, field_weights,
    learn_field_weights in dict format
  - implement support for field-specific learn_field_weight specifications
  - _identify_target_nodes(): refactor to use target_fields instead of
    learn_field_weights
  - add target_fields to fields specification dict
  - add dict spec for entries in fields arg
  - start adding field_idx to all components
  - add self._field_index_map

• pytorchEMcompositionwrapper.py
  - store_memory(): use self._field_index_map to assign memories to fields

• test_emcomposition.py
  - test_backpropagation_of_error_in_learning(): use EGO model to test for
    error backpropagation through EMComposition
  - test_field_args_and_map_assignments(): flesh out _field_index_map
    validation

* • emcomposition.py
  - update docstring figs
  - add purge_by_field_weights Parameter

* • autodiffcomposition.py
  - infer_backpropagation_learning_pathways(): add NodeRole.BIAS to pathways
    constructed for learning
---
 .../CSW => }/Environment.py                   |    0
 .../DeclanParams.py                           |   21 +-
 .../EGO CSW Model (using RNN).py              |    0
.../EGO CSW Model.py | 51 +- .../Coffee Shop World/Environment.py | 54 + .../Figures/EGO CSW Model (PyTorch).pdf | Bin .../Figures/EGO CSW Model (basic).pdf | Bin .../EGO CSW Model (learning and store).pdf | Bin .../Figures/EGO CSW Model (learning).pdf | Bin ...EGO CSW Model - EM (with PNL learning).pdf | Bin .../Figures/EGO CSW Model - EM.pdf | Bin .../Figures/EGO Paper Figure.jpg | Bin .../Figures/EMComposition (example BIG).pdf | Bin .../ScriptControl.py | 6 +- .../{CSW => Coffee Shop World}/TestParams.py | 22 +- .../{CSW => Coffee Shop World}/__init__.py | 0 .../_static/EMComposition_Example_fig.svg | 150 +- .../EMComposition_field_weights_different.pdf | Bin 0 -> 32265 bytes .../EMComposition_field_weights_different.svg | 312 ++-- .../EMComposition_field_weights_equal_fig.svg | 313 ++-- .../nonstateful/transformfunctions.py | 2 +- psyneulink/core/components/ports/inputport.py | 3 +- psyneulink/core/compositions/composition.py | 2 +- .../modulatory/learning/EMstoragemechanism.py | 2 +- .../compositions/autodiffcomposition.py | 25 +- .../library/compositions/emcomposition.py | 1332 +++++++++-------- .../pytorchEMcompositionwrapper.py | 33 +- tests/composition/test_emcomposition.py | 640 +++++++- 28 files changed, 1930 insertions(+), 1038 deletions(-) rename Scripts/{Models (Under Development)/EGO/Using EMComposition/CSW => }/Environment.py (100%) rename Scripts/Models (Under Development)/EGO/Using EMComposition/{CSW => Coffee Shop World}/DeclanParams.py (84%) rename Scripts/Models (Under Development)/EGO/Using EMComposition/{CSW => Coffee Shop World}/EGO CSW Model (using RNN).py (100%) rename Scripts/Models (Under Development)/EGO/Using EMComposition/{CSW => Coffee Shop World}/EGO CSW Model.py (91%) create mode 100644 Scripts/Models (Under Development)/EGO/Using EMComposition/Coffee Shop World/Environment.py rename Scripts/Models (Under Development)/EGO/Using EMComposition/{CSW => Coffee Shop World}/Figures/EGO CSW Model (PyTorch).pdf (100%) rename Scripts/Models (Under Development)/EGO/Using EMComposition/{CSW => Coffee Shop World}/Figures/EGO CSW Model (basic).pdf (100%) rename Scripts/Models (Under Development)/EGO/Using EMComposition/{CSW => Coffee Shop World}/Figures/EGO CSW Model (learning and store).pdf (100%) rename Scripts/Models (Under Development)/EGO/Using EMComposition/{CSW => Coffee Shop World}/Figures/EGO CSW Model (learning).pdf (100%) rename Scripts/Models (Under Development)/EGO/Using EMComposition/{CSW => Coffee Shop World}/Figures/EGO CSW Model - EM (with PNL learning).pdf (100%) rename Scripts/Models (Under Development)/EGO/Using EMComposition/{CSW => Coffee Shop World}/Figures/EGO CSW Model - EM.pdf (100%) rename Scripts/Models (Under Development)/EGO/Using EMComposition/{CSW => Coffee Shop World}/Figures/EGO Paper Figure.jpg (100%) rename Scripts/Models (Under Development)/EGO/Using EMComposition/{CSW => Coffee Shop World}/Figures/EMComposition (example BIG).pdf (100%) rename Scripts/Models (Under Development)/EGO/Using EMComposition/{CSW => Coffee Shop World}/ScriptControl.py (93%) rename Scripts/Models (Under Development)/EGO/Using EMComposition/{CSW => Coffee Shop World}/TestParams.py (86%) rename Scripts/Models (Under Development)/EGO/Using EMComposition/{CSW => Coffee Shop World}/__init__.py (100%) create mode 100644 docs/source/_static/EMComposition_field_weights_different.pdf diff --git a/Scripts/Models (Under Development)/EGO/Using EMComposition/CSW/Environment.py b/Scripts/Environment.py similarity index 100% rename from Scripts/Models (Under 
Development)/EGO/Using EMComposition/CSW/Environment.py rename to Scripts/Environment.py diff --git a/Scripts/Models (Under Development)/EGO/Using EMComposition/CSW/DeclanParams.py b/Scripts/Models (Under Development)/EGO/Using EMComposition/Coffee Shop World/DeclanParams.py similarity index 84% rename from Scripts/Models (Under Development)/EGO/Using EMComposition/CSW/DeclanParams.py rename to Scripts/Models (Under Development)/EGO/Using EMComposition/Coffee Shop World/DeclanParams.py index 7209121c18..c2dbbbf818 100644 --- a/Scripts/Models (Under Development)/EGO/Using EMComposition/CSW/DeclanParams.py +++ b/Scripts/Models (Under Development)/EGO/Using EMComposition/Coffee Shop World/DeclanParams.py @@ -39,10 +39,10 @@ def calc_prob(em_preds, test_ys): # Names: name = "EGO Model CSW", + em_name = "EM", state_input_layer_name = "STATE", previous_state_layer_name = "PREVIOUS STATE", context_layer_name = 'CONTEXT', - em_name = "EM", prediction_layer_name = "PREDICTION", # Structural @@ -50,10 +50,10 @@ def calc_prob(em_preds, test_ys): previous_state_d = 11, # length of state vector context_d = 11, # length of context vector memory_capacity = ALL, # number of entries in EM memory; ALL=> match to number of stims - # memory_init = (0,.0001), # Initialize memory with random values in interval - memory_init = None, # Initialize with zeros - concatenate_queries = False, - # concatenate_queries = True, + memory_init = (0,.0001), # Initialize memory with random values in interval + # memory_init = None, # Initialize with zeros + # concatenate_queries = False, + concatenate_queries = True, # environment # curriculum_type = 'Interleaved', @@ -63,20 +63,23 @@ def calc_prob(em_preds, test_ys): # Processing integration_rate = .69, # rate at which state is integrated into new context - # state_weight = 1, # weight of the state used during memory retrieval + # state_weight =normalize_field_weightsnormalize_field_weights 1, # weight of the state used during memory retrieval # context_weight = 1, # weight of the context used during memory retrieval - state_weight = .5, # weight of the state used during memory retrieval + previous_state_weight = .5, # weight of the state used during memory retrieval context_weight = .5, # weight of the context used during memory retrieval + state_weight = None, # weight of the state used during memory retrieval # normalize_field_weights = False, # whether to normalize the field weights during memory retrieval normalize_field_weights = True, # whether to normalize the field weights during memory retrieval + normalize_memories = False, # whether to normalize the memory during memory retrieval + # normalize_memories = True, # whether to normalize the memory during memory retrieval # softmax_temperature = None, # temperature of the softmax used during memory retrieval (smaller means more argmax-like softmax_temperature = .1, # temperature of the softmax used during memory retrieval (smaller means more argmax-like # softmax_temperature = ADAPTIVE, # temperature of the softmax used during memory retrieval (smaller means more argmax-like # softmax_temperature = CONTROL, # temperature of the softmax used during memory retrieval (smaller means more argmax-like # softmax_threshold = None, # threshold used to mask out small values in softmax softmax_threshold = .001, # threshold used to mask out small values in softmax - enable_learning=[False, False, True], # Enable learning for PREDICTION (STATE) but not CONTEXT or PREVIOUS STATE - learn_field_weights = False, + # 
target_fields=[True, False, False], # Enable learning for PREDICTION (STATE) but not CONTEXT or PREVIOUS STATE + enable_learning = True, loss_spec = Loss.BINARY_CROSS_ENTROPY, # loss_spec = Loss.MSE, learning_rate = .5, diff --git a/Scripts/Models (Under Development)/EGO/Using EMComposition/CSW/EGO CSW Model (using RNN).py b/Scripts/Models (Under Development)/EGO/Using EMComposition/Coffee Shop World/EGO CSW Model (using RNN).py similarity index 100% rename from Scripts/Models (Under Development)/EGO/Using EMComposition/CSW/EGO CSW Model (using RNN).py rename to Scripts/Models (Under Development)/EGO/Using EMComposition/Coffee Shop World/EGO CSW Model (using RNN).py diff --git a/Scripts/Models (Under Development)/EGO/Using EMComposition/CSW/EGO CSW Model.py b/Scripts/Models (Under Development)/EGO/Using EMComposition/Coffee Shop World/EGO CSW Model.py similarity index 91% rename from Scripts/Models (Under Development)/EGO/Using EMComposition/CSW/EGO CSW Model.py rename to Scripts/Models (Under Development)/EGO/Using EMComposition/Coffee Shop World/EGO CSW Model.py index 18d3ba419b..52423c7dbb 100644 --- a/Scripts/Models (Under Development)/EGO/Using EMComposition/CSW/EGO CSW Model.py +++ b/Scripts/Models (Under Development)/EGO/Using EMComposition/Coffee Shop World/EGO CSW Model.py @@ -174,7 +174,7 @@ 'CONTEXT', 'PREVIOUS STATE'], start=0) -state_retrieval_weight = 0 +state_retrieval_weight = None RANDOM_WEIGHTS_INITIALIZATION=RandomMatrix(center=0.0, range=0.1) # Matrix spec used to initialize all Projections if is_numeric_scalar(model_params['softmax_temperature']): # translate to gain of softmax retrieval function @@ -194,7 +194,7 @@ def construct_model(model_name:str=model_params['name'], state_size:int=model_params['state_d'], # Previous state - previous_state_input_name:str=model_params['previous_state_layer_name'], + previous_state_name:str=model_params['previous_state_layer_name'], # Context representation (learned): context_name:str=model_params['context_layer_name'], @@ -205,12 +205,15 @@ def construct_model(model_name:str=model_params['name'], em_name:str=model_params['em_name'], retrieval_softmax_gain=retrieval_softmax_gain, retrieval_softmax_threshold=model_params['softmax_threshold'], - state_retrieval_weight:Union[float,int]=state_retrieval_weight, - previous_state_retrieval_weight:Union[float,int]=model_params['state_weight'], + # state_retrieval_weight:Union[float,int]=state_retrieval_weight, + # previous_state_retrieval_weight:Union[float,int]=model_params['state_weight'], + state_retrieval_weight:Union[float,int]=model_params['state_weight'], + previous_state_retrieval_weight:Union[float,int]=model_params['previous_state_weight'], context_retrieval_weight:Union[float,int]=model_params['context_weight'], normalize_field_weights = model_params['normalize_field_weights'], + normalize_memories = model_params['normalize_memories'], concatenate_queries = model_params['concatenate_queries'], - learn_field_weights = model_params['learn_field_weights'], + enable_learning = model_params['enable_learning'], memory_capacity = memory_capacity, memory_init=model_params['memory_init'], @@ -219,7 +222,7 @@ def construct_model(model_name:str=model_params['name'], # Learning loss_spec=model_params['loss_spec'], - enable_learning=model_params['enable_learning'], + # target_fields=model_params['target_fields'], learning_rate = model_params['learning_rate'], device=model_params['device'] @@ -233,7 +236,7 @@ def construct_model(model_name:str=model_params['name'], # 
---------------------------------------------------------------------------------------------------------------- state_input_layer = ProcessingMechanism(name=state_input_name, input_shapes=state_size) - previous_state_layer = ProcessingMechanism(name=previous_state_input_name, input_shapes=state_size) + previous_state_layer = ProcessingMechanism(name=previous_state_name, input_shapes=state_size) # context_layer = ProcessingMechanism(name=context_name, input_shapes=context_size) context_layer = TransferMechanism(name=context_name, input_shapes=context_size, @@ -241,6 +244,8 @@ def construct_model(model_name:str=model_params['name'], integrator_mode=True, integration_rate=integration_rate) + + em = EMComposition(name=em_name, memory_template=[[0] * state_size, # state [0] * state_size, # previous state @@ -250,6 +255,15 @@ def construct_model(model_name:str=model_params['name'], memory_decay_rate=0, softmax_gain=retrieval_softmax_gain, softmax_threshold=retrieval_softmax_threshold, + fields = {state_input_name: {FIELD_WEIGHT: state_retrieval_weight, + LEARN_FIELD_WEIGHT: False, + TARGET_FIELD: True}, + previous_state_name: {FIELD_WEIGHT:previous_state_retrieval_weight, + LEARN_FIELD_WEIGHT: False, + TARGET_FIELD: False}, + context_name: {FIELD_WEIGHT:context_retrieval_weight, + LEARN_FIELD_WEIGHT: False, + TARGET_FIELD: False}}, # Input Nodes: # field_names=[state_input_name, # previous_state_input_name, @@ -259,19 +273,20 @@ def construct_model(model_name:str=model_params['name'], # previous_state_retrieval_weight, # context_retrieval_weight # ), - field_names=[previous_state_input_name, - context_name, - state_input_name, - ], - field_weights=(previous_state_retrieval_weight, - context_retrieval_weight, - state_retrieval_weight, - ), + # field_names=[previous_state_input_name, + # context_name, + # state_input_name, + # ], + # field_weights=(previous_state_retrieval_weight, + # context_retrieval_weight, + # state_retrieval_weight, + # ), normalize_field_weights=normalize_field_weights, + normalize_memories=normalize_memories, concatenate_queries=concatenate_queries, - learn_field_weights=learn_field_weights, - learning_rate=learning_rate, enable_learning=enable_learning, + learning_rate=learning_rate, + # target_fields=target_fields, device=device ) @@ -311,7 +326,7 @@ def construct_model(model_name:str=model_params['name'], em] previous_state_to_em_pathway = [previous_state_layer, MappingProjection(sender=previous_state_layer, - receiver=em.nodes[previous_state_input_name+QUERY], + receiver=em.nodes[previous_state_name+QUERY], matrix=IDENTITY_MATRIX, learnable=False), em] diff --git a/Scripts/Models (Under Development)/EGO/Using EMComposition/Coffee Shop World/Environment.py b/Scripts/Models (Under Development)/EGO/Using EMComposition/Coffee Shop World/Environment.py new file mode 100644 index 0000000000..124de532c8 --- /dev/null +++ b/Scripts/Models (Under Development)/EGO/Using EMComposition/Coffee Shop World/Environment.py @@ -0,0 +1,54 @@ +import numpy as np +import torch +from torch.utils.data import dataset +from random import randint + +def one_hot_encode(labels, num_classes): + """ + One hot encode labels and convert to tensor. 
+ """ + return torch.tensor((np.arange(num_classes) == labels[..., None]).astype(float),dtype=torch.float32) + +class DeterministicCSWDataset(dataset.Dataset): + def __init__(self, n_samples_per_context, contexts_to_load) -> None: + super().__init__() + raw_xs = np.array([ + [[9,1,3,5,7],[9,2,4,6,8]], + [[10,1,4,5,8],[10,2,3,6,7]] + ]) + + item_indices = np.random.choice(raw_xs.shape[1],sum(n_samples_per_context),replace=True) + task_names = [0,1] # Flexible so these can be renamed later + task_indices = [task_names.index(name) for name in contexts_to_load] + + context_indices = np.repeat(np.array(task_indices),n_samples_per_context) + self.xs = one_hot_encode(raw_xs[context_indices,item_indices],11) + + self.xs = self.xs.reshape((-1,11)) + self.ys = torch.cat([self.xs[1:],one_hot_encode(np.array([0]),11)],dim=0) + context_indices = np.repeat(np.array(task_indices),[x*5 for x in n_samples_per_context]) + self.contexts = one_hot_encode(context_indices, len(task_names)) + + # Remove the last transition since there's no next state available + self.xs = self.xs[:-1] + self.ys = self.ys[:-1] + self.contexts = self.contexts[:-1] + + def __len__(self): + return len(self.xs) + + def __getitem__(self, idx): + return self.xs[idx], self.contexts[idx], self.ys[idx] + +def generate_dataset(condition='Blocked'): + # Generate the dataset for either the blocked or interleaved condition + if condition=='Blocked': + contexts_to_load = [0,1,0,1] + [randint(0,1) for _ in range(40)] + n_samples_per_context = [40,40,40,40] + [1]*40 + elif condition == 'Interleaved': + contexts_to_load = [0,1]*80 + [randint(0,1) for _ in range(40)] + n_samples_per_context = [1]*160 + [1]*40 + else: + raise ValueError(f'Unknown dataset condition: {condition}') + + return DeterministicCSWDataset(n_samples_per_context, contexts_to_load) diff --git a/Scripts/Models (Under Development)/EGO/Using EMComposition/CSW/Figures/EGO CSW Model (PyTorch).pdf b/Scripts/Models (Under Development)/EGO/Using EMComposition/Coffee Shop World/Figures/EGO CSW Model (PyTorch).pdf similarity index 100% rename from Scripts/Models (Under Development)/EGO/Using EMComposition/CSW/Figures/EGO CSW Model (PyTorch).pdf rename to Scripts/Models (Under Development)/EGO/Using EMComposition/Coffee Shop World/Figures/EGO CSW Model (PyTorch).pdf diff --git a/Scripts/Models (Under Development)/EGO/Using EMComposition/CSW/Figures/EGO CSW Model (basic).pdf b/Scripts/Models (Under Development)/EGO/Using EMComposition/Coffee Shop World/Figures/EGO CSW Model (basic).pdf similarity index 100% rename from Scripts/Models (Under Development)/EGO/Using EMComposition/CSW/Figures/EGO CSW Model (basic).pdf rename to Scripts/Models (Under Development)/EGO/Using EMComposition/Coffee Shop World/Figures/EGO CSW Model (basic).pdf diff --git a/Scripts/Models (Under Development)/EGO/Using EMComposition/CSW/Figures/EGO CSW Model (learning and store).pdf b/Scripts/Models (Under Development)/EGO/Using EMComposition/Coffee Shop World/Figures/EGO CSW Model (learning and store).pdf similarity index 100% rename from Scripts/Models (Under Development)/EGO/Using EMComposition/CSW/Figures/EGO CSW Model (learning and store).pdf rename to Scripts/Models (Under Development)/EGO/Using EMComposition/Coffee Shop World/Figures/EGO CSW Model (learning and store).pdf diff --git a/Scripts/Models (Under Development)/EGO/Using EMComposition/CSW/Figures/EGO CSW Model (learning).pdf b/Scripts/Models (Under Development)/EGO/Using EMComposition/Coffee Shop World/Figures/EGO CSW Model (learning).pdf similarity 
index 100% rename from Scripts/Models (Under Development)/EGO/Using EMComposition/CSW/Figures/EGO CSW Model (learning).pdf rename to Scripts/Models (Under Development)/EGO/Using EMComposition/Coffee Shop World/Figures/EGO CSW Model (learning).pdf diff --git a/Scripts/Models (Under Development)/EGO/Using EMComposition/CSW/Figures/EGO CSW Model - EM (with PNL learning).pdf b/Scripts/Models (Under Development)/EGO/Using EMComposition/Coffee Shop World/Figures/EGO CSW Model - EM (with PNL learning).pdf similarity index 100% rename from Scripts/Models (Under Development)/EGO/Using EMComposition/CSW/Figures/EGO CSW Model - EM (with PNL learning).pdf rename to Scripts/Models (Under Development)/EGO/Using EMComposition/Coffee Shop World/Figures/EGO CSW Model - EM (with PNL learning).pdf diff --git a/Scripts/Models (Under Development)/EGO/Using EMComposition/CSW/Figures/EGO CSW Model - EM.pdf b/Scripts/Models (Under Development)/EGO/Using EMComposition/Coffee Shop World/Figures/EGO CSW Model - EM.pdf similarity index 100% rename from Scripts/Models (Under Development)/EGO/Using EMComposition/CSW/Figures/EGO CSW Model - EM.pdf rename to Scripts/Models (Under Development)/EGO/Using EMComposition/Coffee Shop World/Figures/EGO CSW Model - EM.pdf diff --git a/Scripts/Models (Under Development)/EGO/Using EMComposition/CSW/Figures/EGO Paper Figure.jpg b/Scripts/Models (Under Development)/EGO/Using EMComposition/Coffee Shop World/Figures/EGO Paper Figure.jpg similarity index 100% rename from Scripts/Models (Under Development)/EGO/Using EMComposition/CSW/Figures/EGO Paper Figure.jpg rename to Scripts/Models (Under Development)/EGO/Using EMComposition/Coffee Shop World/Figures/EGO Paper Figure.jpg diff --git a/Scripts/Models (Under Development)/EGO/Using EMComposition/CSW/Figures/EMComposition (example BIG).pdf b/Scripts/Models (Under Development)/EGO/Using EMComposition/Coffee Shop World/Figures/EMComposition (example BIG).pdf similarity index 100% rename from Scripts/Models (Under Development)/EGO/Using EMComposition/CSW/Figures/EMComposition (example BIG).pdf rename to Scripts/Models (Under Development)/EGO/Using EMComposition/Coffee Shop World/Figures/EMComposition (example BIG).pdf diff --git a/Scripts/Models (Under Development)/EGO/Using EMComposition/CSW/ScriptControl.py b/Scripts/Models (Under Development)/EGO/Using EMComposition/Coffee Shop World/ScriptControl.py similarity index 93% rename from Scripts/Models (Under Development)/EGO/Using EMComposition/CSW/ScriptControl.py rename to Scripts/Models (Under Development)/EGO/Using EMComposition/Coffee Shop World/ScriptControl.py index 43016886d3..f61ec5f75d 100644 --- a/Scripts/Models (Under Development)/EGO/Using EMComposition/CSW/ScriptControl.py +++ b/Scripts/Models (Under Development)/EGO/Using EMComposition/Coffee Shop World/ScriptControl.py @@ -3,8 +3,8 @@ # Settings for running script: -MODEL_PARAMS = 'TestParams' -# MODEL_PARAMS = 'DeclanParams' +# MODEL_PARAMS = 'TestParams' +MODEL_PARAMS = 'DeclanParams' CONSTRUCT_MODEL = True # THIS MUST BE SET TO True to run the script DISPLAY_MODEL = ( # Only one of the following can be uncommented: @@ -13,7 +13,7 @@ # # 'show_pytorch': True, # show pytorch graph of model # 'show_learning': True, # # 'show_nested_args': {'show_learning': pnl.ALL}, - # 'show_projections_not_in_composition': True, + # # 'show_projections_not_in_composition': True, # # 'show_nested': {'show_node_structure': True}, # # 'exclude_from_gradient_calc_style': 'dashed'# show target mechanisms for learning # # 'show_node_structure': 
True # show detailed view of node structures and projections diff --git a/Scripts/Models (Under Development)/EGO/Using EMComposition/CSW/TestParams.py b/Scripts/Models (Under Development)/EGO/Using EMComposition/Coffee Shop World/TestParams.py similarity index 86% rename from Scripts/Models (Under Development)/EGO/Using EMComposition/CSW/TestParams.py rename to Scripts/Models (Under Development)/EGO/Using EMComposition/Coffee Shop World/TestParams.py index e9893eff72..2ba7073f17 100644 --- a/Scripts/Models (Under Development)/EGO/Using EMComposition/CSW/TestParams.py +++ b/Scripts/Models (Under Development)/EGO/Using EMComposition/Coffee Shop World/TestParams.py @@ -1,14 +1,16 @@ from psyneulink.core.llvm import ExecutionMode from psyneulink.core.globals.keywords import ALL, ADAPTIVE, CONTROL, CPU, Loss, MPS, OPTIMIZATION_STEP, RUN, TRIAL + + model_params = dict( # Names: name = "EGO Model CSW", + em_name = "EM", state_input_layer_name = "STATE", previous_state_layer_name = "PREVIOUS STATE", context_layer_name = 'CONTEXT', - em_name = "EM", prediction_layer_name = "PREDICTION", # Structural @@ -20,7 +22,6 @@ # memory_init = None, # Initialize with zeros concatenate_queries = False, # concatenate_queries = True, - # environment # curriculum_type = 'Interleaved', curriculum_type = 'Blocked', @@ -33,18 +34,19 @@ context_weight = 1, # weight of the context used during memory retrieval # normalize_field_weights = False, # whether to normalize the field weights during memory retrieval normalize_field_weights = True, # whether to normalize the field weights during memory retrieval + normalize_memories = False, # whether to normalize the memory during memory retrieval # softmax_temperature = None, # temperature of the softmax used during memory retrieval (smaller means more argmax-like softmax_temperature = .1, # temperature of the softmax used during memory retrieval (smaller means more argmax-like # softmax_temperature = ADAPTIVE, # temperature of the softmax used during memory retrieval (smaller means more argmax-like # softmax_temperature = CONTROL, # temperature of the softmax used during memory retrieval (smaller means more argmax-like # softmax_threshold = None, # threshold used to mask out small values in softmax softmax_threshold = .001, # threshold used to mask out small values in softmax - enable_learning=[True, False, False], # Enable learning for PREDICTION (STATE) but not CONTEXT or PREVIOUS STATE - # enable_learning=[True, True, True] - # enable_learning=True, - # enable_learning=False, - learn_field_weights = True, - # learn_field_weights = False, + # target_fields=[True, False, False], # Enable learning for PREDICTION (STATE) but not CONTEXT or PREVIOUS STATE + # target_fields=[True, True, True] + # target_fields=True, + # target_fields=False, + enable_learning = True, + # enable_learning = False, loss_spec = Loss.BINARY_CROSS_ENTROPY, # loss_spec = Loss.CROSS_ENTROPY, # loss_spec = Loss.MSE, @@ -53,8 +55,8 @@ synch_weights = RUN, synch_values = RUN, synch_results = RUN, - execution_mode = ExecutionMode.Python, - # execution_mode = ExecutionMode.PyTorch, + # execution_mode = ExecutionMode.Python, + execution_mode = ExecutionMode.PyTorch, device = CPU, # device = MPS, ) diff --git a/Scripts/Models (Under Development)/EGO/Using EMComposition/CSW/__init__.py b/Scripts/Models (Under Development)/EGO/Using EMComposition/Coffee Shop World/__init__.py similarity index 100% rename from Scripts/Models (Under Development)/EGO/Using EMComposition/CSW/__init__.py rename to Scripts/Models 
(Under Development)/EGO/Using EMComposition/Coffee Shop World/__init__.py
diff --git a/docs/source/_static/EMComposition_Example_fig.svg b/docs/source/_static/EMComposition_Example_fig.svg
index f3a5662f21..7456c5d2b3 100644
--- a/docs/source/_static/EMComposition_Example_fig.svg
+++ b/docs/source/_static/EMComposition_Example_fig.svg
@@ -1,56 +1,94 @@
[SVG markup diff omitted -- figure regenerated: the node labels "KEY INPUT", "VALUE INPUT", "MATCH KEY", "SOFTMAX", "SOFTMAX GAIN CONTROL", "RETRIEVAL", "KEY RETRIEVED", and "VALUE RETRIEVED" are replaced by "KEY [QUERY]", "VALUE [VALUE]", "KEY [MATCH to KEYS]", "STORE", "RETRIEVE", "KEY [RETRIEVED]", and "VALUE [RETRIEVED]"]
diff --git a/docs/source/_static/EMComposition_field_weights_different.pdf b/docs/source/_static/EMComposition_field_weights_different.pdf
new file mode 100644
index 0000000000000000000000000000000000000000..97ebdb431486a98918546556e1241998fe7c8399
GIT binary patch
literal 32265
[... 32265 bytes of base85-encoded binary PDF data omitted ...]
z=KF8IA6NV9*yD(QLRgQ-e-9eVBM0`k{=@qC6hA%rKS7%yC7@3iwrIHsEHu9;c+J6Y z$?aq88&mlj&-guYWI|hgprI2=Xw(6C z5bm0o-v6w&AM5y|+BpC0n5_STnSx#Tzh3beh?TP*$npHg!kHkL9HDprW$NESOV(d? z&iYTz@>i<;EG-LDQi58~ z{<>oPM#r)KjNW4v|4)I@jgq(i1V9PANbMZlHiYTt5h>13VYtr8=ZEN^7Egw->!YY= zCeHz17K~4q&G;E??qtb4;I2Bx(c(I;eB@+x<}t(Bt^monV~e+2JFm@R0~n4f8I!&V zG|)DoZJ1#s3=!$1vqYXzK?t>iFmHezUvD^w6}yrr(oB&E;>HN0bOqc0 zt^~$EL4Wq>pE&CuW%LtZeQe$G1`c)>k4j9B`3H{OkH?>feYWzljrvm z`MK#wjVb#dlgL^aKk}tNy5%3&3{e}$N75!4NTk*2uuZ&IZ83#KOh| z7NVjxcW`*a!wlBU0$a`O4BnVq7}+z~*qPD&ymo@)u%zXbn9$?!)<^Tb~p84AxU>o;8 z$ATUBkMp=dfA=2rGKt6JLH)CPQ! z|54>X{y8_WbNG>oAQtcfk)A#kSCl~dXF5NoFyOySyn*NWtJvD3P~3m?;U1UqsALCD zcjmte!2y_mslfeB$L&`G{CSx9Z$fYYW@)5H2{^FS+oRgsuQL2!Gy%=)kM~ zvk$yBFj7aZ5${5@`rC3>6b?5-fj$r?}0l(18DOJ@Nu&WvxD{PxItp9>>}(e z?3^HRc5ZPFE>RH)E^ZD^F#&+?Uy=sE{6{uD`o_cw@FPvY?*C63J;nh1BJa;UVg<5u I|B*-k4>1qaC;$Ke literal 0 HcmV?d00001 diff --git a/docs/source/_static/EMComposition_field_weights_different.svg b/docs/source/_static/EMComposition_field_weights_different.svg index 94aab6b6a7..eeb15badcc 100644 --- a/docs/source/_static/EMComposition_field_weights_different.svg +++ b/docs/source/_static/EMComposition_field_weights_different.svg @@ -1,103 +1,209 @@ - - - - -EM_Composition - -VALUE INPUT - -KEY 0 INPUT - -MATCH KEY 0 - - - - -RETRIEVAL WEIGHTING 0 - - - - -KEY 1 INPUT - -MATCH KEY 1 - - - - -RETRIEVAL WEIGHTING 1 - - - - -SOFTMAX GAIN CONTROL 1 - - - - -SOFTMAX 1 - - - - - - - - -SOFTMAX 0 - -WEIGHT RETRIEVALS - - - - - - - -SOFTMAX GAIN CONTROL 0 - - - - - - - - - - - - -KEY 0 RETRIEVED - - - - -VALUE RETRIEVED - - - - -KEY 1 RETRIEVED - - - - - - - - - - - + + + + + + + + + + + EM_Composition + + 3 + [QUERY] + + 3 + [MATCH + to + KEYS] + + + + + STORE + + + + + 2 + [QUERY] + + 2 + [MATCH + to + KEYS] + + + + + + + + 1 + [QUERY] + + 1 + [MATCH + to + KEYS] + + + + + + + + 0 + [QUERY] + + 0 + [MATCH + to + KEYS] + + + + + + + + 0 + [WEIGHTED + MATCH] + + + + + COMBINE + MATCHES + + + + + 0 + [WEIGHT] + + + + + RETRIEVE + + + + + 1 + [WEIGHTED + MATCH] + + + + + 2 + [WEIGHTED + MATCH] + + + + + 3 + [WEIGHTED + MATCH] + + + + + + + + 1 + [WEIGHT] + + + + + + + + 2 + [WEIGHT] + + + + + + + + 3 + [WEIGHT] + + + + + 0 + [RETRIEVED] + + + + + 1 + [RETRIEVED] + + + + + 2 + [RETRIEVED] + + + + + 3 + [RETRIEVED] + + + + + \ No newline at end of file diff --git a/docs/source/_static/EMComposition_field_weights_equal_fig.svg b/docs/source/_static/EMComposition_field_weights_equal_fig.svg index dfa96297ff..a093260a15 100644 --- a/docs/source/_static/EMComposition_field_weights_equal_fig.svg +++ b/docs/source/_static/EMComposition_field_weights_equal_fig.svg @@ -1,104 +1,209 @@ - - - - -EM_Composition - -VALUE 1 INPUT - -KEY 1 INPUT - -MATCH KEY 1 - - - - -KEY 0 INPUT - -MATCH KEY 0 - - - - -VALUE 0 INPUT - -RETRIEVAL - -VALUE 1 RETRIEVED - - - - -KEY 1 RETRIEVED - - - - -KEY 0 RETRIEVED - - - - -VALUE 0 RETRIEVED - - - - -SOFTMAX 0 - - - - -SOFTMAX 1 - - - - - - - -SOFTMAX GAIN CONTROL 1 - - - - - - - - - - - -SOFTMAX GAIN CONTROL 0 - - - - - - - - -RETRIEVAL WEIGHTING 0 - - - - - -RETRIEVAL WEIGHTING 1 - - - - - + + + + + + + + + + + EM_Composition + + 3 + [QUERY] + + 3 + [MATCH + to + KEYS] + + + + + STORE + + + + + 2 + [QUERY] + + 2 + [MATCH + to + KEYS] + + + + + + + + 1 + [QUERY] + + 1 + [MATCH + to + KEYS] + + + + + + + + 0 + [QUERY] + + 0 + [MATCH + to + KEYS] + + + + + + + + 0 + [WEIGHTED + MATCH] + + + + + COMBINE + MATCHES + + + + + 0 + [WEIGHT] + + + + + RETRIEVE + + + + + 1 + [WEIGHTED + MATCH] + + + + + 2 + [WEIGHTED + MATCH] + + + + + 3 + 
diff --git a/docs/source/_static/EMComposition_field_weights_equal_fig.svg b/docs/source/_static/EMComposition_field_weights_equal_fig.svg
index dfa96297ff..a093260a15 100644
--- a/docs/source/_static/EMComposition_field_weights_equal_fig.svg
+++ b/docs/source/_static/EMComposition_field_weights_equal_fig.svg
@@ -1,104 +1,209 @@
[SVG markup diff omitted -- same relabeling as for EMComposition_field_weights_different.svg above]
diff --git a/psyneulink/core/components/functions/nonstateful/transformfunctions.py b/psyneulink/core/components/functions/nonstateful/transformfunctions.py
index 86c1db6b7b..99733aad2b 100644
--- a/psyneulink/core/components/functions/nonstateful/transformfunctions.py
+++ b/psyneulink/core/components/functions/nonstateful/transformfunctions.py
@@ -2216,7 +2216,7 @@ def _function(self,

         elif operation == L0:
             if normalize:
-                normalization = np.sum(np.abs(vector - matrix))
+                normalization = np.sum(np.abs(vector - matrix)) or 1
                 result = np.sum((1 - (np.abs(vector - matrix)) / normalization),axis=0)
             else:
                 result = np.sum((np.abs(vector - matrix)),axis=0)
diff --git a/psyneulink/core/components/ports/inputport.py b/psyneulink/core/components/ports/inputport.py
index ce123b7118..8fdcb4a250 100644
--- a/psyneulink/core/components/ports/inputport.py
+++ b/psyneulink/core/components/ports/inputport.py
@@ -713,7 +713,8 @@ class InputPort(Port_Base):
     is executed and its variable is assigned None. If *default_input* is assigned *DEFAULT_VARIABLE*, then the
     `default value ` for the InputPort's `variable ` is used as its value. This is useful
     for assignment to a Mechanism that needs a constant (i.e., fixed value) as the input to its
-    `function `.
+    `function ` (such as a `bias unit ` in an
+    `AutodiffComposition`).

     .. note::
        If `default_input ` is assigned *DEFAULT_VARIABLE*, then its `internal_only
diff --git a/psyneulink/core/compositions/composition.py b/psyneulink/core/compositions/composition.py
index 08f5e91e2b..81b98a5aec 100644
--- a/psyneulink/core/compositions/composition.py
+++ b/psyneulink/core/compositions/composition.py
@@ -3369,7 +3369,7 @@ class NodeRole(enum.Enum):
     BIAS
         A `Node ` for which one or more of its `InputPorts ` is assigned
         *DEFAULT_VARIABLE* as its `default_input ` (which provides it a prespecified
-        input that is constant across executions). Such a node can also be assigned as an `INPUT` and/or `ORIGIN`,
+        input that is constant across executions). Such a node can also be assigned as an `INPUT` and/or `ORIGIN`,
        if it receives input from outside the Composition and/or does not receive any `Projections ` from
        other Nodes within the Composition, respectively. This role cannot be modified programmatically.
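A minimal sketch of how the *default_input* option and `NodeRole.BIAS` documented in the two changes above can be used together (illustrative only -- the names, shapes, and final assertion here are assumptions, not part of the patch)::

    import psyneulink as pnl

    # InputPort that ignores afferent input and instead uses the Mechanism's
    # default variable (here, [[1]]) as a constant input on every execution
    bias = pnl.ProcessingMechanism(
        name='BIAS',
        default_variable=[[1]],
        input_ports=[pnl.InputPort(default_input=pnl.DEFAULT_VARIABLE)])
    out = pnl.ProcessingMechanism(name='OUT')
    comp = pnl.Composition(pathways=[[bias, out]])

    # a Node configured this way should be assigned NodeRole.BIAS, and the
    # learnable matrix of its efferent MappingProjection is what implements
    # a trainable bias (see the AutodiffComposition hint in the next diff)
    assert bias in comp.get_nodes_by_role(pnl.NodeRole.BIAS)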
diff --git a/psyneulink/library/components/mechanisms/modulatory/learning/EMstoragemechanism.py b/psyneulink/library/components/mechanisms/modulatory/learning/EMstoragemechanism.py
index f9d296eef8..fbd49f4d7a 100644
--- a/psyneulink/library/components/mechanisms/modulatory/learning/EMstoragemechanism.py
+++ b/psyneulink/library/components/mechanisms/modulatory/learning/EMstoragemechanism.py
@@ -642,7 +642,7 @@ def _validate_params(self, request_set, target_set=None, context=None):
                                  f"a list or 2d np.array containing entries that have the same shape "
                                  f"({memory_matrix.shape}) as an entry (row) in 'memory_matrix' arg.")

-        # Ensure the number of fields is equal to the numbder of items in variable
+        # Ensure the number of fields is equal to the number of items in variable
         if FIELDS in request_set:
             fields = request_set[FIELDS]
             if len(fields) != len(self.variable):
diff --git a/psyneulink/library/compositions/autodiffcomposition.py b/psyneulink/library/compositions/autodiffcomposition.py
index 5ce5f1eb18..003b43db76 100644
--- a/psyneulink/library/compositions/autodiffcomposition.py
+++ b/psyneulink/library/compositions/autodiffcomposition.py
@@ -110,10 +110,17 @@
 AutodiffComposition does not (currently) support the *automatic* construction of separate bias parameters.
 Thus, when constructing a model using an AutodiffComposition that corresponds to one in PyTorch, the `bias
 ` parameter of PyTorch modules should be set
-to `False`. Trainable biases *can* be specified explicitly in an AutodiffComposition by including a
-TransferMechanism that projects to the relevant Mechanism (i.e., implementing that layer of the network to
-receive the biases) using a `MappingProjection` with a `matrix ` parameter that
-implements a diagnoal matrix with values corresponding to the initial value of the biases.
+to `False`.
+
+    .. hint::
+       Trainable biases *can* be specified explicitly in an AutodiffComposition by including a `ProcessingMechanism`
+       that projects to the relevant Mechanism (i.e., implementing that layer of the network to receive the biases)
+       using a `MappingProjection` with a `matrix ` parameter that implements a diagonal
+       matrix with values corresponding to the initial value of the biases, and setting the `default_input
+       ` Parameter of one of the ProcessingMechanism's `input_ports
+       ` to *DEFAULT_VARIABLE*, and its `default_variable ` equal to 1.
+       ProcessingMechanisms configured in this way are assigned `NodeRole` `BIAS`, and the MappingProjection
+       is subject to learning.

 .. _AutodiffComposition_Nesting:
@@ -951,8 +958,9 @@ def create_pathway(node)->list:
                 return pathways

-        # Construct a pathway for each INPUT Node (except the TARGET Node)
-        pathways = [pathway for node in self.get_nodes_by_role(NodeRole.INPUT)
+        # Construct a pathway for each INPUT Node (including BIAS Nodes), except the TARGET Node
+        pathways = [pathway
+                    for node in (self.get_nodes_by_role(NodeRole.INPUT) + self.get_nodes_by_role(NodeRole.BIAS))
                     if node not in self.get_nodes_by_role(NodeRole.TARGET)
                     for pathway in _get_pytorch_backprop_pathway(node)]

@@ -1055,8 +1063,7 @@ def _get_loss(self, loss_spec):
        # and therefore requires a wrapper function to properly package inputs.
return lambda x, y: nn.CrossEntropyLoss()(torch.atleast_2d(x), torch.atleast_2d(y.type(x.type()))) elif loss_spec == Loss.BINARY_CROSS_ENTROPY: - if version.parse(torch.version.__version__) >= version.parse('1.12.0'): - return nn.BCELoss() + return nn.BCELoss() elif loss_spec == Loss.L1: return nn.L1Loss(reduction='sum') elif loss_spec == Loss.NLL: @@ -1118,7 +1125,7 @@ def autodiff_forward(self, inputs, targets, trial_loss = 0 for i in range(len(curr_tensors_for_trained_outputs[component])): trial_loss += self.loss_function(curr_tensors_for_trained_outputs[component][i], - curr_target_tensors_for_trained_outputs[component][i]) + curr_target_tensors_for_trained_outputs[component][i]) pytorch_rep.minibatch_loss += trial_loss pytorch_rep.minibatch_loss_count += 1 diff --git a/psyneulink/library/compositions/emcomposition.py b/psyneulink/library/compositions/emcomposition.py index af84a5b768..b850621760 100644 --- a/psyneulink/library/compositions/emcomposition.py +++ b/psyneulink/library/compositions/emcomposition.py @@ -7,242 +7,8 @@ # ********************************************* EMComposition ************************************************* -# -# TODO: -# - QUESTION: -# - SHOULD differential of SoftmaxGainControl Node be included in learning? -# - SHOULD MEMORY DECAY OCCUR IF STORAGE DOES NOT? CURRENTLY IT DOES NOT (SEE EMStorage Function) - -# - FIX: Refactor field_weights to use None instead of 0 to specify value fields, and allow inputs to field_nodes -# - FIX: ALLOW SOFTMAX SPEC TO BE A DICT WITH PARAMETERS FOR _get_softmax_gain() FUNCTION -# - FIX: Concatenation: -# - LLVM for function and derivative -# - Add Concatenate to pytorchcreator_function -# - Deal with matrix assignment in LearningProjection LINE 643 -# - Reinstate test for execution of Concatenate with learning in test_emcomposition (currently commented out) -# - FIX: Softmax Gain Control: -# Test if it current works (they are added to Composition but not in BackProp processing pathway) -# Does backprop have to run through this if not learnable? -# If so, need to add PNL Function, with derivative and LLVM and Pytorch implementations -# - FIX: WRITE MORE TESTS FOR EXECUTION, WARNINGS, AND ERROR MESSAGES -# - learning (with and without learning field weights -# - 3d tuple with first entry != memory_capacity if specified -# - list with number of entries > memory_capacity if specified -# - input is added to the correct row of the matrix for each key and value for -# for non-contiguous keys (e.g, field_weights = [1,0,1])) -# - illegal field weight assignment -# - explicitly that storage occurs after retrieval -# - FIX: WARNING NOT OCCURRING FOR Normalize ON ZEROS WITH MULTIPLE ENTRIES (HAPPENS IF *ANY* KEY IS EVER ALL ZEROS) -# - FIX: IMPLEMENT LearningMechanism FOR RETRIEVAL WEIGHTS: -# - what is learning_update: AFTER doing? Use for scheduling execution of storage_node? -# ?? implement derivative for concatenate -# - FIX: implement add_storage_pathway to handle addition of storage_node as learning mechanism -# - in "_create_storage_learning_components()" assign "learning_update" arg -# as BEORE OR DURING instead of AFTER (assigned to learning_enabled arg of LearningMechanism) -# - FIX: Add StorageMechanism LearningProjections to Composition? -> CAUSES TEST FAILURES; NEEDS INVESTIGATION -# - FIX: Thresholded version of SoftMax gain (per Kamesh) -# - FIX: DEAL WITH INDEXING IN NAMES FOR NON-CONTIGUOUS KEYS AND VALUES (reorder to keep all keys together?) 
-# - FIX: _import_composition: -# - MOVE LearningProjections -# - MOVE Condition? (e.g., AllHaveRun) (OR PUT ON MECHANISM?) -# - FIX: IMPLEMENT _integrate_into_composition METHOD THAT CALLS _import_composition ON ANOTHER COMPOSITION -# - AND TRANSFERS RELEVANT ATTRIBUTES (SUCH AS MEMORY, query_input_nodeS, ETC., POSSIBLY APPENDING NAMES) -# - FIX: ADD Option to suppress field_weights when computing norm for weakest entry in EMStorageMechanism -# - FIX: GENERATE ANIMATION w/ STORAGE (uses Learning but not in usual way) -# - IMPLEMENT use OF multiple inheritance of EMComposition from AutoDiff and Composition - -# - FIX: DOCUMENTATION: -# - enable_learning vs. learning_field_weights -# - USE OF EMStore.storage_location (NONE => LOCAL, SPECIFIED => GLOBAL) -# - define "keys" and "values" explicitly -# - define "key weights" explicitly as field_weights for all non-zero values -# - make it clear that full size of memory is initialized (rather than "filling up" w/ use) -# - write examples for run() -# - FIX: ADD NOISE -# - FIX: ?ADD add_memory() METHOD FOR STORING W/O RETRIEVAL, OR JUST ADD retrieval_prob AS modulable Parameter -# - FIX: CONFIDENCE COMPUTATION (USING SIGMOID ON DOT PRODUCTS) AND REPORT THAT (EVEN ON FIRST CALL) -# MISC: -# - WRITE TESTS FOR INPUT_PORT and MATRIX SPECS CORRECT IN LATEST BRANCHs -# - ACCESSIBILITY OF DISTANCES (SEE BELOW): MAKE IT A LOGGABLE PARAMETER (I.E., WITH APPROPRIATE SETTER) -# ADD COMPILED VERSION OF NORMED LINEAR_COMBINATION FUNCTION TO LinearCombination FUNCTION: dot / (norm a * norm b) -# - DECAY WEIGHTS BY: -# ? 1-SOFTMAX / N (WHERE N = NUMBER OF ITEMS IN MEMORY) -# or -# 1/N (where N=number of items in memory, and thus gets smaller as N gets -# larger) on each storage (to some asymptotic minimum value), and store the new memory to the unit with the -# smallest weights (randomly selected among “ties" [i.e., within epsilon of each other]), I think we have a -# mechanism that can adaptively use its limited capacity as sensibly as possible, by re-cycling the units -# that have the least used memories. -# - MAKE "_store_memory" METHOD USE LEARNING INSTEAD OF ASSIGNMENT -# - make LearningMechanism that, instead of error, simply adds relevant input to weights (with all others = 0) -# - (relationship to Steven's Hebbian / DPP model?): - -# - ADD ADDITIONAL PARAMETERS FROM CONTENTADDRESSABLEMEMORY FUNCTION -# - ADAPTIVE TEMPERATURE: KAMESH FOR FORMULA -# - ADD MEMORY_DECAY TO ContentAddressableMemory FUNCTION (and compiled version by Samyak) -# - MAKE memory_template A CONSTRUCTOR ARGUMENT FOR default_variable - -# - FIX: PSYNEULINK: -# - TESTS: -# - WRITE TESTS FOR DriftOnASphere variable = scalar, 2d vector or 1d vector of correct and incorrect lengths -# - WRITE TESTS FOR LEARNING WITH LinearCombination of 1, 2 and 3 inputs -# -# - COMPILATION: -# - Remove CIM projections on import to another composition -# - Autodiff support for IdentityFunction -# - MatrixTransform to add normalization -# - _store() method to assign weights to memory -# - LLVM problem with ComparatorMechanism -# -# - pytorchcreator_function: -# SoftMax implementation: torch.nn.Softmax(dim=0) is not getting passed correctly -# Implement LinearCombination -# - MatrixTransform Function: -# -# - LEARNING - Backpropagation LearningFunction / LearningMechanism -# - DOCUMENTATION: -# - weight_change_matrix = gradient (result of delta rule) * learning_rate -# - ERROR_SIGNAL is OPTIONAL (only implemented when there is an error_source specified) -# - Backprop: (related to above?) 
handle call to constructor with default_variable = None -# - WRITE TESTS FOR USE OF COVARIATES AND RELATED VIOLATIONS: (see ScratchPad) -# - Use of LinearCombination with PRODUCT in output_source -# - Use of LinearCombination with PRODUCT in InputPort of output_source -# - Construction of LearningMechanism with Backprop: -# - MappingProjection / LearningMechanism: -# - Add learning_rate parameter to MappingProjection (if learnable is True) -# - Refactor LearningMechanism to use MappingProjection learning_rate specification if present -# - CHECK FOR EXISTING LM ASSERT IN pytests -# -# - AutodiffComposition: -# - replace handling / flattening of nested compositions with Pytorch.add_module (which adds "child" modules) -# - Check that error occurs for adding a controller to an AutodiffComposition -# - Check that if "epochs" is not in input_dict for Autodiff, then: -# - set to num_trials as default, -# - leave it to override num_trials if specified (add this to DOCUMENTATION) -# - Input construction has to be: -# - same for Autodiff in Python mode and PyTorch mode -# (NOTE: used to be that autodiff could get left in Python mode -# so only where tests for Autodiff happened did it branch) -# - AND different from Composition (in Python mode) -# - support use of pathway argument in Autodff -# - the following format doesn't work for LLVM (see test_identicalness_of_input_types: -# xor = pnl.AutodiffComposition(nodes=[input_layer,hidden_layer,output_layer]) -# xor.add_projections([input_to_hidden_wts, hidden_to_output_wts]) -# - DOCUMENTATION: execution_mode=ExecutionMode.Python allowed -# - Add warning of this on initial call to learn() -# -# - Composition: -# - Add default_execution_mode attribute to allow nested Compositions to be executed in -# different model than outer Composition -# - _validate_input_shapes_and_expand_for_all_trials: consolidate with get_input_format() -# - Generalize treatment of FEEDBACK specification: - # - FIX: ADD TESTS FOR FEEDBACK TUPLE SPECIFICATION OF Projection, DIRECT SPECIFICATION IN CONSTRUCTOR -# - FIX: why aren't FEEDBACK_SENDER and FEEDBACK_RECEIVER roles being assigned when feedback is specified? -# - add property that keeps track of warnings that have been issued, and suppresses repeats if specified -# - add property of Composition that lists it cycles -# - Add warning if termination_condition is trigged (and verbosePref is set) -# - Addition of projections to a ControlMechanism seems too dependent on the order in which the -# the ControlMechanism is constructed with respect to its afferents (if it comes before one, -# the projection to it (i.e., for monitoring) does not get added to the Composition -# - - IMPLEMENTATION OF LEARNING: NEED ERROR IF TRY TO CALL LEARN ON A COMPOSITION THAT HAS NO LEARNING MECHANISMS -# INCLUDING IN PYTHON MODE?? OR JUST ALLOW IT TO CONSTRUCT THE PATHWAY AUTOMATICALLY? -# - Change size argument in constructor to use standard numpy shape format if tupe, and PNL format if list -# - Write convenience Function for returning current time from context -# - requires it be called from execution within aComposition, error otherwise) -# - takes argument for time scale (e.g., TimeScale.TRIAL, TimeScale.RUN, etc.) 
-# - Add TimeMechanism for which this is the function, and can be configured to report at a timescale -# - Add Composition.run_status attribute assigned a context flag, with is_preparing property that checks it -# (paralleling handling of is_initializing) -# - Allow set of lists as specification for pathways in Composition -# - Add support for set notation in add_backpropagation_learning_pathway (to match add_linear_processing_pathway) -# see ScratchPad: COMPOSITION 2 INPUTS UNNESTED VERSION: MANY-TO-MANY -# - Make sure that shadow inputs (see InputPort_Shadow_Inputs) uses the same matrix as shadowed input. -# - composition.add_backpropagation_learning_pathway(): support use of set notation for multiple nodes that -# project to a single one. -# - add LearningProjections executed in EXECUTION_PHASE to self.projections -# and then remove MODIFIED 8/1/23 in _check_for_unused_projections -# - Why can't verbosePref be set directly on a composition? -# - Composition.add_nodes(): -# - should check, on each call to add_node, to see if one that has a releavantprojection and, if so, add it. -# - Allow [None] as argument and treat as [] -# - IF InputPort HAS default_input = DEFAULT_VARIABLE, -# THEN IT SHOULD BE IGNORED AS AN INPUT NODE IN A COMPOSITION -# - Add use of dict in pathways specification to map outputs from a set to inputs of another set -# (including nested comps) -# -# - ShowGraph: (show_graph) -# - don't show INPUT/OUTPUT Nodes for nested Comps in green/red -# (as they don't really receive input or generate output on a run -# - show feedback projections as pink (shouldn't that already be the case?) -# - add mode for showing projections as diamonds without show_learning (e.g., "show_projections") -# - figure out how to get storage_node to show without all other learning stuff -# - show 'operation' parameter for LinearCombination in show_node_structure=ALL -# - specify set of nodes to show and only show those -# - fix: show_learning=ALL (or merge from EM branch) -# -# - ControlMechanism -# - refactor ControlMechanism per notes of 11/3/21, including: -# FIX: 11/3/21 - MOVE _parse_monitor_specs TO HERE FROM ObjectiveMechanism -# - EpisodicMemoryMechanism: -# - make storage_prob and retrieval_prob parameters linked to function -# - make distance_field_weights a parameter linked to function -# -# - LinearCombination Function: -# - finish adding derivative (for if exponents are specified) -# - remove properties (use getter and setter for Parameters) -# -# - ContentAddressableMemory Function: -# - rename "cue" -> "query" -# - add field_weights as parameter of EM, and make it a shared_parameter ?as well as a function_parameter? - -# - DDM: -# - make reset_stateful_function_when a Parameter and arg in constructor -# and align with reset Parameter of IntegratorMechanism) -# -# - FIX: BUGS: -# - composition: -# - If any MappingProjection is specified from nested node to outer node, -# then direct projections are instantiated to the output_CIM of the outer comp, and the -# nested comp is treated as OUTPUT Node of outer comp even if all its projections are to nodes in outer comp -# LOOK IN add_projections? 
for nested comps -# - composition (?add_backpropagation_learning_pathway?): -# THIS FAILS: -# comp = Composition(name='a_outer') -# comp.add_backpropagation_learning_pathway([input_1, hidden_1, output_1]) -# comp.add_backpropagation_learning_pathway([input_1, hidden_1, output_2]) -# BUT THE FOLLOWING WORKS (WITH IDENTICAL show_graph(show_learning=True)): -# comp = Composition(name='a_outer') -# comp.add_backpropagation_learning_pathway([input_1, hidden_1, output_1]) -# comp.add_backpropagation_learning_pathway([hidden_1, output_2]) -# - show_graph(): QUIRK (BUT NOT BUG?): -# SHOWS TWO PROJECTIONS FROM a_inner.input_CIM -> hidden_x: -# ?? BECAUSE hidden_x HAS TWO input_ports SINCE ITS FUNCTION IS LinearCombination? -# a_inner = AutodiffComposition([hidden_x],name='a_inner') -# a_outer = AutodiffComposition([[input_1, a_inner, output_1], -# [a_inner, output_2]], -# a_outer.show_graph(show_cim=True) - -# -LearningMechanism / Backpropagation LearningFunction: -# - Construction of LearningMechanism on its own fails; e.g.: -# lm = LearningMechanism(learning_rate=.01, learning_function=BackPropagation()) -# causes the following error: -# TypeError("Logistic.derivative() missing 1 required positional argument: 'self'") -# - Adding GatingMechanism after Mechanisms they gate fails to implement gating projections -# (example: reverse order of the following in _construct_pathways -# self.add_nodes(self.softmax_nodes) -# self.add_nodes(self.field_weight_nodes) -# - add Normalize as option -# - Anytime a row's norm is 0, replace with 1s -# - WHY IS Concatenate NOT WORKING AS FUNCTION OF AN INPUTPORT (WASN'T THAT USED IN CONTEXT OF BUFFER? -# SEE NOTES TO KATHERINE -# -# - TESTS -# For duplicate Projections (e.g., assign a Mechanism in **monitor** of ControlMechanism -# and use comp.add_projection(MappingProjection(mointored, control_mech) -> should generate a duplicate -# then search for other instances of the same error message """ - Contents -------- @@ -250,17 +16,18 @@ - `Organization ` - `Operation ` * `EMComposition_Creation` - - `Fields ` + - `Memory ` - `Capacity ` + - `Fields ` - `Storage and Retrieval ` - `Learning ` * `EMComposition_Structure` - `Input ` - - `Memory ` + - `Memory ` - `Output ` * `EMComposition_Execution` - `Processing ` - - `Learning ` + - `Learning ` * `EMComposition_Examples` - `Memory Template and Fill ` - `Field Weights ` @@ -271,27 +38,36 @@ Overview -------- -The EMComposition implements a configurable, content-addressable form of episodic, or eternal memory, that emulates +The EMComposition implements a configurable, content-addressable form of episodic (or external) memory. It emulates an `EpisodicMemoryMechanism` -- reproducing all of the functionality of its `ContentAddressableMemory` `Function` -- -in the form of an `AutodiffComposition` that is capable of learning how to differentially weight different cues used -for retrieval,, and that adds the capability for `memory_decay `. Its `memory -` is configured using two arguments of its constructor: **memory_template** argument, that defines -how each entry in `memory ` is structured (the number of fields in each entry and the length -of each field); and **field_weights** argument, that defines which fields are used as cues for retrieval, i.e., "keys", -including whether and how they are differentially weighted in the match process used for retrieval); and which -fields are treated as "values" that are stored retrieved, but not used by the match process. 
The inputs to an
-EMComposition, corresponding to each key ("query") and value field are assigned to each of its `INPUT `
-`Nodes ` (listed in its `query_input_nodes ` and `value_input_nodes
-` attributes, respectively), and the retrieved values are represented as `OUTPUT
-` `Nodes ` of the EMComposition. The `memory ` can be
-accessed using its `memory ` attribute.
+in the form of an `AutodiffComposition`. This allows it to backpropagate error signals, based on retrieved values, to
+its inputs, and to learn how to differentially weight the cues (queries) used for retrieval. It also adds the
+capability for `memory_decay `. In these respects, it implements a variant of a `Modern Hopfield
+Network `_, as well as some of the features of a `Transformer `_.
+
+The `memory ` of an EMComposition is configured using two arguments of its constructor:
+the **memory_template** argument, that defines the overall structure of its `memory ` (the
+number of fields in each entry, the length of each field, and the number of entries); and the **fields** argument, that
+defines which fields are used as cues for retrieval (i.e., as "keys"), including whether and how they are weighted in
+the match process used for retrieval, which fields are treated as "values" that are stored and retrieved but not used
+by the match process, and which are involved in learning. The inputs to an EMComposition, corresponding to its keys and
+values, are assigned to each of its `INPUT ` `Nodes `: inputs to be matched to keys
+(i.e., used as "queries") are assigned to its `query_input_nodes `; and the remaining
+inputs are assigned to its `value_input_nodes `. When the EMComposition is executed, the
+retrieved values for all fields are returned as the result, and recorded in its `results `
+attribute. The value for each field is assigned as the `value ` of its `OUTPUT `
+`Nodes `. The input is then stored in its `memory `, with a probability
+determined by its `storage_prob ` `Parameter`, and all previous memories decayed by its
+`memory_decay_rate `. The `memory ` can be accessed using its
+`memory ` Parameter.

 .. technical_note::
-   The memories of an EMComposition are actually stored in the `matrix ` attribute of a
-   set of `MappingProjections ` (see `note below `). The `memory
-   ` attribute compiles and formats these as a single 3d array, the rows of which (axis 0)
-   are each entry, the columns of which (axis 1) are the fields of each entry, and the items of which (axis 2)
-   are the values of each field (see `EMComposition_Memory` for additional details).
+   The memories of an EMComposition are actually stored in the `matrix ` `Parameter`
+   of a set of `MappingProjections ` (see `note below `). The
+   `memory ` Parameter compiles and formats these as a single 3d array, the rows of which
+   (axis 0) are each entry, the columns of which (axis 1) are the fields of each entry, and the items of which
+   (axis 2) are the values of each field (see `EMComposition_Memory_Configuration` for additional details).

 .. _EMComposition_Organization:
@@ -302,14 +78,14 @@
 *Entries and Fields*. Each entry in memory can have an arbitrary number of fields, and each field can have an arbitrary
 length. However, all entries must have the same number of fields, and the corresponding fields must all have the same
 length across entries. Each field is treated as a separate "channel" for storage and retrieval, and is associated with
-its own corresponding input (key or value) and output (retrieved value) `Node ` some or all of
+its own corresponding input (key or value) and output (retrieved value) `Node `, some or all of
 which can be used to compute the similarity of the input (key) to entries in memory, which is used for retrieval.
 Fields can be differentially weighted to determine the influence they have on retrieval, using the `field_weights
-` parameter (see `retrieval ` below). The number and
-shape of the fields in each entry is specified in the **memory_template** argument of the EMComposition's constructor
-(see `memory_template `). Which fields treated as keys (i.e., matched against queries during
-retrieval) and which are treated as values (i.e., retrieved but not used for matching retrieval) is specified in the
-**field_weights** argument of the EMComposition's constructor (see `field_weights `).
+` parameter (see `retrieval ` below). The number and shape
+of the fields in each entry is specified in the **memory_template** argument of the EMComposition's constructor (see
+`memory_template `). Which fields are treated as keys (i.e., matched against queries
+during retrieval) and which are treated as values (i.e., retrieved but not used for matching retrieval) is specified in
+the **field_weights** argument of the EMComposition's constructor (see `field_weights `).

 .. _EMComposition_Operation:

@@ -317,39 +93,46 @@
 *Retrieval.* The values retrieved from `memory ` (one for each field) are based on
 the relative similarity of the keys to the entries in memory, computed as the distance between each key and the
-values in the corresponding field for each entry in memory. By default, normalized dot products (comparable to cosine
-similarity) are used to compute the similarity of each query to each key in memory. These distances are then
-weighted by the corresponding `field_weights ` for each field (if specified) and then
-summed, and the sum is softmaxed to produce a softmax distribution over the entries in memory. That is then used to
-generate a softmax-weighted average of the retrieved values across all fields, which is returned as the `result
-` of the EMComposition's `execution ` (an EMComposition can also be
-configured to return the entry with the lowest distance weighted by field, however then it is not compatible
-with learning; see `softmax_choice `).
+values in the corresponding field for each entry in memory. By default, for queries and keys that are vectors,
+normalized dot products (comparable to cosine similarity) are used to compute the similarity of each query to each
+key in memory; and if they are scalars, the L0 norm is used. These distances are then weighted by the corresponding
+`field_weights ` for each field (if specified) and then summed, and the sum is softmaxed
+to produce a softmax distribution over the entries in memory. That is then used to generate a softmax-weighted average
+of the retrieved values across all fields, which is returned as the `result ` of the EMComposition's
+`execution ` (an EMComposition can also be configured to return the exact entry with the lowest
+distance (weighted by field), in which case it is not compatible with learning; see `softmax_choice
+`).
COMMENT: TBD DISTANCE ATTRIBUTES: - The distances used for the last retrieval is stored in XXXX and the distances of each of their corresponding fields + The distance used for the last retrieval is stored in XXXX, and the distances of each of their corresponding fields (weighted by `distance_field_weights `), are returned in XXX, respectively. COMMENT -*Storage.* The `inputs ` to the EMComposition's fields are stored in `memory -` after each execution, with a probability determined by `storage_prob -`. If `memory_decay_rate ` is specified, then the `memory -` is decayed by that amount after each execution. If `memory_capacity -` has been reached, then each new memory replaces the weakest entry (i.e., the one -with the smallest norm across all of its fields) in `memory `. +*Storage.* The `inputs ` to the EMComposition's fields are stored +in `memory ` after each execution, with a probability determined by `storage_prob +`. If `memory_decay_rate ` is specified, then +the `memory ` is decayed by that amount after each execution. If `memory_capacity +` has been reached, then each new memory replaces the weakest entry +(i.e., the one with the smallest norm across all of its fields) in `memory `. .. _EMComposition_Creation: Creation -------- -An EMComposition is created by calling its constructor, that takes the following arguments: +An EMComposition is created by calling its constructor. There are four major elements that can be configured: +the structure of its `memory ; the fields ` for the entries +in memory; how `storage and retrieval ` operate; and whether and how `learning +` is carried out. + +.. _EMComposition_Memory_Specification: - .. _EMComposition_Fields: +*Memory Specification* +~~~~~~~~~~~~~~~~~~~~~~ -*Field Specification* +These arguments are used to specify the shape and number of memory entries. .. _EMComposition_Memory_Template: @@ -394,18 +177,6 @@ zeros, and **memory_fill** is specified, then the matrix is filled with the value specified in **memory_fill**; otherwise, zeros are used to fill all entries. -.. _EMComposition_Memory_Capacity: - -*Memory Capacity* - -* **memory_capacity**: specifies the number of items that can be stored in the EMComposition's memory; when - `memory_capacity ` is reached, each new entry overwrites the weakest entry (i.e., the - one with the smallest norm across all of its fields) in `memory `. If `memory_template - ` is specified as a 3-item tuple or 3d list or array (see above), then that is used - to determine `memory_capacity ` (if it is specified and conflicts with either of those - an error is generated). Otherwise, it can be specified using a numerical value, with a default of 1000. The - `memory_capacity ` cannot be modified once the EMComposition has been constructed. - .. _EMComposition_Memory_Fill: * **memory_fill**: specifies the value used to fill the `memory `, based on the shape specified @@ -420,66 +191,130 @@ This can be ignored, as it does not affect the results of execution, but it can be averted by specifying `memory_fill ` to use small random values (e.g., ``memory_fill=(0,.001)``). +.. _EMComposition_Memory_Capacity: + +* **memory_capacity**: specifies the number of items that can be stored in the EMComposition's memory; when + `memory_capacity ` is reached, each new entry overwrites the weakest entry (i.e., the + one with the smallest norm across all of its fields) in `memory `. 
If `memory_template
+  ` is specified as a 3-item tuple or 3d list or array (see above), then that is used
+  to determine `memory_capacity ` (if it is specified and conflicts with either of those,
+  an error is generated). Otherwise, it can be specified using a numerical value, with a default of 1000. The
+  `memory_capacity ` cannot be modified once the EMComposition has been constructed.
+
+.. _EMComposition_Fields:
+
+*Fields*
+~~~~~~~~
+
+These arguments are used to specify the names of the fields in a memory entry, which fields are used as keys, how
+those are weighted for retrieval, and whether those weights are learned.
+
+.. _EMComposition_Field_Specification_Dict:
+
+* **fields**: a dict that specifies the names of the fields and their attributes. There must be an entry for each
+  field specified in the **memory_template**, and each entry must have the following format:
+
+  * *key*: a string that specifies the name of the field.
+
+  * *value*: a dict or tuple with three entries; if a dict, the key to each entry must be the keyword specified below,
+    and if a tuple, the entries must appear in the following order:
+
+    - *FIELD_WEIGHT* `specification ` - value must be a scalar or None. If it is a scalar,
+      the field is treated as a `retrieval key ` in `memory ` that
+      is weighted by that value during retrieval; if None, it is treated as a value in `memory `
+      and the field cannot be reconfigured later.
+
+    - *LEARN_FIELD_WEIGHT* `specification ` - value must be a boolean or a float;
+      if False, the field_weight for that field is not learned; if True, the field weight is learned using the
+      EMComposition's `learning_rate `; if a float, that is used as its learning_rate.
+
+    - *TARGET_FIELD* `specification ` - value must be a boolean; if True, the value of the
+      `retrieved_node ` for that field contributes to the error computed during learning
+      and backpropagated through the EMComposition (see `Backpropagation of error `);
+      if False, the retrieved value for that field does not contribute to the error; however, its field_weight can
+      still be learned if that is specified in `learn_field_weight `.
+
+  .. note::
+     The **fields** argument is provided as a convenient and reliable way of specifying field attributes;
+     the dict itself is not retained as a `Parameter` or attribute of the EMComposition.
+
+  The specifications provided in the **fields** argument are assigned to the corresponding Parameters of
+  the EMComposition which, alternatively, can be specified individually using the **field_names**, **field_weights**,
+  **learn_field_weights** and **target_fields** arguments of the EMComposition's constructor, as described below.
+  However, these and the **fields** argument cannot be used together; doing so raises an error.
+
+.. _EMComposition_Field_Names:
+
+* **field_names**: a list that specifies the names assigned to the fields. The number of names specified must match
+  the number of fields specified in the memory_template. If specified, the names are used to label the nodes of the
+  EMComposition; otherwise, the fields are labeled generically as "Key 0", "Key 1", and "Value 1", "Value 2", etc.
+
 .. _EMComposition_Field_Weights:

-* **field_weights**: specifies which fields are used as keys, and how they are weighted during retrieval. The
-  number of entries specified must match the number of fields specified in **memory_template** (i.e., the size of
-  of its first dimension (axis 0)).
All non-zero entries must be positive; these designate *keys* -- fields - that are used to match queries against entries in memory for retrieval (see `Match memories by field - `). Entries of 0 designate *values* -- fields that are ignored during the matching - process, but the values of which are retrieved and assigned as the `value ` of the - corresponding `retrieved_node `. This distinction between keys and value corresponds +* **field_weights**: specifies which fields are used as keys, and how they are weighted during retrieval. Fields + designated as keys used to match inputs (queries) against entries in memory for retrieval (see `Match memories by + field `); entries designated as *values* are ignored during the matching process, but + their values in memory are retrieved and assigned as the `value ` of the corresponding + `retrieved_node `. This distinction between keys and value corresponds to the format of a standard "dictionary," though in that case only a single key and value are allowed, whereas - here there can be one or more keys and any number of values; if all fields are keys, this implements a full form of - content-addressable memory. If **learn_field_weights** is True (and `enable_learning` - is either True or a list with True for at least one entry), then the field_weights can be modified during training - (this functions similarly to the attention head of a Transformer model, although at present the field can only be - scalar values rather than vecdtors); if **learn_field_weights** is False, then the field_weights are fixed. - The following options can be used to specify **field_weights**: - - * *None* (the default): all fields except the last are treated as keys, and are weighted equally for retrieval, - while the last field is treated as a value field; - - * *single entry*: all fields are treated as keys (i.e., used for retrieval) and weighted equally for retrieval. - if `normalize_field_weights ` is True, the value is ignored and all - of keys are weighted by 1 / number of keys (i.e., normalized), whereas if `normalize_field_weights - ` is False, then the value specified is used to weight the retrieval of - every keys. - - * *multiple non-zero entries*: If all entries are identical, the value is ignored and the corresponding keys - are weighted equally for retrieval; if the non-zero entries are non-identical, they are used to weight the - corresponding fields during retrieval (see `Weight fields `). In either case, - the remaining fields (with zero weights) are treated as value fields. - - _EMComposition_Field_Weights_Note: + in an EMComposition there can be one or more keys and any number of values; if all fields are keys, this implements a + full form of content-addressable memory. The following options can be used to specify **field_weights**: + + * *None* (the default): all fields except the last are treated as keys, and are assigned a weight of 1, + while the last field is treated as a value field (same as assiging it None in a list or tuple (see below). + + * *scalar*: all fields are treated as keys (i.e., used for retrieval) and weighted equally for retrieval. If + `normalize_field_weights ` is True, the value is divided by the number + of keys, whereas if `normalize_field_weights ` is False, then the value + specified is used to weight the retrieval of all keys with that value. + + .. note:: + At present these have the same result, since the `SoftMax` function is used to normalize the match between + queries and keys. 
However, other retrieval functions could be used in the future that would be affected by
+       the value of the `field_weights `. Therefore, it is recommended to leave
+       `normalize_field_weights ` set to True (the default) to ensure that
+       the `field_weights ` are normalized to sum to 1.0.
+
+  * *list or tuple*: the number of entries must match the number of fields specified in **memory_template**, and
+    all entries must be either 0, a positive scalar value, or None. If all entries are identical, they are treated
+    as if a single value was specified (see above); if the entries are non-identical, any entries that are not None
+    are used to weight the corresponding fields during retrieval (see `Weight fields `),
+    including those that are 0 (though these will not be used in the retrieval process unless/until they are changed
+    to a positive value). If `normalize_field_weights ` is True, all non-None
+    entries are normalized so that they sum to 1.0; if False, the raw values are used to weight the retrieval of
+    the corresponding fields. All entries of None are treated as value fields, are not assigned a `field_weight_node
+    `, and are ignored during retrieval. These *cannot be modified* after the
+    EMComposition has been constructed (see note below).
+
+  .. _EMComposition_Field_Weights_Change_Note:
+
   .. note::
      The field_weights can be modified after the EMComposition has been constructed, by assigning a new set of
      weights to its `field_weights ` `Parameter`. However, only field_weights associated with
-     key fields (i.e., were initially assigned non-zero field_weights) can be modified; the weights for value fields
-     (i.e., ones that were initially assigned a field_weight of 0) cannot be modified, and an attempt to do so will
-     generate an error. If a field initially used as a value may later need to be used as a key, it should be
-     assigned a non-zero field_weight when the EMComposition is constructed; it can then be assigned 0 just after
-     construction, and later changed as needed.
+     key fields (i.e., that were initially assigned non-zero field_weights) can be modified; the weights for value
+     fields (i.e., ones that were initially assigned a field_weight of None) cannot be modified, and doing so raises
+     an error. If a field that will be used initially as a value may later need to be used as a key, it should be
+     assigned a `field_weight ` of 0 at construction (rather than None), which can then
+     later be changed as needed.

   .. technical_note::
-     The reason that only field_weights for keys can be modified is that only `field_weight_nodes
-     ` for keys are constructed, since ones for values would have no effect on the
-     retrieval process and thus are uncecessary.
+     The reason that field_weights can be modified only for keys is that `field_weight_nodes
+     ` are constructed only for keys, since ones for values would have no effect
+     on the retrieval process and therefore are unnecessary (and can be misleading).

-.. _EMComposition_Normalize_Field_Weights:
-* **normalize_field_weights**: specifies whether the `field_weights ` are normalized
-  or their raw values are used. If True, the `field_weights ` are normalized so that
-  they sum to 1.0, and are used to weight (i.e., multiply) the corresponding fields during retrieval (see `Weight
-  fields `). If False, the raw values of the `field_weights `
-  are used to weight the retrieved value of each field. This setting is ignored if **field_weights**
-  is None or `concatenate_queries ` is in effect.
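As a concrete illustration of the **field_weights** specifications above (a sketch assuming the constructor
arguments behave as documented in this file; the shape and weight values are arbitrary)::

    import psyneulink as pnl

    # Three fields: the first two are keys, with the first weighted twice as
    # heavily as the second; the last (None) is a value field, retrieved but
    # not used for matching and not modifiable after construction.
    em = pnl.EMComposition(memory_template=(10, 3, 4),
                           field_weights=[2, 1, None])

    # With normalize_field_weights=True (the default), the non-None weights
    # are normalized to sum to 1.0 (here, ~[0.67, 0.33]).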
+* **learn_field_weights**: if **enable_learning** is True, this specifies which field_weights are subject to learning, + and optionally the `learning_rate ` for each (see `learn_field_weights + ` below for details of specification). -.. _EMComposition_Field_Names: +.. _EMComposition_Normalize_Field_Weights: -* **field_names**: specifies names that can be assigned to the fields. The number of names specified must - match the number of fields specified in the memory_template. If specified, the names are used to label the - nodes of the EMComposition. If not specified, the fields are labeled generically as "Key 0", "Key 1", etc.. +* **normalize_field_weights**: specifies whether the `field_weights ` are normalized or + their raw values are used. If True, the value of all non-None `field_weights ` are + normalized so that they sum to 1.0, and the normalized values are used to weight (i.e., multiply) the corresponding + fields during retrieval (see `Weight fields `). If False, the raw values of the + `field_weights ` are used to weight the retrieved value of each field. This setting + is ignored if **field_weights** is None or `concatenate_queries ` is True. .. _EMComposition_Concatenate_Queries: @@ -503,27 +338,20 @@ are always preserved, even when `concatenate_queries ` is True, so that separate inputs can be provided for each key, and the value of each key can be retrieved separately. -.. _EMComposition_Memory_Decay_Rate - -* **memory_decay_rate**: specifies the rate at which items in the EMComposition's memory decay; the default rate - is *AUTO*, which sets it to 1 / `memory_capacity `, such that the oldest memories - are the most likely to be replaced when `memory_capacity ` is reached. If - **memory_decay_rate** is set to 0 None or False, then memories do not decay and, when `memory_capacity - ` is reached, the weakest memories are replaced, irrespective of order of entry. - .. _EMComposition_Retrieval_Storage: *Retrieval and Storage* +~~~~~~~~~~~~~~~~~~~~~~~ -* **storage_prob** : specifies the probability that the inputs to the EMComposition will be stored as an item in +* **storage_prob**: specifies the probability that the inputs to the EMComposition will be stored as an item in `memory ` on each execution. -* **normalize_memories** : specifies whether queries and keys in memory are normalized before computing their dot +* **normalize_memories**: specifies whether queries and keys in memory are normalized before computing their dot products. .. _EMComposition_Softmax_Gain: -* **softmax_gain** : specifies the gain (inverse temperature) used for softmax normalizing the combined distances +* **softmax_gain**: specifies the gain (inverse temperature) used for softmax normalizing the combined distances used for retrieval (see `EMComposition_Execution` below). The following options can be used: * numeric value: the value is used as the gain of the `SoftMax` Function for the EMComposition's @@ -548,7 +376,7 @@ .. _EMComposition_Softmax_Choice: -* **softmax_choice** : specifies how the `SoftMax` Function of the EMComposition's `softmax_node +* **softmax_choice**: specifies how the `SoftMax` Function of the EMComposition's `softmax_node ` is used, with the combined distances, to generate a retrieved item; the following are the options that can be used and the retrieved value they produce: @@ -562,7 +390,7 @@ .. warning:: Use of the *ARG_MAX* and *PROBABILISTIC* options is not compatible with learning, as these implement a discrete choice and thus are not differentiable. 
Constructing an EMComposition with **softmax_choice** set to either of - these options and **enable_learning** set to True (or a list with any True entries) will generate a warning, and + these options and **learn_field_weights** set to True (or a list with any True entries) will generate a warning, and calling the EMComposition's `learn ` method will generate an error; it must be changed to *WEIGHTED_AVG* to execute learning. @@ -571,37 +399,91 @@ passed as *ARG_MAX_INDICATOR*; and *PROBALISTIC* is passed as *PROB_INDICATOR*; the other SoftMax options are not currently supported. +.. _EMComposition_Memory_Decay_Rate: + +* **memory_decay_rate**: specifies the rate at which items in the EMComposition's memory decay; the default rate + is *AUTO*, which sets it to 1 / `memory_capacity `, such that the oldest memories + are the most likely to be replaced when `memory_capacity ` is reached. If + **memory_decay_rate** is set to 0 None or False, then memories do not decay and, when `memory_capacity + ` is reached, the weakest memories are replaced, irrespective of order of entry. + +.. _EMComposition_Purge_by_Weight: + +* **purge_by_field_weight**: specifies whether `field_weights ` are used in determining + which memory entry is replaced when a new memory is `stored `. If True, the norm of each + entry is multiplied by its `field_weight ` to determine which entry is the weakest and + will be replaced. + .. _EMComposition_Learning: *Learning* +~~~~~~~~~~ -EMComposition supports two forms of learning -- error backpropagation and the learning of `field_weights -` -- that can be configured by the following arguments of the EMComposition's constructor: - -* **enable_learning** : specifies whether learning is enabled for the EMComposition and, if so, which `retrieved_nodes - ` are used to compute errors, and propagate these back through the network. If - **enable_learning** is False, then no learning occurs, including of `field_weights `). - If it is True, then all of the `retrieved_nodes ` participate in learning: For - those that do not project to an outer Composition (i.e., one in which the EMComposition is `nested - `), a `TARGET ` node is constructed for each, and used to compute errors that - are backpropagated through the network to its `query_input_nodes ` and - `value_input_nodes `, and on to any nodes that project to it from a composition - in which the EMComposition is `nested `; retrieved_nodes that *do* project to an outer - Composition receive their errors from those nodes, which are also backpropagated through the EMComposition. - If **enable_learning** is a list, then only the `retrieved_nodes ` specified in the - list participate in learning, and errors are computed only for those nodes. The list must contain the same - number of entries as there are `fields ` and corresponding `retreived_nodes - `, and each entry must be a boolean that specifies whether the corresponding - `retrieved_node ` is used for learning. - -* **learn_field_weights** : specifies whether `field_weights ` are modifiable during - learning (see `field_weights ` and `Learning ` for additional - information. For learning of `field_weights ` to occur, **enable_learning** must - also be True, or it must be a list with at least one True entry. If **learn_field_weights** is True, - **use_gating_for_weighting** must be False (see `note `). - -* **learning_rate** : specifies the rate at which `field_weights ` are learned if - **learn_field_weights** is True; see `Learning ` for additional information. 
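Tying together the retrieval and storage options above, the following sketch shows where each argument goes
(values are arbitrary, and the argument semantics are assumed to be as described in the corresponding sections)::

    import psyneulink as pnl

    # Return the single best-matching entry rather than a weighted average.
    # ARG_MAX is not differentiable, so learning must be disabled.
    em = pnl.EMComposition(memory_template=(50, 2, 8),
                           softmax_gain=10.0,          # sharpen the distribution over entries
                           softmax_choice=pnl.ARG_MAX, # retrieve the exact closest entry
                           enable_learning=False,      # required for ARG_MAX / PROBABILISTIC
                           storage_prob=0.5,           # store each input with probability .5
                           memory_decay_rate=0.01)     # decay entries by 1% per execution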
+EMComposition supports two forms of learning: error backpropagation through the entire Composition, and the learning
+of `field_weights ` within it. Learning is enabled by setting the **enable_learning**
+argument of the EMComposition's constructor to True, and optionally specifying the **learn_field_weights** argument
+(as detailed below). If **enable_learning** is False, no learning of any kind occurs; if it is True, then both forms
+of learning are enabled.
+
+.. _EMComposition_Error_BackPropagation:
+
+*Backpropagation of error*. If **enable_learning** is True, then the values retrieved from `memory
+` when the EMComposition is executed during learning can be used for error computation
+and backpropagation through the EMComposition to its inputs. By default, the values of all of its `retrieved_nodes
+` are included. For those that do not project to an outer Composition (i.e., one in
+which the EMComposition is `nested `), a `TARGET ` node is constructed
+for each, and used to compute errors that are backpropagated through the network to its `query_input_nodes
+` and `value_input_nodes `, and on to any
+nodes that project to those from a Composition within which the EMComposition is `nested `.
+Retrieved_nodes that *do* project to an outer Composition receive their errors from those nodes, which are also
+backpropagated through the EMComposition. Fields can be selectively specified for learning in the **fields** argument
+or the **target_fields** argument of the EMComposition's constructor, as detailed below.
+
+*Field Weight Learning*. If **enable_learning** is True, then the `field_weights ` can
+be learned, by specifying these either in the **fields** argument or the **learn_field_weights** argument of the
+EMComposition's constructor, as detailed below. Learning field_weights implements a function comparable to the learning
+in an attention head of the `Transformer `_ architecture, although at present the
+field_weights can only be scalar values rather than vectors or matrices, and they cannot receive input. These
+capabilities will be added in the future.
+
+The following arguments of the EMComposition's constructor can be used to configure learning:
+
+* **enable_learning**: specifies whether any learning is enabled for the EMComposition. If False,
+  no learning occurs; if True, then both error backpropagation and learning of `field_weights
+  ` can occur. If **enable_learning** is True, **use_gating_for_weighting**
+  must be False (see `note `).
+
+.. _EMComposition_Target_Fields:
+
+* **target_fields**: specifies which `retrieved_nodes ` are used to compute
+  errors, and propagate these back through the EMComposition to its `query ` and
+  `value_input_nodes `. If this is None (the default), all `retrieved_nodes
+  ` are used; if it is a list or tuple, then it must have the same number of entries
+  as there are fields, and each entry must be a boolean specifying whether the corresponding `retrieved_node
+  ` participates in learning; errors are computed only for those nodes. This can
+  also be specified in a dict for the **fields** argument (see `fields `).
+
+.. _EMComposition_Field_Weights_Learning:
+
+* **learn_field_weights**: specifies which field_weights are subject to learning, and optionally the `learning_rate
+  ` for each; this can also be specified in a dict for the **fields** argument (see
+  `fields `).
The following specfications can be used: + + * *None*: all field_weights are subject to learning, and the `learning_rate ` for the + EMComposition is used as the learning_rate for all field_weights. + + * *bool*: If True, all field_weights are subject to learning, and the `learning_rate ` + for the EMComposition is used as the learning rate for all field_weights; if False, no field_weights are + subject to learning, regardless of `enable_learning `. + + * *list* or *tuple*: must be the same length as the number of fields specified in the memory_template, and each entry + must be either True, False or a positive scalar value. If True, the corresponding field_weight is subject to + learning and the `learning_rate ` for the EMComposition is used to specify the + learning_ rate for that field; if False, the corresponding field_weight is not subject to learning; if a scalar + value is specified, it is used as the `learning_rate` for that field. + +* **learning_rate**: specifies the learning_rate for any `field_weights ` for which a + learning_rate is not individually specified in the **learn_field_weights** argument (see above). .. _EMComposition_Structure: @@ -617,7 +499,7 @@ ` of the EMComposition, listed in its `query_input_nodes ` and `value_input_nodes ` attributes, respectively, -.. _EMComposition_Memory: +.. _EMComposition_Memory_Structure: *Memory* ~~~~~~~~ @@ -672,8 +554,8 @@ * **Input**. The inputs to the EMComposition are provided to the `query_input_nodes ` and `value_input_nodes `. The former are used for matching to the corresponding - `fields ` of the `memory `, while the latter are retrieved but not used - for matching. + `fields ` of the `memory `, while the latter are retrieved + but not used for matching. * **Concatenation**. By default, the input to every `query_input_node ` is passed to a to its own `match_node ` through a `MappingProjection` that computes its @@ -700,9 +582,9 @@ (or the `concatenate_queries_node ` if `concatenate_queries ` attribute is True) are passed through a `MappingProjection` that computes the distance between the corresponding input (query) and each memory (key) for the corresponding field, - the result of which is possed to the corresponding `match_node `. By default, the - distance is computed as the normalized dot product (i.e., between the normalized query vector and the normalized - key for the corresponding `field `, that is comparable to using cosine similarity). However, + the result of which is possed to the corresponding `match_node `. By default, the distance + is computed as the normalized dot product (i.e., between the normalized query vector and the normalized key for the + corresponding `field `, that is comparable to using cosine similarity). However, if `normalize_memories ` is set to False, just the raw dot product is computed. The distance can also be customized by specifying a different `function ` for the `MappingProjection` to the `match_node `. The result is assigned as the `value @@ -751,9 +633,12 @@ `gain ` parameter; if None is specified, the default value of the `Softmax` Function is used as the `gain ` parameter (see `Softmax_Gain ` for additional details). +.. _EMComposition_Retreived_Values: + * **Retrieve values by field**. The vector of softmax weights for each memory generated by the `softmax_node ` is passed through the Projections to the each of the `retrieved_nodes - ` to compute the retrieved value for each field. 
+    ` to compute the retrieved value for each field, which is assigned as the value
+    of the corresponding `retrieved_node `.

* **Decay memories**. If `memory_decay ` is True, then each of the memories is decayed
  by the amount specified in `memory_decay_rate `.

@@ -768,19 +653,19 @@

.. _EMComposition_Storage:

-* **Store memories**. After the values have been retrieved, the inputs to for each field (i.e., values in the
-  `query_input_nodes ` and `value_input_nodes `)
-  are added by the `storage_node ` as a new entry in `memory `,
-  replacing the weakest one if `memory_capacity ` has been reached.
+* **Store memories**. After the values have been retrieved, the `storage_node `
+  adds the inputs to each field (i.e., values in the `query_input_nodes ` and
+  `value_input_nodes `) as a new entry in `memory `,
+  replacing the weakest one. The weakest memory is the one with the lowest norm, multiplied by its `field_weight
+  ` if `purge_by_field_weight ` is True.

  .. technical_note::
-    This is done by adding the input vectors to the the corresponding rows of the `matrix `
-    of the `MappingProjection` from the `combined_matches_node ` to each
-    of the `retrieved_nodes `, as well as the `matrix `
-    parameter of the `MappingProjection` from each `query_input_node ` to the
-    corresponding `match_node ` (see note `above ` for
-    additional details). If `memory_capacity ` has been reached, then the weakest
-    memory (i.e., the one with the lowest norm across all fields) is replaced by the new memory.
+    The norm of each entry is calculated by adding the input vectors to the corresponding rows of
+    the `matrix ` of the `MappingProjection` from the `combined_matches_node
+    ` to each of the `retrieved_nodes `,
+    as well as the `matrix ` parameter of the `MappingProjection` from each
+    `query_input_node ` to the corresponding `match_node
+    ` (see note `above ` for additional details).

 COMMENT:
 FROM CodePilot: (OF HISTORICAL INTEREST?)

@@ -798,25 +683,24 @@

*Training*
~~~~~~~~~~

-If `learn ` is called, `enable_learning ` is True or a list with
-any True entries, then errors will be computed for each of the `retrieved_nodes `
-that is specified for learning (see `Learning ` for details about specification). These errors
-are derived either from any errors backprpated to the EMComposition from an outer Composition in which it is `nested
-`, or locally by the difference between the `retrieved_nodes `
-and the `target_nodes ` that are created for each of the `retrieved_nodes
-` that do not project to an outer Composition. These errors are then backpropagated
-through the EMComposition to the `query_input_nodes ` and `value_input_nodes
-`, and on to any nodes that project to it from a composition in which the
-EMComposition is `nested `.

-If `learn_field_weights ` is also True, then the `field_weights
-` are modified to minimize the error passed to the EMComposition retrieved nodes, using the
-`learning_rate ` specified in the `learning_rate ` attribute.
-If `learn_field_weights ` is False (or `run ` is called), then the
-`field_weights ` are not modified and the EMComposition is simply executed
-without any modification, and error signals are passed to the nodes that project to its `query_input_nodes
-` and `value_input_nodes `.
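A minimal sketch of the difference between running and training, assuming the constructor and methods described
in this document (the template shape and input values are illustrative)::

    import psyneulink as pnl

    em = pnl.EMComposition(memory_template=(4, 2, 3), enable_learning=True)

    inputs = {em.query_input_nodes[0]: [[1, 0, 0]],
              em.value_input_nodes[0]: [[0, 1, 0]]}

    # run() executes retrieval and storage only; no parameters are modified
    result = em.run(inputs=inputs)

    # learn() additionally backpropagates errors (and, if specified in
    # learn_field_weights, trains the field_weights); e.g.:
    #   em.learn(inputs=inputs, execution_mode=pnl.ExecutionMode.PyTorch)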
These errors are derived either from any +errors backprpated to the EMComposition from an outer Composition in which it is `nested `, +or locally by the difference between the `retrieved_nodes ` and the `target_nodes +` that are created for each of the `retrieved_nodes ` +that do not project to an outer Composition. These errors are then backpropagated through the EMComposition to the +`query_input_nodes ` and `value_input_nodes `, +and on to any nodes that project to it from a composition in which the EMComposition is `nested `. + +If `learn_field_weights` is also specified, then the corresponding `field_weights ` are +modified to minimize the error passed to the EMComposition retrieved nodes that have been specified for learning, +using the `learning_rate ` for them in `learn_field_weights +` or the default `learning rate ` for the EMComposition. +If `enable_learning ` is False (or `run ` is called rather than +`learn `, then the `field_weights ` are not modified, and no error +signals are passed to the nodes that project to its `query_input_nodes ` and +`value_input_nodes `. .. note:: The only parameters modifable by learning in the EMComposition are its `field_weights @@ -827,7 +711,7 @@ Although memory storage is implemented as a form of learning (though modification of MappingProjection `matrix ` parameters; see `memory storage `), this occurs irrespective of how EMComposition is run (i.e., whether `learn ` or `run - ` is called), and is not affected by the `learn_field_weights ` + ` is called), and is not affected by the `enable_learning ` or `learning_rate ` attributes, which pertain only to whether the `field_weights ` are modified during learning. Furthermore, when run in PyTorch mode, storage is executed after the forward() and backward() passes are complete, and is not considered as part of the @@ -898,7 +782,7 @@ >>> em = EMComposition(memory_template=(4,2,5)) both of which create a memory with 4 entries, each with 2 fields of length 5. 
The contents of `memory -` can be inspected using the `memory ` attribute:: +` can be inspected using the `memory ` attribute:: >>> em.memory [[array([0., 0., 0., 0., 0.]), array([0., 0., 0., 0., 0.])], @@ -1038,7 +922,6 @@ --------------- """ import numpy as np -import graph_scheduler as gs import warnings import psyneulink.core.scheduling.condition as conditions @@ -1066,12 +949,20 @@ from psyneulink.core.llvm import ExecutionMode -__all__ = ['EMComposition', 'EMCompositionError', 'WEIGHTED_AVG', 'PROBABILISTIC'] +__all__ = ['EMComposition', 'EMCompositionError', 'FIELD_WEIGHT', 'LEARN_FIELD_WEIGHT', + 'PROBABILISTIC', 'TARGET_FIELD','WEIGHTED_AVG'] +# softmax_choice options: STORAGE_PROB = 'storage_prob' WEIGHTED_AVG = ALL PROBABILISTIC = PROB_INDICATOR +# specs for entry of fields specification dict +FIELD_WEIGHT = 'field_weight' +LEARN_FIELD_WEIGHT = 'learn_field_weight' +TARGET_FIELD = 'target_field' + +# Node names QUERY_NODE_NAME = 'QUERY' QUERY_AFFIX = f' [{QUERY_NODE_NAME}]' VALUE_NODE_NAME = 'VALUE' @@ -1123,23 +1014,26 @@ def field_weights_setter(field_weights, owning_component=None, context=None): raise EMCompositionError(f"The number of field_weights ({len(field_weights)}) must match the number of fields " f"{len(owning_component.field_weights)}") if owning_component.normalize_field_weights: - field_weights = field_weights / np.sum(field_weights) + denominator = np.sum(np.where(field_weights is not None, field_weights, 0)) + field_weights = [fw / denominator if fw is not None else None for fw in field_weights] + + # Assign new fields_weights to default_variable of field_weight_nodes field_wt_node_idx = 0 # Needed since # of field_weight_nodes may be less than # of fields + # and now way to know if user has assigned a value where there used to be a None for i, field_weight in enumerate(field_weights): - # Check if original value was 0 (i.e., a value node), in which case disallow change - if not owning_component.parameters.field_weights.default_value[i]: + # Check if original value was None (i.e., a value node), in which case disallow change + if owning_component.parameters.field_weights.default_value[i] is None: if field_weight: raise EMCompositionError(f"Field '{owning_component.field_names[i]}' of '{owning_component.name}' " - f"was originally assigned as a value node (i.e., with a field_weight = 0); " + f"was originally assigned as a value node (i.e., with a field_weight = None); " f"this cannot be changed after construction. If you want to change it to a " - f"key field, you must re-construct the EMComposition using a non-zero value " - f"for its field in the `field_weights` arg, " - f"which can then be changed to 0 after construction.") + f"key field, you must re-construct the EMComposition using a scalar " + f"for its field in the `field_weights` arg (including 0.") continue owning_component.field_weight_nodes[field_wt_node_idx].input_port.defaults.variable = field_weights[i] owning_component.field_weights[i] = field_weights[i] field_wt_node_idx += 1 - return field_weights + return np.array(field_weights) def get_softmax_gain(v, scale=1, base=1, entropy_weighting=.1)->float: """Compute the softmax gain (inverse temperature) based on the entropy of the distribution of values. 
@@ -1166,17 +1060,19 @@ class EMComposition(AutodiffComposition): memory_template=[[0],[0]], \ memory_fill=0, \ memory_capacity=None, \ + fields=None, \ + field_names=None, \ field_weights=None, \ + learn_field_weights=False, \ + learning_rate=True, \ normalize_field_weights=True, \ - field_names=None, \ concatenate_queries=False, \ normalize_memories=True, \ softmax_gain=THRESHOLD, \ storage_prob=1.0, \ memory_decay_rate=AUTO, \ enable_learning=True, \ - learn_field_weights=True, \ - learning_rate=True, \ + target_fields=None, \ use_gating_for_weighting=False, \ name="EM_Composition" \ ) @@ -1190,49 +1086,67 @@ class EMComposition(AutodiffComposition): --------- memory_template : tuple, list, 2d or 3d array : default [[0],[0]] - specifies the shape of an item to be stored in the EMComposition's memory; - see `memory_template ` for details. + specifies the shape of an item to be stored in the EMComposition's memory + (see `memory_template ` for details). memory_fill : scalar or tuple : default 0 - specifies the value used to fill the memory when it is initialized; - see `memory_fill ` for details. + specifies the value used to fill the memory when it is initialized + (see `memory_fill ` for details). memory_capacity : int : default None specifies the number of items that can be stored in the EMComposition's memory; - see `memory_capacity ` for details. + (see `memory_capacity ` for details). + + fields : dict[tuple[field weight, learning specification]] : default None + each key must a string that is the name of a field, and its value a dict or tuple that specifies that field's + `field_weight `, `learn_field_weights `, and + `target_fields ` specifications (see `fields ` for details + of specificaton format). The **fields** arg replaces the **field_names**, **field_weights** + **learn_field_weights**, and **target_fields** arguments, and specifying any of these raises an error. + + field_names : list or tuple : default None + specifies the names assigned to each field in the memory_template (see `field names ` + for details). If the **fields** argument is specified, this is not necessary and specifying raises an error. + + field_weights : list or tuple : default (1,0) + specifies the relative weight assigned to each key when matching an item in memory (see `field weights + ` for additional details). If the **fields** argument is specified, this + is not necessary and specifying raises an error. - field_weights : tuple : default (1,0) - specifies the relative weight assigned to each key when matching an item in memory; - see `field weights ` for additional details. + learn_field_weights : bool or list[bool, int, float]: default False + specifies whether the `field_weights ` are learnable and, if so, optionally what + the learning_rate is for each field (see `learn_field_weights ` for + specifications). If the **fields** argument is specified, this is not necessary and specifying raises an error. + + learning_rate : float : default .01 + specifies the default learning_rate for `field_weights ` not + specified in `learn_field_weights ` (see `learning_rate + ` for additional details). normalize_field_weights : bool : default True specifies whether the **fields_weights** are normalized over the number of keys, or used as absolute - weighting values when retrieving an item from memory; see `normalize_field weights - ` for additional details. - - field_names : list : default None - specifies the optional names assigned to each field in the memory_template; - see `field names ` for details. 
+ weighting values when retrieving an item from memory (see `normalize_field weights + ` for additional details). concatenate_queries : bool : default False specifies whether to concatenate the keys into a single field before matching them to items in - the corresponding fields in memory; see `concatenate keys ` for details. + the corresponding fields in memory (see `concatenate keys ` for details). normalize_memories : bool : default True - specifies whether keys and memories are normalized before computing their dot product (similarity); - see `Match memories by field ` for additional details. + specifies whether keys and memories are normalized before computing their dot product (similarity) + (see `Match memories by field ` for additional details). softmax_gain : float, ADAPTIVE or CONTROL : default 1.0 - specifies the temperature used for softmax normalizing the distance of queries and keys in memory; - see `Softmax normalize matches over fields ` for additional details. + specifies the temperature used for softmax normalizing the distance of queries and keys in memory + (see `Softmax normalize matches over fields ` for additional details). softmax_threshold : float : default .0001 - specifies the threshold used to mask out small values in the softmax calculation; + specifies the threshold used to mask out small values in the softmax calculation see *mask_threshold* under `Thresholding and Adaptive Gain ` for details). softmax_choice : WEIGHTED_AVG, ARG_MAX, PROBABILISTIC : default WEIGHTED_AVG - specifies how the softmax over distances of queries and keys in memory is used for retrieval; - see `softmax_choice ` for a description of each option. + specifies how the softmax over distances of queries and keys in memory is used for retrieval + (see `softmax_choice ` for a description of each option). storage_prob : float : default 1.0 specifies the probability that an item will be stored in `memory ` @@ -1240,23 +1154,23 @@ class EMComposition(AutodiffComposition): additional details). memory_decay_rate : float : AUTO - specifies the rate at which items in the EMComposition's memory decay; - see `memory_decay_rate ` for details. + specifies the rate at which items in the EMComposition's memory decay + (see `memory_decay_rate ` for details). - enable_learning : bool or list[bool]: default True - specifies whether a learning pathway is constructed for each `field ` - of the EMComposition. If it is a list, each item must be ``True`` or ``False`` and the number of items - must be equal to the number of `fields specified; see `enable_learning - ` for additional details. + purge_by_field_weights : bool : False + specifies whether `fields_weights ` are used to determine which memory to + replace when a new one is stored (see `purge_by_field_weight ` for details). - learn_field_weights : bool : default True - specifies whether `field_weights ` are learnable during training; - requires **enable_learning** to be True to have any effect, and **use_gating_for_weighting** must be False; - see `learn_field_weights ` for additional details. + enable_learning : bool : default True + specifies whether learning is enabled for the EMCComposition (see `Learning ` + for additional details); **use_gating_for_weighting** must be False. - learning_rate : float : default .01 - specifies rate at which `field_weights ` are learned - if `learn_field_weights ` is True. + target_fields : list[bool]: default None + specifies whether a learning pathway is constructed for each `field ` + of the EMComposition. 
If it is a list, each item must be ``True`` or ``False`` and the number of items + must be equal to the number of `fields specified (see `Target Fields + ` for additional details). If the **fields** argument is specified, + this is not necessary and specifying raises an error. # 7/10/24 FIX: STILL TRUE? DOES IT PRECLUDE USE OF EMComposition as a nested Composition?? .. technical_note:: @@ -1275,7 +1189,8 @@ class EMComposition(AutodiffComposition): memory : ndarray 3d array of entries in memory, in which each row (axis 0) is an entry, each column (axis 1) is a field, and - each item (axis 2) is the value for the corresponding field; see `EMComposition_Memory` for additional details. + each item (axis 2) is the value for the corresponding field (see `EMComposition_Memory_Specification` for + additional details). .. note:: This is a read-only attribute; memories can be added to the EMComposition's memory either by @@ -1287,8 +1202,12 @@ class EMComposition(AutodiffComposition): .. _EMComposition_Parameters: memory_capacity : int - determines the number of items that can be stored in `memory `; see `memory_capacity - ` for additional details. + determines the number of items that can be stored in `memory ` + (see `memory_capacity ` for additional details). + + field_names : list[str] + determines which names that can be used to label fields in `memory ` + (see `field_names ` for additional details). field_weights : tuple[float] determines which fields of the input are treated as "keys" (non-zero values) that are used to match entries in @@ -1298,37 +1217,42 @@ class EMComposition(AutodiffComposition): see `field_weights ` additional details. The field_weights can be changed by assigning a new list of weights to the `field_weights ` attribute, however only the weights for fields used as `keys ` can be changed (see - `EMComposition_Field_Weights_Note` for additional details). + `EMComposition_Field_Weights_Change_Note` for additional details). - normalize_field_weights : bool : default True - determines whether `fields_weights ` are normalized over the number of keys, or - used as absolute weighting values when retrieving an item from memory; see `normalize_field weights - ` for additional details. + learn_field_weights : bool or list[bool, int, float] + determines whether the `field_weight ` for each `field + is learnable (see `learn_field_weights ` for additional details). - field_names : list[str] - determines which names that can be used to label fields in `memory `; see - `field_names ` for additional details. + learning_rate : float + determines the default learning_rate for `field_weights ` + not specified in `learn_field_weights ` + (see `learning_rate ` for additional details). + + normalize_field_weights : bool + determines whether `fields_weights ` are normalized over the number of keys, or + used as absolute weighting values when retrieving an item from memory (see `normalize_field weights + ` for additional details). concatenate_queries : bool determines whether keys are concatenated into a single field before matching them to items in `memory - `; see `concatenate keys ` for additional details. + ` for additional details). normalize_memories : bool - determines whether keys and memories are normalized before computing their dot product (similarity); - see `Match memories by field ` for additional details. + determines whether keys and memories are normalized before computing their dot product (similarity) + (see `Match memories by field ` for additional details). 
softmax_gain : float, ADAPTIVE or CONTROL - determines gain (inverse temperature) used for softmax normalizing the summed distances of queries and keys in - memory by the `SoftMax` Function of the `softmax_node `; see `Softmax normalize - distances ` for additional details. + determines gain (inverse temperature) used for softmax normalizing the summed distances of queries + and keys in memory by the `SoftMax` Function of the `softmax_node ` + (see `Softmax normalize distances ` for additional details). softmax_threshold : float - determines the threshold used to mask out small values in the softmax calculation; - see *mask_threshold* under `Thresholding and Adaptive Gain ` for details). + determines the threshold used to mask out small values in the softmax calculation + (see *mask_threshold* under `Thresholding and Adaptive Gain ` for details). softmax_choice : WEIGHTED_AVG, ARG_MAX or PROBABILISTIC - determines how the softmax over distances of queries and keys in memory is used for retrieval; - see `softmax_choice ` for a description of each option. + determines how the softmax over distances of queries and keys in memory is used for retrieval + (see `softmax_choice ` for a description of each option). storage_prob : float determines the probability that an item will be stored in `memory ` @@ -1336,26 +1260,20 @@ class EMComposition(AutodiffComposition): additional details). memory_decay_rate : float - determines the rate at which items in the EMComposition's memory decay (see `memory_decay_rate - ` for details). - - enable_learning : bool or list[bool] - determines whether `learning ` is enabled for the EMComposition, allowing any error - received by the `retrieved_nodes ` to be propagated to the corresponding - `query_input_nodes ` and `value_input_nodes - `, and on to any `Nodes ` that project to them. - If True, learning is enabled for all fields and if False learning is disabled for all fields; If it is a - list, then each entry specifies whether learning is enabled or disabled for the corresponding field - see `Learning ` and `Fields ` for additional details. - - learn_field_weights : bool - determines whether `field_weights ` are learnable during training; - requires `enable_learning ` to be True or a list with at least one True - entry for the corresponding field; see `Learning ` for additional details. + determines the rate at which items in the EMComposition's memory decay + (see `memory_decay_rate ` for details). - learning_rate : float - determines whether the rate at which `field_weights ` are learned - if `learn_field_weights` is True; see `Learning ` for additional details. + purge_by_field_weights : bool + determines whether `fields_weights ` are used to determine which memory to + replace when a new one is stored (see `purge_by_field_weight ` for details). + + enable_learning : bool + determines whether learning is enabled for the EMCComposition + (see `Learning ` for additional details). + + target_fields : list[bool] + determines which fields convey error signals during learning + (see `Target Fields ` for additional details). .. _EMComposition_Nodes: @@ -1394,7 +1312,7 @@ class EMComposition(AutodiffComposition): as the corresponding `query_input_nodes `. weighted_match_nodes : list[ProcessingMechanism] - `ProcessingMechanisms ` that combine the `field weight ` + `ProcessingMechanisms ` that combine the `field weight ` for each `key field ` with the dot product computed by the corresponding the `match_node `. 
These are only implemented if `use_gating_for_weighting ` is False (see `Weight distances ` @@ -1426,7 +1344,7 @@ class EMComposition(AutodiffComposition): ` (see `Retrieve values by field ` for additional details). These are assigned the same names as the `query_input_nodes ` and `value_input_nodes ` to which they correspond appended with the suffix - * [RETRIEVED]*, and are in the same order as `input_nodes_by_fields ` + * [RETRIEVED]*, and are in the same order as `input_nodes ` to which to which they correspond. storage_node : EMStorageMechanism @@ -1441,13 +1359,13 @@ class EMComposition(AutodiffComposition): any subequent processing is done (i.e., in a composition in which the EMComposition may be embededded. input_nodes : list[ProcessingMechanism] - Full list of `INPUT ` `Nodes ` ordered with query_input_nodes first - followed by value_input_nodes; used primarily for internal computations - - input_nodes_by_fields : list[ProcessingMechanism] Full list of `INPUT ` `Nodes ` in the same order specified in the **field_names** argument of the constructor and in `self.field_names `. + query_and_value_input_nodes : list[ProcessingMechanism] + Full list of `INPUT ` `Nodes ` ordered with query_input_nodes first + followed by value_input_nodes; used primarily for internal computations. + """ componentCategory = EM_COMPOSITION @@ -1472,7 +1390,7 @@ class Parameters(AutodiffComposition.Parameters): see `enable_learning ` :default value: True - :type: ``bool`` or ``list`` + :type: ``bool`` field_names see `field_names ` @@ -1486,18 +1404,18 @@ class Parameters(AutodiffComposition.Parameters): :default value: None :type: ``numpy.ndarray`` + learn_field_weights + see `learn_field_weights ` + + :default value: True + :type: ``numpy.ndarray`` + learning_rate see `learning_results ` :default value: [] :type: ``list`` - learn_field_weights - see `learn_field_weights ` - - :default value: True - :type: ``bool`` - memory see `memory ` @@ -1534,6 +1452,12 @@ class Parameters(AutodiffComposition.Parameters): :default value: True :type: ``bool`` + purge_by_field_weights + see `purge_by_field_weights ` + + :default value: False + :type: ``bool`` + random_state see `random_state ` @@ -1564,9 +1488,11 @@ class Parameters(AutodiffComposition.Parameters): memory = Parameter(None, loggable=True, getter=_memory_getter, read_only=True) memory_template = Parameter([[0],[0]], structural=True, valid_types=(tuple, list, np.ndarray), read_only=True) memory_capacity = Parameter(1000, structural=True) - field_weights = Parameter(None, setter=field_weights_setter) - normalize_field_weights = Parameter(True) field_names = Parameter(None, structural=True) + field_weights = Parameter([1], setter=field_weights_setter) + learn_field_weights = Parameter(False, structural=True) + learning_rate = Parameter(.001, modulable=True) + normalize_field_weights = Parameter(True) concatenate_queries = Parameter(False, structural=True) normalize_memories = Parameter(True) softmax_gain = Parameter(1.0, modulable=True) @@ -1574,9 +1500,9 @@ class Parameters(AutodiffComposition.Parameters): softmax_choice = Parameter(WEIGHTED_AVG, modulable=False, specify_none=True) storage_prob = Parameter(1.0, modulable=True, aliases=[MULTIPLICATIVE_PARAM]) memory_decay_rate = Parameter(AUTO, modulable=True) + purge_by_field_weights = Parameter(False, structural=True) enable_learning = Parameter(True, structural=True) - learn_field_weights = Parameter(True, structural=True) - learning_rate = Parameter(.001, modulable=True) + target_fields = Parameter(None, 
read_only=True, structural=True) random_state = Parameter(None, loggable=False, getter=_random_state_getter, dependencies='seed') seed = Parameter(DEFAULT_SEED(), modulable=True, setter=_seed_setter) @@ -1597,27 +1523,29 @@ def _validate_memory_template(self, memory_template): else: return f"must be tuple of length 2 or 3, or a list or array that is either 2 or 3d." + def _validate_field_names(self, field_names): + if field_names and not all(isinstance(item, str) for item in field_names): + return f"must be a list of strings." + def _validate_field_weights(self, field_weights): if field_weights is not None: if not np.atleast_1d(field_weights).ndim == 1: return f"must be a scalar, list of scalars, or 1d array." - if any([field_weight < 0 for field_weight in field_weights]): + if len(field_weights) == 1 and field_weights[0] is None: + raise EMCompositionError(f"must be a scalar, since there is only one field specified.") + if any([field_weight < 0 for field_weight in field_weights if field_weight is not None]): return f"must be all be positive values." def _validate_normalize_field_weights(self, normalize_field_weights): if not isinstance(normalize_field_weights, bool): return f"must be all be a boolean value." - def _validate_field_names(self, field_names): - if field_names and not all(isinstance(item, str) for item in field_names): - return f"must be a list of strings." - - def _validate_enable_learning(self, enable_learning): - if isinstance(enable_learning, list): - if not all(isinstance(item, bool) for item in enable_learning): - return f"can only contains bools as entries." - elif not isinstance(enable_learning, bool): - return f"must be a bool or list of bools." + def _validate_learn_field_weights(self, learn_field_weights): + if isinstance(learn_field_weights, (list, np.ndarray)): + if not all(isinstance(item, (bool, int, float)) for item in learn_field_weights): + return f"can only contains bools, ints or floats as entries." + elif not isinstance(learn_field_weights, bool): + return f"must be a bool or list of bools, ints and/or floats." 
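    # Editorial note -- illustrative (hypothetical) specifications and how the
    # validation above treats them; a sketch, not part of the validation logic:
    #   learn_field_weights=True               -> valid (all field_weights learnable)
    #   learn_field_weights=[True, .01, False] -> valid (per-field bool or learning_rate)
    #   learn_field_weights=['yes', 1]         -> invalid (entries must be bool, int or float)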
def _validate_memory_decay_rate(self, memory_decay_rate): if memory_decay_rate is None or memory_decay_rate == AUTO: @@ -1642,8 +1570,11 @@ def __init__(self, memory_template:Union[tuple, list, np.ndarray]=[[0],[0]], memory_capacity:Optional[int]=None, memory_fill:Union[int, float, tuple, RANDOM]=0, + fields:Optional[dict]=None, field_names:Optional[list]=None, - field_weights:tuple=None, + field_weights:Union[int,float,list,tuple]=None, + learn_field_weights:Union[bool,list,tuple]=None, + learning_rate:float=None, normalize_field_weights:bool=True, concatenate_queries:bool=False, normalize_memories:bool=True, @@ -1652,9 +1583,9 @@ def __init__(self, softmax_choice:Optional[Union[WEIGHTED_AVG, ARG_MAX, PROBABILISTIC]]=WEIGHTED_AVG, storage_prob:float=1.0, memory_decay_rate:Union[float,AUTO]=AUTO, - enable_learning:Union[bool,list]=True, - learn_field_weights:bool=True, - learning_rate:float=None, + purge_by_field_weights:bool=False, + enable_learning:bool=True, + target_fields:Optional[Union[list, tuple, np.ndarray]]=None, use_storage_node:bool=True, use_gating_for_weighting:bool=False, random_state=None, @@ -1665,17 +1596,30 @@ def __init__(self, # Construct memory -------------------------------------------------------------------------------- memory_fill = memory_fill or 0 # FIX: GET RID OF THIS ONCE IMPLEMENTED AS A Parameter - self._validate_memory_specs(memory_template, memory_capacity, memory_fill, field_weights, field_names, name) + self._validate_memory_specs(memory_template, + memory_capacity, + memory_fill, + field_weights, + field_names, + name) + memory_template, memory_capacity = self._parse_memory_template(memory_template, memory_capacity, memory_fill) - field_weights, field_names, concatenate_queries = self._parse_fields(field_weights, - normalize_field_weights, - field_names, - concatenate_queries, - normalize_memories, - learning_rate, - name) + (field_names, + field_weights, + learn_field_weights, + target_fields, + concatenate_queries) = self._parse_fields(fields, + field_names, + field_weights, + learn_field_weights, + learning_rate, + normalize_field_weights, + concatenate_queries, + normalize_memories, + target_fields, + name) if memory_decay_rate is AUTO: memory_decay_rate = 1 / memory_capacity @@ -1690,27 +1634,29 @@ def __init__(self, super().__init__(name=name, memory_template = memory_template, memory_capacity = memory_capacity, - field_weights = field_weights, field_names = field_names, + field_weights = field_weights, + learn_field_weights=learn_field_weights, + learning_rate = learning_rate, normalize_field_weights = normalize_field_weights, concatenate_queries = concatenate_queries, + normalize_memories = normalize_memories, softmax_gain = softmax_gain, softmax_threshold = softmax_threshold, softmax_choice = softmax_choice, storage_prob = storage_prob, memory_decay_rate = memory_decay_rate, - normalize_memories = normalize_memories, - enable_learning=enable_learning, - learn_field_weights = learn_field_weights, - learning_rate = learning_rate, + purge_by_field_weights = purge_by_field_weights, + enable_learning = enable_learning, + target_fields = target_fields, random_state = random_state, seed = seed, **kwargs ) - self._validate_options_with_learning(enable_learning, + self._validate_options_with_learning(learn_field_weights, use_gating_for_weighting, - learn_field_weights, + enable_learning, softmax_choice) self._construct_pathways(self.memory_template, @@ -1724,8 +1670,8 @@ def __init__(self, self.storage_prob, self.memory_decay_rate, 
self._use_storage_node,
-                                  self.enable_learning,
                                   self.learn_field_weights,
+                                  self.enable_learning,
                                   self._use_gating_for_weighting)
 
         # if torch_available:
@@ -1767,7 +1713,7 @@ def __init__(self,
        #     self.scheduler.add_condition(self.storage_node, conditions.AllHaveRun(*self.retrieved_nodes))
        #  # Generates the desired execution set for a single pass, and runs with expected results,
-       # but generates warning messages for every node of the following sort:
+       # but raises warning messages for every node of the following sort:
        #     /Users/jdc/PycharmProjects/PsyNeuLink/psyneulink/core/scheduling/scheduler.py:120:
        #     UserWarning: BeforeNCalls((EMStorageMechanism STORAGE MECHANISM), 1) is dependent on
        #     (EMStorageMechanism STORAGE MECHANISM), but you are assigning (EMStorageMechanism STORAGE MECHANISM)
@@ -1827,7 +1773,7 @@ def _validate_memory_specs(self, memory_template, memory_capacity, memory_fill,
             for entry in memory_template:
                 if not (len(entry) == num_fields
                         and np.all([len(entry[i]) == len(memory_template[0][i]) for i in range(num_fields)])):
-                    raise EMCompositionError(f"The 'memory_template' arg for {self.name} must specify a list "
+                    raise EMCompositionError(f"The 'memory_template' arg for {name} must specify a list "
                                              f"or 2d array that has the same shape for all entries.")
 
         # Validate memory_fill specification (int, float, or tuple with two scalars)
@@ -1837,24 +1783,35 @@ def _validate_memory_specs(self, memory_template, memory_capacity, memory_fill,
             raise EMCompositionError(f"The 'memory_fill' arg ({memory_fill}) specified for {name} "
                                      f"must be a float, int or 2-item tuple of ints and/or floats.")
 
-        # If enable_learning is a list of bools, it must match the len of 1st dimension (axis 0) of memory_template:
-        if isinstance(self.enable_learning, list) and len(self.enable_learning) != num_fields:
-            raise EMCompositionError(f"The number of items ({len(self.enable_learning)}) in the 'enable_learning' arg "
-                                     f"for {name} must match the number of fields in memory "
-                                     f"({num_fields}).")
+        # If learn_field_weights is a list of bools, it must match the len of 1st dimension (axis 0) of memory_template:
+        if isinstance(self.learn_field_weights, list) and len(self.learn_field_weights) != num_fields:
+            raise EMCompositionError(f"The number of items ({len(self.learn_field_weights)}) in the "
+                                     f"'learn_field_weights' arg for {name} must match the number of "
+                                     f"fields in memory ({num_fields}).")
+
+        _field_wts = np.atleast_1d(field_weights)
+        _field_wts_len = len(_field_wts)
 
         # If len of field_weights > 1, must match the len of 1st dimension (axis 0) of memory_template:
-        field_weights_len = len(np.atleast_1d(field_weights))
-        if field_weights is not None and field_weights_len > 1 and field_weights_len != num_fields:
-            raise EMCompositionError(f"The number of items ({field_weights_len}) in the 'field_weights' arg "
-                                     f"for {name} must match the number of items in an entry of memory "
-                                     f"({num_fields}).")
+        if field_weights is not None:
+            if (_field_wts_len > 1 and _field_wts_len != num_fields):
+                raise EMCompositionError(f"The number of items ({_field_wts_len}) in the 'field_weights' arg "
+                                         f"for {name} must match the number of items in an entry of memory "
+                                         f"({num_fields}).")
+            # Deal with this here instead of Parameter._validate_field_weights since this is called before super()
+            if all([fw is None for fw in _field_wts]):
+                raise EMCompositionError(f"The entries in 'field_weights' arg for {name} can't all be 'None' "
+                                         f"since that will preclude the construction of any keys.")
+            if all([fw in {0, 
None} for fw in _field_wts]): + warnings.warn(f"All of the entries in the 'field_weights' arg for {name} are either None or " + f"set to 0; this will result in no retrievals unless/until the 0(s) is(are) changed " + f"to a positive value.") # If field_names has more than one value it must match the first dimension (axis 0) of memory_template: if field_names and len(field_names) != num_fields: raise EMCompositionError(f"The number of items ({len(field_names)}) " f"in the 'field_names' arg for {name} must match " - f"the number of fields ({field_weights_len}).") + f"the number of fields ({_field_wts_len}).") def _parse_memory_template(self, memory_template, memory_capacity, memory_fill)->(np.ndarray,int): """Construct memory from memory_template and memory_fill @@ -1943,15 +1900,55 @@ def _construct_entries(entry_template, num_entries, memory_fill=None)->np.ndarra return memory, memory_capacity def _parse_fields(self, + fields, + field_names, field_weights, + learn_field_weights, + learning_rate, normalize_field_weights, - field_names, concatenate_queries, normalize_memories, - learning_rate, - name): + target_fields, + name)->(list, list, list, bool): + + def _parse_fields_dict(name, fields, num_fields)->(list,list,list,list): + """Parse fields dict into field_names, field_weights, learn_field_weights, and target_fields""" + if len(fields) != num_fields: + raise EMCompositionError(f"The number of entries ({len(fields)}) in the dict specified in the 'fields' " + f"arg of '{name}' does not match the number of fields in its memory " + f"({self.num_fields}).") + field_names = [None] * num_fields + field_weights = [None] * num_fields + learn_field_weights = [None] * num_fields + target_fields = [None] * num_fields + for i, field_name in enumerate(fields): + field_names[i] = field_name + if isinstance(fields[field_name], (tuple, list)): + # field specified as tuple or list + field_weights[i] = fields[field_name][0] + learn_field_weights[i] = fields[field_name][1] + target_fields[i] = fields[field_name][2] + elif isinstance(fields[field_name], dict): + # field specified as dict + field_weights[i] = fields[field_name][FIELD_WEIGHT] + learn_field_weights[i] = fields[field_name][LEARN_FIELD_WEIGHT] + target_fields[i] = fields[field_name][TARGET_FIELD] + else: + raise EMCompositionError(f"Unrecognized specification for field '{field_name}' in the 'fields' " + f"arg of '{name}'; it must be a tuple, list or dict.") + return field_names, field_weights, learn_field_weights, target_fields - num_fields = len(self.entry_template) + self.num_fields = len(self.entry_template) + + if fields: + # If a fields dict has been specified, use that to assign field_names, field_weights & learn_field_weights + if any([field_names, field_weights, learn_field_weights, target_fields]): + warnings.warn(f"The 'fields' arg for '{name}' was specified, so any of the 'field_names', " + f"'field_weights', 'learn_field_weights' or 'target_fields' args will be ignored.") + (field_names, + field_weights, + learn_field_weights, + target_fields) = _parse_fields_dict(name, fields, self.num_fields) # Deal with default field_weights if field_weights is None: @@ -1959,36 +1956,43 @@ def _parse_fields(self, field_weights = [1] else: # Default is to treat all fields as keys except the last one, which is the value - field_weights = [1] * num_fields - field_weights[-1] = 0 + field_weights = [1] * self.num_fields + field_weights[-1] = None field_weights = np.atleast_1d(field_weights) - # Fill out field_weights, normalizing if specified: - if 
len(field_weights) == 1:
-            if normalize_field_weights:
-                parsed_field_weights = np.repeat(field_weights / np.sum(field_weights), len(self.entry_template))
-            else:
-                parsed_field_weights = np.repeat(field_weights[0], len(self.entry_template))
+        if normalize_field_weights and not all([fw == 0 for fw in field_weights]):    # noqa: E127
+            fld_wts_0s_for_Nones = [fw if fw is not None else 0 for fw in field_weights]
+            parsed_field_weights = fld_wts_0s_for_Nones / np.sum(fld_wts_0s_for_Nones)
+            parsed_field_weights = [pfw if fw is not None else None
+                                    for pfw, fw in zip(parsed_field_weights, field_weights)]
         else:
-            if normalize_field_weights:
-                parsed_field_weights = np.array(field_weights) / np.sum(field_weights)
-            else:
-                parsed_field_weights = field_weights
+            parsed_field_weights = field_weights
+
+        # If only one field_weight was specified, but there is more than one field,
+        # repeat the single weight for each field
+        if len(field_weights) == 1 and self.num_fields > 1:
+            parsed_field_weights = np.repeat(parsed_field_weights, self.num_fields)
+
+        # Make sure field_weight learning was not specified for any value fields (since they don't have field_weights)
+        if isinstance(learn_field_weights, (list, tuple, np.ndarray)):
+            for i, lfw in enumerate(learn_field_weights):
+                if parsed_field_weights[i] is None and lfw is not False:
+                    warnings.warn(f"Learning was specified for field '{field_names[i]}' in the 'learn_field_weights' "
+                                  f"arg for '{name}', but it is not allowed for value fields; it will be ignored.")
 
         # Memory structure Parameters
         parsed_field_names = field_names.copy() if field_names is not None else None
 
         # Set memory field attributes
-        self.num_fields = len(self.entry_template)
-        keys_weights = [i for i in parsed_field_weights if i != 0]
+        keys_weights = [i for i in parsed_field_weights if i is not None]
         self.num_keys = len(keys_weights)
 
         # Get indices of field_weights that specify keys and values:
-        self.key_indices = np.flatnonzero(parsed_field_weights)
+        self.key_indices = [i for i, pfw in enumerate(parsed_field_weights) if pfw is not None]
         assert len(self.key_indices) == self.num_keys, \
            f"PROGRAM ERROR: number of keys ({self.num_keys}) does not match number of " \
            f"non-None entries in field_weights ({len(self.key_indices)})."
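+        # e.g., parsed_field_weights = [.33, None, .67] -> key_indices = [0, 2];
+        #       the None (value) fields are collected into value_indices below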
- self.value_indices = np.where(parsed_field_weights==0)[0] + self.value_indices = [i for i, pfw in enumerate(parsed_field_weights) if pfw is None] self.num_values = self.num_fields - self.num_keys assert len(self.value_indices) == self.num_values, \ f"PROGRAM ERROR: number of values ({self.num_values}) does not match number of " \ @@ -2014,7 +2018,6 @@ def _parse_fields(self, # field weights are not all equal and/or # normalize_memories is False and/or # there is only one key - fw_error_msg = nm_error_msg = fw_correction_msg = nm_correction_msg = None if self.num_keys == 1: error_msg = f"there is only one key" correction_msg = "" @@ -2028,8 +2031,17 @@ def _parse_fields(self, warnings.warn(f"The 'concatenate_queries' arg for '{name}' is True but {error_msg}; " f"concatenation will be ignored.{correction_msg}") + # Deal with default target_fields + if target_fields is None: + target_fields = [True] * self.num_fields + self.learning_rate = learning_rate - return parsed_field_weights, parsed_field_names, parsed_concatenate_queries + + return (parsed_field_names, + parsed_field_weights, + learn_field_weights, + target_fields, + parsed_concatenate_queries) def _parse_memory_shape(self, memory_template): """Parse shape of memory_template to determine number of entries and fields""" @@ -2063,8 +2075,8 @@ def _construct_pathways(self, storage_prob, memory_decay_rate, use_storage_node, - enable_learning, learn_field_weights, + enable_learning, use_gating_for_weighting, ): """Construct Nodes and Pathways for EMComposition""" @@ -2076,15 +2088,15 @@ def _construct_pathways(self, # First, construct Nodes of Composition with their Projections self.query_input_nodes = self._construct_query_input_nodes(field_weights) self.value_input_nodes = self._construct_value_input_nodes(field_weights) - self.input_nodes = self.query_input_nodes + self.value_input_nodes + self.query_and_value_input_nodes = self.query_input_nodes + self.value_input_nodes # Get list of nodes in order specified in self.field_names - self.input_nodes_by_fields = [None] * len(field_weights) + self.input_nodes = [None] * len(field_weights) for i in range(self.num_keys): - self.input_nodes_by_fields[self.key_indices[i]] = self.query_input_nodes[i] + self.input_nodes[self.key_indices[i]] = self.query_input_nodes[i] for i in range(self.num_values): - self.input_nodes_by_fields[self.value_indices[i]] = self.value_input_nodes[i] - assert all(self.input_nodes_by_fields), "PROGRAM ERROR: input_nodes_by_fields not fully populated." + self.input_nodes[self.value_indices[i]] = self.value_input_nodes[i] + assert all(self.input_nodes), "PROGRAM ERROR: input_nodes not fully populated." self.concatenate_queries_node = self._construct_concatenate_queries_node(concatenate_queries) self.match_nodes = self._construct_match_nodes(memory_template, memory_capacity, @@ -2118,6 +2130,42 @@ def _construct_pathways(self, assert not self.field_weight_nodes, \ f"PROGRAM ERROR: There should be no field_weight_nodes for concatenated queries." 
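+
+        # Sketch of the map built below (angle-bracketed names are schematic):
+        #     _field_index_map[<input node for field i>] == i
+        #     _field_index_map[<retrieved node for field i and its Projections>] == i
+        #     nodes shared across fields (the concatenated query/match path) map to None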
+ # Create field_index map for nodes and projections + _field_index_map = {} + for i in range(len(self.input_nodes)): + _field_index_map[self.input_nodes[i]] = i + if self._use_storage_node: + _field_index_map[self.storage_node.path_afferents[i]] = i + _field_index_map[self.retrieved_nodes[i]] = i + _field_index_map[self.retrieved_nodes[i].path_afferents[0]] = i + if self.concatenate_queries: + for proj in self.concatenate_queries_node.path_afferents: + _field_index_map[proj] = _field_index_map[proj.sender.owner] + _field_index_map[self.concatenate_queries_node] = None + _field_index_map[self.match_nodes[0]] = None + _field_index_map[self.match_nodes[0].path_afferents[0]] = None + _field_index_map[self.match_nodes[0].efferents[0]] = None + else: + # Input nodes, Projections to storage_node, retrieval Projections and retrieved_nodes + for match_node in self.match_nodes: + field_index = _field_index_map[match_node.path_afferents[0].sender.owner] + # match_node + _field_index_map[match_node] = field_index + # afferent MEMORY Projection + _field_index_map[match_node.path_afferents[0]] = field_index + # efferent Projection to weighted_match_node + _field_index_map[match_node.efferents[0]] = field_index + # weighted_match_node + _field_index_map[match_node.efferents[0].receiver.owner] = field_index + # Projection to combined_matches_node + _field_index_map[match_node.efferents[0].receiver.owner.efferents[0]] = field_index + for field_weight_node in self.field_weight_nodes: + # Weight nodes; + _field_index_map[field_weight_node] = _field_index_map[field_weight_node.efferents[0].receiver.owner] + # Weight Projections; + _field_index_map[field_weight_node.efferents[0]] = _field_index_map[field_weight_node] + self._field_index_map = _field_index_map + # Construct Pathways -------------------------------------------------------------------------------- # LEARNING NOT ENABLED -------------------------------------------------- @@ -2141,8 +2189,8 @@ def _construct_pathways(self, # Query-specific pathways if not self.concatenate_queries: if self.num_keys == 1: - self.add_linear_processing_pathway([self.query_input_nodes[i], - self.match_nodes[i], + self.add_linear_processing_pathway([self.query_input_nodes[0], + self.match_nodes[0], self.softmax_node]) else: for i in range(self.num_keys): @@ -2204,15 +2252,12 @@ def _construct_value_input_nodes(self, field_weights)->list: where i is selected randomly without replacement from (0->memory_capacity) """ - # Get indices of field_weights that specify keys: - value_indices = np.where(field_weights == 0)[0] - - assert len(value_indices) == self.num_values, \ + assert len(self.value_indices) == self.num_values, \ f"PROGRAM ERROR: number of values ({self.num_values}) does not match number of " \ - f"non-zero values in field_weights ({len(value_indices)})." + f"non-zero values in field_weights ({len(self.value_indices)})." 
value_input_nodes = [ProcessingMechanism(
-                         input_shapes=len(self.entry_template[value_indices[i]]),
+                         input_shapes=len(self.entry_template[self.value_indices[i]]),
                               name= f'{self.value_names[i]} [VALUE]')
                            for i in range(self.num_values)]
 
@@ -2271,6 +2316,7 @@ def _construct_match_nodes(self, memory_template, memory_capacity, concatenate_q
                                                   normalize=args[0][NORMALIZE]),
                                  name=f'MEMORY')},
                     name='MATCH')]
+        match_nodes[0]._field_idx = 0
 
         # One node for each key
         else:
@@ -2414,19 +2460,19 @@ def _construct_softmax_node(self, memory_capacity, softmax_gain, softmax_thresho
         return softmax_node
 
     def _validate_options_with_learning(self,
-                                        enable_learning,
-                                        use_gating_for_weighting,
                                         learn_field_weights,
+                                        use_gating_for_weighting,
+                                        enable_learning,
                                         softmax_choice):
-        if use_gating_for_weighting and learn_field_weights:
-            warnings.warn(f"The 'learn_field_weights' option for '{self.name}' cannot be used with "
+        if use_gating_for_weighting and enable_learning:
+            warnings.warn(f"The 'enable_learning' option for '{self.name}' cannot be used with "
                           f"'use_gating_for_weighting' set to True; this will generate an error if its "
                           f"'learn' method is called. Set 'use_gating_for_weighting' to False in order "
                           f"to enable learning of field weights.")
 
         if softmax_choice in {ARG_MAX, PROBABILISTIC} and enable_learning:
             warnings.warn(f"The 'softmax_choice' arg of '{self.name}' is set to '{softmax_choice}' with "
-                          f"'enable_learning' set to True (or a list); this will generate an error if its "
+                          f"'enable_learning' set to True; this will generate an error if its "
                          f"'learn' method is called. Set 'softmax_choice' to WEIGHTED_AVG before learning.")
 
     def _construct_retrieved_nodes(self, memory_template)->list:
@@ -2474,7 +2520,8 @@ def _construct_storage_node(self,
         and from the value_input_node to the retrieved_node for values. The `function `
         of the `EMStorageMechanism` that takes the following arguments:
 
-          - **variable** -- template for an `entry ` in `memory`;
+          - **variable** -- template for an `entry `
+            in `memory`;
 
           - **fields** -- the `input_nodes ` for the corresponding `fields `
            of an `entry ` in `memory `;
@@ -2503,7 +2550,7 @@ def _construct_storage_node(self,
         storage_node = EMStorageMechanism(default_variable=[self.input_nodes[i].value[0]
                                                             for i in range(self.num_fields)],
                                           fields=[self.input_nodes[i] for i in range(self.num_fields)],
-                                          field_types=[0 if weight == 0 else 1 for weight in field_weights],
+                                          field_types=[0 if weight is None else 1 for weight in field_weights],
                                           concatenation_node=concatenate_queries_node,
                                           memory_matrix=memory_template,
                                           learning_signals=learning_signals,
@@ -2519,12 +2566,36 @@ def _set_learning_attributes(self):
         # 7/10/24 FIX: SHOULD THIS ALSO BE CONSTRAINED BY VALUE OF field_weights FOR CORRESPONDING FIELD?
         #              (i.e., if it is zero then not learnable? or is that a valid initial condition?)
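+
+        # Sketch of how the loop below resolves learning rates (values hypothetical):
+        #     learn_field_weights=None or True        -> field_weight Projections use self.learning_rate
+        #     learn_field_weights=.01                 -> all field_weight Projections use .01
+        #     learn_field_weights=[True, False, .01]  -> per-field: default rate, not learnable, .01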
for projection in self.projections:
-            if (projection.sender.owner in self.field_weight_nodes
-                    and self.enable_learning
-                    and self.learn_field_weights):
-                projection.learnable = True
+
+            projection_is_field_weight = projection.sender.owner in self.field_weight_nodes
+
+            if self.enable_learning is False or not projection_is_field_weight:
+                projection.learnable = False
+                continue
+
+            # Use globally specified learning_rate
+            if self.learn_field_weights is None:  # Default, which should be treated the same as True
+                learning_rate = True
+            elif isinstance(self.learn_field_weights, (bool, int, float)):
+                learning_rate = self.learn_field_weights
+
+            # Use individually specified learning_rate
             else:
+                # FIX: THIS NEEDS TO USE field_index_map, BUT THAT DOESN'T SEEM TO HAVE THE WEIGHT PROJECTION YET
+                learning_rate = self.learn_field_weights[self._field_index_map[projection]]
+
+            if learning_rate is False:
                 projection.learnable = False
+                continue
+            elif learning_rate is True:
+                # Default (EMComposition's learning_rate) is used for all field_weight Projections:
+                learning_rate = self.learning_rate
+            assert isinstance(learning_rate, (int, float)), \
+                (f"PROGRAM ERROR: learning_rate for {projection.sender.owner.name} is not a valid value.")
+
+            projection.learnable = True
+            if projection.learning_mechanism:
+                projection.learning_mechanism.learning_rate = learning_rate
 
     #endregion
@@ -2567,10 +2638,9 @@ def _encode_memory(self, context=None):
         """
         # Get least used slot (i.e., weakest memory = row of matrix with lowest weights) computed across all fields
-        purge_by_field_weights = False
         field_norms = np.array([np.linalg.norm(field, axis=1)
                                 for field in [row for row in self.parameters.memory.get(context)]])
-        if purge_by_field_weights:
+        if self.purge_by_field_weights:
             field_norms *= self.field_weights
         row_norms = np.sum(field_norms, axis=1)
         idx_of_min = np.argmin(row_norms)
@@ -2623,11 +2693,11 @@ def learn(self, *args, **kwargs)->list:
         """Override to check for inappropriate use of ARG_MAX or PROBABILISTIC options for retrieval with learning"""
         softmax_choice = self.parameters.softmax_choice.get(kwargs[CONTEXT])
         use_gating_for_weighting = self._use_gating_for_weighting
-        learn_field_weights = self.parameters.learn_field_weights.get(kwargs[CONTEXT])
+        enable_learning = self.parameters.enable_learning.get(kwargs[CONTEXT])
 
-        if use_gating_for_weighting and learn_field_weights:
+        if use_gating_for_weighting and enable_learning:
             raise EMCompositionError(f"Field weights cannot be learned when 'use_gating_for_weighting' is True; "
-                                     f"Construct '{self.name}' with the 'learn_field_weights' arg set to False.")
+                                     f"Construct '{self.name}' with the 'enable_learning' arg set to False.")
 
         if softmax_choice in {ARG_MAX, PROBABILISTIC}:
             raise EMCompositionError(f"The ARG_MAX and PROBABILISTIC options for the 'softmax_choice' arg "
@@ -2646,19 +2716,19 @@ def _get_execution_mode(self, execution_mode):
         return execution_mode
 
     def _identify_target_nodes(self, context)->list:
-        """Identify retrieval_nodes specified by **enable_learning** as TARGET nodes"""
-        enable_learning = self.parameters.enable_learning._get(context)
-        if enable_learning is False:
-            if self.learn_field_weights:
-                warnings.warn(f"The 'learn_field_weights' arg for {self.name} is True "
-                              f"but its 'enable_learning' is False, so learn_field_weights will have no effect.")
+        """Identify retrieval_nodes specified by **target_fields** as TARGET nodes"""
+        target_fields = self.target_fields
+        if target_fields is False:
+            if self.enable_learning:
+                
warnings.warn(f"The 'enable_learning' arg for {self.name} is True " + f"but its 'target_fields' is False, so enable_learning will have no effect.") target_nodes = [] - elif enable_learning is True: + elif target_fields is True: target_nodes = [node for node in self.retrieved_nodes] - elif isinstance(enable_learning, list): - target_nodes = [node for node in self.retrieved_nodes if enable_learning[self.retrieved_nodes.index(node)]] + elif isinstance(target_fields, list): + target_nodes = [node for node in self.retrieved_nodes if target_fields[self.retrieved_nodes.index(node)]] else: - assert False, (f"PROGRAM ERROR: enable_learning arg for {self.name}: {enable_learning} " + assert False, (f"PROGRAM ERROR: target_fields arg for {self.name}: {target_fields} " f"is neither True, False nor a list of bools as it should be.") super()._identify_target_nodes(context) return target_nodes @@ -2666,7 +2736,7 @@ def _identify_target_nodes(self, context)->list: def infer_backpropagation_learning_pathways(self, execution_mode, context=None): if self.concatenate_queries: raise EMCompositionError(f"EMComposition does not support learning with 'concatenate_queries'=True.") - super().infer_backpropagation_learning_pathways(execution_mode, context=context) + return super().infer_backpropagation_learning_pathways(execution_mode, context=context) def do_gradient_optimization(self, retain_in_pnl_options, context, optimization_num=None): # 7/10/24 - MAKE THIS CONTEXT DEPENDENT: CALL super() IF BEING EXECUTED ON ITS OWN? diff --git a/psyneulink/library/compositions/pytorchEMcompositionwrapper.py b/psyneulink/library/compositions/pytorchEMcompositionwrapper.py index 38c67017ca..fca4856e4e 100644 --- a/psyneulink/library/compositions/pytorchEMcompositionwrapper.py +++ b/psyneulink/library/compositions/pytorchEMcompositionwrapper.py @@ -46,16 +46,16 @@ def __init__(self, *args, **kwargs): # ProjectionWrappers for match nodes learning_signals_for_match_nodes = pnl_storage_mech.learning_signals[:num_match_fields] pnl_match_projs = [match_node_learning_signal.efferents[0].receiver.owner - for match_node_learning_signal in learning_signals_for_match_nodes] + for match_node_learning_signal in learning_signals_for_match_nodes] self.match_projection_wrappers = [self.projections_map[pnl_match_proj] - for pnl_match_proj in pnl_match_projs] + for pnl_match_proj in pnl_match_projs] # ProjectionWrappers for retrieve nodes learning_signals_for_retrieve_nodes = pnl_storage_mech.learning_signals[num_match_fields:] pnl_retrieve_projs = [retrieve_node_learning_signal.efferents[0].receiver.owner - for retrieve_node_learning_signal in learning_signals_for_retrieve_nodes] + for retrieve_node_learning_signal in learning_signals_for_retrieve_nodes] self.retrieve_projection_wrappers = [self.projections_map[pnl_retrieve_proj] - for pnl_retrieve_proj in pnl_retrieve_projs] + for pnl_retrieve_proj in pnl_retrieve_projs] def execute_node(self, node, variable, optimization_num, context): """Override to handle storage of entry to memory_matrix by EMStorage Function""" @@ -134,19 +134,26 @@ def store_memory(self, memory_to_store, context): idx_of_weakest_memory = torch.argmin(row_norms) values = [] - for i, field_projection in enumerate(self.match_projection_wrappers + self.retrieve_projection_wrappers): - if i < num_match_fields: - # For match projections, get entry to store from value of sender of Projection matrix - # (this is to accomodate concatenation_node) - axis = 0 + for field_projection in self.match_projection_wrappers + 
self.retrieve_projection_wrappers: + field_idx = self._composition._field_index_map[field_projection._pnl_proj] + if field_projection in self.match_projection_wrappers: + # For match projections: + # - get entry to store from value of sender of Projection matrix (to accommodate concatenation_node) entry_to_store = field_projection.sender.output + # - store in row + axis = 0 if concatenation_node is None: - assert (entry_to_store == memory_to_store[i]).all(), \ - f"PROGRAM ERROR: misalignment between inputs and fields for storing them" + # Double check that the memory passed in is the output of the projection for the correct field + assert (entry_to_store == + memory_to_store[field_idx]).all(), \ + (f"PROGRAM ERROR: misalignment between memory to be stored (input passed to store_memory) " + f"and value of projection to corresponding field.") else: - # For retrieve projections, get entry to store from memory_to_store (which has inputs to all fields) + # For retrieve projections: + # - get entry to store from memory_to_store (which has inputs to all fields) + entry_to_store = memory_to_store[field_idx] + # - store in column axis = 1 - entry_to_store = memory_to_store[i - num_match_fields] # Get matrix containing memories for the field from the Projection field_memory_matrix = field_projection.matrix diff --git a/tests/composition/test_emcomposition.py b/tests/composition/test_emcomposition.py index d2af70bee9..e70a683a7c 100644 --- a/tests/composition/test_emcomposition.py +++ b/tests/composition/test_emcomposition.py @@ -43,7 +43,7 @@ def test_two_calls_no_args(self): test_structure_data = [ # NOTE: None => use default value (i.e., don't specify in constructor, rather than forcing None as value of arg) # ------------------ SPECS --------------------------------------------- ------- EXPECTED ------------------- - # memory_template memory_fill field_wts cncat_ky nmlze sm_gain repeat #fields #keys #vals concat + # memory_template memory_fill field_wts cncat_qy nmlze sm_gain repeat #fields #keys #vals concat (0, (2,3), None, None, None, None, None, False, 2, 1, 1, False,), (0.1, (2,3), .1, None, None, None, None, False, 2, 1, 1, False,), (0.2, (2,3), (0,.1), None, None, None, None, False, 2, 1, 1, False,), @@ -61,35 +61,38 @@ def test_two_calls_no_args(self): (6, [[0,0,0],[0],[0,0]], None, [1,1,1], False, None, None, False, 3, 3, 0, False,), (7, [[0,0,0],[0],[0,0]], None, [1,1,1], True, None, None, False, 3, 3, 0, True,), (7.1, [[0,0,0],[0],[0,0]], None, [1,1,1], True , False, None, False, 3, 3, 0, False,), - (8, [[0,0],[0,0],[0,0]], None, [1,2,0], None, None, None, False, 3, 2, 1, False,), - (8.1, [[0,0],[0,0],[0,0]], None, [1,2,0], True, None, None, False, 3, 2, 1, False,), - (9, [[0,1],[0,0],[0,0]], None, [1,2,0], None, None, None, [0,1], 3, 2, 1, False,), - (9.1, [[0,1],[0,0,0],[0,0]], None, [1,2,0], None, None, None, [0,1], 3, 2, 1, False,), - (10, [[0,1],[0,0,0],[0,0]], .1, [1,2,0], None, None, None, [0,1], 3, 2, 1, False,), - (11, [[0,0],[0,0,0],[0,0]], .1, [1,2,0], None, None, None, False, 3, 2, 1, False,), + (8, [[0,0],[0,0],[0,0]], None, [1,2,None], None, None, None, False, 3, 2, 1, False,), + (8.1, [[0,0],[0,0],[0,0]], None, [1,2,None], True, None, None, False, 3, 2, 1, False,), + (8.2, [[0,0],[0,0],[0,0]], None, [1,1,None], True, None, None, False, 3, 2, 1, True,), + (8.3, [[0,0],[0,0],[0,0]], None, [1,1,0], True, None, None, False, 3, 3, 0, False,), + (8.4, [[0,0],[0,0],[0,0]], None, [0,0,0], True, None, None, False, 3, 3, 0, True,), + (9, [[0,1],[0,0],[0,0]], None, [1,2,None], 
None,    None,  None,  [0,1],  3,      2,     1,     False,),
+        (9.1, [[0,1],[0,0,0],[0,0]],    None,      [1,2,None], None,    None,  None,  [0,1],  3,      2,     1,     False,),
+        (10,  [[0,1],[0,0,0],[0,0]],    .1,        [1,2,None], None,    None,  None,  [0,1],  3,      2,     1,     False,),
+        (11,  [[0,0],[0,0,0],[0,0]],    .1,        [1,2,None], None,    None,  None,  False,  3,      2,     1,     False,),
         (12, [[[0,0],[0,0],[0,0]],     # two entries specified, fields all same length, both entries have all 0's
               [[0,0],[0,0],[0,0]]],    .1,        [1,1,1],    None,    None,  None,  2,      3,      3,     0,     False,),
         (12.1, [[[0,0],[0,0,0],[0,0]], # two entries specified, fields have different lengths, entries all have 0's
-               [[0,0],[0,0,0],[0,0]]], .1,        [1,1,0],    None,    None,  None,  2,      3,      2,     1,     False,),
+               [[0,0],[0,0,0],[0,0]]], .1,        [1,1,None], None,    None,  None,  2,      3,      2,     1,     False,),
         (12.2, [[[0,0],[0,0,0],[0,0]], # two entries specified, first has 0's
-               [[0,2],[0,0,0],[0,0]]], .1,        [1,1,0],    None,    None,  None,  2,      3,      2,     1,     False,),
+               [[0,2],[0,0,0],[0,0]]], .1,        [1,1,None], None,    None,  None,  2,      3,      2,     1,     False,),
         (12.3, [[[0,1],[0,0,0],[0,0]], # two entries specified, fields have same weights, but concatenate is False
-               [[0,2],[0,0,0],[0,0]]], .1,        [1,1,0],    None,    None,  None,  2,      3,      2,     1,     False),
+               [[0,2],[0,0,0],[0,0]]], .1,        [1,1,None], None,    None,  None,  2,      3,      2,     1,     False),
         (13, [[[0,1],[0,0,0],[0,0]],   # two entries specified, fields have same weights, and concatenate_queries is True
-              [[0,2],[0,0,0],[0,0]]],  .1,        [1,1,0],    True,    None,  None,  2,      3,      2,     1,     True),
+              [[0,2],[0,0,0],[0,0]]],  .1,        [1,1,None], True,    None,  None,  2,      3,      2,     1,     True),
         (14, [[[0,1],[0,0,0],[0,0]],   # two entries specified, all fields are keys
               [[0,2],[0,0,0],[0,0]]],  .1,        [1,1,1],    None,    None,  None,  2,      3,      3,     0,     False),
         (15, [[[0,1],[0,0,0],[0,0]],   # two entries specified; fields have different weights, constant memory_fill
-              [[0,2],[0,0,0],[0,0]]],  .1,        [1,2,0],    None,    None,  None,  2,      3,      2,     1,     False),
+              [[0,2],[0,0,0],[0,0]]],  .1,        [1,2,None], None,    None,  None,  2,      3,      2,     1,     False),
         (15.1, [[[0,1],[0,0,0],[0,0]], # two entries specified; fields have different weights, random memory_fill
-               [[0,2],[0,0,0],[0,0]]], (0,.1),    [1,2,0],    None,    None,  None,  2,      3,      2,     1,     False),
+               [[0,2],[0,0,0],[0,0]]], (0,.1),[1,2,None],     None,    None,  None,  2,      3,      2,     1,     False),
         (16, [[[0,1],[0,0,0],[0,0]],   # three entries specified
               [[0,2],[0,0,0],[0,0]],
-              [[0,3],[0,0,0],[0,0]]],  .1,        [1,2,0],    None,    None,  None,  3,      3,      2,     1,     False),
+              [[0,3],[0,0,0],[0,0]]],  .1,        [1,2,None], None,    None,  None,  3,      3,      2,     1,     False),
         (17, [[[0,1],[0,0,0],[0,0]],   # all four entries allowed by memory_capacity specified
               [[0,2],[0,0,0],[0,0]],
               [[0,3],[0,0,0],[0,0]],
-              [[0,4],[0,0,0],[0,0]]],  .1,        [1,2,0],    None,    None,  None,  4,      3,      2,     1,     False),
+              [[0,4],[0,0,0],[0,0]]],  .1,        [1,2,None], None,    None,  None,  4,      3,      2,     1,     False),
     ]
     args_names = "test_num, memory_template, memory_fill, field_weights, concatenate_queries, normalize_memories, " \
                  "softmax_gain, repeat, num_fields, num_keys, num_values, concatenate_node"
@@ -244,14 +247,204 @@ def test_softmax_choice_error(self, softmax_choice):
             em.parameters.softmax_choice.set(softmax_choice)
             em.learn()
 
-    @pytest.mark.parametrize("softmax_choice", [pnl.ARG_MAX, pnl.PROBABILISTIC])
-    def test_softmax_choice_warn(self, softmax_choice):
-        warning_msg = (f"The 'softmax_choice' arg of '.*' is set to '{softmax_choice}' with "
-                       f"'enable_learning' set to True \\(or a list\\); this will generate an error if its "
-                       f"'learn' method is called. 
Set 'softmax_choice' to WEIGHTED_AVG before learning.") + for softmax_choice in [pnl.ARG_MAX, pnl.PROBABILISTIC]: + with pytest.warns(UserWarning) as warning: + em = EMComposition(softmax_choice=softmax_choice, enable_learning=True) + warning_msg = (f"The 'softmax_choice' arg of '{em.name}' is set to '{softmax_choice}' with " + f"'enable_learning' set to True; this will generate an error if its " + f"'learn' method is called. Set 'softmax_choice' to WEIGHTED_AVG before learning.") + assert warning_msg in str(warning[0].message) + + def test_fields_arg(self): + + em = EMComposition(memory_template=(5,1), + memory_capacity=1, + normalize_field_weights=False, + fields={'A': (1.2, 3.4, True), + 'B': (None, False, True), + 'C': (0, True, True), + 'D': (7.8, False, True), + 'E': (5.6, True, True)}) + assert em.num_fields == 5 + assert em.num_keys == 4 + assert (em.field_weights == [1.2, None, 0, 7.8, 5.6]).all() + assert (em.learn_field_weights == [3.4, False, True, False, True]).all() + np.testing.assert_allclose(em.target_fields, [True, True, True, True, True]) + + # # Test wrong number of entries + with pytest.raises(EMCompositionError) as error_text: + EMComposition(memory_template=(3,1), memory_capacity=1, fields={'A': (1.2, 3.4)}) + assert error_text.value.error_value == (f"The number of entries (1) in the dict specified in the 'fields' arg " + f"of 'EM_Composition' does not match the number of fields in its " + f"memory (3).") + # Test dual specification of fields and corresponding args and learning specified for value field + with pytest.warns(UserWarning) as warning: + EMComposition(memory_template=(2,1), + memory_capacity=1, + fields={'A': (1.2, 3.4, True), + 'B': (None, True, True)}, + field_weights=[10, 11.0]) + warning_msg_1 = (f"The 'fields' arg for 'EM_Composition' was specified, so any of the 'field_names', " + f"'field_weights', 'learn_field_weights' or 'target_fields' args will be ignored.") + warning_msg_2 = (f"Learning was specified for field 'B' in the 'learn_field_weights' arg for " + f"'EM_Composition', but it is not allowed for value fields; it will be ignored.") + assert warning_msg_1 in str(warning[0].message) + assert warning_msg_2 in str(warning[1].message) + + + + field_names = ['KEY A','VALUE A', 'KEY B','KEY VALUE','VALUE LEARN'] + field_weights = [1, None, 2, 0, None] + learn_field_weights = [True, False, .01, False, False] + target_fields = [True, False, False, True, True] + dict_subdict = {} + for i, fn in enumerate(field_names): + dict_subdict[fn] = {pnl.FIELD_WEIGHT: field_weights[i], + pnl.LEARN_FIELD_WEIGHT: learn_field_weights[i], + pnl.TARGET_FIELD: target_fields[i]} + dict_tuple = {fn:(fw,lfw,tf) for fn,fw,lfw,tf in zip(field_names, + field_weights, + learn_field_weights, + target_fields)} + test_field_map_and_args_assignment_data = [ + ('args', None, field_names, field_weights, learn_field_weights, target_fields), + ('dict-subdict', dict_subdict, None, None, None, None), + ('dict-tuple', dict_tuple, None, None, None, None)] + field_arg_names = "format, fields, field_names, field_weights, learn_field_weights, target_fields" + + @pytest.mark.parametrize(field_arg_names, test_field_map_and_args_assignment_data, + ids=[x[0] for x in test_field_map_and_args_assignment_data]) + def test_field_args_and_map_assignments(self, + format, + fields, + field_names, + field_weights, + learn_field_weights, + target_fields): + # individual args + em = EMComposition(memory_template=(5,2), + memory_capacity=2, + fields=fields, + field_names=field_names, + 
field_weights=field_weights,
+                           learn_field_weights=learn_field_weights,
+                           target_fields=target_fields,
+                           learning_rate=0.5)
+        assert em.num_fields == 5
+        assert em.num_keys == 3
+        for actual, expected in zip(em.field_weights, [0.33333333, None, 0.66666667, 0, None]):
+            if expected is None:
+                assert actual is None
+            else:
+                np.testing.assert_allclose(actual, expected)
+
+        # Validate targets for target_fields
+        np.testing.assert_allclose(em.target_fields, [True, False, False, True, True])
+        learning_components = em.infer_backpropagation_learning_pathways(pnl.ExecutionMode.PyTorch)
+        assert len(learning_components) == 3
+        assert 'TARGET for KEY A [RETRIEVED]' in learning_components[0].name
+        assert 'TARGET for KEY VALUE [RETRIEVED]' in learning_components[1].name
+        assert 'TARGET for VALUE LEARN [RETRIEVED]' in learning_components[2].name
+
+        # Validate learning specs for field weights
+        # Presence or absence of field weight components based on keys vs. values:
+        assert any('KEY A [WEIGHT]' in node.name for node in em.nodes)
+        assert any('KEY B [WEIGHT]' in node.name for node in em.nodes)
+        assert any('KEY VALUE [WEIGHT]' in node.name for node in em.nodes)
+        assert not any('VALUE A [WEIGHT]' in node.name for node in em.nodes)
+        assert not any('VALUE LEARN [WEIGHT]' in node.name for node in em.nodes)
+        assert not any('WEIGHT to WEIGHTED MATCH for VALUE A' in proj.name for proj in em.projections)
+        assert not any('WEIGHT to WEIGHTED MATCH for VALUE LEARN' in proj.name for proj in em.projections)
+        # Learnability and learning rate for field weights
+        # FIX: ONCE LEARNING IS FULLY IMPLEMENTED FOR FIELD WEIGHTS, VALIDATE THAT:
+        #      KEY A USES COMPOSITION DEFAULT LEARNING RATE OF .5
+        #      KEY B USES INDIVIDUALLY ASSIGNED LEARNING RATE OF .01
+        assert em.learn_field_weights == [True, False, .01, False, False]
+        assert em.projections['WEIGHT to WEIGHTED MATCH for KEY A'].learnable
+        assert em.projections['WEIGHT to WEIGHTED MATCH for KEY B'].learnable
+        assert not em.projections['WEIGHT to WEIGHTED MATCH for KEY VALUE'].learnable
+
+        # Validate _field_index_map
+        assert em._field_index_map[[k for k in em._field_index_map.keys()
+                                    if ('MappingProjection from KEY A [QUERY][OutputPort-0] to STORE[InputPort-0]')
+                                    in k.name][0]]==0
+        assert em._field_index_map[[k for k in em._field_index_map.keys() if 'KEY A [QUERY]' in k.name][0]]==0
+        assert em._field_index_map[[k for k in em._field_index_map.keys() if 'KEY A [MATCH to KEYS]' in k.name][0]]==0
+        assert em._field_index_map[[k for k in em._field_index_map.keys() if 'KEY A [WEIGHTED MATCH]' in k.name][0]]==0
+        assert em._field_index_map[[k for k in em._field_index_map.keys() if 'KEY A [RETRIEVED]' in k.name][0]]==0
+        assert em._field_index_map[[k for k in em._field_index_map.keys() if 'MEMORY FOR KEY A [RETRIEVE KEY]'
+                                    in k.name][0]]==0
+        assert em._field_index_map[[k for k in em._field_index_map.keys() if 'VALUE A [VALUE]' in k.name][0]] == 1
+        assert em._field_index_map[[k for k in em._field_index_map.keys() if
+                                    ('VALUE A [VALUE][OutputPort-0] to STORE[InputPort-1]') in k.name][0]] == 1
+        assert em._field_index_map[[k for k in em._field_index_map.keys() if 'VALUE A [RETRIEVED]' in k.name][0]] == 1
+        assert em._field_index_map[[k for k in em._field_index_map.keys()
+                                    if 'MEMORY FOR VALUE A' in k.name][0]] == 1
+        assert em._field_index_map[[k for k in em._field_index_map.keys() if 'KEY B [QUERY]' in k.name][0]] == 2
+        assert em._field_index_map[[k 
for k in em._field_index_map.keys() + if ('KEY B [QUERY][OutputPort-0] to STORE[InputPort-2]') in k.name][0]] == 2 + assert em._field_index_map[[k for k in em._field_index_map.keys() if 'KEY B [RETRIEVED]' in k.name][0]] == 2 + assert (em._field_index_map[[k for k in em._field_index_map.keys() + if 'MEMORY FOR KEY B [RETRIEVE KEY]' in k.name][0]] == 2) + assert em._field_index_map[[k for k in em._field_index_map.keys() if 'KEY VALUE [QUERY]' in k.name][0]] == 3 + assert em._field_index_map[[k for k in em._field_index_map.keys() + if 'KEY VALUE [QUERY][OutputPort-0] to STORE[InputPort-3]' in k.name][0]] == 3 + assert em._field_index_map[[k for k in em._field_index_map.keys() if 'KEY VALUE [RETRIEVED]' in k.name][0]] == 3 + assert em._field_index_map[[k for k in em._field_index_map.keys() + if 'MEMORY FOR KEY VALUE [RETRIEVE KEY]' in k.name][0]] == 3 + assert em._field_index_map[[k for k in em._field_index_map.keys() if 'VALUE LEARN [VALUE]' in k.name][0]] == 4 + assert em._field_index_map[[k for k in em._field_index_map.keys() + if 'VALUE LEARN [VALUE][OutputPort-0] to STORE[InputPort-4]' in k.name][0]] == 4 + assert (em._field_index_map[[k for k in em._field_index_map.keys() + if 'VALUE LEARN [RETRIEVED]' in k.name][0]] == 4) + assert em._field_index_map[[k for k in em._field_index_map.keys() if 'VALUE LEARN [VALUE]' in k.name][0]] == 4 + assert em._field_index_map[[k for k in em._field_index_map.keys() + if 'MEMORY FOR VALUE LEARN [RETRIEVE VALUE]' in k.name][0]] == 4 + assert (em._field_index_map[[k for k in em._field_index_map.keys() + if 'MEMORY for KEY A [KEY]' in k.name][0]] == 0) + assert em._field_index_map[[k for k in em._field_index_map.keys() + if 'MATCH to WEIGHTED MATCH for KEY A' in k.name][0]] == 0 + assert em._field_index_map[[k for k in em._field_index_map.keys() + if 'WEIGHTED MATCH for KEY A to COMBINE MATCHES' in k.name][0]] == 0 + assert em._field_index_map[[k for k in em._field_index_map.keys() if 'KEY B [MATCH to KEYS]' in k.name][0]] == 2 + assert em._field_index_map[[k for k in em._field_index_map.keys() + if 'MEMORY for KEY B [KEY]' in k.name][0]] == 2 + assert em._field_index_map[[k for k in em._field_index_map.keys() + if 'MATCH to WEIGHTED MATCH for KEY B' in k.name][0]] == 2 + assert (em._field_index_map[[k for k in em._field_index_map.keys() + if 'KEY B [WEIGHTED MATCH]' in k.name][0]] == 2) + assert em._field_index_map[[k for k in em._field_index_map.keys() + if 'WEIGHTED MATCH for KEY B to COMBINE MATCHES' in k.name][0]] == 2 + assert (em._field_index_map[[k for k in em._field_index_map.keys() + if 'KEY VALUE [MATCH to KEYS]' in k.name][0]] == 3) + assert em._field_index_map[[k for k in em._field_index_map.keys() if + 'MEMORY for KEY VALUE [KEY]' in k.name][0]] == 3 + assert em._field_index_map[[k for k in em._field_index_map.keys() + if 'MATCH to WEIGHTED MATCH for KEY VALUE' in k.name][0]] == 3 + assert (em._field_index_map[[k for k in em._field_index_map.keys() + if 'KEY VALUE [WEIGHTED MATCH]' in k.name][0]] == 3) + assert em._field_index_map[[k for k in em._field_index_map.keys() + if 'WEIGHTED MATCH for KEY VALUE to COMBINE MATCHES' in k.name][0]] == 3 + assert em._field_index_map[[k for k in em._field_index_map.keys() if 'KEY B [WEIGHT]' in k.name][0]] == 2 + assert em._field_index_map[[k for k in em._field_index_map.keys() if 'KEY VALUE [WEIGHT]' in k.name][0]] == 3 + assert em._field_index_map[[k for k in em._field_index_map.keys() + if 'WEIGHT to WEIGHTED MATCH for KEY VALUE' in k.name][0]] == 3 + assert em._field_index_map[[k for k in 
em._field_index_map.keys()
                                    if 'WEIGHT to WEIGHTED MATCH for KEY A' in k.name][0]] == 0
+        assert em._field_index_map[[k for k in em._field_index_map.keys()
+                                    if 'WEIGHT to WEIGHTED MATCH for KEY B' in k.name][0]] == 2
+
+    def test_field_weights_all_None_and_or_0(self):
+        with pytest.raises(EMCompositionError) as error_text:
+            EMComposition(memory_template=(3,1), memory_capacity=1, field_weights=[None, None, None])
+        assert error_text.value.error_value == (f"The entries in 'field_weights' arg for EM_Composition can't all "
+                                                f"be 'None' since that will preclude the construction of any keys.")
+
+        with pytest.warns(UserWarning) as warning:
+            EMComposition(memory_template=(3,1), memory_capacity=1, field_weights=[0, None, 0])
+        warning_msg = (f"All of the entries in the 'field_weights' arg for EM_Composition are either None or set to 0; "
+                       f"this will result in no retrievals unless/until the 0(s) is(are) changed to a positive value.")
+        assert warning_msg in str(warning[0].message)
 
 
 @pytest.mark.pytorch
@@ -265,21 +458,21 @@ class TestExecution:
    # ---------------------------------------------------------------------------------- ------------------------
        (0, [[[1,2,3],[4,6]],
             [[1,2,5],[4,8]],
-             [[1,2,10],[4,10]]], None, 3, 0, [1,0],    None, None, 100, 0, [[[1, 2, 3]]], [[1., 2., 3.16585899],
-                                                                                           [4., 6.16540637]]),
+             [[1,2,10],[4,10]]], None, 3, 0, [1,None], None, None, 100, 0, [[[1, 2, 3]]], [[1., 2., 3.16585899],
+                                                                                           [4., 6.16540637]]),
        (1, [[[1,2,3],[4,6]],
             [[1,2,5],[4,8]],
-             [[1,2,10],[4,10]]], None, 3, 0, [1,0],    None, None, 100, 0, [[1, 2, 3],
-                                                                            [4, 6]], [[1., 2., 3.16585899],
-                                                                                      [4., 6.16540637]]),
+             [[1,2,10],[4,10]]], None, 3, 0, [1,None], None, None, 100, 0, [[1, 2, 3],
+                                                                            [4, 6]], [[1., 2., 3.16585899],
+                                                                                      [4., 6.16540637]]),
        (2, [[[1,2,3],[4,6]],
             [[1,2,5],[4,8]],
-             [[1,2,10],[4,10]]], None, 3, 0, [1,0],    None, None, 100, 0, [[1, 2, 3],
-                                                                            [4, 8]], [[1., 2., 3.16585899],
-                                                                                      [4., 6.16540637]]),
+             [[1,2,10],[4,10]]], None, 3, 0, [1,None], None, None, 100, 0, [[1, 2, 3],
+                                                                            [4, 8]], [[1., 2., 3.16585899],
+                                                                                      [4., 6.16540637]]),
        (3, [[[1,2,3],[4,6]],
             [[1,2,5],[4,8]],
-             [[1,2,10],[4,10]]], (0,.01), 4, 0, [1,0],    None, None, 100, 0, [[1, 2, 3],
+             [[1,2,10],[4,10]]], (0,.01), 4, 0, [1,None], None, None, 100, 0, [[1, 2, 3],
                                                                                [4, 8]], [[0.99998628,
                                                                                           1.99997247,
                                                                                           3.1658154 ],
@@ -352,11 +545,11 @@ class TestExecution:
                                                                                           6.38682264]]),
 
        (12, [[[1],[2],[3]],           # Scalar keys - exact match (this tests use of L0 for retrieval in MEMORY matrix)
-             [[10],[0],[100]]], (0,.01), 3, 0, [1,1,0],    None, None, pnl.ARG_MAX, 1, [[10],[0],[100]],
+             [[10],[0],[100]]], (0,.01), 3, 0, [1,1,None], None, None, pnl.ARG_MAX, 1, [[10],[0],[100]],
             [[10],[0],[100]]),
 
        (13, [[[1],[2],[3]],           # Scalar keys - close match (this tests use of L0 for retrieval in MEMORY matrix)
-             [[10],[0],[100]]], (0,.01), 3, 0, [1,1,0],    None, None, pnl.ARG_MAX, 1, [[2],[3],[4]], [[1],[2],[3]]),
+             [[10],[0],[100]]], (0,.01), 3, 0, [1,1,None], None, None, pnl.ARG_MAX, 1, [[2],[3],[4]], [[1],[2],[3]]),
    ]

    args_names = "test_num, memory_template, memory_fill, memory_capacity, memory_decay_rate, field_weights, " \

    @pytest.mark.parametrize(args_names,
                             test_execution_data,
                             ids=[x[0] for x in test_execution_data])
-    @pytest.mark.parametrize('enable_learning', [False, True], ids=['no_learning','learning'])
+    @pytest.mark.parametrize('learn_field_weights', [False, True], ids=['no_learning','learning'])
    @pytest.mark.composition
    @pytest.mark.parametrize('exec_mode', [pnl.ExecutionMode.Python,
                                           pnl.ExecutionMode.PyTorch],
ids=['Python','PyTorch']) def test_simple_execution_without_learning(self, exec_mode, - enable_learning, + learn_field_weights, test_num, memory_template, memory_capacity, @@ -388,12 +581,12 @@ def test_simple_execution_without_learning(self, # # pytest.skip('Execution of EMComposition not yet supported for LLVM Mode.') # Restrict testing of learning configurations (which are much larger) to select tests - if enable_learning and test_num not in {10}: + if learn_field_weights and test_num not in {10}: pytest.skip('Limit tests of learning to subset of parametrizations (for efficiency)') params = {'memory_template': memory_template, 'memory_capacity': memory_capacity, - 'enable_learning': enable_learning, + 'learn_field_weights': learn_field_weights, } # Add explicit argument specifications only for args that are not None # (to avoid forcing to None in constructor) @@ -406,7 +599,7 @@ def test_simple_execution_without_learning(self, if concatenate_queries is not None: params.update({'concatenate_queries': concatenate_queries}) # FIX: DELETE THE FOLLOWING ONCE CONCATENATION IS IMPLEMENTED FOR LEARNING - params.update({'enable_learning': False}) + params.update({'learn_field_weights': False}) if normalize_memories is not None: params.update({'normalize_memories': normalize_memories}) if softmax_gain is not None: @@ -470,58 +663,129 @@ def test_simple_execution_without_learning(self, memory_fill = memory_fill or 0 assert all(elem == memory_fill for elem in em.memory[-1]) - @pytest.mark.parametrize('data', - (([[[5], [0], [10]], # 1d template + @pytest.mark.parametrize('test_field_weights_0_vs_None_data', + (([[[5], [0], [10]], # 1d memory template [[0], [5], [10]], [[0.1], [0.1], [10]], [[0.1], [0.1], [10]]], - [[5], [5], [10]], # 1d query - pnl.L0 # 1d retrieval operation + [[5], [5], [10]], # 1d query + pnl.L0), # 1d retrieval operation + ([[[5,0], [0,5], [10,10]], # 2d memory template + [[0,5], [5,0], [10,10]], + [[0.1, 0.1], [0.1, 0.1], [0.1, 0.1]], + [[0.1, 0.1], [0.1, 0.1], [0.1, 0.1]]], + [[5,0], [5,0], [10,10]], # 2d query + pnl.DOT_PRODUCT), # 2d retrieval operation ), - ([[[5,0], [0,5], [10]], # 2d template - [[0,5], [5,0], [10]], - [[0.1, 0.1], [0.1, 0.1], [0.1]], - [[0.1, 0.1], [0.1, 0.1], [0.1]]], - [[5,0], [5,0], [10]], # 2d query - pnl.DOT_PRODUCT)), # 2d retrieval operation ids=['1d', '2d']) + @pytest.mark.parametrize('field_weights', [[.75, .25, 0], [.75, .25, None]], ids=['0','None']) + @pytest.mark.parametrize('softmax_choice', [pnl.MAX_VAL, pnl.ARG_MAX], ids=['MAX_VAL','ARG_MAX']) + @pytest.mark.parametrize('exec_mode', [pnl.ExecutionMode.Python, + pnl.ExecutionMode.PyTorch, + # pnl.ExecutionMode.LLVM + ], + ids=['Python', + 'PyTorch', + # 'LLVM' + ]) @pytest.mark.composition - @pytest.mark.parametrize('exec_mode', [pnl.ExecutionMode.Python, pnl.ExecutionMode.PyTorch]) - def test_em_field_weights_assignment(self, exec_mode, data): - EM_assign_template = data[0] - em = pnl.EMComposition(memory_template=EM_assign_template, + def test_assign_field_weights_and_0_vs_None(self, + field_weights, + softmax_choice, + test_field_weights_0_vs_None_data, + exec_mode): + memory_template = test_field_weights_0_vs_None_data[0] + query = test_field_weights_0_vs_None_data[1] + operation = test_field_weights_0_vs_None_data[2] + + em = pnl.EMComposition(memory_template=memory_template, memory_capacity=4, memory_decay_rate= 0, - memory_fill=0.001, - enable_learning = False, - softmax_choice=pnl.ARG_MAX, - field_weights=(.75,.25,0), + learn_field_weights = False, + softmax_choice=softmax_choice, 
+ field_weights=field_weights, field_names=['A','B','C']) - # Confirm initial weight assginments (that favor A) + # Confirm initial weight assignments (that favor A) assert em.nodes['A [WEIGHT]'].input_port.defaults.variable == [.75] assert em.nodes['B [WEIGHT]'].input_port.defaults.variable == [.25] + if field_weights[2] == 0: + assert 'C [QUERY]' in em.nodes.names + assert len(em.field_weight_nodes) == 3 + assert em.nodes['C [WEIGHT]'].input_port.defaults.variable == [0] + elif field_weights[2] is None: + assert 'C [VALUE]' in em.nodes.names + assert len(em.field_weight_nodes) == 2 + assert 'C [WEIGHT]' not in em.nodes.names + # Confirm use of L0 for retrieval since keys for A and B are scalars - assert em.projections['MEMORY for A [KEY]'].function.operation == data[2] - assert em.projections['MEMORY for B [KEY]'].function.operation == data[2] - # Change fields weights to favor B - em.field_weights = [0,1,0] - # Ensure weights got changed - assert em.nodes['A [WEIGHT]'].input_port.defaults.variable == [0] - assert em.nodes['B [WEIGHT]'].input_port.defaults.variable == [1] - # Note: The input matches both fields A and B; - test_input = {em.nodes['A [QUERY]']: [data[1][0]], - em.nodes['B [QUERY]']: [data[1][1]], - em.nodes['C [VALUE]']: [data[1][2]]} - result = em.run(test_input, execution_mode=exec_mode) - # If the weights change DIDN'T get used, it should favor field A and return [5,0,10] as the best match - # If weights change DID get used, it should favor field B and return [0,5,10] as the best match - for i,j in zip(result, data[0][1]): - assert (i == j).all() - # Change weights back and confirm that it now favors A - em.field_weights = [1,0,0] + assert em.projections['MEMORY for A [KEY]'].function.operation == operation + assert em.projections['MEMORY for B [KEY]'].function.operation == operation + if field_weights[2] == 0: + assert em.projections['MEMORY for C [KEY]'].function.operation == operation + + A = em.nodes['A [QUERY]'] + B = em.nodes['B [QUERY]'] + C = em.nodes['C [QUERY]' if field_weights[2] == 0 else 'C [VALUE]'] + + # Note: The input matches both fields A and B + test_input = {A: [query[0]], + B: [query[1]], + C: [query[2]]} result = em.run(test_input, execution_mode=exec_mode) - for i,j in zip(result, data[0][0]): - assert (i == j).all() + # Note: field_weights favors A + if softmax_choice == pnl.MAX_VAL: + if operation == pnl.L0: + expected = [[1.70381182], [0.], [3.40762364]] + else: + expected = [[1.56081243, 0.0], [0.0, 1.56081243], [3.12162487, 3.12162487]] + else: + expected = memory_template[0] + np.testing.assert_allclose(result, expected) + + # Change fields weights to favor C + if field_weights[2] is None: + with pytest.raises(EMCompositionError) as error_text: + em.field_weights = np.array([0,0,1]) + assert error_text.value.error_value == (f"Field 'C' of 'EM_Composition' was originally assigned " + f"as a value node (i.e., with a field_weight = None); " + f"this cannot be changed after construction. 
If you want to "
+                                                     f"change it to a key field, you must re-construct the "
+                                                     f"EMComposition using a scalar for its field in the "
+                                                     f"`field_weights` arg (including 0.")
+        else:
+            em.field_weights = np.array([0,0,1])
+            # Ensure weights got changed
+            assert em.nodes['A [WEIGHT]'].input_port.defaults.variable == [0]
+            assert em.nodes['B [WEIGHT]'].input_port.defaults.variable == [0]
+            assert em.nodes['C [WEIGHT]'].input_port.defaults.variable == [1]
+            # Note: The input matches both fields A and B;
+            test_input = {em.nodes['A [QUERY]']: [query[0]],
+                          em.nodes['B [QUERY]']: [query[1]],
+                          em.nodes['C [QUERY]']: [query[2]]}
+            result = em.run(test_input, execution_mode=exec_mode)
+            # If the weights change DIDN'T get used, it should favor field A and return [5,0,10] as the best match
+            # If weights change DID get used, it should favor field B and return [0,5,10] as the best match
+            if softmax_choice == pnl.MAX_VAL:
+                if operation == pnl.L0:
+                    expected = [[2.525], [2.525], [10]]
+                else:
+                    expected = [[2.525, 1.275], [2.525, 1.275], [7.525, 7.525]]
+            else:
+                expected = memory_template[0]
+            np.testing.assert_allclose(result, expected)
+
+            # Change the weights again and confirm that retrieval now favors B
+            em.field_weights = [0,1,0]
+            result = em.run(test_input, execution_mode=exec_mode)
+            if softmax_choice == pnl.MAX_VAL:
+                if operation == pnl.L0:
+                    expected = [[3.33333333], [5], [10]]
+                else:
+                    expected = [[3.33333333, 1.66666667], [5, 0], [10, 10]]
+            else:
+                expected = memory_template[1]
+            np.testing.assert_allclose(result, expected)
+

    @pytest.mark.composition
    @pytest.mark.parametrize('exec_mode', [pnl.ExecutionMode.Python, pnl.ExecutionMode.PyTorch])
@@ -540,7 +804,9 @@ def test_multiple_trials_concatenation_and_storage_node(self, exec_mode, concate
                        softmax_gain=100,
                        memory_fill=(0,.001),
                        concatenate_queries=concatenate,
-                       enable_learning=learning,
+                       # learn_field_weights=learning,
+                       learn_field_weights=False,
+                       enable_learning=True,
                        use_storage_node=use_storage_node)

    inputs = [[[[1,2,3]],[[4,5,6]],[[10,20,30]],[[40,50,60]],[[100,200,300]],[[400,500,600]]],
@@ -574,3 +840,221 @@ def test_multiple_trials_concatenation_and_storage_node(self, exec_mode, concate
                  [[2.5, 3.125, 3.75 ], [2.5625, 3.1875, 3.8125]]]
        em.learn(inputs=inputs, execution_mode=exec_mode)
        np.testing.assert_equal(em.memory, expected_memory)
+
+    @pytest.mark.composition
+    def test_backpropagation_of_error_in_learning(self):
+        """This test is based on the EGO CSW Model"""
+
+        import torch
+        torch.manual_seed(0)
+        state_input_layer = pnl.ProcessingMechanism(name='STATE', input_shapes=11)
+        previous_state_layer = pnl.ProcessingMechanism(name='PREVIOUS STATE', input_shapes=11)
+        context_layer = pnl.TransferMechanism(name='CONTEXT',
+                                              input_shapes=11,
+                                              function=pnl.Tanh,
+                                              integrator_mode=True,
+                                              integration_rate=.69)
+        em = EMComposition(name='EM',
+                           memory_template=[[0] * 11, [0] * 11, [0] * 11], # context
+                           memory_fill=(0,.0001),
+                           memory_capacity=50,
+                           memory_decay_rate=0,
+                           softmax_gain=10,
+                           softmax_threshold=.001,
+                           fields = {'STATE': {pnl.FIELD_WEIGHT: None,
+                                               pnl.LEARN_FIELD_WEIGHT: False,
+                                               pnl.TARGET_FIELD: True},
+                                     'PREVIOUS_STATE': {pnl.FIELD_WEIGHT:.5,
+                                                        pnl.LEARN_FIELD_WEIGHT: False,
+                                                        pnl.TARGET_FIELD: False},
+                                     'CONTEXT': {pnl.FIELD_WEIGHT:.5,
+                                                 pnl.LEARN_FIELD_WEIGHT: False,
+                                                 pnl.TARGET_FIELD: False}},
+                           normalize_field_weights=True,
+                           normalize_memories=False,
+                           concatenate_queries=False,
+                           enable_learning=True,
+                           learning_rate=.5,
+                           device=pnl.CPU
+                           )
+        prediction_layer = pnl.ProcessingMechanism(name='PREDICTION', 
input_shapes=11) + + QUERY = ' [QUERY]' + VALUE = ' [VALUE]' + RETRIEVED = ' [RETRIEVED]' + + # Pathways + state_to_previous_state_pathway = [state_input_layer, + pnl.MappingProjection(matrix=pnl.IDENTITY_MATRIX, + learnable=False), + previous_state_layer] + state_to_context_pathway = [state_input_layer, + pnl.MappingProjection(matrix=pnl.IDENTITY_MATRIX, + learnable=False), + context_layer] + state_to_em_pathway = [state_input_layer, + pnl.MappingProjection(sender=state_input_layer, + receiver=em.nodes['STATE' + VALUE], + matrix=pnl.IDENTITY_MATRIX, + learnable=False), + em] + previous_state_to_em_pathway = [previous_state_layer, + pnl.MappingProjection(sender=previous_state_layer, + receiver=em.nodes['PREVIOUS_STATE' + QUERY], + matrix=pnl.IDENTITY_MATRIX, + learnable=False), + em] + context_learning_pathway = [context_layer, + pnl.MappingProjection(sender=context_layer, + matrix=pnl.IDENTITY_MATRIX, + receiver=em.nodes['CONTEXT' + QUERY], + learnable=True), + em, + pnl.MappingProjection(sender=em.nodes['STATE' + RETRIEVED], + receiver=prediction_layer, + matrix=pnl.IDENTITY_MATRIX, + learnable=False), + prediction_layer] + + # Composition + EGO = pnl.AutodiffComposition([state_to_previous_state_pathway, + state_to_context_pathway, + state_to_em_pathway, + previous_state_to_em_pathway, + context_learning_pathway], + learning_rate=.5, + loss_spec=pnl.Loss.BINARY_CROSS_ENTROPY, + device=pnl.CPU) + + learning_components = EGO.infer_backpropagation_learning_pathways(pnl.ExecutionMode.PyTorch) + assert len(learning_components) == 1 + assert learning_components[0].name == 'TARGET for PREDICTION' + EGO.add_projection(pnl.MappingProjection(sender=state_input_layer, + receiver=learning_components[0], + learnable=False)) + + EGO.scheduler.add_condition(em, pnl.BeforeNodes(previous_state_layer, context_layer)) + + INPUTS = [[0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0], + [0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0], + [0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0], + [0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0], + [0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0], + [0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0], + [0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0], + [0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0], + [0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0], + [0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0], + [0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0], + [0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0], + [0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0], + [0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0], + [0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0], + [0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0], + [0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0], + [0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0], + [0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0], + [0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0], + [0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0], + [0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0], + [0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0], + [0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0], + [0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0], + [0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0], + [0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0], + [0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0], + [0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0], + [0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0], + [0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0], + [0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0], + [0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0], + [0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0], + [0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0], + [0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0], + [0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0], + [0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0], + [0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0], + [0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0], + [0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0], + [0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0], + [0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0], + [0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0], + [0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0], + [0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0], + [0, 
1, 0, 0, 0, 0, 0, 0, 0, 0, 0], + [0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0], + [0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0], + [0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0]] + + result = EGO.learn(inputs={'STATE':INPUTS}, learning_rate=.5, execution_mode=pnl.ExecutionMode.PyTorch) + expected = [[ 0.00000000e+00, 1.35476414e-03, 1.13669378e-03, 2.20434260e-03, 6.61008388e-04, 9.88672202e-01, + 6.52088276e-04, 1.74149507e-03, 1.09769133e-03, 2.47971436e-03, 0.00000000e+00], + [ 0.00000000e+00, -6.75284069e-02, -1.28930436e-03, -2.10726610e-01, -1.41050716e-03, -5.92286989e-01, + -2.75196416e-03, -2.21010605e-03, -7.14369243e-03, -2.05167374e-02, 0.00000000e+00], + [ 0.00000000e+00, 1.18578255e-03, 1.29393181e-03, 1.35476414e-03, 1.13669378e-03, 2.20434260e-03, + 6.61008388e-04, 9.88672202e-01, 6.52088276e-04, 2.83918640e-03, 0.00000000e+00]] + np.testing.assert_allclose(result, expected) + + # Plot (for during debugging): + # + # TARGETS = [[0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0], + # [0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0], + # [0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0], + # [0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0], + # [0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0], + # [0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0], + # [0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0], + # [0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0], + # [0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0], + # [0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0], + # [0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0], + # [0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0], + # [0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0], + # [0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0], + # [0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0], + # [0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0], + # [0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0], + # [0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0], + # [0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0], + # [0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0], + # [0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0], + # [0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0], + # [0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0], + # [0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0], + # [0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0], + # [0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0], + # [0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0], + # [0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0], + # [0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0], + # [0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0], + # [0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0], + # [0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0], + # [0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0], + # [0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0], + # [0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0], + # [0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0], + # [0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0], + # [0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0], + # [0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0], + # [0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0], + # [0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0], + # [0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0], + # [0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0], + # [0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0], + # [0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0], + # [0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0], + # [0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0], + # [0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0], + # [0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0], + # [0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0]] + # + # fig, axes = plt.subplots(3, 1, figsize=(5, 12)) + # axes[0].imshow(EGO.projections[7].parameters.matrix.get(EGO.name), interpolation=None) + # axes[1].plot((1 - np.abs(EGO.results[1:50,2]-TARGETS[:49])).sum(-1)) + # axes[1].set_xlabel('Stimuli') + # axes[1].set_ylabel('loss_spec') + # axes[2].plot( (EGO.results[1:50,2]*TARGETS[:49]).sum(-1) ) + # axes[2].set_xlabel('Stimuli') + # axes[2].set_ylabel('Correct Logit') + # plt.suptitle(f"Blocked Training") + # plt.show() From bf1e6b38f5c65511d1b4e94340023218bf3d2c5e Mon Sep 17 00:00:00 2001 From: jdcpni Date: Fri, 22 Nov 2024 11:36:24 -0500 Subject: [PATCH 29/34] patch/autodiff_pnl_showgraph (#3125) --- .../Coffee Shop 
World/EGO CSW Model.py | 1 + psyneulink/core/globals/keywords.py | 3 ++- .../library/compositions/pytorchshowgraph.py | 16 ++++++++++++---- tests/composition/test_emcomposition.py | 1 - 4 files changed, 15 insertions(+), 6 deletions(-) diff --git a/Scripts/Models (Under Development)/EGO/Using EMComposition/Coffee Shop World/EGO CSW Model.py b/Scripts/Models (Under Development)/EGO/Using EMComposition/Coffee Shop World/EGO CSW Model.py index 52423c7dbb..f8703e3cab 100644 --- a/Scripts/Models (Under Development)/EGO/Using EMComposition/Coffee Shop World/EGO CSW Model.py +++ b/Scripts/Models (Under Development)/EGO/Using EMComposition/Coffee Shop World/EGO CSW Model.py @@ -252,6 +252,7 @@ def construct_model(model_name:str=model_params['name'], [0] * state_size], # context memory_fill=memory_init, memory_capacity=memory_capacity, + normalize_memories=False, memory_decay_rate=0, softmax_gain=retrieval_softmax_gain, softmax_threshold=retrieval_softmax_threshold, diff --git a/psyneulink/core/globals/keywords.py b/psyneulink/core/globals/keywords.py index 415ed2d7c2..7543fda313 100644 --- a/psyneulink/core/globals/keywords.py +++ b/psyneulink/core/globals/keywords.py @@ -118,6 +118,7 @@ 'PROCESS_EXECUTE', 'PROCESS_INIT', 'PROCESSES', 'PROCESSES_DIM', 'PROCESSING', 'PROCESSING_MECHANISM', 'PROCESSING_PATHWAY', 'PRODUCT', 'PROGRESS_BAR_CHAR', 'PROJECTION', 'PROJECTION_DIRECTION', 'PROJECTION_PARAMS', 'PROJECTION_RECEIVER', 'PROJECTION_SENDER', 'PROJECTION_TYPE', 'PROJECTIONS', 'PROJECTION_COMPONENT_CATEGORY', + 'PNL', 'QUOTIENT', 'RANDOM', 'RANDOM_CONNECTIVITY_MATRIX', 'RATE', 'RATIO', 'REARRANGE_FUNCTION', 'RECEIVER', 'RECEIVER_ARG', 'RECURRENT_TRANSFER_MECHANISM', 'REDUCE_FUNCTION', 'REFERENCE_VALUE', 'RESET', 'RESET_STATEFUL_FUNCTION_WHEN', 'RELU_FUNCTION', 'REST', 'RESULT', 'RESULT', 'ROLES', 'RL_FUNCTION', 'RUN', @@ -143,7 +144,6 @@ from psyneulink._typing import Literal - #region ----------------------------------------- MATRICES ----------------------------------------------------------- class MatrixKeywords: @@ -462,6 +462,7 @@ class Loss(Enum): #region --------------------------------------------- GENERAL ---------------------------------------------------- # General +PNL = 'psyneulink' ON = True OFF = False diff --git a/psyneulink/library/compositions/pytorchshowgraph.py b/psyneulink/library/compositions/pytorchshowgraph.py index 6452ccf9f6..3e7131011a 100644 --- a/psyneulink/library/compositions/pytorchshowgraph.py +++ b/psyneulink/library/compositions/pytorchshowgraph.py @@ -12,16 +12,17 @@ from psyneulink._typing import Optional, Union, Literal -from psyneulink.core.globals.context import ContextFlags, handle_external_context from psyneulink.core.compositions import NodeRole -from psyneulink.core.compositions.showgraph import ShowGraph, SHOW_JUST_LEARNING_PROJECTIONS +from psyneulink.core.compositions.showgraph import ShowGraph, SHOW_JUST_LEARNING_PROJECTIONS, SHOW_LEARNING from psyneulink.core.components.mechanisms.mechanism import Mechanism from psyneulink.core.components.mechanisms.processing.compositioninterfacemechanism import CompositionInterfaceMechanism from psyneulink.core.components.mechanisms.modulatory.control.controlmechanism import ControlMechanism from psyneulink.core.components.projections.projection import Projection from psyneulink.core.components.projections.pathway.mappingprojection import MappingProjection from psyneulink.core.components.projections.modulatory.controlprojection import ControlProjection -from psyneulink.core.globals.keywords import BOLD, NESTED, 
INSET +from psyneulink.core.llvm import ExecutionMode +from psyneulink.core.globals.context import ContextFlags, handle_external_context +from psyneulink.core.globals.keywords import BOLD, INSET, NESTED, PNL __all__ = ['SHOW_PYTORCH'] @@ -55,7 +56,14 @@ def __init__(self, *args, **kwargs): @beartype @handle_external_context(source=ContextFlags.COMPOSITION) def show_graph(self, *args, **kwargs): - """Override of show_graph to check if show_pytorch==True and if so build pytorch rep of autofiffcomposition""" + """Override of show_graph to check for autodiff-specific options + If show_pytorch==True, build pytorch rep of autofiffcomposition + If show_learning==PNL, infer backpropagation learning pathways for Python version of graph + """ + if SHOW_LEARNING in kwargs and kwargs[SHOW_LEARNING] == PNL: + self.composition.infer_backpropagation_learning_pathways(ExecutionMode.Python) + kwargs[SHOW_LEARNING] = True + return super().show_graph(*args, **kwargs) self.show_pytorch = kwargs.pop(SHOW_PYTORCH, self.show_pytorch) context = kwargs.get('context') if self.show_pytorch: diff --git a/tests/composition/test_emcomposition.py b/tests/composition/test_emcomposition.py index e70a683a7c..4413eb0963 100644 --- a/tests/composition/test_emcomposition.py +++ b/tests/composition/test_emcomposition.py @@ -996,7 +996,6 @@ def test_backpropagation_of_error_in_learning(self): np.testing.assert_allclose(result, expected) # Plot (for during debugging): - # # TARGETS = [[0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0], # [0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0], # [0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0], From 7b20f1308674211958ba8cb296a330174f3afbac Mon Sep 17 00:00:00 2001 From: jdcpni Date: Mon, 25 Nov 2024 05:53:50 -0500 Subject: [PATCH 30/34] Refactor/emcomposition/fields class (#3126) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit * • emcomposition.py - add field class - refactor to use fields for all node attributes/arrays --- .../library/compositions/emcomposition.py | 685 ++++++++++-------- tests/composition/test_emcomposition.py | 142 ++-- 2 files changed, 449 insertions(+), 378 deletions(-) diff --git a/psyneulink/library/compositions/emcomposition.py b/psyneulink/library/compositions/emcomposition.py index b850621760..163a17b319 100644 --- a/psyneulink/library/compositions/emcomposition.py +++ b/psyneulink/library/compositions/emcomposition.py @@ -206,8 +206,9 @@ *Fields* ~~~~~~~~ -These arguments are used to specify the names of the fields in a memory entry, which are used as keys and how those are -weighted for retrieval, and whether those weights are learned. +These arguments are used to specify the names of the fields in a memory entry, which are used for its keys and values, +how keys are weighted for retrieval, whether those weights are learned, and which fields are used for computing error +that is propagated through the EMComposition. .. _EMComposition_Field_Specification_Dict: @@ -508,7 +509,9 @@ is in the form of a 3d array, in which rows (axis 0) are entries, columns (axis 1) are fields, and items (axis 2) are the values of an entry in a given field. The number of fields is determined by the `memory_template ` argument of the EMComposition's constructor, and the number of entries is determined -by the `memory_capacity ` argument. +by the `memory_capacity ` argument. Information about the fields is stored in the +`fields ` attribute, which is a list of `Field` objects containing information about the nodes +and values associated with each field. .. 
_EMComposition_Memory_Storage: .. technical_note:: @@ -923,6 +926,7 @@ """ import numpy as np import warnings +from enum import Enum import psyneulink.core.scheduling.condition as conditions @@ -943,15 +947,18 @@ from psyneulink.core.globals.keywords import \ (ADAPTIVE, ALL, ARG_MAX, ARG_MAX_INDICATOR, AUTO, CONTEXT, CONTROL, DEFAULT_INPUT, DEFAULT_VARIABLE, DOT_PRODUCT, EM_COMPOSITION, FULL_CONNECTIVITY_MATRIX, GAIN, IDENTITY_MATRIX, INPUT_SHAPES, L0, - MULTIPLICATIVE_PARAM, NAME, PARAMS, PROB_INDICATOR, PRODUCT, PROJECTIONS, RANDOM, VARIABLE) -from psyneulink.core.globals.utilities import convert_all_elements_to_np_array, is_numeric_scalar + MULTIPLICATIVE_PARAM, NAME, PARAMS, PROB_INDICATOR, PRODUCT, PROJECTIONS, RANDOM, VALUE, VARIABLE) +from psyneulink.core.globals.utilities import \ + ContentAddressableList, convert_all_elements_to_np_array, is_numeric_scalar from psyneulink.core.globals.registry import name_without_suffix from psyneulink.core.llvm import ExecutionMode -__all__ = ['EMComposition', 'EMCompositionError', 'FIELD_WEIGHT', 'LEARN_FIELD_WEIGHT', +__all__ = ['EMComposition', 'EMCompositionError', 'FIELD_WEIGHT', 'KEY', 'LEARN_FIELD_WEIGHT', 'PROBABILISTIC', 'TARGET_FIELD','WEIGHTED_AVG'] +KEY = 'key' + # softmax_choice options: STORAGE_PROB = 'storage_prob' WEIGHTED_AVG = ALL @@ -1054,6 +1061,89 @@ def __str__(self): return repr(self.error_value) +class FieldType(Enum): + KEY = 0 + VALUE = 1 + + +class Field(): + """Object that contains information about a field in an EMComposition's `memory `. + """ + name = None + def __init__(self, + name:str=None, + index:int=None, + type:FieldType=None, + weight:float=None, + learn_weight:bool=None, + learning_rate:float=None, + target:bool=None): + self.name = name + self.index = index + self.type = type + self.weight = weight + self.learn_weight = learn_weight + self.learning_rate = learning_rate + self.target = target + self.input_node = None + self.match_node = None + self.weight_node = None + self.weighted_match_node = None + self.retrieved_node = None + # Projections for all fields: + self.storage_projection = None # Projection from input_node to storage_node + self.retrieve_projection = None # Projection from softmax_node ("RETRIEVE" node) to retrieved_node + # Projections for key fields: + self.memory_projection = None # Projection from query_input_node to match_node + self.concatenation_projection = None # Projection from query_input_node to concatenate_queries_node + self.match_projection = None # Projection from match_node to weighted_match_node + self.weight_projection = None # Projection from weight_node to weighted_match_node + self.weighted_match_projection = None # Projection from weighted_match_node to combined_matches_node + + @property + def nodes(self): + """Return all Nodes assigned to the field.""" + return [node for node in + [self.input_node, + self.match_node, + self.weighted_match_node, + self.weight_node, + self.retrieved_node] + if node is not None] + @property + def projections(self): + """Return all Projections assigned to the field.""" + return [proj for proj in [self.memory_projection, + self.storage_projection, + self.match_projection, + self.weight_projection, + self.weighted_match_projection, + self.retrieve_projection] + if proj is not None] + @property + def query(self): + return self.input_node.variable + + @property + def match(self): + return self.match_node.value + + @property + def weighted_match(self): + return self.weighted_match_node.value + + @property + def retrieved_memory(self): + return 
self.retrieve_node.value + + @property + def memories(self): + return self.retrieve_node.path_afferents[0].matrix + + def retrieval_operation(self): + return self.retrieve_node.path_afferents[0].function.operation + + class EMComposition(AutodiffComposition): """ EMComposition( \ @@ -1176,7 +1266,7 @@ class EMComposition(AutodiffComposition): .. technical_note:: use_storage_node : bool : default True specifies whether to use a `LearningMechanism` to store entries in `memory `. - If False, a method on EMComposition is used rather than a LearningMechanism. This is meant for + If False, a method on EMComposition is used rather than a LearningMechanism. This is meant for debugging, and precludes use of `import_composition ` to integrate the EMComposition into another Composition; to do so, use_storage_node must be True (default). @@ -1199,6 +1289,10 @@ class EMComposition(AutodiffComposition): COMMENT executing its `run ` or learn methods with the entry as the ``inputs`` argument. + fields : ContentAddressableList[Field] + list of `Field` objects, each of which contains information about the nodes and values of a field in the + EMComposition's memory (see `Field`). + .. _EMComposition_Parameters: memory_capacity : int @@ -1606,6 +1700,9 @@ def __init__(self, memory_template, memory_capacity = self._parse_memory_template(memory_template, memory_capacity, memory_fill) + + self.fields = ContentAddressableList(component_type=Field) + (field_names, field_weights, learn_field_weights, @@ -1654,8 +1751,7 @@ def __init__(self, **kwargs ) - self._validate_options_with_learning(learn_field_weights, - use_gating_for_weighting, + self._validate_options_with_learning(use_gating_for_weighting, enable_learning, softmax_choice) @@ -1979,6 +2075,10 @@ def _parse_fields_dict(name, fields, num_fields)->(list,list,list,list): if parsed_field_weights[i] is None and lfw is not False: warnings.warn(f"Learning was specified for field '{field_names[i]}' in the 'learn_field_weights' " f"arg for '{name}', but it is not allowed for value fields; it will be ignored.") + elif learn_field_weights in {None, True, False}: + learn_field_weights = [False] * len(parsed_field_weights) + else: + assert False, f"PROGRAM ERROR: learn_field_weights ({learn_field_weights}) is not a list, tuple or bool." 
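# [Editor's note -- illustrative sketch, not part of this patch] A minimal
# example of how the per-field specifications parsed above are expected to
# line up, using the fields-dict form of the constructor shown in the tests;
# the field names and values here are hypothetical:
#
#     em = EMComposition(
#         memory_template=(3, 2),                   # 3 fields, each of length 2
#         fields={'A': {pnl.FIELD_WEIGHT: 1,        # key field, fixed weight
#                       pnl.LEARN_FIELD_WEIGHT: False},
#                 'B': {pnl.FIELD_WEIGHT: 2,        # key field, learnable weight
#                       pnl.LEARN_FIELD_WEIGHT: True},
#                 'C': {pnl.FIELD_WEIGHT: None}})   # value field
#
# After parsing, the field weights should align as [1, 2, None] (subject to
# normalization when normalize_field_weights is True) and learn_field_weights
# as [False, True, False]; a None weight marks a value field, for which any
# learning specification is ignored (per the warning above).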
# Memory structure Parameters parsed_field_names = field_names.copy() if field_names is not None else None @@ -2004,7 +2104,12 @@ def _parse_fields_dict(name, fields, num_fields)->(list,list,list,list): self.value_names = [parsed_field_names[i] for i in range(self.num_fields) if i not in self.key_indices] else: self.key_names = [f'{i}' for i in range(self.num_keys)] if self.num_keys > 1 else ['KEY'] - self.value_names = [f'{i} [VALUE]' for i in range(self.num_values)] if self.num_values > 1 else ['VALUE'] + if self.num_values > 1: + self.value_names = [f'{i} [VALUE]' for i in range(self.num_values)] + elif self.num_values == 1: + self.value_names = ['VALUE'] + else: + self.value_names = [] parsed_field_names = self.key_names + self.value_names user_specified_concatenate_queries = concatenate_queries or False @@ -2037,6 +2142,18 @@ def _parse_fields_dict(name, fields, num_fields)->(list,list,list,list): self.learning_rate = learning_rate + for i, name, weight, learn_weight, target in zip(range(self.num_fields), + parsed_field_names, + parsed_field_weights, + learn_field_weights, + target_fields): + self.fields.append(Field(name=name, + index=i, + type=FieldType.KEY if weight is not None else FieldType.VALUE, + weight=weight, + learn_weight=learn_weight, + target=target)) + return (parsed_field_names, parsed_field_weights, learn_field_weights, @@ -2083,45 +2200,16 @@ def _construct_pathways(self, # Construct Nodes -------------------------------------------------------------------------------- - field_weighting = len([weight for weight in field_weights if weight]) > 1 and not concatenate_queries - - # First, construct Nodes of Composition with their Projections - self.query_input_nodes = self._construct_query_input_nodes(field_weights) - self.value_input_nodes = self._construct_value_input_nodes(field_weights) - self.query_and_value_input_nodes = self.query_input_nodes + self.value_input_nodes - - # Get list of nodes in order specified in self.field_names - self.input_nodes = [None] * len(field_weights) - for i in range(self.num_keys): - self.input_nodes[self.key_indices[i]] = self.query_input_nodes[i] - for i in range(self.num_values): - self.input_nodes[self.value_indices[i]] = self.value_input_nodes[i] - assert all(self.input_nodes), "PROGRAM ERROR: input_nodes not fully populated." 
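# [Editor's note -- illustrative sketch, not part of this patch] After this
# refactoring, the parallel node lists assembled by the removed code above
# are derived on demand from self.fields (see the Properties region added
# near the end of this file's diff), so per-field state can be inspected
# directly, e.g.:
#
#     for field in em.fields:          # 'em' is a hypothetical EMComposition
#         print(field.name,
#               field.type,            # FieldType.KEY or FieldType.VALUE
#               [n.name for n in field.nodes],
#               len(field.projections))
#
# input_nodes, match_nodes, retrieved_nodes, etc. remain available, but as
# read-only properties computed from the Field objects rather than as
# attributes assigned once during construction.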
- - self.concatenate_queries_node = self._construct_concatenate_queries_node(concatenate_queries) - self.match_nodes = self._construct_match_nodes(memory_template, memory_capacity, - concatenate_queries,normalize_memories) - self.field_weight_nodes = self._construct_field_weight_nodes(field_weights, - concatenate_queries, - use_gating_for_weighting) - self.weighted_match_nodes = self._construct_weighted_match_nodes(memory_capacity, field_weights) - - self.combined_matches_node = self._construct_combined_matches_node(memory_capacity, - field_weighting, - use_gating_for_weighting) - self.softmax_node = self._construct_softmax_node(memory_capacity, - softmax_gain, - softmax_threshold, - softmax_choice) - - self.softmax_gain_control_node = self._construct_softmax_gain_control_node(softmax_gain) - - self.retrieved_nodes = self._construct_retrieved_nodes(memory_template) - - if use_storage_node: - self.storage_node = self._construct_storage_node(memory_template, field_weights, - self.concatenate_queries_node, - memory_decay_rate, storage_prob) + self._construct_input_nodes() + self._construct_concatenate_queries_node(concatenate_queries) + self._construct_match_nodes(memory_template, memory_capacity, concatenate_queries,normalize_memories) + self._construct_field_weight_nodes(concatenate_queries, use_gating_for_weighting) + self._construct_weighted_match_nodes(concatenate_queries) + self._construct_combined_matches_node(concatenate_queries, memory_capacity, use_gating_for_weighting) + self._construct_softmax_node(memory_capacity, softmax_gain, softmax_threshold, softmax_choice) + self._construct_softmax_gain_control_node(softmax_gain) + self._construct_retrieved_nodes(memory_template) + self._construct_storage_node(use_storage_node, memory_template, memory_decay_rate, storage_prob) # Do some validation and get singleton softmax and match Nodes for concatenated queries if self.concatenate_queries: @@ -2130,48 +2218,28 @@ def _construct_pathways(self, assert not self.field_weight_nodes, \ f"PROGRAM ERROR: There should be no field_weight_nodes for concatenated queries." 
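# [Editor's note -- schematic annotation, not part of this patch] The
# constructor calls above assemble the retrieval pathway roughly as follows
# (one branch per key field; names are descriptive, not exact Node names):
#
#     query_input --"MEMORY"--> match ------------\
#     field_weight ------------------> weighted_match --> combined_matches
#                                                               |
#     retrieved (one per field) <-- softmax <------------------/
#
# With concatenate_queries=True, the per-key branch collapses into the
# single CONCATENATE -> MATCH chain validated just above, and the softmax
# node reads from that match node directly. The storage node operates
# separately, writing each field's input back into the rows of the MEMORY
# and retrieval matrices.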
- # Create field_index map for nodes and projections - _field_index_map = {} - for i in range(len(self.input_nodes)): - _field_index_map[self.input_nodes[i]] = i - if self._use_storage_node: - _field_index_map[self.storage_node.path_afferents[i]] = i - _field_index_map[self.retrieved_nodes[i]] = i - _field_index_map[self.retrieved_nodes[i].path_afferents[0]] = i + + # Create _field_index_map by first assigning indices for all Field Nodes and their Projections + self._field_index_map = {node: field.index for field in self.fields for node in field.nodes} + self._field_index_map.update({proj: field.index for field in self.fields for proj in field.projections}) if self.concatenate_queries: + # Add projections to concatenated_queries_node with indices of sender query_input_nodes for proj in self.concatenate_queries_node.path_afferents: - _field_index_map[proj] = _field_index_map[proj.sender.owner] - _field_index_map[self.concatenate_queries_node] = None - _field_index_map[self.match_nodes[0]] = None - _field_index_map[self.match_nodes[0].path_afferents[0]] = None - _field_index_map[self.match_nodes[0].efferents[0]] = None - else: - # Input nodes, Projections to storage_node, retrieval Projections and retrieved_nodes - for match_node in self.match_nodes: - field_index = _field_index_map[match_node.path_afferents[0].sender.owner] - # match_node - _field_index_map[match_node] = field_index - # afferent MEMORY Projection - _field_index_map[match_node.path_afferents[0]] = field_index - # efferent Projection to weighted_match_node - _field_index_map[match_node.efferents[0]] = field_index - # weighted_match_node - _field_index_map[match_node.efferents[0].receiver.owner] = field_index - # Projection to combined_matches_node - _field_index_map[match_node.efferents[0].receiver.owner.efferents[0]] = field_index - for field_weight_node in self.field_weight_nodes: - # Weight nodes; - _field_index_map[field_weight_node] = _field_index_map[field_weight_node.efferents[0].receiver.owner] - # Weight Projections; - _field_index_map[field_weight_node.efferents[0]] = _field_index_map[field_weight_node] - self._field_index_map = _field_index_map + self._field_index_map[proj] = self._field_index_map[proj.sender.owner] + # No indices for singleton Nodes and Projections from concatenated_queries_node through to softmax_node + self._field_index_map[self.concatenate_queries_node] = None + self._field_index_map[self.match_nodes[0]] = None + self._field_index_map[self.match_nodes[0].path_afferents[0]] = None + self._field_index_map[self.match_nodes[0].efferents[0]] = None + # Construct Pathways -------------------------------------------------------------------------------- + # FIX: REFACTOR TO ITERATE OVER Fields # LEARNING NOT ENABLED -------------------------------------------------- # Set up pathways WITHOUT PsyNeuLink learning pathways if not self.enable_learning: - self.add_nodes(self.query_input_nodes + self.value_input_nodes) + self.add_nodes(self.input_nodes) if use_storage_node: self.add_node(self.storage_node) if self.concatenate_queries_node: @@ -2215,7 +2283,7 @@ def _construct_pathways(self, self.add_node(self.softmax_gain_control_node) # field_weights -> weighted_softmax pathways - if self.field_weight_nodes: + if any(self.field_weight_nodes): for i in range(self.num_keys): self.add_linear_processing_pathway([self.field_weight_nodes[i], self.weighted_match_nodes[i]]) @@ -2229,59 +2297,50 @@ def _construct_pathways(self, if use_storage_node: self.add_node(self.storage_node) - def 
_construct_query_input_nodes(self, field_weights)->list: - """Create one node for each key to be used as cue for retrieval (and then stored) in memory. - Used to assign new set of weights for Projection for query_input_node[i] -> match_node[i] - where i is selected randomly without replacement from (0->memory_capacity) + def _construct_input_nodes(self): + """Create one node for each input to EMComposition and identify as key or value """ - assert len(self.key_indices) == self.num_keys, \ f"PROGRAM ERROR: number of keys ({self.num_keys}) does not match number of " \ f"non-zero values in field_weights ({len(self.key_indices)})." - - query_input_nodes = [ProcessingMechanism( - input_shapes=len(self.entry_template[self.key_indices[i]]), - name=f'{self.key_names[i]} [QUERY]') - for i in range(self.num_keys)] - - return query_input_nodes - - def _construct_value_input_nodes(self, field_weights)->list: - """Create one input node for each value to be stored in memory. - Used to assign new set of weights for Projection for combined_matches_node -> retrieved_node[i] - where i is selected randomly without replacement from (0->memory_capacity) - """ - assert len(self.value_indices) == self.num_values, \ f"PROGRAM ERROR: number of values ({self.num_values}) does not match number of " \ - f"non-zero values in field_weights ({len(self.value_indices)})." + f"None's in field_weights ({len(self.value_indices)})." - value_input_nodes = [ProcessingMechanism( - input_shapes=len(self.entry_template[self.value_indices[i]]), - name= f'{self.value_names[i]} [VALUE]') - for i in range(self.num_values)] + for field in [self.fields[i] for i in self.key_indices]: + field.input_node = ProcessingMechanism(name=f'{field.name} [QUERY]', + input_shapes=len(self.entry_template[field.index])) + field.type = FieldType.KEY - return value_input_nodes + for field in [self.fields[i] for i in self.value_indices]: + field.input_node = ProcessingMechanism(name=f'{field.name} [VALUE]', + input_shapes=len(self.entry_template[field.index])) + field.type = FieldType.VALUE - def _construct_concatenate_queries_node(self, concatenate_queries)->ProcessingMechanism: + def _construct_concatenate_queries_node(self, concatenate_queries): """Create node that concatenates the inputs for all keys into a single vector - Used to create a matrix for Projectoin from match / memory weights from concatenate_node -> match_node + Used to create a matrix for Projection from match / memory weights from concatenate_node -> match_node """ - # One node that concatenates inputs from all keys - if not concatenate_queries: - return None + if concatenate_queries: + # One node that concatenates inputs from all keys + self.concatenate_queries_node = ( + ProcessingMechanism(name=CONCATENATE_QUERIES_NAME, + function=Concatenate, + input_ports=[{NAME: 'CONCATENATE', + INPUT_SHAPES: len(self.query_input_nodes[i].output_port.value), + PROJECTIONS: MappingProjection( + name=f'{self.key_names[i]} to CONCATENATE', + sender=self.query_input_nodes[i].output_port, + matrix=IDENTITY_MATRIX)} + for i in range(self.num_keys)])) + # Add Projections from query_input_nodes to concatenate_queries_node to each Field + for i, proj in enumerate(self.concatenate_queries_node.path_afferents): + self.fields[self.key_indices[i]].concatenation_projection = proj + else: - return ProcessingMechanism(function=Concatenate, - input_ports=[{NAME: 'CONCATENATE', - INPUT_SHAPES: len(self.query_input_nodes[i].output_port.value), - PROJECTIONS: MappingProjection( - name=f'{self.key_names[i]} to 
CONCATENATE', - sender=self.query_input_nodes[i].output_port, - matrix=IDENTITY_MATRIX)} - for i in range(self.num_keys)], - name=CONCATENATE_QUERIES_NAME) - - def _construct_match_nodes(self, memory_template, memory_capacity, concatenate_queries, normalize_memories)->list: + self.concatenate_queries_node = None + + def _construct_match_nodes(self, memory_template, memory_capacity, concatenate_queries, normalize_memories): """Create nodes that, for each key field, compute the similarity between the input and each item in memory. - If self.concatenate_queries is True, then all inputs for keys from concatenated_keys_node are assigned a single match_node, and weights from memory_template are assigned to a Projection @@ -2299,144 +2358,131 @@ def _construct_match_nodes(self, memory_template, memory_capacity, concatenate_q for key in memory_template[0]] if concatenate_queries: - # Get fields of memory structure corresponding to the keys - # Number of rows should total number of elements over all keys, + # Assign one match_node for concatenate_queries_node + # - get fields of memory structure corresponding to the keys + # - number of rows should total number of elements over all keys, # and columns should number of items in memory matrix =np.array([np.concatenate((memory_template[:,:self.num_keys][i])) for i in range(memory_capacity)]).transpose() - matrix = np.array(matrix.tolist()) - match_nodes = [ - ProcessingMechanism( - input_ports={NAME: 'CONCATENATED_INPUTS', - INPUT_SHAPES: memory_capacity, - PROJECTIONS: MappingProjection(sender=self.concatenate_queries_node, - matrix=matrix, - function=MatrixTransform( - operation=args[0][OPERATION], - normalize=args[0][NORMALIZE]), - name=f'MEMORY')}, - name='MATCH')] - match_nodes[0]._field_idx = 0 - - # One node for each key + memory_projection = MappingProjection(name=f'MEMORY', + sender=self.concatenate_queries_node, + matrix=np.array(matrix.tolist()), + function=MatrixTransform(operation=args[0][OPERATION], + normalize=args[0][NORMALIZE])) + self.concatenated_match_node = ProcessingMechanism(name='MATCH', + input_ports={NAME: 'CONCATENATED_INPUTS', + INPUT_SHAPES: memory_capacity, + PROJECTIONS: memory_projection}) + # Assign None as match_node for all key Fields (since they first project to concatenate_queries_node) + for field in [field for field in self.fields if field.type == FieldType.KEY]: + field.match_node = None + else: - match_nodes = [ - ProcessingMechanism( - input_ports= { - INPUT_SHAPES:memory_capacity, - PROJECTIONS: MappingProjection(sender=self.query_input_nodes[i].output_port, - matrix = np.array( - memory_template[:,i].tolist()).transpose().astype(float), - function=MatrixTransform(operation=args[i][OPERATION], - normalize=args[i][NORMALIZE]), - name=f'MEMORY for {self.key_names[i]} [KEY]')}, - name=self.key_names[i] + MATCH_TO_KEYS_AFFIX) - for i in range(self.num_keys) - ] - - return match_nodes - - # FIX: CONVERT TO _construct_weight_control_nodes - def _construct_field_weight_nodes(self, field_weights, concatenate_queries, use_gating_for_weighting)->list: + # Assign each key Field its own match_node and "memory" Projection to it + for i in range(self.num_keys): + field = self.fields[self.key_indices[i]] + memory_projection = MappingProjection(name=f'MEMORY for {self.key_names[i]} [KEY]', + sender=self.query_input_nodes[i].output_port, + matrix = np.array( + memory_template[:,i].tolist()).transpose().astype(float), + function=MatrixTransform(operation=args[i][OPERATION], + normalize=args[i][NORMALIZE])) + field.match_node = 
(ProcessingMechanism(name=self.key_names[i] + MATCH_TO_KEYS_AFFIX, + input_ports= {INPUT_SHAPES:memory_capacity, + PROJECTIONS: memory_projection})) + field.memory_projection = memory_projection + + + def _construct_field_weight_nodes(self, concatenate_queries, use_gating_for_weighting): """Create ProcessingMechanisms that weight each key's softmax contribution to the retrieved values.""" - - field_weight_nodes = [] - if not concatenate_queries and self.num_keys > 1: - if use_gating_for_weighting: - field_weight_nodes = [GatingMechanism(input_ports={VARIABLE: - np.array(field_weights[self.key_indices[i]]), - PARAMS:{DEFAULT_INPUT: DEFAULT_VARIABLE}, - NAME: 'OUTCOME'}, - gate=[key_match_pair[1].output_ports[0]], - name= 'WEIGHT' if self.num_keys == 1 - else f'{self.key_names[i]}{WEIGHT_AFFIX}') - for i, key_match_pair in enumerate(zip(self.query_input_nodes, - self.match_nodes))] - else: - field_weight_nodes = [ProcessingMechanism(input_ports={VARIABLE: - np.array(field_weights[self.key_indices[i]]), - PARAMS: {DEFAULT_INPUT: DEFAULT_VARIABLE}, - NAME: 'FIELD_WEIGHT'}, - name= WEIGHT if self.num_keys == 1 - else f'{self.key_names[i]}{WEIGHT_AFFIX}') - for i in range(self.num_keys)] - return field_weight_nodes - - def _construct_weighted_match_nodes(self, memory_capacity, field_weights)->list: - """Create nodes that weight the output of the match node for each key.""" + for field in [self.fields[i] for i in self.key_indices]: + name = WEIGHT if self.num_keys == 1 else f'{field.name}{WEIGHT_AFFIX}' + variable = np.array(self.field_weights[field.index]) + params = {DEFAULT_INPUT: DEFAULT_VARIABLE} + if use_gating_for_weighting: + field.weight_node = GatingMechanism(name=name, + input_ports={NAME: 'OUTCOME', + VARIABLE: variable, + PARAMS: params}, + gate=field.match_node.output_ports[0]) + else: + field.weight_node = ProcessingMechanism(name=name, + input_ports={NAME: 'FIELD_WEIGHT', + VARIABLE: variable, + PARAMS: params}) - weighted_match_nodes = \ - [ProcessingMechanism(default_variable=[self.match_nodes[i].output_port.value, - self.match_nodes[i].output_port.value], - input_ports=[{PROJECTIONS: - MappingProjection(sender=match_fw_pair[0], - matrix=IDENTITY_MATRIX, - name=f'{MATCH} to {WEIGHTED_MATCH_NODE_NAME} ' - f'for {self.key_names[i]}')}, - {PROJECTIONS: - MappingProjection(sender=match_fw_pair[1], - matrix=FULL_CONNECTIVITY_MATRIX, - name=f'{WEIGHT} to {WEIGHTED_MATCH_NODE_NAME} ' - f'for {self.key_names[i]}')}], - function=LinearCombination(operation=PRODUCT), - name=self.key_names[i] + WEIGHTED_MATCH_AFFIX) - for i, match_fw_pair in enumerate(zip(self.match_nodes, - self.field_weight_nodes))] - - return weighted_match_nodes - - def _construct_softmax_gain_control_node(self, softmax_gain)->Optional[ControlMechanism]: + def _construct_weighted_match_nodes(self, concatenate_queries): + """Create nodes that weight the output of the match node for each key.""" + if not concatenate_queries and self.num_keys > 1: + for field in [self.fields[i] for i in self.key_indices]: + field.weighted_match_node = ( + ProcessingMechanism(name=field.name + WEIGHTED_MATCH_AFFIX, + default_variable=[field.match_node.output_port.value, + field.match_node.output_port.value], + input_ports=[{PROJECTIONS: + MappingProjection(name=(f'{MATCH} to {WEIGHTED_MATCH_NODE_NAME} ' + f'for {field.name}'), + sender=field.match_node, + matrix=IDENTITY_MATRIX)}, + {PROJECTIONS: + MappingProjection(name=(f'{WEIGHT} to {WEIGHTED_MATCH_NODE_NAME} ' + f'for {field.name}'), + sender=field.weight_node, + 
matrix=FULL_CONNECTIVITY_MATRIX)}], + function=LinearCombination(operation=PRODUCT))) + field.match_projection = field.match_node.efferents[0] + field.weight_projection = field.weight_node.efferents[0] + + def _construct_softmax_gain_control_node(self, softmax_gain): """Create nodes that set the softmax gain (inverse temperature) for each softmax_node.""" - + node = None if softmax_gain == CONTROL: - return ControlMechanism(monitor_for_control=self.combined_matches_node, + node = ControlMechanism(name='SOFTMAX GAIN CONTROL', + monitor_for_control=self.combined_matches_node, control_signals=[(GAIN, self.softmax_node)], - function=get_softmax_gain, - name='SOFTMAX GAIN CONTROL') - else: - return None + function=get_softmax_gain) + self.softmax_gain_control_node = node def _construct_combined_matches_node(self, + concatenate_queries, memory_capacity, - field_weighting, use_gating_for_weighting - )->ProcessingMechanism: + ): """Create node that combines weighted matches for all keys into one match vector.""" - if self.num_keys == 1 or self.concatenate_queries_node: + self.combined_matches_node = None return + field_weighting = len([weight for weight in self.field_weights if weight]) > 1 and not concatenate_queries + if not field_weighting or use_gating_for_weighting: input_source = self.match_nodes else: input_source = self.weighted_match_nodes - combined_matches_node = ( - ProcessingMechanism(input_ports=[{INPUT_SHAPES:memory_capacity, + self.combined_matches_node = ( + ProcessingMechanism(name=COMBINE_MATCHES_NODE_NAME, + input_ports=[{INPUT_SHAPES:memory_capacity, PROJECTIONS:[MappingProjection(sender=s, matrix=IDENTITY_MATRIX, name=f'{WEIGHTED_MATCH_NODE_NAME} ' f'for {self.key_names[i]} to ' f'{COMBINE_MATCHES_NODE_NAME}') - for i, s in enumerate(input_source)]}], - name=COMBINE_MATCHES_NODE_NAME)) + for i, s in enumerate(input_source)]}])) - assert len(combined_matches_node.output_port.value) == memory_capacity, \ - 'PROGRAM ERROR: number of items in combined_matches_node ' \ - f'({len(combined_matches_node.output_port)}) does not match memory_capacity ({self.memory_capacity})' + for i, proj in enumerate(self.combined_matches_node.path_afferents): + self.fields[self.key_indices[i]].weighted_match_projection = proj - return combined_matches_node + assert len(self.combined_matches_node.output_port.value) == memory_capacity, \ + 'PROGRAM ERROR: number of items in combined_matches_node ' \ + f'({len(self.combined_matches_node.output_port)}) does not match memory_capacity ({self.memory_capacity})' - def _construct_softmax_node(self, memory_capacity, softmax_gain, softmax_threshold, softmax_choice)->list: + def _construct_softmax_node(self, memory_capacity, softmax_gain, softmax_threshold, softmax_choice): """Create node that applies softmax to output of combined_matches_node.""" - if self.num_keys == 1 or self.concatenate_queries_node: input_source = self.match_nodes[0] proj_name =f'{MATCH} to {SOFTMAX_NODE_NAME}' - # elif self.concatenate_queries_node: - # input_source = self.concatenate_queries_node - # proj_name =f'{CONCATENATE_QUERIES_NAME} to {SOFTMAX_NODE_NAME}' else: input_source = self.combined_matches_node proj_name =f'{COMBINE_MATCHES_NODE_NAME} to {SOFTMAX_NODE_NAME}' @@ -2446,74 +2492,47 @@ def _construct_softmax_node(self, memory_capacity, softmax_gain, softmax_thresho # ARG_MAX_INDICATOR returns the entry unmodified softmax_choice = ARG_MAX_INDICATOR - softmax_node = ProcessingMechanism(input_ports={INPUT_SHAPES: memory_capacity, - PROJECTIONS: MappingProjection( - 
sender=input_source, - matrix=IDENTITY_MATRIX, - name=proj_name)}, - function=SoftMax(gain=softmax_gain, - mask_threshold=softmax_threshold, - output=softmax_choice, - adapt_entropy_weighting=.95), - name=SOFTMAX_NODE_NAME) - - return softmax_node - - def _validate_options_with_learning(self, - learn_field_weights, - use_gating_for_weighting, - enable_learning, - softmax_choice): - if use_gating_for_weighting and enable_learning: - warnings.warn(f"The 'enable_learning' option for '{self.name}' cannot be used with " - f"'use_gating_for_weighting' set to True; this will generate an error if its " - f"'learn' method is called. Set 'use_gating_for_weighting' to True in order " - f"to enable learning of field weights.") - - if softmax_choice in {ARG_MAX, PROBABILISTIC} and enable_learning: - warnings.warn(f"The 'softmax_choice' arg of '{self.name}' is set to '{softmax_choice}' with " - f"'enable_learning' set to True; this will generate an error if its " - f"'learn' method is called. Set 'softmax_choice' to WEIGHTED_AVG before learning.") + self.softmax_node = ProcessingMechanism(name=SOFTMAX_NODE_NAME, + input_ports={INPUT_SHAPES: memory_capacity, + PROJECTIONS: MappingProjection( + sender=input_source, + matrix=IDENTITY_MATRIX, + name=proj_name)}, + function=SoftMax(gain=softmax_gain, + mask_threshold=softmax_threshold, + output=softmax_choice, + adapt_entropy_weighting=.95)) def _construct_retrieved_nodes(self, memory_template)->list: """Create nodes that report the value field(s) for the item(s) matched in memory. """ - self.retrieved_key_nodes = \ - [ProcessingMechanism(input_ports={INPUT_SHAPES: len(self.query_input_nodes[i].variable[0]), - PROJECTIONS: - MappingProjection( - sender=self.softmax_node, - matrix=memory_template[:,i], - name=f'MEMORY FOR {self.key_names[i]} [RETRIEVE KEY]') - }, - name= self.key_names[i] + RETRIEVED_AFFIX) - for i in range(self.num_keys)] - - self.retrieved_value_nodes = \ - [ProcessingMechanism(input_ports={INPUT_SHAPES: len(self.value_input_nodes[i].variable[0]), - PROJECTIONS: - MappingProjection( - sender=self.softmax_node, - matrix=memory_template[:, - i + self.num_keys], - name=f'MEMORY FOR {self.value_names[i]} [RETRIEVE VALUE]')}, - name= self.value_names[i] + RETRIEVED_AFFIX) - for i in range(self.num_values)] - - retrieved_nodes = self.retrieved_key_nodes + self.retrieved_value_nodes - - # Return nodes in order sorted by self.field_names - # (use name_without_suffix as reference in case more than one EMComposition is created, - # in which case retrieved_nodes will have "-" appended to their name) - return [node for name in self.field_names for node in retrieved_nodes - if node in retrieved_nodes if (name + RETRIEVED_AFFIX) == name_without_suffix(node.name)] + key_idx = 0 + value_idx = 0 + for field in self.fields: + # FIX: 11/24/24 - REFACTOR TO USE memory_template[:,self.index] ONCE MEMORY IS REFACTORED BASED ON FIELDS + if field.type == FieldType.KEY: + matrix = memory_template[:,key_idx] + key_idx += 1 + else: + matrix = memory_template[:,self.num_keys + value_idx] + key_idx += 1 + + field.retrieved_node = ( + ProcessingMechanism(name=field.name + RETRIEVED_AFFIX, + input_ports={INPUT_SHAPES: len(field.input_node.variable[0]), + PROJECTIONS: + MappingProjection( + sender=self.softmax_node, + matrix=matrix, + name=f'MEMORY FOR {field.name} ' + f'[RETRIEVE {field.type.name}]')})) + field.retrieve_projection = field.retrieved_node.path_afferents[0] def _construct_storage_node(self, + use_storage_node, memory_template, - field_weights, - 
concatenate_queries_node, memory_decay_rate, - storage_prob)->list: + storage_prob): """Create EMStorageMechanism that stores the key and value inputs in memory. Memories are stored by adding the current input to each field to the corresponding row of the matrix for the Projection from the query_input_node (or concatenate_node) to the matching_node and retrieved_node for keys, @@ -2541,24 +2560,22 @@ def _construct_storage_node(self, - **storage_prob** -- probability for storing an entry in `memory `. """ - - learning_signals = [match_node.input_port.path_afferents[0] - for match_node in self.match_nodes] + \ - [retrieved_node.input_port.path_afferents[0] - for retrieved_node in self.retrieved_nodes] - - storage_node = EMStorageMechanism(default_variable=[self.input_nodes[i].value[0] - for i in range(self.num_fields)], - fields=[self.input_nodes[i] for i in range(self.num_fields)], - field_types=[0 if weight is None else 1 for weight in field_weights], - concatenation_node=concatenate_queries_node, - memory_matrix=memory_template, - learning_signals=learning_signals, - storage_prob=storage_prob, - decay_rate = memory_decay_rate, - name=STORE_NODE_NAME) - - return storage_node + if use_storage_node: + learning_signals = [match_node.input_port.path_afferents[0] + for match_node in self.match_nodes] + [retrieved_node.input_port.path_afferents[0] + for retrieved_node in self.retrieved_nodes] + self.storage_node = ( + EMStorageMechanism(default_variable=[field.input_node.value[0] for field in self.fields], + fields=[field.input_node for field in self.fields], + field_types=[1 if field.type is FieldType.KEY else 0 for field in self.fields], + concatenation_node=self.concatenate_queries_node, + memory_matrix=memory_template, + learning_signals=learning_signals, + storage_prob=storage_prob, + decay_rate = memory_decay_rate, + name=STORE_NODE_NAME)) + for field in self.fields: + field.storage_projection = self.storage_node.path_afferents[field.index] def _set_learning_attributes(self): """Set learning-related attributes for Node and Projections @@ -2578,7 +2595,6 @@ def _set_learning_attributes(self): learning_rate = True elif isinstance(self.learn_field_weights, (bool, int, float)): learning_rate = self.learn_field_weights - # Use individually specified learning_rate else: # FIX: THIS NEEDS TO USE field_index_map, BUT THAT DOESN'T SEEM TO HAVE THE WEIGHT PROJECTION YET @@ -2597,6 +2613,22 @@ def _set_learning_attributes(self): if projection.learning_mechanism: projection.learning_mechanism.learning_rate = learning_rate + def _validate_options_with_learning(self, + use_gating_for_weighting, + enable_learning, + softmax_choice): + if use_gating_for_weighting and enable_learning: + warnings.warn(f"The 'enable_learning' option for '{self.name}' cannot be used with " + f"'use_gating_for_weighting' set to True; this will generate an error if its " + f"'learn' method is called. Set 'use_gating_for_weighting' to True in order " + f"to enable learning of field weights.") + + if softmax_choice in {ARG_MAX, PROBABILISTIC} and enable_learning: + warnings.warn(f"The 'softmax_choice' arg of '{self.name}' is set to '{softmax_choice}' with " + f"'enable_learning' set to True; this will generate an error if its " + f"'learn' method is called. 
Set 'softmax_choice' to WEIGHTED_AVG before learning.") + + #endregion # ***************************************************************************************************************** @@ -2743,3 +2775,42 @@ def do_gradient_optimization(self, retain_in_pnl_options, context, optimization_ pass #endregion + + # ***************************************************************************************************************** + # ***************************************** Properties ********************************************************** + # ***************************************************************************************************************** + # region + @property + def input_nodes(self): + return [field.input_node for field in self.fields] + + @property + def query_input_nodes(self): + return [field.input_node for field in self.fields if field.type == FieldType.KEY] + + @property + def value_input_nodes(self): + return [field.input_node for field in self.fields if field.type == FieldType.VALUE] + + @property + def match_nodes(self): + if self.concatenate_queries_node: + return [self.concatenated_match_node] + else: + return [field.match_node for field in self.fields if field.type == FieldType.KEY] + + @property + def field_weight_nodes(self): + return [field.weight_node for field in self.fields + if field.weight_node and field.type == FieldType.KEY] + + @property + def weighted_match_nodes(self): + return [field.weighted_match_node for field in self.fields + if field.weighted_match_node and (field.type == FieldType.KEY)] + + @property + def retrieved_nodes(self): + return [field.retrieved_node for field in self.fields] + + #endregion diff --git a/tests/composition/test_emcomposition.py b/tests/composition/test_emcomposition.py index 4413eb0963..989b897603 100644 --- a/tests/composition/test_emcomposition.py +++ b/tests/composition/test_emcomposition.py @@ -456,77 +456,77 @@ class TestExecution: # memory_template mem mem mem fld concat nlz sm str inputs expected_retrieval # fill cap decay wts keys mem gain prob # ---------------------------------------------------------------------------------- ------------------------ - (0, [[[1,2,3],[4,6]], - [[1,2,5],[4,8]], - [[1,2,10],[4,10]]], None, 3, 0, [1,None], None, None, 100, 0, [[[1, 2, 3]]], [[1., 2., 3.16585899], - [4., 6.16540637]]), - (1, [[[1,2,3],[4,6]], - [[1,2,5],[4,8]], - [[1,2,10],[4,10]]], None, 3, 0, [1,None], None, None, 100, 0, [[1, 2, 3], - [4, 6]], [[1., 2., 3.16585899], - [4., 6.16540637]]), - (2, [[[1,2,3],[4,6]], - [[1,2,5],[4,8]], - [[1,2,10],[4,10]]], None, 3, 0, [1,None], None, None, 100, 0, [[1, 2, 3], - [4, 8]], [[1., 2., 3.16585899], - [4., 6.16540637]]), - (3, [[[1,2,3],[4,6]], - [[1,2,5],[4,8]], - [[1,2,10],[4,10]]], (0,.01), 4, 0, [1,None], None, None, 100, 0, [[1, 2, 3], - [4, 8]], [[0.99998628, - 1.99997247, - 3.1658154 ], - [3.99994492, - 6.16532141]]), - (4, [[[1,2,3],[4,6]], # Equal field_weights (but not concatenated) - [[1,2,5],[4,6]], - [[1,2,10],[4,6]]], (0,.01), 4, 0, [1,1], None, None, 100, 0, [[1, 2, 3], - [4, 6]], [[0.99750462, - 1.99499376, - 3.51623568], - [3.98998465, - 5.9849743]] - ), - (5, [[[1,2,3],[4,6]], # Equal field_weights with concatenation - [[1,2,5],[4,8]], - [[1,2,10],[4,10]]], (0,.01), 4, 0, [1,1], True, None, 100, 0, [[1, 2, 4], - [4, 6]], [[0.99898504, - 1.99796378, - 4.00175037], - [3.99592639, - 6.97406456]]), - (6, [[[1,2,3],[4,6]], # Unequal field_weights - [[1,2,5],[4,8]], - [[1,2,10],[4,10]]], (0,.01), 4, 0, [9,1], None, None, 100, 0, [[1, 2, 3], - [4, 6]], 
[[0.99996025, - 1.99992024, - 3.19317783], - [3.99984044, - 6.19219795]]), - (7, [[[1,2,3],[4,6]], # Store + no decay - [[1,2,5],[4,8]], - [[1,2,10],[4,10]]], (0,.01), 4, 0, [9,1], None, None, 100, 1, [[1, 2, 3], - [4, 6]], [[0.99996025, - 1.99992024, - 3.19317783], - [3.99984044, - 6.19219795]]), - (8, [[[1,2,3],[4,6]], # Store + default decay (should be AUTO) - [[1,2,5],[4,8]], - [[1,2,10],[4,10]]], (0,.01), 4, None, [9,1], None, None, 100, 1,[[1, 2, 3], - [4, 6]], [[0.99996025, - 1.99992024, - 3.19317783], - [3.99984044, - 6.19219795]]), - (9, [[[1,2,3],[4,6]], # Store + explicit AUTO decay - [[1,2,5],[4,8]], - [[1,2,10],[4,10]]], (0,.01), 4, AUTO, [9,1], None, None, 100, 1, [[1, 2, 3], - [4, 6]], [[0.99996025, - 1.99992024, - 3.19317783], - [3.99984044, - 6.19219795]]), + # (0, [[[1,2,3],[4,6]], + # [[1,2,5],[4,8]], + # [[1,2,10],[4,10]]], None, 3, 0, [1,None], None, None, 100, 0, [[[1, 2, 3]]], [[1., 2., 3.16585899], + # [4., 6.16540637]]), + # (1, [[[1,2,3],[4,6]], + # [[1,2,5],[4,8]], + # [[1,2,10],[4,10]]], None, 3, 0, [1,None], None, None, 100, 0, [[1, 2, 3], + # [4, 6]], [[1., 2., 3.16585899], + # [4., 6.16540637]]), + # (2, [[[1,2,3],[4,6]], + # [[1,2,5],[4,8]], + # [[1,2,10],[4,10]]], None, 3, 0, [1,None], None, None, 100, 0, [[1, 2, 3], + # [4, 8]], [[1., 2., 3.16585899], + # [4., 6.16540637]]), + # (3, [[[1,2,3],[4,6]], + # [[1,2,5],[4,8]], + # [[1,2,10],[4,10]]], (0,.01), 4, 0, [1,None], None, None, 100, 0, [[1, 2, 3], + # [4, 8]], [[0.99998628, + # 1.99997247, + # 3.1658154 ], + # [3.99994492, + # 6.16532141]]), + # (4, [[[1,2,3],[4,6]], # Equal field_weights (but not concatenated) + # [[1,2,5],[4,6]], + # [[1,2,10],[4,6]]], (0,.01), 4, 0, [1,1], None, None, 100, 0, [[1, 2, 3], + # [4, 6]], [[0.99750462, + # 1.99499376, + # 3.51623568], + # [3.98998465, + # 5.9849743]] + # ), + # (5, [[[1,2,3],[4,6]], # Equal field_weights with concatenation + # [[1,2,5],[4,8]], + # [[1,2,10],[4,10]]], (0,.01), 4, 0, [1,1], True, None, 100, 0, [[1, 2, 4], + # [4, 6]], [[0.99898504, + # 1.99796378, + # 4.00175037], + # [3.99592639, + # 6.97406456]]), + # (6, [[[1,2,3],[4,6]], # Unequal field_weights + # [[1,2,5],[4,8]], + # [[1,2,10],[4,10]]], (0,.01), 4, 0, [9,1], None, None, 100, 0, [[1, 2, 3], + # [4, 6]], [[0.99996025, + # 1.99992024, + # 3.19317783], + # [3.99984044, + # 6.19219795]]), + # (7, [[[1,2,3],[4,6]], # Store + no decay + # [[1,2,5],[4,8]], + # [[1,2,10],[4,10]]], (0,.01), 4, 0, [9,1], None, None, 100, 1, [[1, 2, 3], + # [4, 6]], [[0.99996025, + # 1.99992024, + # 3.19317783], + # [3.99984044, + # 6.19219795]]), + # (8, [[[1,2,3],[4,6]], # Store + default decay (should be AUTO) + # [[1,2,5],[4,8]], + # [[1,2,10],[4,10]]], (0,.01), 4, None, [9,1], None, None, 100, 1,[[1, 2, 3], + # [4, 6]], [[0.99996025, + # 1.99992024, + # 3.19317783], + # [3.99984044, + # 6.19219795]]), + # (9, [[[1,2,3],[4,6]], # Store + explicit AUTO decay + # [[1,2,5],[4,8]], + # [[1,2,10],[4,10]]], (0,.01), 4, AUTO, [9,1], None, None, 100, 1, [[1, 2, 3], + # [4, 6]], [[0.99996025, + # 1.99992024, + # 3.19317783], + # [3.99984044, + # 6.19219795]]), (10, [[[1,2,3],[4,6]], # Store + numerical decay [[1,2,5],[4,8]], [[1,2,10],[4,10]]], (0,.01), 4, .1, [9,1], None, None, 100, 1, [[1, 2, 3], From 5c9f60aa680b6bc5549d622dee1633f0ba0e710e Mon Sep 17 00:00:00 2001 From: jdcpni Date: Mon, 25 Nov 2024 17:29:26 -0500 Subject: [PATCH 31/34] patch/emcomposition/field_memory_indices (#3127) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit * • emcomposition.py assign memory 
indices in field-order • test_emcomposition.py - test_order_fields_in_memory --- .../modulatory/learning/EMstoragemechanism.py | 3 +- .../library/compositions/emcomposition.py | 45 +++++++++--------- tests/composition/test_emcomposition.py | 47 +++++++++++++++---- 3 files changed, 60 insertions(+), 35 deletions(-) diff --git a/psyneulink/library/components/mechanisms/modulatory/learning/EMstoragemechanism.py b/psyneulink/library/components/mechanisms/modulatory/learning/EMstoragemechanism.py index fbd49f4d7a..f828ca9b68 100644 --- a/psyneulink/library/components/mechanisms/modulatory/learning/EMstoragemechanism.py +++ b/psyneulink/library/components/mechanisms/modulatory/learning/EMstoragemechanism.py @@ -672,13 +672,14 @@ def _validate_params(self, request_set, target_set=None, context=None): f"in its variable ({len(self.variable)}).") # Ensure shape of learning_signals matches shapes of matrices for match nodes (i.e., either keys or concatenate) + key_indices = [i for i, field_type in enumerate(field_types) if field_type == 1] for i, learning_signal in enumerate(learning_signals[:num_match_fields]): learning_signal_shape = learning_signal.parameters.matrix._get(context).shape if concatenate_queries: memory_matrix_field_shape = np.array([np.concatenate(row, dtype=object).flatten() for row in memory_matrix[:,0:num_keys]]).T.shape else: - memory_matrix_field_shape = np.array(memory_matrix[:,i].tolist()).T.shape + memory_matrix_field_shape = np.array(memory_matrix[:,key_indices[i]].tolist()).T.shape assert learning_signal_shape == memory_matrix_field_shape, \ f"The shape ({learning_signal_shape}) of the matrix for the Projection {learning_signal.name} " \ f"used to specify learning signal {i} of {self.name} does not match the shape " \ diff --git a/psyneulink/library/compositions/emcomposition.py b/psyneulink/library/compositions/emcomposition.py index 163a17b319..7bc65e44bf 100644 --- a/psyneulink/library/compositions/emcomposition.py +++ b/psyneulink/library/compositions/emcomposition.py @@ -1021,7 +1021,7 @@ def field_weights_setter(field_weights, owning_component=None, context=None): raise EMCompositionError(f"The number of field_weights ({len(field_weights)}) must match the number of fields " f"{len(owning_component.field_weights)}") if owning_component.normalize_field_weights: - denominator = np.sum(np.where(field_weights is not None, field_weights, 0)) + denominator = np.sum(np.where(field_weights is not None, field_weights, 0)) or 1 field_weights = [fw / denominator if fw is not None else None for fw in field_weights] # Assign new fields_weights to default_variable of field_weight_nodes @@ -1898,10 +1898,16 @@ def _validate_memory_specs(self, memory_template, memory_capacity, memory_fill, if all([fw is None for fw in _field_wts]): raise EMCompositionError(f"The entries in 'field_weights' arg for {name} can't all be 'None' " f"since that will preclude the construction of any keys.") - if all([fw in {0, None} for fw in _field_wts]): - warnings.warn(f"All of the entries in the 'field_weights' arg for {name} are either None or " - f"set to 0; this will result in no retrievals unless/until the 0(s) is(are) changed " - f"to a positive value.") + + if not any(_field_wts): + warnings.warn(f"All of the entries in the 'field_weights' arg for {name} " + f"are either None or set to 0; this will result in no retrievals " + f"unless/until one or more of them are changed to a positive value.") + + elif any([fw == 0 for fw in _field_wts if fw is not None]): + warnings.warn(f"Some of the entries in 
the 'field_weights' arg for {name} " + f"are set to 0; those fields will be ignored during retrieval " + f"unless/until they are changed to a positive value.") # If field_names has more than one value it must match the first dimension (axis 0) of memory_template: if field_names and len(field_names) != num_fields: @@ -2058,7 +2064,7 @@ def _parse_fields_dict(name, fields, num_fields)->(list,list,list,list): if normalize_field_weights and not all([fw == 0 for fw in field_weights]): # noqa: E127 fld_wts_0s_for_Nones = [fw if fw is not None else 0 for fw in field_weights] - parsed_field_weights = fld_wts_0s_for_Nones / np.sum(fld_wts_0s_for_Nones) + parsed_field_weights = list(np.array(fld_wts_0s_for_Nones) / (np.sum(fld_wts_0s_for_Nones) or 1)) parsed_field_weights = [pfw if fw is not None else None for pfw, fw in zip(parsed_field_weights, field_weights)] else: @@ -2380,13 +2386,14 @@ def _construct_match_nodes(self, memory_template, memory_capacity, concatenate_q else: # Assign each key Field its own match_node and "memory" Projection to it for i in range(self.num_keys): - field = self.fields[self.key_indices[i]] - memory_projection = MappingProjection(name=f'MEMORY for {self.key_names[i]} [KEY]', - sender=self.query_input_nodes[i].output_port, - matrix = np.array( - memory_template[:,i].tolist()).transpose().astype(float), - function=MatrixTransform(operation=args[i][OPERATION], - normalize=args[i][NORMALIZE])) + key_idx = self.key_indices[i] + field = self.fields[key_idx] + memory_projection = ( + MappingProjection(name=f'MEMORY for {self.key_names[i]} [KEY]', + sender=self.query_input_nodes[i].output_port, + matrix = np.array(memory_template[:,key_idx].tolist()).transpose().astype(float), + function=MatrixTransform(operation=args[key_idx][OPERATION], + normalize=args[key_idx][NORMALIZE]))) field.match_node = (ProcessingMechanism(name=self.key_names[i] + MATCH_TO_KEYS_AFFIX, input_ports= {INPUT_SHAPES:memory_capacity, PROJECTIONS: memory_projection})) @@ -2506,24 +2513,14 @@ def _construct_softmax_node(self, memory_capacity, softmax_gain, softmax_thresho def _construct_retrieved_nodes(self, memory_template)->list: """Create nodes that report the value field(s) for the item(s) matched in memory. 
""" - key_idx = 0 - value_idx = 0 for field in self.fields: - # FIX: 11/24/24 - REFACTOR TO USE memory_template[:,self.index] ONCE MEMORY IS REFACTORED BASED ON FIELDS - if field.type == FieldType.KEY: - matrix = memory_template[:,key_idx] - key_idx += 1 - else: - matrix = memory_template[:,self.num_keys + value_idx] - key_idx += 1 - field.retrieved_node = ( ProcessingMechanism(name=field.name + RETRIEVED_AFFIX, input_ports={INPUT_SHAPES: len(field.input_node.variable[0]), PROJECTIONS: MappingProjection( sender=self.softmax_node, - matrix=matrix, + matrix=memory_template[:,field.index], name=f'MEMORY FOR {field.name} ' f'[RETRIEVE {field.type.name}]')})) field.retrieve_projection = field.retrieved_node.path_afferents[0] diff --git a/tests/composition/test_emcomposition.py b/tests/composition/test_emcomposition.py index 989b897603..26aa81b0e3 100644 --- a/tests/composition/test_emcomposition.py +++ b/tests/composition/test_emcomposition.py @@ -434,17 +434,44 @@ def test_field_args_and_map_assignments(self, assert em._field_index_map[[k for k in em._field_index_map.keys() if 'WEIGHT to WEIGHTED MATCH for KEY B' in k.name][0]] == 2 - def test_field_weights_all_None_and_or_0(self): - with pytest.raises(EMCompositionError) as error_text: - EMComposition(memory_template=(3,1), memory_capacity=1, field_weights=[None, None, None]) - assert error_text.value.error_value == (f"The entries in 'field_weights' arg for EM_Composition can't all " - f"be 'None' since that will preclude the construction of any keys.") + @pytest.mark.parametrize('field_weight_1', ([None], [0], [1]), ids=['None', '0', '1']) + @pytest.mark.parametrize('field_weight_2', ([None], [0], [1]), ids=['None', '0', '1']) + @pytest.mark.parametrize('field_weight_3', ([None], [0], [1]), ids=['None', '0', '1']) + def test_order_fields_in_memory(self, field_weight_1, field_weight_2, field_weight_3): + """Test that order of keys and values doesn't matter""" - with pytest.warns(UserWarning) as warning: - EMComposition(memory_template=(3,1), memory_capacity=1, field_weights=[0, None, 0]) - warning_msg = (f"All of the entries in the 'field_weights' arg for EM_Composition are either None or set to 0; " - f"this will result in no retrievals unless/until the 0(s) is(are) changed to a positive value.") - assert warning_msg in str(warning[0].message) + # pytest.skip(>) + + def construct_em(field_weights): + return pnl.EMComposition(memory_template=[[[5,0], [5], [5,0,3]], [[20,0], [20], [20,1,199]]], + memory_capacity=4, + field_weights=field_weights) + + field_weights = field_weight_1 + field_weight_2 + field_weight_3 + + if all([fw is None for fw in field_weights]): + with pytest.raises(EMCompositionError) as error_text: + construct_em(field_weights) + assert ("The entries in 'field_weights' arg for EM_Composition can't all be 'None' " + "since that will preclude the construction of any keys." 
From ad37a11e5fb89aba6c3f61c5b44ff0d399fda296 Mon Sep 17 00:00:00 2001
From: jdcpni
Date: Mon, 25 Nov 2024 20:33:41 -0500
Subject: [PATCH 32/34] patch/emcomposition/field_memories (#3128)
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

* • emcomposition.py
  fix bug in field.memories property

• test_emcomposition.py
  test_order_fields_in_memory: add tests for field.type and memory alignment
---
 .../library/compositions/emcomposition.py | 10 +++++-----
 tests/composition/test_emcomposition.py   | 18 +++++++++++++++---
 2 files changed, 20 insertions(+), 8 deletions(-)

diff --git a/psyneulink/library/compositions/emcomposition.py b/psyneulink/library/compositions/emcomposition.py
index 7bc65e44bf..c76aab16a4 100644
--- a/psyneulink/library/compositions/emcomposition.py
+++ b/psyneulink/library/compositions/emcomposition.py
@@ -954,8 +954,8 @@
 from psyneulink.core.llvm import ExecutionMode
 
-__all__ = ['EMComposition', 'EMCompositionError', 'FIELD_WEIGHT', 'KEY', 'LEARN_FIELD_WEIGHT',
-           'PROBABILISTIC', 'TARGET_FIELD','WEIGHTED_AVG']
+__all__ = ['EMComposition', 'EMCompositionError', 'FieldType', 'FIELD_WEIGHT',
+           'KEY', 'LEARN_FIELD_WEIGHT', 'PROBABILISTIC', 'TARGET_FIELD','WEIGHTED_AVG']
 
 KEY = 'key'
@@ -1134,14 +1134,14 @@
     @property
     def retrieved_memory(self):
-        return self.retrieve_node.value
+        return self.retrieved_node.value
 
     @property
     def memories(self):
-        return self.retrieve_node.path_afferents[0].matrix
+        return self.retrieved_node.path_afferents[0].matrix.modulated
 
     def retrieval_operation(self):
-        return self.retrieve_node.path_afferents[0].function.operation
+        return self.retrieved_node.path_afferents[0].function.operation
 
 
 class EMComposition(AutodiffComposition):

diff --git a/tests/composition/test_emcomposition.py b/tests/composition/test_emcomposition.py
index 26aa81b0e3..9a6a2d7a74 100644
--- a/tests/composition/test_emcomposition.py
+++ b/tests/composition/test_emcomposition.py
@@ -448,6 +448,7 @@ def construct_em(field_weights):
                                      field_weights=field_weights)
 
         field_weights = field_weight_1 + field_weight_2 + field_weight_3
+        em = None
 
         if all([fw is None for fw in field_weights]):
             with pytest.raises(EMCompositionError) as error_text:
@@ -457,7 +458,7 @@
 
         elif not any(field_weights):
             with pytest.warns(UserWarning) as warning:
-                construct_em(field_weights)
+                em = construct_em(field_weights)
             warning_msg = ("All of the entries in the 'field_weights' arg for EM_Composition "
                            "are either None or set to 0; this will result in no retrievals "
                            "unless/until one or more of them are changed to a positive value.")
             assert warning_msg in str(warning[0].message)
@@ -465,13 +466,24 @@
         elif any([fw == 0 for fw in field_weights]):
             with pytest.warns(UserWarning) as warning:
-                construct_em(field_weights)
+                em = construct_em(field_weights)
             warning_msg = ("Some of the entries in the 'field_weights' arg for EM_Composition are set to 0; those "
                            "fields will be ignored during retrieval unless/until they are changed to a positive value.")
             assert warning_msg in str(warning[0].message)
 
         else:
-            construct_em(field_weights)
+            em = construct_em(field_weights)
+
+        if em:
+            for field_weight, field in zip(field_weights, em.fields):
+                # Validate proper field-type assignments
+                if field_weight is None:
+                    assert field.type == pnl.FieldType.VALUE
+                else:
+                    assert field.type == pnl.FieldType.KEY
+                # Validate alignment of field with memory
+                assert len(field.memories[0]) == [2,1,3][field.index]
+
 
 @pytest.mark.pytorch
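
The alignment assert added above leans on the shape of the test's memory_template.
A small sketch of where the [2, 1, 3] comes from and what the assert checks; the
comment about field.memories reflects the property this patch fixes, and reads it
as rows-per-entry (as the test's assert implies):

    # The template used by construct_em(), two entries x three fields:
    memory_template = [[[5, 0], [5], [5, 0, 3]],
                       [[20, 0], [20], [20, 1, 199]]]

    # Field widths in declaration order -- the [2, 1, 3] in the test's assert:
    widths = [len(fld) for fld in memory_template[0]]
    assert widths == [2, 1, 3]

    # Each row of field.memories is one stored entry for that field, so
    # len(field.memories[0]) == widths[field.index] verifies that every
    # field's memory stayed aligned with its declared position.
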
From 06c5a65aefd660b823afa2f5f4b1d4194d2ee92a Mon Sep 17 00:00:00 2001
From: Katherine Mantel
Date: Tue, 26 Nov 2024 04:24:18 +0000
Subject: [PATCH 33/34] ci: test-release: add matrix.dist to test result
 artifact

Avoid duplicate artifact names between the wheel and sdist jobs.
---
 .github/workflows/test-release.yml | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/.github/workflows/test-release.yml b/.github/workflows/test-release.yml
index a0a74cb200..b18d9ec71c 100644
--- a/.github/workflows/test-release.yml
+++ b/.github/workflows/test-release.yml
@@ -128,7 +128,7 @@ jobs:
       - name: Upload test results
         uses: actions/upload-artifact@v4
         with:
-          name: test-results-${{ matrix.os }}-${{ matrix.python-version }}
+          name: test-results-${{ matrix.os }}-${{ matrix.python-version }}-${{ matrix.dist }}
           path: tests_out.xml
           retention-days: 30
         if: success() || failure()

From f3c01d920dac0b71796627823ddb2f05692340e5 Mon Sep 17 00:00:00 2001
From: Katherine Mantel
Date: Tue, 26 Nov 2024 04:26:24 +0000
Subject: [PATCH 34/34] ci: test-release: exclude macos-11 on py3.7

Python 3.7 is unavailable there; match the exclusion in other workflows.
---
 .github/workflows/test-release.yml | 5 +++++
 1 file changed, 5 insertions(+)

diff --git a/.github/workflows/test-release.yml b/.github/workflows/test-release.yml
index b18d9ec71c..2f21bba3b5 100644
--- a/.github/workflows/test-release.yml
+++ b/.github/workflows/test-release.yml
@@ -72,6 +72,11 @@ jobs:
         python-version: [3.7, 3.8, 3.9, '3.10', 3.11, 3.12]
         os: [ubuntu-latest, macos-latest, windows-latest]
         dist: [wheel, sdist]
+        exclude:
+          # 3.7 is broken on macos-11,
+          # https://github.com/actions/virtual-environments/issues/4230
+          - python-version: '3.7'
+            os: macos-latest
     runs-on: ${{ matrix.os }}
     needs: [create-python-dist]
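
For the two CI patches above, a rough Python model of how the job matrix expands
and why PATCH 33 adds matrix.dist to the artifact name. This only mimics GitHub
Actions' matrix semantics for illustration; it is not the Actions implementation:

    from itertools import product

    # Matrix axes as declared in the workflow above:
    python_versions = ['3.7', '3.8', '3.9', '3.10', '3.11', '3.12']
    oses = ['ubuntu-latest', 'macos-latest', 'windows-latest']
    dists = ['wheel', 'sdist']
    excluded = {('3.7', 'macos-latest')}   # the exclude added by PATCH 34

    jobs = [(py, os_, dist)
            for py, os_, dist in product(python_versions, oses, dists)
            if (py, os_) not in excluded]
    assert len(jobs) == 6 * 3 * 2 - 2   # the exclude drops both dists for that pair

    # Without matrix.dist in the name, the wheel and sdist jobs for the same
    # (os, python) pair would upload artifacts with identical names and collide:
    names = {f'test-results-{os_}-{py}-{dist}' for py, os_, dist in jobs}
    assert len(names) == len(jobs)      # names are now unique per job
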