From 80f360958368b44cbefe9b2e57f312b15d3a0aee Mon Sep 17 00:00:00 2001 From: connor-krill Date: Wed, 26 Jul 2023 16:02:26 -0400 Subject: [PATCH 01/29] added test for seed_u is None in FORM.run() --- tests/unit_tests/reliability/test_form.py | 9 +++++++++ 1 file changed, 9 insertions(+) diff --git a/tests/unit_tests/reliability/test_form.py b/tests/unit_tests/reliability/test_form.py index dadbd457..6d376cb7 100644 --- a/tests/unit_tests/reliability/test_form.py +++ b/tests/unit_tests/reliability/test_form.py @@ -43,6 +43,15 @@ def test_seeds_x_is_none(setup): np.testing.assert_allclose(form_obj.failure_probability, 0.0126, rtol=1e-02) +def test_seed_u_is_none(setup): + """ToDo: Fix FORM.run(seed_x=numpy_array) to pass this test""" + distributions = [Normal(loc=200, scale=20), Normal(loc=150, scale=10)] + form = FORM(distributions=distributions, runmodel_object=setup) + seed_x = np.array([225, 140]) + form.run(seed_x=seed_x) + np.testing.assert_allclose(form.failure_probability, 0.0126, rtol=1e-02) + + def test_tol1_is_not_none(setup): path = os.path.abspath(os.path.dirname(__file__)) os.chdir(path) From ec3f6e9949a9f3c3bf2b50ca6957afb62c348b14 Mon Sep 17 00:00:00 2001 From: connor-krill Date: Thu, 27 Jul 2023 09:21:14 -0400 Subject: [PATCH 02/29] passes test for seed_u_is_none in FORM.run() --- src/UQpy/reliability/taylor_series/FORM.py | 2 +- tests/unit_tests/reliability/test_form.py | 1 - 2 files changed, 1 insertion(+), 2 deletions(-) diff --git a/src/UQpy/reliability/taylor_series/FORM.py b/src/UQpy/reliability/taylor_series/FORM.py index c10c895c..acd94e10 100644 --- a/src/UQpy/reliability/taylor_series/FORM.py +++ b/src/UQpy/reliability/taylor_series/FORM.py @@ -156,7 +156,7 @@ def run(self, seed_x: Union[list, np.ndarray] = None, seed_u: Union[list, np.nda elif seed_u is None and seed_x is not None: self.nataf_object.run(samples_x=seed_x.reshape(1, -1), jacobian=False) seed_z = self.nataf_object.samples_z - seed = Decorrelate(seed_z, self.nataf_object.corr_z) + seed = Decorrelate(seed_z, self.nataf_object.corr_z).samples_u elif seed_u is not None and seed_x is None: seed = np.squeeze(seed_u) else: diff --git a/tests/unit_tests/reliability/test_form.py b/tests/unit_tests/reliability/test_form.py index 6d376cb7..266acf87 100644 --- a/tests/unit_tests/reliability/test_form.py +++ b/tests/unit_tests/reliability/test_form.py @@ -44,7 +44,6 @@ def test_seeds_x_is_none(setup): def test_seed_u_is_none(setup): - """ToDo: Fix FORM.run(seed_x=numpy_array) to pass this test""" distributions = [Normal(loc=200, scale=20), Normal(loc=150, scale=10)] form = FORM(distributions=distributions, runmodel_object=setup) seed_x = np.array([225, 140]) From ee0fa3e1ab736f163e63c10c7724cf6971e3bd16 Mon Sep 17 00:00:00 2001 From: connor-krill Date: Thu, 27 Jul 2023 09:30:02 -0400 Subject: [PATCH 03/29] added Connor to development team --- README.rst | 4 +--- docs/source/index.rst | 2 +- 2 files changed, 2 insertions(+), 4 deletions(-) diff --git a/README.rst b/README.rst index 919d8759..6f9d6916 100644 --- a/README.rst +++ b/README.rst @@ -6,8 +6,6 @@ .. |AzureDevops| image:: https://img.shields.io/azure-devops/build/UQpy/5ce1851f-e51f-4e18-9eca-91c3ad9f9900/1?style=plastic :alt: Azure DevOps builds .. |PyPIdownloads| image:: https://img.shields.io/pypi/dm/UQpy?style=plastic :alt: PyPI - Downloads .. |PyPI| image:: https://img.shields.io/pypi/v/UQpy?style=plastic :alt: PyPI -.. |Binder| image:: https://mybinder.org/badge_logo.svg - :target: https://mybinder.org/v2/gh/SURGroup/UQpy/master .. 
|bear-ified| image:: https://raw.githubusercontent.com/beartype/beartype-assets/main/badge/bear-ified.svg :align: top @@ -32,7 +30,7 @@ Uncertainty Quantification with python (UQpy) + + + | | Ketson RM dos Santos, Katiana Kontolati, Dimitris Loukrezis, | + + + -| | Promit Chakroborty, Lukáš Novák, Andrew Solanto | +| | Promit Chakroborty, Lukáš Novák, Andrew Solanto, Connor Krill | +-----------------------+------------------------------------------------------------------+ | **Contributors:** | Michael Gardner, Prateek Bhustali, Julius Schultz, Ulrich Römer | +-----------------------+------------------------------------------------------------------+ diff --git a/docs/source/index.rst b/docs/source/index.rst index c3a61ae6..fc7af386 100644 --- a/docs/source/index.rst +++ b/docs/source/index.rst @@ -18,7 +18,7 @@ as a set of modules centered around core capabilities in Uncertainty Quantificat + + + | | Ketson RM dos Santos, Katiana Kontolati, Dimitris Loukrezis, | + + + -| | Promit Chakroborty, Lukáš Novák, Andrew Solanto | +| | Promit Chakroborty, Lukáš Novák, Andrew Solanto, Connor Krill | +-----------------------+------------------------------------------------------------------+ | **Contributors:** | Michael Gardner, Prateek Bhustali, Julius Schultz, Ulrich Römer | +-----------------------+------------------------------------------------------------------+ From 174d578c17c4811415e47b4d3594bc6addf574ab Mon Sep 17 00:00:00 2001 From: Dimitris Tsapetis Date: Thu, 27 Jul 2023 17:13:43 +0300 Subject: [PATCH 04/29] Minor changes to fix reliability examples --- ...nction_3d.py => FORM_linear_function_3d.py} | 14 +++++++------- docs/code/reliability/sorm/local_model4.py | 8 ++++++++ docs/code/reliability/sorm/local_pfn.py | 18 ++++++++---------- .../sorm/plot_SORM_nonlinear_function.py | 6 +++--- 4 files changed, 26 insertions(+), 20 deletions(-) rename docs/code/reliability/form/{plot_FORM_linear_function_3d.py => FORM_linear_function_3d.py} (71%) create mode 100644 docs/code/reliability/sorm/local_model4.py diff --git a/docs/code/reliability/form/plot_FORM_linear_function_3d.py b/docs/code/reliability/form/FORM_linear_function_3d.py similarity index 71% rename from docs/code/reliability/form/plot_FORM_linear_function_3d.py rename to docs/code/reliability/form/FORM_linear_function_3d.py index 055d7272..2e492e0f 100644 --- a/docs/code/reliability/form/plot_FORM_linear_function_3d.py +++ b/docs/code/reliability/form/FORM_linear_function_3d.py @@ -35,13 +35,13 @@ dist3 = Normal(loc=4., scale=0.4) model = PythonModel(model_script='local_pfn.py', model_object_name="example3",) -RunModelObject3 = RunModel(model=model) +run_model = RunModel(model=model) -Z0 = FORM(distributions=[dist1, dist2, dist3], runmodel_object=RunModelObject3) -Z0.run() +form = FORM(distributions=[dist1, dist2, dist3], runmodel_object=run_model) +form.run() -print('Design point in standard normal space: %s' % Z0.design_point_u) -print('Design point in original space: %s' % Z0.design_point_x) -print('Hasofer-Lind reliability index: %s' % Z0.beta) -print('FORM probability of failure: %s' % Z0.failure_probability) +print('Design point in standard normal space: %s' % form.design_point_u) +print('Design point in original space: %s' % form.design_point_x) +print('Hasofer-Lind reliability index: %s' % form.beta) +print('FORM probability of failure: %s' % form.failure_probability) diff --git a/docs/code/reliability/sorm/local_model4.py b/docs/code/reliability/sorm/local_model4.py new file mode 100644 index 00000000..6c0808cc --- 
/dev/null +++ b/docs/code/reliability/sorm/local_model4.py @@ -0,0 +1,8 @@ +import numpy as np + + +def example4(samples=None): + g = np.zeros(samples.shape[0]) + for i in range(samples.shape[0]): + g[i] = samples[i, 0] * samples[i, 1] - 80 + return g \ No newline at end of file diff --git a/docs/code/reliability/sorm/local_pfn.py b/docs/code/reliability/sorm/local_pfn.py index db3584d0..2903bf25 100644 --- a/docs/code/reliability/sorm/local_pfn.py +++ b/docs/code/reliability/sorm/local_pfn.py @@ -6,14 +6,15 @@ """ import numpy as np + def example1(samples=None): g = np.zeros(samples.shape[0]) - for i in range(samples.shape[0]): + for i in range(samples.shape[0]): R = samples[i, 0] S = samples[i, 1] g[i] = R - S return g - + def example2(samples=None): import numpy as np @@ -21,18 +22,15 @@ def example2(samples=None): beta = 3.0902 g = np.zeros(samples.shape[0]) for i in range(samples.shape[0]): - g[i] = -1/np.sqrt(d) * (samples[i, 0] + samples[i, 1]) + beta + g[i] = -1 / np.sqrt(d) * (samples[i, 0] + samples[i, 1]) + beta return g def example3(samples=None): g = np.zeros(samples.shape[0]) for i in range(samples.shape[0]): - g[i] = 6.2*samples[i, 0] - samples[i, 1]*samples[i, 2]**2 + g[i] = 6.2 * samples[i, 0] - samples[i, 1] * samples[i, 2] ** 2 return g - -def example4(samples=None): - g = np.zeros(samples.shape[0]) - for i in range(samples.shape[0]): - g[i] = samples[i, 0]*samples[i, 1] - 80 - return g \ No newline at end of file + + + diff --git a/docs/code/reliability/sorm/plot_SORM_nonlinear_function.py b/docs/code/reliability/sorm/plot_SORM_nonlinear_function.py index 25538613..05b2f32d 100644 --- a/docs/code/reliability/sorm/plot_SORM_nonlinear_function.py +++ b/docs/code/reliability/sorm/plot_SORM_nonlinear_function.py @@ -34,13 +34,13 @@ dist1 = Normal(loc=20., scale=2) dist2 = Lognormal(s=s, loc=0.0, scale=scale) -model = PythonModel(model_script='local_pfn.py', model_object_name="example4",) +model = PythonModel(model_script='local_model4.py', model_object_name="example4") RunModelObject4 = RunModel(model=model) form = FORM(distributions=[dist1, dist2], runmodel_object=RunModelObject4) form.run() -Q0 = SORM(form_object=form) +sorm = SORM(form_object=form) # print results -print('SORM probability of failure: %s' % Q0.failure_probability) +print('SORM probability of failure: %s' % sorm.failure_probability) From 7a5bd248576dcced9b9be77f0c003d7564ba7d6b Mon Sep 17 00:00:00 2001 From: Dimitris Tsapetis Date: Thu, 27 Jul 2023 17:47:44 +0300 Subject: [PATCH 05/29] Corrects equation display in plot_pce_sparsity_lars.py --- .../surrogates/pce/plot_pce_sparsity_lars.py | 16 ++++++++-------- 1 file changed, 8 insertions(+), 8 deletions(-) diff --git a/docs/code/surrogates/pce/plot_pce_sparsity_lars.py b/docs/code/surrogates/pce/plot_pce_sparsity_lars.py index 864e9110..d3bf7363 100644 --- a/docs/code/surrogates/pce/plot_pce_sparsity_lars.py +++ b/docs/code/surrogates/pce/plot_pce_sparsity_lars.py @@ -15,8 +15,7 @@ # %% import numpy as np -import math -import numpy as np + from UQpy.distributions import Uniform, JointIndependent from UQpy.surrogates import * @@ -24,7 +23,8 @@ # %% md # # We then define the Ishigami function, which reads: -# :math:` f(x_1, x_2, x_3) = \sin(x_1) + a \sin^2(x_2) + b x_3^4 \sin(x_1)` +# +# .. 
math:: f(x_1, x_2, x_3) = \sin(x_1) + a \sin^2(x_2) + b x_3^4 \sin(x_1) # %% @@ -41,7 +41,7 @@ def ishigami(xx): # %% md # -# The Ishigami function has three indepdent random inputs, which are uniformly distributed in +# The Ishigami function has three independent random inputs, which are uniformly distributed in # interval :math:`[-\pi, \pi]`. # %% @@ -70,7 +70,7 @@ def ishigami(xx): # # where :math:`N` is the number of random inputs (here, :math:`N=3`). # -# Note that the size of the basis is highly dependent both on :math:`N` and :math:`P:math:`. It is generally advisable +# Note that the size of the basis is highly dependent both on :math:`N` and :math:`P`. It is generally advisable # that the experimental design has :math:`2-10` times more data points than the number of PCE polynomials. This might # lead to curse of dimensionality and thus we will utilize the best model selection algorithm based on # Least Angle Regression. @@ -102,8 +102,8 @@ def ishigami(xx): # %% md # -# We now fit the PCE coefficients by solving a regression problem. Here we opt for the _np.linalg.lstsq_ method, -# which is based on the _dgelsd_ solver of LAPACK. This original PCE class will be used for further selection of +# We now fit the PCE coefficients by solving a regression problem. Here we opt for the :code:`_np.linalg.lstsq_` method, +# which is based on the :code:`_dgelsd_` solver of LAPACK. This original PCE class will be used for further selection of # the best basis functions. # %% @@ -238,7 +238,7 @@ def ishigami(xx): # %% md # # In case of high-dimensional input and/or high :math:P` it is also beneficial to reduce the TD basis set by hyperbolic -# trunction. The hyperbolic truncation reduces higher-order interaction terms in dependence to parameter :math:`q` in +# truncation. The hyperbolic truncation reduces higher-order interaction terms in dependence to parameter :math:`q` in # interval :math:`(0,1)`. 
The set of multi indices :math:`\alpha` is reduced as follows: # # :math:`\alpha\in \mathbb{N}^{N}: || \boldsymbol{\alpha}||_q \equiv \Big( \sum_{i=1}^{N} \alpha_i^q \Big)^{1/q} \leq P` From 333be9b4f28c6d40505f640d9b3cdab148241de1 Mon Sep 17 00:00:00 2001 From: Dimitris Tsapetis Date: Thu, 10 Aug 2023 21:10:44 +0300 Subject: [PATCH 06/29] Disables FORM and SORM examples execution --- ...plot_FORM_linear_function_3d.py => FORM_linear_function_3d.py} | 0 ...M_structural_reliability.py => FORM_structural_reliability.py} | 0 ...plot_SORM_nonlinear_function.py => SORM_nonlinear_function.py} | 0 3 files changed, 0 insertions(+), 0 deletions(-) rename docs/code/reliability/form/{plot_FORM_linear_function_3d.py => FORM_linear_function_3d.py} (100%) rename docs/code/reliability/form/{plot_FORM_structural_reliability.py => FORM_structural_reliability.py} (100%) rename docs/code/reliability/sorm/{plot_SORM_nonlinear_function.py => SORM_nonlinear_function.py} (100%) diff --git a/docs/code/reliability/form/plot_FORM_linear_function_3d.py b/docs/code/reliability/form/FORM_linear_function_3d.py similarity index 100% rename from docs/code/reliability/form/plot_FORM_linear_function_3d.py rename to docs/code/reliability/form/FORM_linear_function_3d.py diff --git a/docs/code/reliability/form/plot_FORM_structural_reliability.py b/docs/code/reliability/form/FORM_structural_reliability.py similarity index 100% rename from docs/code/reliability/form/plot_FORM_structural_reliability.py rename to docs/code/reliability/form/FORM_structural_reliability.py diff --git a/docs/code/reliability/sorm/plot_SORM_nonlinear_function.py b/docs/code/reliability/sorm/SORM_nonlinear_function.py similarity index 100% rename from docs/code/reliability/sorm/plot_SORM_nonlinear_function.py rename to docs/code/reliability/sorm/SORM_nonlinear_function.py From bbb4341e77f0f18d74f8cf8fe9e9e02f2fcd4e09 Mon Sep 17 00:00:00 2001 From: Dimitris Tsapetis Date: Thu, 10 Aug 2023 22:23:20 +0300 Subject: [PATCH 07/29] Disables example execution --- ...plot_true_stratified_voronoi.py => true_stratified_voronoi.py} | 0 1 file changed, 0 insertions(+), 0 deletions(-) rename docs/code/sampling/true_stratified_sampling/{plot_true_stratified_voronoi.py => true_stratified_voronoi.py} (100%) diff --git a/docs/code/sampling/true_stratified_sampling/plot_true_stratified_voronoi.py b/docs/code/sampling/true_stratified_sampling/true_stratified_voronoi.py similarity index 100% rename from docs/code/sampling/true_stratified_sampling/plot_true_stratified_voronoi.py rename to docs/code/sampling/true_stratified_sampling/true_stratified_voronoi.py From 9cd9d75625223176b5927ba9bca18667a4509bbe Mon Sep 17 00:00:00 2001 From: connor-krill Date: Tue, 15 Aug 2023 12:39:34 -0400 Subject: [PATCH 08/29] initial commit of the InverseFORM class --- .../reliability/taylor_series/InverseFORM.py | 204 ++++++++++++++++++ 1 file changed, 204 insertions(+) create mode 100644 src/UQpy/reliability/taylor_series/InverseFORM.py diff --git a/src/UQpy/reliability/taylor_series/InverseFORM.py b/src/UQpy/reliability/taylor_series/InverseFORM.py new file mode 100644 index 00000000..0e282607 --- /dev/null +++ b/src/UQpy/reliability/taylor_series/InverseFORM.py @@ -0,0 +1,204 @@ +import logging +import warnings +import numpy as np +import scipy.stats as stats +from typing import Union +from beartype import beartype +from UQpy.distributions import * +from UQpy.transformations import * +from UQpy.run_model.RunModel import RunModel +from UQpy.utilities.ValidationTypes import PositiveInteger 
+from UQpy.reliability.taylor_series.baseclass.TaylorSeries import TaylorSeries + +warnings.filterwarnings('ignore') + + +class InverseFORM(TaylorSeries): + + @beartype + def __init__( + self, + distributions: Union[None, Distribution, list[Distribution]], + runmodel_object: RunModel, + p_fail: Union[None, float] = 0.05, + beta: Union[None, float] = None, + seed_x: Union[list, np.ndarray] = None, + seed_u: Union[list, np.ndarray] = None, + df_step: Union[int, float] = 0.01, + corr_x: Union[list, np.ndarray] = None, + corr_z: Union[list, np.ndarray] = None, + max_iterations: PositiveInteger = 100, + tolerance_u: Union[float, int, None] = 1e-3, + tolerance_gradient: Union[float, int, None] = 1e-3, + ): + """Class to perform the Inverse First Order Reliability Method. + + Each time :meth:`run` is called the results are appended to the class attributes. + This is a child class of the :class:`TaylorSeries` class. + By default, :code:`tolerance_u` and :code:`tolerance_gradient` must be satisfied for convergence. + Specifying a tolerance as :code:`None` will ignore that criteria, but both cannot be :code:`None`. + + :param distributions: Marginal probability distributions of each random variable. Must be of + type :class:`.DistributionContinuous1D` or :class:`.JointIndependent`. + :param runmodel_object: The computational model. Must be of type :class:`RunModel`. + :param p_fail: Probability of failure defining the feasibility criteria as :math:`||u||=-\\Phi^{-1}(p_{fail})`. + Default: :math:`0.05`. Set to :code:`None` to use :code:`beta` as the feasibility criteria. + Only one of :code:`p_fail` or :code:`beta` may be provided. + :param beta: Hasofer-Lind reliability index defining the feasibility criteria as :math:`||u|| = \\beta`. + Default: :code:`None`. + Only one of code:`p_fail` or :code:`beta` may be provided. + :param seed_x: Point in the parameter space :math:`\mathbf{X}` to start from. + Only one of :code:`seed_x` or :code:`seed_u` may be provided. + If neither is provided, the zero vector in :math:`\mathbf{U}` space is used as the seed. + :param seed_u: Point in the uncorrelated standard normal space :math:`\mathbf{U}` to start from. + Only one of :code:`seed_x` or :code:`seed_u` may be provided. + If neither is provided, the zero vector in :math:`\mathbf{U}` space is used as the seed. + :param df_step: Finite difference step in standard normal space. Default: :math:`0.01` + :param corr_x: Correlation matrix :math:`\mathbf{C_X}` of the random vector :math:`\mathbf{X}` + :param corr_z: Correlation matrix :math:`\mathbf{C_Z}` of the random vector :math:`\mathbf{Z}` + Default: :code:`corr_z` is the identity matrix. + :param max_iterations: Maximum number of iterations for the `HLRF` algorithm. Default: :math:`100` + :param tolerance_u: Convergence threshold for criterion :math:`||\mathbf{U}^{k} - \mathbf{U}^{k-1}||_2 \leq` + :code:`tolerance_u` of the `HLRF` algorithm. + Default: :math:`1.0e-3` + :param tolerance_gradient: Convergence threshold for criterion + :math:`||\\nabla G(\mathbf{U}_i)- \\nabla G(\mathbf{U}_{i-1})||_2 \leq` :code:`tolerance_gradient` + of the `HLRF` algorithm. 
+ Default: :math:`1.0e-3` + """ + self.distributions = distributions + self.runmodel_object = runmodel_object + if (p_fail is not None) and (beta is not None): + raise ValueError('UQpy: Exactly one input (p_fail or beta) must be provided') + elif (p_fail is None) and (beta is None): + raise ValueError('UQpy: Exactly one input (p_fail or beta) must be provided') + elif p_fail is not None: + self.p_fail = p_fail + self.beta = -stats.norm.ppf(self.p_fail) + elif beta is not None: + self.p_fail = stats.norm.cdf(-beta) + self.beta = beta + self.seed_x = seed_x + self.seed_u = seed_u + self.df_step = df_step + self.corr_x = corr_x + self.corr_z = corr_z + self.max_iterations = max_iterations + self.tolerance_u = tolerance_u + self.tolerance_gradient = tolerance_gradient + if (self.tolerance_u is None) and (self.tolerance_gradient is None): + raise ValueError('UQpy: At least one tolerance (tolerance_u or tolerance_gradient) must be provided') + + self.logger = logging.getLogger(__name__) + self.nataf_object = Nataf(distributions=distributions, corr_z=corr_z, corr_x=corr_x) + + # Determine the number of dimensions as the number of random variables + if isinstance(distributions, DistributionContinuous1D): + self.dimension = 1 + elif isinstance(distributions, JointIndependent): + self.dimension = len(distributions.marginals) + elif isinstance(distributions, list): + self.dimension = 0 + for i in range(len(distributions)): + if isinstance(distributions[i], DistributionContinuous1D): + self.dimension += 1 + elif isinstance(distributions[i], JointIndependent): + self.dimension += len(distributions[i].marginals) + + # Initialize attributes + self.alpha: float = np.nan + """Normalized vector in :math:`\\textbf{U}`-space""" + self.alpha_record: list = [] + """Record of alpha :math:`\\alpha=\\frac{\\nabla G(u)}{||\\nabla G(u)||}`""" + self.beta: float = self.beta + """Feasibility criteria for the optimization :math:`||\\textbf{U}|| = \\beta`""" + self.beta_record: list = [] + """Record of Hasofer-Lind reliability index that defines the feasibility criteria :math:`||u||=\\beta_{HL}`""" + self.design_point_u: list = [] + """Design point in the standard normal space :math:`\\textbf{U}`""" + self.design_point_x: list = [] + """Design point in the parameter space :math:`\\textbf{X}`""" + self.error_record: list = [] + """Record of the final error defined by + :math:`error_u = ||u_new - u||` and :math:`error_{gradient} = || \\nabla G(u_new) - \\nabla G(u)||`""" + self.iteration_record: list = [] + """Record of the number of iterations before algorithm termination""" + self.failure_probability_record: list = [] + """Record of the probability of failure defined by :math:`p_{fail} = \\Phi(-\\beta_{HL})`""" + + def run(self, seed_x: Union[list, np.ndarray] = None, seed_u: Union[list, np.ndarray] = None): + """Runs the inverse FORM algorithm. + + :param seed_x: Point in the parameter space :math:`\mathbf{X}` to start from. + Only one of `seed_x` or `seed_u` may be provided. + If neither is provided, the zero vector in :math:`\mathbf{U}` space is used as the seed. + :param seed_u: Point in the uncorrelated standard normal space :math:`\mathbf{U}` to start from. + Only one of `seed_x` or `seed_u` may be provided. + If neither is provided, the zero vector in :math:`\mathbf{U}` space is used as the seed. 
+ """ + self.logger.info('UQpy: Running InverseFORM...') + if (seed_x is not None) and (seed_u is not None): + raise ValueError('UQpy: Only one input (seed_x or seed_u) may be provided') + + # Allocate u and the gradient of G(u) as arrays + u = np.zeros([self.max_iterations + 1, self.dimension]) + state_function_gradient = np.zeros([self.max_iterations + 1, self.dimension]) + + # Set initial seed. If both seed_x and seed_u are None, the initial seed is a vector of zeros in U space. + if seed_u is not None: + u[0, :] = seed_u + elif seed_x is not None: + self.nataf_object.run(samples_x=seed_x.reshape(1, -1), jacobian=False) + seed_z = self.nataf_object.samples_z + u[0, :] = Decorrelate(seed_z, self.nataf_object.corr_z).samples_u + + converged = False + iteration = 0 + while (not converged) and (iteration < self.max_iterations): + self.logger.info(f'Number of iteration: {iteration}') + if iteration == 0: + if seed_x is not None: + x = seed_x + else: + seed_z = Correlate(samples_u=u[0, :].reshape(1, -1), corr_z=self.nataf_object.corr_z).samples_z + self.nataf_object.run(samples_z=seed_z.reshape(1, -1), jacobian=True) + x = self.nataf_object.samples_x + else: + z = Correlate(u[iteration, :].reshape(1, -1), self.nataf_object.corr_z).samples_z + self.nataf_object.run(samples_z=z, jacobian=True) + x = self.nataf_object.samples_x + self.logger.info(f'Design Point U: {u[iteration, :]}\nDesign Point X: {x}\n') + state_function_gradient[iteration + 1, :], _, _ = self._derivatives(point_u=u[iteration, :], + point_x=x, + runmodel_object=self.runmodel_object, + nataf_object=self.nataf_object, + df_step=self.df_step, + order='first') + + alpha = state_function_gradient[iteration + 1] + alpha /= np.linalg.norm(state_function_gradient[iteration + 1]) + u[iteration + 1, :] = -alpha * self.beta + + error_u = np.linalg.norm(u[iteration + 1, :] - u[iteration, :]) + error_gradient = np.linalg.norm(state_function_gradient[iteration + 1, :] + - state_function_gradient[iteration, :]) + + converged_u = True if (self.tolerance_u is None) \ + else (error_u <= self.tolerance_u) + converged_gradient = True if (self.tolerance_gradient is None) \ + else (error_gradient <= self.tolerance_gradient) + converged = converged_u and converged_gradient + + if not converged: + iteration += 1 + + if iteration >= self.max_iterations: + self.logger.info(f'UQpy: Maximum number of iterations {self.max_iterations} reached before convergence') + else: + self.alpha_record.append(alpha) + self.beta_record.append(self.beta) + self.design_point_u.append(u[iteration, :]) + self.design_point_x.append(x) + self.error_record.append((error_u, error_gradient)) + self.iteration_record.append(iteration) + self.failure_probability_record.append(self.p_fail) From c3e5459e133c0f338230fee28a6dc08e8eb47d66 Mon Sep 17 00:00:00 2001 From: connor-krill Date: Tue, 15 Aug 2023 12:40:26 -0400 Subject: [PATCH 09/29] tests for accuracy and behavior under various inputs for InverseFORM class --- tests/unit_tests/reliability/example_7_2.py | 18 +++ .../reliability/test_inverse_form.py | 106 ++++++++++++++++++ 2 files changed, 124 insertions(+) create mode 100644 tests/unit_tests/reliability/example_7_2.py create mode 100644 tests/unit_tests/reliability/test_inverse_form.py diff --git a/tests/unit_tests/reliability/example_7_2.py b/tests/unit_tests/reliability/example_7_2.py new file mode 100644 index 00000000..cbeb6f33 --- /dev/null +++ b/tests/unit_tests/reliability/example_7_2.py @@ -0,0 +1,18 @@ +import numpy as np + + +def performance_function(samples=None): 
+ """Performance function from Chapter 7 Example 7.2 from Du 2005""" + elastic_modulus = 30e6 + length = 100 + width = 2 + height = 4 + d_0 = 3 + + g = np.zeros(samples.shape[0]) + for i in range(samples.shape[0]): + x = (samples[i, 0] / width**2) ** 2 + y = (samples[i, 1] / height**2) ** 2 + d = ((4 * length**3) / (elastic_modulus * width * height)) * np.sqrt(x + y) + g[i] = d_0 - d + return g diff --git a/tests/unit_tests/reliability/test_inverse_form.py b/tests/unit_tests/reliability/test_inverse_form.py new file mode 100644 index 00000000..3233dde1 --- /dev/null +++ b/tests/unit_tests/reliability/test_inverse_form.py @@ -0,0 +1,106 @@ +import os +import pytest +import numpy as np +from scipy import stats +from UQpy.distributions import Normal +from UQpy.run_model.RunModel import RunModel +from UQpy.run_model.model_execution.PythonModel import PythonModel +from UQpy.reliability.taylor_series import InverseFORM + + +@pytest.fixture +def inverse_form(): + """Example 7.2 from Chapter 7 of Du 2005 + + Distributions are :math:`P_X \\sim N(500, 100)`, :math:`P_Y \\sim N(1000, 100)` + + Solution from the reference is :math:`u^*=(1.7367, 0.16376)` + """ + path = os.path.abspath(os.path.dirname(__file__)) + os.chdir(path) + python_model = PythonModel(model_script='example_7_2.py', + model_object_name='performance_function', + delete_files=True) + runmodel_object = RunModel(model=python_model) + distributions = [Normal(loc=500, scale=100), Normal(loc=1_000, scale=100)] + return InverseFORM(distributions=distributions, + runmodel_object=runmodel_object, + p_fail=0.04054, + tolerance_u=1e-5, + tolerance_gradient=1e-5) + + +def test_no_seed(inverse_form): + """Solution to Chapter 7 Example 7.2 from Du 2005""" + inverse_form.run() + assert np.allclose(inverse_form.design_point_u, np.array([1.7367, 0.16376]), atol=1e-4) + + +def test_seed_x(inverse_form): + seed_x = np.array([625, 900]) + inverse_form.run(seed_x=seed_x) + assert np.allclose(inverse_form.design_point_u, np.array([1.7367, 0.16376]), atol=1e-4) + + +def test_seed_u(inverse_form): + seed_u = np.array([2.4, -1.0]) + inverse_form.run(seed_u=seed_u) + assert np.allclose(inverse_form.design_point_u, np.array([1.7367, 0.16376]), atol=1e-4) + + +def test_both_seeds(inverse_form): + """Expected behavior is to raise ValueError and inform user only one input may be provided""" + seed_x = np.array([1, 2]) + seed_u = np.array([3, 4]) + with pytest.raises(ValueError, match='UQpy: Only one input .* may be provided'): + inverse_form.run(seed_u=seed_u, seed_x=seed_x) + + +def test_neither_tolerance(): + """Expected behavior is to raise ValueError and inform user at least one tolerance must be provided""" + path = os.path.abspath(os.path.dirname(__file__)) + os.chdir(path) + python_model = PythonModel(model_script='example_7_2.py', + model_object_name='performance_function', + delete_files=True) + runmodel_object = RunModel(model=python_model) + distributions = [Normal(loc=500, scale=100), Normal(loc=1_000, scale=100)] + with pytest.raises(ValueError, match='UQpy: At least one tolerance .* must be provided'): + inverse_form = InverseFORM(distributions=distributions, + runmodel_object=runmodel_object, + p_fail=0.04054, + tolerance_u=None, + tolerance_gradient=None) + + +def test_beta(): + path = os.path.abspath(os.path.dirname(__file__)) + os.chdir(path) + python_model = PythonModel(model_script='example_7_2.py', + model_object_name='performance_function', + delete_files=True) + runmodel_object = RunModel(model=python_model) + distributions = 
[Normal(loc=500, scale=100), Normal(loc=1_000, scale=100)] + # with pytest.raises(ValueError, match='UQpy: At least one tolerance .* must be provided'): + inverse_form = InverseFORM(distributions=distributions, + runmodel_object=runmodel_object, + p_fail=None, + beta=-stats.norm.ppf(0.04054)) + inverse_form.run() + assert np.allclose(inverse_form.design_point_u, np.array([1.7367, 0.16376]), atol=1e-3) + + +def test_no_beta_no_pfail(): + path = os.path.abspath(os.path.dirname(__file__)) + os.chdir(path) + python_model = PythonModel(model_script='example_7_2.py', + model_object_name='performance_function', + delete_files=True) + runmodel_object = RunModel(model=python_model) + distributions = [Normal(loc=500, scale=100), Normal(loc=1_000, scale=100)] + with pytest.raises(ValueError, match='UQpy: Exactly one input .* must be provided'): + inverse_form = InverseFORM(distributions=distributions, + runmodel_object=runmodel_object, + p_fail=None, + beta=None) + From 48b6204ba221f441f055c0e68e4b3c5bb70ca3e9 Mon Sep 17 00:00:00 2001 From: connor-krill Date: Tue, 15 Aug 2023 12:40:59 -0400 Subject: [PATCH 10/29] documentation for InverseFORM as a child of the TaylorSeries class --- docs/source/reliability/inverse_form.rst | 74 +++++++++++++++++++ docs/source/reliability/taylor_series.rst | 1 + .../reliability/taylor_series/__init__.py | 1 + 3 files changed, 76 insertions(+) create mode 100644 docs/source/reliability/inverse_form.rst diff --git a/docs/source/reliability/inverse_form.rst b/docs/source/reliability/inverse_form.rst new file mode 100644 index 00000000..79bf8af8 --- /dev/null +++ b/docs/source/reliability/inverse_form.rst @@ -0,0 +1,74 @@ +Inverse FORM +^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + +In FORM :cite:`FORM_XDu`, the performance function is linearized according to + +.. math:: G(\textbf{U}) \approx G(\textbf{U}^\star) + \nabla G(\textbf{U}^\star)(\textbf{U}-\textbf{U}^\star)^\intercal + +where :math:`\textbf{U}^\star` is the expansion point, :math:`G(\textbf{U})` is the performance function evaluated in +the standard normal space and :math:`\nabla G(\textbf{U}^\star)` is the gradient of :math:`G(\textbf{U})` evaluated at +:math:`\textbf{U}^\star`. The probability failure is approximated as + +.. math:: P_{fail} = \Phi(-\beta_{HL}) + +where :math:`\Phi(\cdot)` is the standard normal cumulative distribution function and :math:`\beta_{HL}=||\textbf{U}^*||` +is the norm of the design point known as the Hasofer-Lind reliability index. + +The goal of the inverse FORM algorithm is to find a design point :math:`\textbf{U}^\star` that minimizes the performance +function :math:`G(\textbf{U})` and lies on the hypersphere defined by :math:`||\textbf{U}^*|| = \beta_{HL}`, or +equivalently :math:`||\textbf{U}^*|| = -\Phi^{-1}(p_{fail})`. The default convergence criteria used for this algorithm +are: + +.. math:: \text{tolerance}_{\textbf{U}}:\ ||\textbf{U}_i - \textbf{U}_{i-1}||_2 \leq 10^{-3} +.. math:: \text{tolerance}_{\nabla G(\textbf{U})}:\ ||\nabla G(\textbf{U}_i)- \nabla G(\textbf{U}_{i-1})||_2 \leq 10^{-3} + + +Problem Statement +----- +Compute :math:`u^* = \text{argmin}\ G(\textbf{U})` such that :math:`||\textbf{U}||=\beta`. + +The feasibility criteria :math:`||\textbf{U}||=\beta` may be equivalently defined as +:math:`\beta = -\Phi^{-1}(p_{fail})`, where :math:`\Phi^{-1}(\cdot)` is the inverse standard normal CDF. 
+ +Algorithm +----- +This method implements a gradient descent algorithm to solve the optimization problem within the tolerances specified by +:math:`\text{tolerance}_{\textbf{U}}` (:code:`tolerance_u`) and :math:`\text{tolerance}_{\nabla G(\textbf{U})}` (:code:`tolerance_gradient`). + +0. Define :math:`u_0` and :math:`\beta` directly or :math:`\beta=-\Phi^{-1}(p_\text{fail})` +1. Set :math:`u=u_0` and :math:`\text{converged}=False` +2. While not :math:`\text{converged}`: + a. :math:`\alpha = \frac{\nabla G(u)}{|| \nabla G(u) ||}` + b. :math:`u_{new} = - \alpha \beta` + c. If :math:`||u_{new} - u || \leq \text{tolerance}_u` and/or :math:`|| \nabla G(u_{new}) - \nabla G(u) || \leq \text{tolerance}_{\nabla G(u)}`; + set :math:`\text{converged}=True` + else; + :math:`u = u_{new}` + +The :class:`.InverseFORM` class is imported using the following command: + +>>> from UQpy.reliability.taylor_series import InverseFORM + +Methods +""""""" +.. autoclass:: UQpy.reliability.taylor_series.InverseFORM + :members: run + +Attributes +"""""""""" +.. autoattribute:: UQpy.reliability.taylor_series.InverseFORM.alpha +.. autoattribute:: UQpy.reliability.taylor_series.InverseFORM.alpha_record +.. autoattribute:: UQpy.reliability.taylor_series.InverseFORM.beta +.. autoattribute:: UQpy.reliability.taylor_series.InverseFORM.beta_record +.. autoattribute:: UQpy.reliability.taylor_series.InverseFORM.design_point_u +.. autoattribute:: UQpy.reliability.taylor_series.InverseFORM.design_point_x +.. autoattribute:: UQpy.reliability.taylor_series.InverseFORM.error_record +.. autoattribute:: UQpy.reliability.taylor_series.InverseFORM.iteration_record +.. autoattribute:: UQpy.reliability.taylor_series.InverseFORM.failure_probability_record + + +Examples +"""""""""" +.. toctree:: + + FORM Examples <../auto_examples/reliability/inverse_form/index> \ No newline at end of file diff --git a/docs/source/reliability/taylor_series.rst b/docs/source/reliability/taylor_series.rst index 29ff80e0..9687f597 100644 --- a/docs/source/reliability/taylor_series.rst +++ b/docs/source/reliability/taylor_series.rst @@ -21,6 +21,7 @@ respectively. These classes can be imported in a python script using the followi FORM
SORM + InverseFORM diff --git a/src/UQpy/reliability/taylor_series/__init__.py b/src/UQpy/reliability/taylor_series/__init__.py index c5af5bab..e2797682 100644 --- a/src/UQpy/reliability/taylor_series/__init__.py +++ b/src/UQpy/reliability/taylor_series/__init__.py @@ -1,3 +1,4 @@ from UQpy.reliability.taylor_series.FORM import FORM from UQpy.reliability.taylor_series.SORM import SORM +from UQpy.reliability.taylor_series.InverseFORM import InverseFORM from UQpy.reliability.taylor_series.baseclass.TaylorSeries import TaylorSeries From 55db785a8150a2d84bfb829aa0322b857e99dd0b Mon Sep 17 00:00:00 2001 From: connor-krill Date: Tue, 15 Aug 2023 12:41:34 -0400 Subject: [PATCH 11/29] scripts to config and run examples for InverseFORM --- docs/code/reliability/inverse_form/README.rst | 4 + .../inverse_form/inverse_form_cantilever.py | 73 +++++++++++++++++++ .../inverse_form/performance_function.py | 18 +++++ 3 files changed, 95 insertions(+) create mode 100644 docs/code/reliability/inverse_form/README.rst create mode 100644 docs/code/reliability/inverse_form/inverse_form_cantilever.py create mode 100644 docs/code/reliability/inverse_form/performance_function.py diff --git a/docs/code/reliability/inverse_form/README.rst b/docs/code/reliability/inverse_form/README.rst new file mode 100644 index 00000000..b3c07358 --- /dev/null +++ b/docs/code/reliability/inverse_form/README.rst @@ -0,0 +1,4 @@ +Inverse FORM Examples +^^^^^ + +The following example is Example 7.2 from Chapter 7 of :cite:`FORM_XDu`. \ No newline at end of file diff --git a/docs/code/reliability/inverse_form/inverse_form_cantilever.py b/docs/code/reliability/inverse_form/inverse_form_cantilever.py new file mode 100644 index 00000000..ed7b2216 --- /dev/null +++ b/docs/code/reliability/inverse_form/inverse_form_cantilever.py @@ -0,0 +1,73 @@ +""" +Inverse FORM - Cantilever Beam +----- + +A cantilever beam (example 7.2 in :cite:`FORM_XDu`) is considered to fail if the displacement at the tip exceeds the +threshold :math:`D_0`. The performance function :math:`G(\textbf{U})` of this problem is given by + +.. math:: G = D_0 - \frac{4L^3}{Ewt} \sqrt{ \left(\frac{P_x}{w^2}\right)^2 + \left(\frac{P_y}{t^2}\right)^2} + +Where the external forces are modeled as random variables :math:`P_x \sim N(500, 100)` and :math:`P_y \sim N(1000,100)`. +The constants in the problem are length (:math:`L=100`), elastic modulus (:math:E=30\times 10^6), cross section width +(:math:`w=2`) and cross section height (:math:`t=4`). + +""" +# %% md +# +# Import the necessary modules. + +# %% +import numpy as np +import matplotlib.pyplot as plt +plt.style.use('ggplot') +from scipy import stats +from UQpy.distributions import Normal +from UQpy.reliability.taylor_series import InverseFORM +from UQpy.run_model.RunModel import RunModel +from UQpy.run_model.model_execution.PythonModel import PythonModel + +# %% md +# +# Next, we initialize the :code:`RunModel` object. +# The file defining the performance function file can be found on the UQpy GitHub. +# It contains a function :code:`cantilever_beam` to compute the performance function :math:`G(\textbf{U})`. + +# %% + +model = PythonModel(model_script='performance_function.py', model_object_name="cantilever_beam") +runmodel_object = RunModel(model=model) + +# %% md +# +# Next, we define the external forces in the :math:`x` and :math:`y` direction as distributions that will be passed into +# :code:`FORM`. 
Along with the distributions, :code:`FORM` takes in the previously defined :code:`runmodel_object`, +# the specified probability of failure, and the tolerances. These tolerances are smaller than the defaults to ensure +# convergence with the level of accuracy given in the problem. + +# %% + +p_fail = 0.04054 +distributions = [Normal(500, 100), Normal(1_000, 100)] +inverse_form = InverseFORM(distributions=distributions, + runmodel_object=runmodel_object, + p_fail=p_fail, + tolerance_u=1e-5, + tolerance_gradient=1e-5) + +# %% md +# +# With everything defined we are ready to run the inverse first-order reliability method and print the results. +# The solution to this problem given by Du is :math:`\textbf{U}^*=(1.7367, 0.16376)` with a reliability index of +# :math:`\beta_{HL}=||\textbf{U}^*||=1.7444` and probability of failure of +# :math:`p_{fail} = \Phi(-\beta_{HL})=\Phi(-1.7444)=0.04054`. We expect this problem to converge in 4 iterations. +# We confirm our design point matches this length, and therefore has a probability of failure specified by our input. + +# %% + +inverse_form.run() +beta = np.linalg.norm(inverse_form.design_point_u) +print('Design point in standard normal space (u^*):', inverse_form.design_point_u[0]) +print('Design point in original space:', inverse_form.design_point_x[0]) +print('Hasofer-Lind reliability index:', beta) +print('Probability of failure at design point:', stats.norm.cdf(-beta)) +print('Number of iterations:', inverse_form.iteration_record[0]) \ No newline at end of file diff --git a/docs/code/reliability/inverse_form/performance_function.py b/docs/code/reliability/inverse_form/performance_function.py new file mode 100644 index 00000000..4bfbcecf --- /dev/null +++ b/docs/code/reliability/inverse_form/performance_function.py @@ -0,0 +1,18 @@ +import numpy as np + + +def cantilever_beam(samples=None): + """Performance function from Chapter 7 Example 7.2 from Du 2005""" + elastic_modulus = 30e6 + length = 100 + width = 2 + height = 4 + d_0 = 3 + + g = np.zeros(samples.shape[0]) + for i in range(samples.shape[0]): + x = (samples[i, 0] / width**2) ** 2 + y = (samples[i, 1] / height**2) ** 2 + d = ((4 * length**3) / (elastic_modulus * width * height)) * np.sqrt(x + y) + g[i] = d_0 - d + return g From bd5c57de1327622eaae61cc6dcceb38e8e08678c Mon Sep 17 00:00:00 2001 From: connor-krill Date: Tue, 15 Aug 2023 14:15:29 -0400 Subject: [PATCH 12/29] added InverseFORM to paths so the examples would show up in the ReadTheDocs --- docs/source/conf.py | 2 ++ 1 file changed, 2 insertions(+) diff --git a/docs/source/conf.py b/docs/source/conf.py index b4891bf8..2f4e7635 100644 --- a/docs/source/conf.py +++ b/docs/source/conf.py @@ -106,6 +106,7 @@ "../code/reliability/form", "../code/reliability/sorm", "../code/reliability/subset_simulation", + "../code/reliability/inverse_form", "../code/surrogates/srom", "../code/surrogates/gpr", "../code/surrogates/pce", @@ -148,6 +149,7 @@ "auto_examples/reliability/form", "auto_examples/reliability/sorm", "auto_examples/reliability/subset_simulation", + "auto_examples/reliability/inverse_form", "auto_examples/surrogates/srom", "auto_examples/surrogates/gpr", "auto_examples/surrogates/pce", From 935d45ddb3cb849aa387e9774561d063832cb85a Mon Sep 17 00:00:00 2001 From: connor-krill Date: Tue, 15 Aug 2023 14:16:14 -0400 Subject: [PATCH 13/29] improved docstrings and markdown for cleaniness and consistency in readthedocs --- docs/code/reliability/inverse_form/README.rst | 4 +--- .../inverse_form/inverse_form_cantilever.py | 22 
+++++++++---------- .../{performance_function.py => local_pfn.py} | 6 +++++ docs/source/reliability/inverse_form.rst | 12 +++++----- .../reliability/taylor_series/InverseFORM.py | 7 +++--- 5 files changed, 28 insertions(+), 23 deletions(-) rename docs/code/reliability/inverse_form/{performance_function.py => local_pfn.py} (87%) diff --git a/docs/code/reliability/inverse_form/README.rst b/docs/code/reliability/inverse_form/README.rst index b3c07358..44cdc4d1 100644 --- a/docs/code/reliability/inverse_form/README.rst +++ b/docs/code/reliability/inverse_form/README.rst @@ -1,4 +1,2 @@ Inverse FORM Examples -^^^^^ - -The following example is Example 7.2 from Chapter 7 of :cite:`FORM_XDu`. \ No newline at end of file +^^^^^^^^^^^^^^^^^^^^^^^^^ diff --git a/docs/code/reliability/inverse_form/inverse_form_cantilever.py b/docs/code/reliability/inverse_form/inverse_form_cantilever.py index ed7b2216..b5bfcb79 100644 --- a/docs/code/reliability/inverse_form/inverse_form_cantilever.py +++ b/docs/code/reliability/inverse_form/inverse_form_cantilever.py @@ -1,25 +1,25 @@ """ Inverse FORM - Cantilever Beam ------ +----------------------------------- +The following example is Example 7.2 from Chapter 7 of :cite:`FORM_XDu`. -A cantilever beam (example 7.2 in :cite:`FORM_XDu`) is considered to fail if the displacement at the tip exceeds the -threshold :math:`D_0`. The performance function :math:`G(\textbf{U})` of this problem is given by +A cantilever beam is considered to fail if the displacement at the tip exceeds the threshold :math:`D_0`. +The performance function :math:`G(\\textbf{U})` of this problem is given by -.. math:: G = D_0 - \frac{4L^3}{Ewt} \sqrt{ \left(\frac{P_x}{w^2}\right)^2 + \left(\frac{P_y}{t^2}\right)^2} +.. math:: G = D_0 - \\frac{4L^3}{Ewt} \\sqrt{ \\left(\\frac{P_x}{w^2}\\right)^2 + \\left(\\frac{P_y}{t^2}\\right)^2} Where the external forces are modeled as random variables :math:`P_x \sim N(500, 100)` and :math:`P_y \sim N(1000,100)`. -The constants in the problem are length (:math:`L=100`), elastic modulus (:math:E=30\times 10^6), cross section width -(:math:`w=2`) and cross section height (:math:`t=4`). +The constants in the problem are length (:math:`L=100`), elastic modulus (:math:`E=30\\times 10^6`), cross section width +(:math:`w=2`), cross section height (:math:`t=4`), and :math:`D_0=3`. """ # %% md # -# Import the necessary modules. +# First, we import the necessary modules. 
# %% + import numpy as np -import matplotlib.pyplot as plt -plt.style.use('ggplot') from scipy import stats from UQpy.distributions import Normal from UQpy.reliability.taylor_series import InverseFORM @@ -34,7 +34,7 @@ # %% -model = PythonModel(model_script='performance_function.py', model_object_name="cantilever_beam") +model = PythonModel(model_script='local_pfn.py', model_object_name="cantilever_beam") runmodel_object = RunModel(model=model) # %% md @@ -70,4 +70,4 @@ print('Design point in original space:', inverse_form.design_point_x[0]) print('Hasofer-Lind reliability index:', beta) print('Probability of failure at design point:', stats.norm.cdf(-beta)) -print('Number of iterations:', inverse_form.iteration_record[0]) \ No newline at end of file +print('Number of iterations:', inverse_form.iteration_record[0]) diff --git a/docs/code/reliability/inverse_form/performance_function.py b/docs/code/reliability/inverse_form/local_pfn.py similarity index 87% rename from docs/code/reliability/inverse_form/performance_function.py rename to docs/code/reliability/inverse_form/local_pfn.py index 4bfbcecf..4f40a8a7 100644 --- a/docs/code/reliability/inverse_form/performance_function.py +++ b/docs/code/reliability/inverse_form/local_pfn.py @@ -1,3 +1,9 @@ +""" + +Auxiliary file +============================================== + +""" import numpy as np diff --git a/docs/source/reliability/inverse_form.rst b/docs/source/reliability/inverse_form.rst index 79bf8af8..6dde4b65 100644 --- a/docs/source/reliability/inverse_form.rst +++ b/docs/source/reliability/inverse_form.rst @@ -9,7 +9,7 @@ where :math:`\textbf{U}^\star` is the expansion point, :math:`G(\textbf{U})` is the standard normal space and :math:`\nabla G(\textbf{U}^\star)` is the gradient of :math:`G(\textbf{U})` evaluated at :math:`\textbf{U}^\star`. The probability failure is approximated as -.. math:: P_{fail} = \Phi(-\beta_{HL}) +.. math:: p_{fail} = \Phi(-\beta_{HL}) where :math:`\Phi(\cdot)` is the standard normal cumulative distribution function and :math:`\beta_{HL}=||\textbf{U}^*||` is the norm of the design point known as the Hasofer-Lind reliability index. @@ -24,14 +24,14 @@ are: Problem Statement ------ +----------------- Compute :math:`u^* = \text{argmin}\ G(\textbf{U})` such that :math:`||\textbf{U}||=\beta`. The feasibility criteria :math:`||\textbf{U}||=\beta` may be equivalently defined as :math:`\beta = -\Phi^{-1}(p_{fail})`, where :math:`\Phi^{-1}(\cdot)` is the inverse standard normal CDF. Algorithm ------ +----------------- This method implements a gradient descent algorithm to solve the optimization problem within the tolerances specified by :math:`\text{tolerance}_{\textbf{U}}` (:code:`tolerance_u`) and :math:`\text{tolerance}_{\nabla G(\textbf{U})}` (:code:`tolerance_gradient`). @@ -50,12 +50,12 @@ The :class:`.InverseFORM` class is imported using the following command: >>> from UQpy.reliability.taylor_series import InverseFORM Methods -""""""" +----------------- .. autoclass:: UQpy.reliability.taylor_series.InverseFORM :members: run Attributes -"""""""""" +----------------- .. autoattribute:: UQpy.reliability.taylor_series.InverseFORM.alpha .. autoattribute:: UQpy.reliability.taylor_series.InverseFORM.alpha_record .. autoattribute:: UQpy.reliability.taylor_series.InverseFORM.beta @@ -71,4 +71,4 @@ Examples """""""""" .. 
toctree:: - FORM Examples <../auto_examples/reliability/inverse_form/index> \ No newline at end of file + InverseFORM Examples <../auto_examples/reliability/inverse_form/index> \ No newline at end of file diff --git a/src/UQpy/reliability/taylor_series/InverseFORM.py b/src/UQpy/reliability/taylor_series/InverseFORM.py index 0e282607..6ec2adff 100644 --- a/src/UQpy/reliability/taylor_series/InverseFORM.py +++ b/src/UQpy/reliability/taylor_series/InverseFORM.py @@ -111,16 +111,17 @@ def __init__( self.alpha_record: list = [] """Record of alpha :math:`\\alpha=\\frac{\\nabla G(u)}{||\\nabla G(u)||}`""" self.beta: float = self.beta - """Feasibility criteria for the optimization :math:`||\\textbf{U}|| = \\beta`""" + """Feasibility criteria for the optimization :math:`||\\textbf{U}|| = \\beta_{HL}`""" self.beta_record: list = [] - """Record of Hasofer-Lind reliability index that defines the feasibility criteria :math:`||u||=\\beta_{HL}`""" + """Record of Hasofer-Lind reliability index that defines the feasibility criteria + :math:`||\\textbf{U}||=\\beta_{HL}`""" self.design_point_u: list = [] """Design point in the standard normal space :math:`\\textbf{U}`""" self.design_point_x: list = [] """Design point in the parameter space :math:`\\textbf{X}`""" self.error_record: list = [] """Record of the final error defined by - :math:`error_u = ||u_new - u||` and :math:`error_{gradient} = || \\nabla G(u_new) - \\nabla G(u)||`""" + :math:`error_u = ||u_{new} - u||` and :math:`error_{\\nabla G(u)} = || \\nabla G(u_{new}) - \\nabla G(u)||`""" self.iteration_record: list = [] """Record of the number of iterations before algorithm termination""" self.failure_probability_record: list = [] From b74b418e173572f403ddb13e4b6d1c32eb4204d5 Mon Sep 17 00:00:00 2001 From: connor-krill Date: Wed, 16 Aug 2023 10:06:22 -0400 Subject: [PATCH 14/29] corrected vector notation in math statement --- docs/source/reliability/index.rst | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/docs/source/reliability/index.rst b/docs/source/reliability/index.rst index d3010039..0464cbed 100644 --- a/docs/source/reliability/index.rst +++ b/docs/source/reliability/index.rst @@ -3,7 +3,7 @@ Reliability Reliability of a system refers to the assessment of its probability of failure (i.e the system no longer satisfies some performance measures), given the model uncertainty in the structural, environmental and load parameters. Given a vector -of random variables :math:`\textbf{X}=\{X_1, X_2, \ldots, X_n\} \in \mathcal{D}_\textbf{X}\subset \mathbb{R}^n`, where +of random variables :math:`\textbf{X}=[X_1, X_2, \ldots, X_n]^T \in \mathcal{D}_\textbf{X}\subset \mathbb{R}^n`, where :math:`\mathcal{D}` is the domain of interest and :math:`f_{\textbf{X}}(\textbf{x})` is its joint probability density function then, the probability that the system will fail is defined as @@ -22,8 +22,8 @@ transformation and can support reliability analysis for problems with arbitraril This module contains functionality for all reliability methods supported in :py:mod:`UQpy`. The module currently contains the following classes: -- :class:`.TaylorSeries`: Class to perform reliability analysis using First Order reliability Method (:class:`FORM`) and Second Order - Reliability Method (:class:`SORM`). +- :class:`.TaylorSeries`: Class to perform reliability analysis using First Order Reliability Method (:class:`FORM`), + Inverse First Order Reliability Method (:class:`InverseFORM`) and Second Order Reliability Method (:class:`SORM`). 
- :class:`.SubsetSimulation`: Class to perform reliability analysis using subset simulation. From 770256bd9e49f6b65cabcee7747934868c60fd62 Mon Sep 17 00:00:00 2001 From: connor-krill Date: Wed, 16 Aug 2023 10:07:09 -0400 Subject: [PATCH 15/29] edited markdown for clarity and notational consistency --- .../inverse_form/inverse_form_cantilever.py | 2 +- docs/source/reliability/inverse_form.rst | 13 ++++++++----- 2 files changed, 9 insertions(+), 6 deletions(-) diff --git a/docs/code/reliability/inverse_form/inverse_form_cantilever.py b/docs/code/reliability/inverse_form/inverse_form_cantilever.py index b5bfcb79..54f355e7 100644 --- a/docs/code/reliability/inverse_form/inverse_form_cantilever.py +++ b/docs/code/reliability/inverse_form/inverse_form_cantilever.py @@ -59,7 +59,7 @@ # With everything defined we are ready to run the inverse first-order reliability method and print the results. # The solution to this problem given by Du is :math:`\textbf{U}^*=(1.7367, 0.16376)` with a reliability index of # :math:`\beta_{HL}=||\textbf{U}^*||=1.7444` and probability of failure of -# :math:`p_{fail} = \Phi(-\beta_{HL})=\Phi(-1.7444)=0.04054`. We expect this problem to converge in 4 iterations. +# :math:`p_{fail} = \Phi(-\beta_{HL})=0.04054`. We expect this problem to converge in 4 iterations. # We confirm our design point matches this length, and therefore has a probability of failure specified by our input. # %% diff --git a/docs/source/reliability/inverse_form.rst b/docs/source/reliability/inverse_form.rst index 6dde4b65..f3470823 100644 --- a/docs/source/reliability/inverse_form.rst +++ b/docs/source/reliability/inverse_form.rst @@ -31,9 +31,9 @@ The feasibility criteria :math:`||\textbf{U}||=\beta` may be equivalently define :math:`\beta = -\Phi^{-1}(p_{fail})`, where :math:`\Phi^{-1}(\cdot)` is the inverse standard normal CDF. Algorithm ------------------ +--------- This method implements a gradient descent algorithm to solve the optimization problem within the tolerances specified by -:math:`\text{tolerance}_{\textbf{U}}` (:code:`tolerance_u`) and :math:`\text{tolerance}_{\nabla G(\textbf{U})}` (:code:`tolerance_gradient`). +:code:`tolerance_u` and :code:`tolerance_gradient`. 0. Define :math:`u_0` and :math:`\beta` directly or :math:`\beta=-\Phi^{-1}(p_\text{fail})` 1. Set :math:`u=u_0` and :math:`\text{converged}=False` @@ -50,12 +50,14 @@ The :class:`.InverseFORM` class is imported using the following command: >>> from UQpy.reliability.taylor_series import InverseFORM Methods ------------------ +------- + .. autoclass:: UQpy.reliability.taylor_series.InverseFORM :members: run Attributes ------------------ +---------- + .. autoattribute:: UQpy.reliability.taylor_series.InverseFORM.alpha .. autoattribute:: UQpy.reliability.taylor_series.InverseFORM.alpha_record .. autoattribute:: UQpy.reliability.taylor_series.InverseFORM.beta @@ -68,7 +70,8 @@ Attributes Examples -"""""""""" +-------- + .. 
toctree:: InverseFORM Examples <../auto_examples/reliability/inverse_form/index> \ No newline at end of file From fa0230243a164d895c95a3a051fc3113cda78b4a Mon Sep 17 00:00:00 2001 From: connor-krill Date: Wed, 16 Aug 2023 10:07:46 -0400 Subject: [PATCH 16/29] editted docstrings for clarity and notational consistency --- .../reliability/taylor_series/InverseFORM.py | 29 ++++++++++--------- .../reliability/test_inverse_form.py | 9 +++--- 2 files changed, 19 insertions(+), 19 deletions(-) diff --git a/src/UQpy/reliability/taylor_series/InverseFORM.py b/src/UQpy/reliability/taylor_series/InverseFORM.py index 6ec2adff..be4610cf 100644 --- a/src/UQpy/reliability/taylor_series/InverseFORM.py +++ b/src/UQpy/reliability/taylor_series/InverseFORM.py @@ -34,9 +34,9 @@ def __init__( """Class to perform the Inverse First Order Reliability Method. Each time :meth:`run` is called the results are appended to the class attributes. - This is a child class of the :class:`TaylorSeries` class. - By default, :code:`tolerance_u` and :code:`tolerance_gradient` must be satisfied for convergence. - Specifying a tolerance as :code:`None` will ignore that criteria, but both cannot be :code:`None`. + By default, :code:`tolerance_u` and :code:`tolerance_gradient` must be satisfied for convergence. + Specifying a tolerance as :code:`None` will ignore that criteria, but both cannot be :code:`None`. + This is a child class of the :class:`TaylorSeries` class. :param distributions: Marginal probability distributions of each random variable. Must be of type :class:`.DistributionContinuous1D` or :class:`.JointIndependent`. @@ -46,19 +46,19 @@ def __init__( Only one of :code:`p_fail` or :code:`beta` may be provided. :param beta: Hasofer-Lind reliability index defining the feasibility criteria as :math:`||u|| = \\beta`. Default: :code:`None`. - Only one of code:`p_fail` or :code:`beta` may be provided. + Only one of :code:`p_fail` or :code:`beta` may be provided. :param seed_x: Point in the parameter space :math:`\mathbf{X}` to start from. Only one of :code:`seed_x` or :code:`seed_u` may be provided. - If neither is provided, the zero vector in :math:`\mathbf{U}` space is used as the seed. + If neither is provided, the zero vector in :math:`\mathbf{U}` space is the seed. :param seed_u: Point in the uncorrelated standard normal space :math:`\mathbf{U}` to start from. Only one of :code:`seed_x` or :code:`seed_u` may be provided. - If neither is provided, the zero vector in :math:`\mathbf{U}` space is used as the seed. + If neither is provided, the zero vector in :math:`\mathbf{U}` space is the seed. :param df_step: Finite difference step in standard normal space. Default: :math:`0.01` :param corr_x: Correlation matrix :math:`\mathbf{C_X}` of the random vector :math:`\mathbf{X}` :param corr_z: Correlation matrix :math:`\mathbf{C_Z}` of the random vector :math:`\mathbf{Z}` Default: :code:`corr_z` is the identity matrix. :param max_iterations: Maximum number of iterations for the `HLRF` algorithm. Default: :math:`100` - :param tolerance_u: Convergence threshold for criterion :math:`||\mathbf{U}^{k} - \mathbf{U}^{k-1}||_2 \leq` + :param tolerance_u: Convergence threshold for criterion :math:`||\mathbf{U}_i - \mathbf{U}_{i-1}||_2 \leq` :code:`tolerance_u` of the `HLRF` algorithm. 
Default: :math:`1.0e-3` :param tolerance_gradient: Convergence threshold for criterion @@ -107,11 +107,12 @@ def __init__( # Initialize attributes self.alpha: float = np.nan - """Normalized vector in :math:`\\textbf{U}`-space""" + """Normalized gradient vector in :math:`\\textbf{U}` space""" self.alpha_record: list = [] - """Record of alpha :math:`\\alpha=\\frac{\\nabla G(u)}{||\\nabla G(u)||}`""" + """Record of :math:`\\alpha=\\frac{\\nabla G(u)}{||\\nabla G(u)||}`""" self.beta: float = self.beta - """Feasibility criteria for the optimization :math:`||\\textbf{U}|| = \\beta_{HL}`""" + """Hasofer-Lind reliability index that defines the feasibility criteria as + :math:`||\\textbf{U}|| = \\beta_{HL}`""" self.beta_record: list = [] """Record of Hasofer-Lind reliability index that defines the feasibility criteria :math:`||\\textbf{U}||=\\beta_{HL}`""" @@ -131,11 +132,11 @@ def run(self, seed_x: Union[list, np.ndarray] = None, seed_u: Union[list, np.nda """Runs the inverse FORM algorithm. :param seed_x: Point in the parameter space :math:`\mathbf{X}` to start from. - Only one of `seed_x` or `seed_u` may be provided. - If neither is provided, the zero vector in :math:`\mathbf{U}` space is used as the seed. + Only one of :code:`seed_x` or :code:`seed_u` may be provided. + If neither is provided, the zero vector in :math:`\mathbf{U}` space is the seed. :param seed_u: Point in the uncorrelated standard normal space :math:`\mathbf{U}` to start from. - Only one of `seed_x` or `seed_u` may be provided. - If neither is provided, the zero vector in :math:`\mathbf{U}` space is used as the seed. + Only one of :code:`seed_x` or :code:`seed_u` may be provided. + If neither is provided, the zero vector in :math:`\mathbf{U}` space is the seed. """ self.logger.info('UQpy: Running InverseFORM...') if (seed_x is not None) and (seed_u is not None): diff --git a/tests/unit_tests/reliability/test_inverse_form.py b/tests/unit_tests/reliability/test_inverse_form.py index 3233dde1..1c0e753e 100644 --- a/tests/unit_tests/reliability/test_inverse_form.py +++ b/tests/unit_tests/reliability/test_inverse_form.py @@ -10,11 +10,11 @@ @pytest.fixture def inverse_form(): - """Example 7.2 from Chapter 7 of Du 2005 + """Example 7.2 from Chapter 7 of X. Du 2005 Distributions are :math:`P_X \\sim N(500, 100)`, :math:`P_Y \\sim N(1000, 100)` - - Solution from the reference is :math:`u^*=(1.7367, 0.16376)` + Solution from the reference is :math:`u^*=(1.7367, 0.16376)`. + Tolerances of :math:`1e-5` are used to ensure convergence to level of precision given by Du. 
""" path = os.path.abspath(os.path.dirname(__file__)) os.chdir(path) @@ -31,7 +31,6 @@ def inverse_form(): def test_no_seed(inverse_form): - """Solution to Chapter 7 Example 7.2 from Du 2005""" inverse_form.run() assert np.allclose(inverse_form.design_point_u, np.array([1.7367, 0.16376]), atol=1e-4) @@ -81,7 +80,6 @@ def test_beta(): delete_files=True) runmodel_object = RunModel(model=python_model) distributions = [Normal(loc=500, scale=100), Normal(loc=1_000, scale=100)] - # with pytest.raises(ValueError, match='UQpy: At least one tolerance .* must be provided'): inverse_form = InverseFORM(distributions=distributions, runmodel_object=runmodel_object, p_fail=None, @@ -91,6 +89,7 @@ def test_beta(): def test_no_beta_no_pfail(): + """Expected behavior is to raise ValueError and inform the user exactly one in put must be provided""" path = os.path.abspath(os.path.dirname(__file__)) os.chdir(path) python_model = PythonModel(model_script='example_7_2.py', From adafcdfb17e86098adfb8de5db06631cdcf6500b Mon Sep 17 00:00:00 2001 From: connor-krill Date: Wed, 16 Aug 2023 10:08:20 -0400 Subject: [PATCH 17/29] added references to the new child class InverseFORM --- docs/source/reliability/taylor_series.rst | 17 ++++++++++------- 1 file changed, 10 insertions(+), 7 deletions(-) diff --git a/docs/source/reliability/taylor_series.rst b/docs/source/reliability/taylor_series.rst index 9687f597..78ec77a6 100644 --- a/docs/source/reliability/taylor_series.rst +++ b/docs/source/reliability/taylor_series.rst @@ -1,27 +1,30 @@ Taylor Series ------------- -:class:`.TaylorSeries` is a class that calculates the reliability of a model using the First Order Reliability Method (FORM) -or the Second Order Reliability Method (SORM) based on the first-order and second-order Taylor series expansion -approximation of the performance function, respectively (:cite:`TaylorSeries1`, :cite:`TaylorSeries2`). +:class:`.TaylorSeries` is a class that calculates the reliability of a model using the First Order Reliability Method +(FORM), Inverse First Order Reliability Method (InverseFORM) or Second Order Reliability Method (SORM) based on the +first-order and second-order Taylor series expansion approximation of the performance function, respectively +(:cite:`TaylorSeries1`, :cite:`FORM_XDu`, :cite:`TaylorSeries2`). .. image:: ../_static/Reliability_FORM.png :scale: 40 % :alt: Graphical representation of the FORM. :align: center -The :class:`.TaylorSeries` class is the parent class of the :class:`.FORM` and :class:`.SORM` classes that perform the FORM and SORM, -respectively. These classes can be imported in a python script using the following command: +The :class:`.TaylorSeries` class is the parent class of the :class:`.FORM`, :class:`InverseFORM`, and :class:`.SORM` +classes that perform the FORM, InverseFORM, and SORM, respectively. +These classes can be imported in a python script using the following command: ->>> from UQpy.reliability.taylor_series import FORM, SORM +>>> from UQpy.reliability.taylor_series import FORM, InverseFORM, SORM .. 
toctree:: :maxdepth: 1 FORM - SORM InverseFORM + SORM + From 81335384ef26af127077df38e9aeb1fb407ce9dc Mon Sep 17 00:00:00 2001 From: connor-krill Date: Thu, 17 Aug 2023 13:22:30 -0400 Subject: [PATCH 18/29] grammatical and notation edits to make documentation more consistent with literature references --- docs/source/reliability/index.rst | 15 ++++++++------- docs/source/reliability/subset.rst | 24 ++++++++++++------------ 2 files changed, 20 insertions(+), 19 deletions(-) diff --git a/docs/source/reliability/index.rst b/docs/source/reliability/index.rst index d3010039..51362cfb 100644 --- a/docs/source/reliability/index.rst +++ b/docs/source/reliability/index.rst @@ -2,19 +2,20 @@ Reliability =========== Reliability of a system refers to the assessment of its probability of failure (i.e the system no longer satisfies some -performance measures), given the model uncertainty in the structural, environmental and load parameters. Given a vector -of random variables :math:`\textbf{X}=\{X_1, X_2, \ldots, X_n\} \in \mathcal{D}_\textbf{X}\subset \mathbb{R}^n`, where -:math:`\mathcal{D}` is the domain of interest and :math:`f_{\textbf{X}}(\textbf{x})` is its joint probability density -function then, the probability that the system will fail is defined as +performance measure), given the model uncertainty in the structural, environmental and load parameters. Given a vector +of random variables :math:`\textbf{X}=[X_1, X_2, \ldots, X_n]^T \in \mathcal{D}_\textbf{X}\subset \mathbb{R}^n`, where +:math:`\mathcal{D}_\textbf{X}` is the domain of interest and :math:`f_{\textbf{X}}(\textbf{x})` is its joint probability density +function, then the probability that the system will fail is defined as -.. math:: P_f =\mathbb{P}(g(\textbf{X}) \leq 0) = \int_{D_f} f_{\textbf{X}}(\textbf{x})d\textbf{x} = \int_{\{\textbf{X}:g(\textbf{X})\leq 0 \}} f_{\textbf{X}}(\textbf{x})d\textbf{x} +.. math:: P_f =\mathbb{P}(g(\textbf{X}) \leq 0) = \int_{\mathcal{D}_f} f_{\textbf{X}}(\textbf{x})d\textbf{x} = \int_{\{\textbf{X}:g(\textbf{X})\leq 0 \}} f_{\textbf{X}}(\textbf{x})d\textbf{x} -where :math:`g(\textbf{X})` is the so-called performance function. The reliability problem is often formulated in the +where :math:`g(\textbf{X})` is the so-called performance function and :math:`\mathcal{D}_f=\{\textbf{X}:g(\textbf{X})\leq 0 \}` is the failure domain. +The reliability problem is often formulated in the standard normal space :math:`\textbf{U}\sim \mathcal{N}(\textbf{0}, \textbf{I}_n)`, which means that a nonlinear isoprobabilistic transformation from the generally non-normal parameter space -:math:`\textbf{X}\sim f_{\textbf{X}}(\cdot)` to the standard normal is required (see the :py:mod:`.transformations` module). +:math:`\textbf{X}\sim f_{\textbf{X}}(\cdot)` to the standard normal space is required (see the :py:mod:`.transformations` module). The performance function in the standard normal space is denoted :math:`G(\textbf{U})`. :py:mod:`.UQpy` does not require this transformation and can support reliability analysis for problems with arbitrarily distributed parameters. diff --git a/docs/source/reliability/subset.rst b/docs/source/reliability/subset.rst index 5da7def8..555b76af 100644 --- a/docs/source/reliability/subset.rst +++ b/docs/source/reliability/subset.rst @@ -2,27 +2,27 @@ Subset Simulation ------------------- In the subset simulation method :cite:`SubsetSimulation` the probability of failure :math:`P_f` is approximated by a product of probabilities -of more frequent events. 
That is, the failure event :math:`G = \{\textbf{x} \in \mathbb{R}^n:G(\textbf{x}) \leq 0\}`, +of more frequent events. That is, the failure event :math:`G = \{\textbf{X} \in \mathbb{R}^n:g(\textbf{X}) \leq 0\}`, is expressed as the of union of `M` nested intermediate events :math:`G_1,G_2,\cdots,G_M` such that :math:`G_1 \supset G_2 \supset \cdots \supset G_M`, and :math:`G = \cap_{i=1}^{M} G_i`. The intermediate failure events -are defined as :math:`G_i=\{G(\textbf{x})\le b_i\}`, where :math:`b_1>b_2>\cdots>b_i=0` are positive thresholds selected -such that each conditional probability :math:`P(G_i | G_{i-1}), ~i=2,3,\cdots,M-1` equals a target probability value +are defined as :math:`G_i=\{g(\textbf{X})\le b_i\}`, where :math:`b_1>b_2>\cdots>b_M=0` are non-negative thresholds selected +such that each conditional probability :math:`P(G_{i+1} | G_{i}),\ i=1,2,\cdots,M-1` equals a target probability value :math:`p_0`. The probability of failure :math:`P_f` is estimated as: -.. math:: P_f = P\left(\cap_{i=1}^M G_i\right) = P(G_1)\prod_{i=2}^M P(G_i | G_{i-1}) +.. math:: P_f = P\left(\bigcap_{i=1}^M G_i\right) = P(G_1)\prod_{i=1}^{M-1} P(G_{i+1} | G_{i}) where the probability :math:`P(G_1)` is computed through Monte Carlo simulations. In order to estimate the conditional -probabilities :math:`P(G_i|G_{i-1}),~j=2,3,\cdots,M` generation of Markov Chain Monte Carlo (MCMC) samples from the -conditional pdf :math:`p_{\textbf{U}}(\textbf{u}|G_{i-1})` is required. In the context of subset simulation, the Markov +probabilities :math:`P(G_{i+1}|G_i),~i=1,2,\cdots,M-1` generation of Markov Chain Monte Carlo (MCMC) samples from the +conditional pdf :math:`p_{\textbf{X}}(\textbf{x}|G_i)` is required. In the context of subset simulation, the Markov chains are constructed through a two-step acceptance/rejection criterion. Starting from a Markov chain state -:math:`\textbf{x}` and a proposal distribution :math:`q(\cdot|\textbf{x})`, a candidate sample :math:`\textbf{w}` is -generated. In the first stage, the sample :math:`\textbf{w}` is accepted/rejected with probability +:math:`\textbf{X}` and a proposal distribution :math:`q(\cdot|\textbf{X})`, a candidate sample :math:`\textbf{W}` is +generated. In the first stage, the sample :math:`\textbf{W}` is accepted/rejected with probability -.. math:: \alpha=\min\bigg\{1, \frac{p(\textbf{w})q(\textbf{x}|\textbf{w})}{p(\textbf{x})q(\textbf{w}|\textbf{x})}\bigg\} +.. math:: \alpha=\min\bigg\{1, \frac{p_\textbf{X}(\textbf{w})q(\textbf{x}|\textbf{W})}{p_\textbf{X}(\textbf{x})q(\textbf{w}|\textbf{X})}\bigg\} -and in the second stage is accepted/rejected based on whether the sample belongs to the failure region :math:`G_j`. -:class:`.SubSetSimulation` can be used with any of the available (or custom) :class:`.MCMC` classes in the -:py:mod:`sampling` module. +and in the second stage is accepted/rejected based on whether the sample belongs to the failure region :math:`G_i`. +:class:`.SubsetSimulation` can be used with any of the available (or custom) :class:`.MCMC` classes in the +:py:mod:`Sampling` module. 
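As a rough, self-contained illustration of the estimator and the two-stage accept/reject step described above (this sketch is independent of the UQpy classes; the toy performance function, the standard normal target, the uniform component-wise proposal, and every sample size below are assumptions made only for the example):

    import numpy as np

    rng = np.random.default_rng(0)


    def g(x):
        # Toy linear performance function; failure corresponds to g(x) <= 0
        return 4.0 - x.sum(axis=-1) / np.sqrt(x.shape[-1])


    n_samples, p0, dim = 1000, 0.1, 2
    n_keep = int(p0 * n_samples)        # seeds carried to the next level
    n_per_chain = int(1 / p0)           # states generated from each seed (seed included)

    # Level 0: crude Monte Carlo from the standard normal target
    x = rng.standard_normal((n_samples, dim))
    gx = g(x)
    pf, level = 1.0, 0

    while level < 10:                   # hard cap on the number of conditional levels
        order = np.argsort(gx)
        threshold = gx[order[n_keep - 1]]   # p0-quantile of g at the current level
        if threshold <= 0.0:                # final level: count actual failures
            pf *= np.mean(gx <= 0.0)
            break
        pf *= p0                            # each conditional probability is ~p0 by construction
        level += 1
        seeds, seeds_g = x[order[:n_keep]], gx[order[:n_keep]]
        x_new, g_new = [], []
        for s, sg in zip(seeds, seeds_g):
            x_new.append(s.copy()); g_new.append(sg)
            for _ in range(n_per_chain - 1):
                # Stage 1: component-wise Metropolis move targeting the standard normal
                cand = s + rng.uniform(-1.0, 1.0, size=dim)
                keep = rng.random(dim) < np.exp(0.5 * (s ** 2 - cand ** 2))
                cand = np.where(keep, cand, s)
                # Stage 2: keep the move only if it stays in the intermediate failure set
                cg = g(cand[None, :])[0]
                if cg <= threshold:
                    s, sg = cand, cg
                x_new.append(s.copy()); g_new.append(sg)
        x, gx = np.asarray(x_new), np.asarray(g_new)

    print(f"Estimated P_f after {level} conditional levels: {pf:.2e}")

With p_0 = 0.1, each intermediate threshold b_i is the p_0-quantile of the current performance values, so every conditional probability is approximately p_0 by construction and the final estimate is p_0 raised to the number of conditional levels times the observed failure fraction at the last level, mirroring the product expression for P_f given above.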
SubsetSimulation Class ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ From 5a6a1ce2f61c400bef68bb4c77f01b8633381725 Mon Sep 17 00:00:00 2001 From: connor-krill Date: Thu, 17 Aug 2023 13:30:14 -0400 Subject: [PATCH 19/29] added state_function attribute, correct attribute behavior when max number of iterations is reached --- .../reliability/taylor_series/InverseFORM.py | 33 +++++++++++-------- 1 file changed, 19 insertions(+), 14 deletions(-) diff --git a/src/UQpy/reliability/taylor_series/InverseFORM.py b/src/UQpy/reliability/taylor_series/InverseFORM.py index be4610cf..991d5c10 100644 --- a/src/UQpy/reliability/taylor_series/InverseFORM.py +++ b/src/UQpy/reliability/taylor_series/InverseFORM.py @@ -127,6 +127,8 @@ def __init__( """Record of the number of iterations before algorithm termination""" self.failure_probability_record: list = [] """Record of the probability of failure defined by :math:`p_{fail} = \\Phi(-\\beta_{HL})`""" + self.state_function: list = [] + """State function :math:`G(u)` evaluated at each step in the optimization""" def run(self, seed_x: Union[list, np.ndarray] = None, seed_u: Union[list, np.ndarray] = None): """Runs the inverse FORM algorithm. @@ -144,6 +146,7 @@ def run(self, seed_x: Union[list, np.ndarray] = None, seed_u: Union[list, np.nda # Allocate u and the gradient of G(u) as arrays u = np.zeros([self.max_iterations + 1, self.dimension]) + state_function = np.full(self.max_iterations + 1, np.nan) state_function_gradient = np.zeros([self.max_iterations + 1, self.dimension]) # Set initial seed. If both seed_x and seed_u are None, the initial seed is a vector of zeros in U space. @@ -170,12 +173,14 @@ def run(self, seed_x: Union[list, np.ndarray] = None, seed_u: Union[list, np.nda self.nataf_object.run(samples_z=z, jacobian=True) x = self.nataf_object.samples_x self.logger.info(f'Design Point U: {u[iteration, :]}\nDesign Point X: {x}\n') - state_function_gradient[iteration + 1, :], _, _ = self._derivatives(point_u=u[iteration, :], - point_x=x, - runmodel_object=self.runmodel_object, - nataf_object=self.nataf_object, - df_step=self.df_step, - order='first') + state_function_gradient[iteration + 1, :], qoi, _ = self._derivatives(point_u=u[iteration, :], + point_x=x, + runmodel_object=self.runmodel_object, + nataf_object=self.nataf_object, + df_step=self.df_step, + order='first') + self.logger.info(f'State Function: {qoi}') + self.state_function[iteration] = qoi alpha = state_function_gradient[iteration + 1] alpha /= np.linalg.norm(state_function_gradient[iteration + 1]) @@ -196,11 +201,11 @@ def run(self, seed_x: Union[list, np.ndarray] = None, seed_u: Union[list, np.nda if iteration >= self.max_iterations: self.logger.info(f'UQpy: Maximum number of iterations {self.max_iterations} reached before convergence') - else: - self.alpha_record.append(alpha) - self.beta_record.append(self.beta) - self.design_point_u.append(u[iteration, :]) - self.design_point_x.append(x) - self.error_record.append((error_u, error_gradient)) - self.iteration_record.append(iteration) - self.failure_probability_record.append(self.p_fail) + self.alpha_record.append(alpha) + self.beta_record.append(self.beta) + self.design_point_u.append(u[iteration, :]) + self.design_point_x.append(x) + self.error_record.append((error_u, error_gradient)) + self.iteration_record.append(iteration) + self.failure_probability_record.append(self.p_fail) + self.state_function.append(state_function) From d9923886b01190ee6b20b6e280fdba63548ec575 Mon Sep 17 00:00:00 2001 From: connor-krill Date: Thu, 17 Aug 2023 13:34:49 
-0400 Subject: [PATCH 20/29] fixed minor bug with state_function attribute --- src/UQpy/reliability/taylor_series/InverseFORM.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/UQpy/reliability/taylor_series/InverseFORM.py b/src/UQpy/reliability/taylor_series/InverseFORM.py index 991d5c10..bac070f3 100644 --- a/src/UQpy/reliability/taylor_series/InverseFORM.py +++ b/src/UQpy/reliability/taylor_series/InverseFORM.py @@ -180,7 +180,7 @@ def run(self, seed_x: Union[list, np.ndarray] = None, seed_u: Union[list, np.nda df_step=self.df_step, order='first') self.logger.info(f'State Function: {qoi}') - self.state_function[iteration] = qoi + state_function[iteration + 1] = qoi alpha = state_function_gradient[iteration + 1] alpha /= np.linalg.norm(state_function_gradient[iteration + 1]) From a75aac526ec918fe4a95b7df38e059cc99acb63b Mon Sep 17 00:00:00 2001 From: connor-krill Date: Fri, 18 Aug 2023 13:07:52 -0400 Subject: [PATCH 21/29] cleaned up notation and attribute documentation --- docs/source/reliability/subset.rst | 14 ++++++-------- 1 file changed, 6 insertions(+), 8 deletions(-) diff --git a/docs/source/reliability/subset.rst b/docs/source/reliability/subset.rst index 555b76af..be50bfc9 100644 --- a/docs/source/reliability/subset.rst +++ b/docs/source/reliability/subset.rst @@ -24,26 +24,24 @@ and in the second stage is accepted/rejected based on whether the sample belongs :class:`.SubsetSimulation` can be used with any of the available (or custom) :class:`.MCMC` classes in the :py:mod:`Sampling` module. -SubsetSimulation Class -^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ - The :class:`.SubsetSimulation` class is imported using the following command: >>> from UQpy.reliability.SubsetSimulation import SubsetSimulation Methods """"""" + .. autoclass:: UQpy.reliability.SubsetSimulation Attributes """""""""" -.. autoattribute:: UQpy.reliability.SubsetSimulation.samples -.. autoattribute:: UQpy.reliability.SubsetSimulation.performance_function_per_level -.. autoattribute:: UQpy.reliability.SubsetSimulation.performance_threshold_per_level + +.. autoattribute:: UQpy.reliability.SubsetSimulation.dependent_chains_CoV .. autoattribute:: UQpy.reliability.SubsetSimulation.failure_probability .. autoattribute:: UQpy.reliability.SubsetSimulation.independent_chains_CoV -.. autoattribute:: UQpy.reliability.SubsetSimulation.dependent_chains_CoV - +.. autoattribute:: UQpy.reliability.SubsetSimulation.performance_function_per_level +.. autoattribute:: UQpy.reliability.SubsetSimulation.performance_threshold_per_level +.. 
autoattribute:: UQpy.reliability.SubsetSimulation.samples Examples """""""""" From adad1d0cb1ee73b51bc70b8e8bf3c8811ff2f3cf Mon Sep 17 00:00:00 2001 From: connor-krill Date: Fri, 18 Aug 2023 13:08:06 -0400 Subject: [PATCH 22/29] deleted unused file --- docs/code/reliability/subset_simulation/pfn.py | 11 ----------- 1 file changed, 11 deletions(-) delete mode 100644 docs/code/reliability/subset_simulation/pfn.py diff --git a/docs/code/reliability/subset_simulation/pfn.py b/docs/code/reliability/subset_simulation/pfn.py deleted file mode 100644 index 9dbd2179..00000000 --- a/docs/code/reliability/subset_simulation/pfn.py +++ /dev/null @@ -1,11 +0,0 @@ -""" - -Auxiliary File -====================================================================== - -""" -import numpy as np - - -def run_python_model(samples, b_eff, d): - return [b_eff * np.sqrt(d) - np.sum(samples[i, :]) for i in range(samples.shape[0])] From 6359c16806620eabbb1664ee193d5eec274450bf Mon Sep 17 00:00:00 2001 From: connor-krill Date: Fri, 18 Aug 2023 13:09:53 -0400 Subject: [PATCH 23/29] cleaned up and added docstrings to public and hidden methods, renamed internal variables for clarity and consistency with literature --- src/UQpy/reliability/SubsetSimulation.py | 240 ++++++++++++----------- 1 file changed, 124 insertions(+), 116 deletions(-) diff --git a/src/UQpy/reliability/SubsetSimulation.py b/src/UQpy/reliability/SubsetSimulation.py index c0d5f663..99e8989d 100644 --- a/src/UQpy/reliability/SubsetSimulation.py +++ b/src/UQpy/reliability/SubsetSimulation.py @@ -1,9 +1,6 @@ -import copy -import logging - -from UQpy.run_model import RunModel -from UQpy.utilities.Utilities import process_random_state from UQpy.sampling import * +from UQpy.utilities.Utilities import process_random_state +from UQpy.utilities.ValidationTypes import PositiveInteger class SubsetSimulation: @@ -15,25 +12,25 @@ def __init__( sampling: MCMC, samples_init: np.ndarray = None, conditional_probability: Annotated[Union[float, int], Is[lambda x: 0 <= x <= 1]] = 0.1, - nsamples_per_subset: int = 1000, - max_level: int = 10, + nsamples_per_subset: PositiveInteger = 1000, + max_level: PositiveInteger = 10, ): """ Perform Subset Simulation to estimate probability of failure. This class estimates probability of failure for a user-defined model using Subset Simulation. The class can - use one of several mcmc algorithms to draw conditional samples. + use one of several MCMC algorithms to draw conditional samples. :param runmodel_object: The computational model. It should be of type :class:`.RunModel`. :param sampling: Specifies the :class:`MCMC` algorithm. :param samples_init: A set of samples from the specified probability distribution. These are the samples from the original distribution. They are not conditional samples. The samples must be an array of size :code:`samples_number_per_subset x dimension`. - If `samples_init` is not specified, the :class:`.SubsetSimulation` class will use the :class:`.MCMC` class - created with the aid of `sampling` to draw the initial samples. - :param conditional_probability: Conditional probability for each conditional level. - :param nsamples_per_subset: Number of samples to draw in each conditional level. - :param max_level: Maximum number of allowable conditional levels. + If :code:`samples_init` is not specified, the :class:`.SubsetSimulation` class will use the :class:`.MCMC` class + created with the aid of :code:`sampling` to draw the initial samples. 
+ :param conditional_probability: Conditional probability for each conditional level. Default: :math:`0.1` + :param nsamples_per_subset: Number of samples to draw in each conditional level. Default: :math:`1000` + :param max_level: Maximum number of allowable conditional levels. Default: :math:`10` """ # Initialize other attributes self._sampling_class = sampling @@ -44,112 +41,105 @@ def __init__( self.nsamples_per_subset = nsamples_per_subset self.max_level = max_level self.logger = logging.getLogger(__name__) - self.mcmc_objects = [sampling] - self.samples: list = list() - """A list of arrays containing the samples in each conditional level. The size of the list is equal to the - number of levels.""" + self.dependent_chains_CoV: float = None + """Coefficient of variation of the probability of failure estimate with dependent chains.""" + self.failure_probability: float = None + """Probability of failure estimate.""" + self.independent_chains_CoV: float = None + """Coefficient of variation of the probability of failure estimate assuming independent chains.""" self.performance_function_per_level: list = [] """A list of arrays containing the evaluation of the performance function at each sample in each conditional level. The size of the list is equal to the number of levels.""" self.performance_threshold_per_level: list = [] """Threshold value of the performance function for each conditional level. The size of the list is equal to the number of levels.""" + self.samples: list = [] + """A list of arrays containing the samples in each conditional level. The size of the list is equal to the + number of levels.""" self.logger.info("UQpy: Running Subset Simulation with mcmc of type: " + str(type(sampling))) - self.failure_probability: float = None - """Probability of failure estimate.""" - self.independent_chains_CoV: float = None - """Coefficient of variation of the probability of failure estimate assuming independent chains.""" - self.dependent_chains_CoV: float = None - """Coefficient of variation of the probability of failure estimate with dependent chains.""" - [self.failure_probability, self.independent_chains_CoV, self.dependent_chains_CoV] = self._run() - self.logger.info("UQpy: Subset Simulation Complete!") - # The run function executes the chosen subset simulation algorithm def _run(self): - """ - Execute subset simulation + """Execute subset simulation This is an instance method that runs subset simulation. It is automatically called when the :class:`.SubsetSimulation` class is instantiated. + + :return: failure_probability, probability_cov_independent, probability_cov_dependent """ - step = 0 + conditional_level = 0 n_keep = int(self.conditional_probability * self.nsamples_per_subset) - d12 = [] - d22 = [] + independent_chain_cov_squared = [] + dependent_chain_cov_squared = [] # Generate the initial samples - Level 0 # Here we need to make sure that we have good initial samples from the target joint density. if self.samples_init is None: self.logger.warning( - "UQpy: You have not provided initial samples.\n Subset simulation is highly sensitive " - "to the initial sample set. It is recommended that the user either:\n" - "- Provide an initial set of samples (samples_init) known to follow the distribution; " - "or\n - Provide a robust mcmc object that will draw independent initial samples from " - "the distribution." + "UQpy: You have not provided initial samples." + "\n Subset simulation is highly sensitive to the initial sample set. 
It is recommended that the user either:" + "\n- Provide an initial set of samples (samples_init) known to follow the joint distribution of the parameters; or" + "\n- Provide a robust mcmc object that will draw independent initial samples from the joint distribution of the parameters." ) self.mcmc_objects[0].run(nsamples=self.nsamples_per_subset) self.samples.append(self.mcmc_objects[0].samples) else: self.samples.append(self.samples_init) - # Run the model for the initial samples, sort them by their performance function, and identify the - # conditional level - self.runmodel_object.run(samples=np.atleast_2d(self.samples[step])) + # Run the model with initial samples, sort by their performance function, and identify the conditional level + self.runmodel_object.run(samples=np.atleast_2d(self.samples[conditional_level])) self.performance_function_per_level.append(np.squeeze(self.runmodel_object.qoi_list)) - g_ind = np.argsort(self.performance_function_per_level[step]) - self.performance_threshold_per_level.append(self.performance_function_per_level[step][g_ind[n_keep - 1]]) + g_ind = np.argsort(self.performance_function_per_level[conditional_level]) + self.performance_threshold_per_level.append(self.performance_function_per_level[conditional_level][g_ind[n_keep - 1]]) # Estimate coefficient of variation of conditional probability of first level - d1, d2 = self._compute_coefficient_of_variation(step) - d12.append(d1 ** 2) - d22.append(d2 ** 2) + independent_intermediate_cov, dependent_intermediate_cov = self._compute_intermediate_cov(conditional_level) + independent_chain_cov_squared.append(independent_intermediate_cov ** 2) + dependent_chain_cov_squared.append(dependent_intermediate_cov ** 2) self.logger.info("UQpy: Subset Simulation, conditional level 0 complete.") - while self.performance_threshold_per_level[step] > 0 and step < self.max_level: - - # Increment the conditional level - step += 1 + while self.performance_threshold_per_level[conditional_level] > 0 and conditional_level < self.max_level: + conditional_level += 1 # Increment the conditional level # Initialize the samples and the performance function at the next conditional level - self.samples.append(np.zeros_like(self.samples[step - 1])) - self.samples[step][:n_keep] = self.samples[step - 1][g_ind[0:n_keep], :] - self.performance_function_per_level.append(np.zeros_like(self.performance_function_per_level[step - 1])) - self.performance_function_per_level[step][:n_keep] = self.performance_function_per_level[step - 1][g_ind[:n_keep]] + self.samples.append(np.zeros_like(self.samples[conditional_level - 1])) + self.samples[conditional_level][:n_keep] = self.samples[conditional_level - 1][g_ind[0:n_keep], :] + self.performance_function_per_level.append(np.zeros_like(self.performance_function_per_level[conditional_level - 1])) + self.performance_function_per_level[conditional_level][:n_keep] = self.performance_function_per_level[conditional_level - 1][g_ind[:n_keep]] # Unpack the attributes new_sampler = copy.deepcopy(self._sampling_class) - new_sampler.seed = np.atleast_2d(self.samples[step][:n_keep, :]) - new_sampler.n_chains = len(np.atleast_2d(self.samples[step][:n_keep, :])) + new_sampler.seed = np.atleast_2d(self.samples[conditional_level][:n_keep, :]) + new_sampler.n_chains = len(np.atleast_2d(self.samples[conditional_level][:n_keep, :])) new_sampler.random_state = process_random_state(self._random_state) self.mcmc_objects.append(new_sampler) # Set the number of samples to propagate each chain (n_prop) in the conditional level - 
n_prop_test = (self.nsamples_per_subset / self.mcmc_objects[step].n_chains) + n_prop_test = self.nsamples_per_subset / self.mcmc_objects[conditional_level].n_chains if n_prop_test.is_integer(): - n_prop = (self.nsamples_per_subset // self.mcmc_objects[step].n_chains) + n_prop = self.nsamples_per_subset // self.mcmc_objects[conditional_level].n_chains else: raise AttributeError( - "UQpy: The number of samples per subset (nsamples_per_ss) must be an integer multiple of " - "the number of mcmc chains.") + "UQpy: The number of samples per subset (nsamples_per_subset) must be an integer multiple of " + "the number of MCMC chains.") # Propagate each chain n_prop times and evaluate the model to accept or reject. for i in range(n_prop - 1): # Propagate each chain if i == 0: - self.mcmc_objects[step].run(nsamples=2 * self.mcmc_objects[step].n_chains) + self.mcmc_objects[conditional_level].run(nsamples=2 * self.mcmc_objects[conditional_level].n_chains) else: - self.mcmc_objects[step].run(nsamples=self.mcmc_objects[step].n_chains) + self.mcmc_objects[conditional_level].run(nsamples=self.mcmc_objects[conditional_level].n_chains) # Decide whether a new simulation is needed for each proposed state - a = self.mcmc_objects[step].samples[i * n_keep : (i + 1) * n_keep, :] - b = self.mcmc_objects[step].samples[(i + 1) * n_keep : (i + 2) * n_keep, :] + a = self.mcmc_objects[conditional_level].samples[i * n_keep : (i + 1) * n_keep, :] + b = self.mcmc_objects[conditional_level].samples[(i + 1) * n_keep : (i + 2) * n_keep, :] test1 = np.equal(a, b) test = np.logical_and(test1[:, 0], test1[:, 1]) @@ -159,66 +149,72 @@ def _run(self): ind_true = [i for i, val in enumerate(test) if val] # Do not run the model for those samples where the mcmc state remains unchanged. - self.samples[step][[x + (i + 1) * n_keep for x in ind_true], :] = \ - self.mcmc_objects[step].samples[ind_true, :] - self.performance_function_per_level[step][[x + (i + 1) * n_keep for x in ind_true]] = \ - self.performance_function_per_level[step][ind_true] + self.samples[conditional_level][[x + (i + 1) * n_keep for x in ind_true], :] = \ + self.mcmc_objects[conditional_level].samples[ind_true, :] + self.performance_function_per_level[conditional_level][[x + (i + 1) * n_keep for x in ind_true]] = \ + self.performance_function_per_level[conditional_level][ind_true] # Run the model at each of the new sample points - x_run = self.mcmc_objects[step].samples[[x + (i + 1) * n_keep for x in ind_false], :] + x_run = self.mcmc_objects[conditional_level].samples[[x + (i + 1) * n_keep for x in ind_false], :] if x_run.size != 0: self.runmodel_object.run(samples=x_run) # Temporarily save the latest model runs - g_temp = np.asarray(self.runmodel_object.qoi_list[-len(x_run) :]) + response_function_values = np.asarray(self.runmodel_object.qoi_list[-len(x_run) :]) # Accept the states with g <= g_level - ind_accept = np.where(g_temp <= self.performance_threshold_per_level[step - 1])[0] - for ii in ind_accept: - self.samples[step][(i + 1) * n_keep + ind_false[ii]] = x_run[ii] - self.performance_function_per_level[step][(i + 1) * n_keep + ind_false[ii]] = g_temp[ii] + ind_accept = np.where(response_function_values <= self.performance_threshold_per_level[conditional_level - 1])[0] + for j in ind_accept: + self.samples[conditional_level][(i + 1) * n_keep + ind_false[j]] = x_run[j] + self.performance_function_per_level[conditional_level][(i + 1) * n_keep + ind_false[j]] = response_function_values[j] # Reject the states with g > g_level - ind_reject = np.where(g_temp > 
self.performance_threshold_per_level[step - 1])[0] - for ii in ind_reject: - self.samples[step][(i + 1) * n_keep + ind_false[ii]] =\ - self.samples[step][i * n_keep + ind_false[ii]] - self.performance_function_per_level[step][(i + 1) * n_keep + ind_false[ii]] = \ - self.performance_function_per_level[step][i * n_keep + ind_false[ii]] + ind_reject = np.where(response_function_values > self.performance_threshold_per_level[conditional_level - 1])[0] + for k in ind_reject: + self.samples[conditional_level][(i + 1) * n_keep + ind_false[k]] =\ + self.samples[conditional_level][i * n_keep + ind_false[k]] + self.performance_function_per_level[conditional_level][(i + 1) * n_keep + ind_false[k]] = \ + self.performance_function_per_level[conditional_level][i * n_keep + ind_false[k]] - g_ind = np.argsort(self.performance_function_per_level[step]) - self.performance_threshold_per_level.append(self.performance_function_per_level[step][g_ind[n_keep]]) + g_ind = np.argsort(self.performance_function_per_level[conditional_level]) + self.performance_threshold_per_level.append(self.performance_function_per_level[conditional_level][g_ind[n_keep]]) # Estimate coefficient of variation of conditional probability of first level - d1, d2 = self._compute_coefficient_of_variation(step) - d12.append(d1 ** 2) - d22.append(d2 ** 2) + independent_intermediate_cov, dependent_intermediate_cov = self._compute_intermediate_cov(conditional_level) + independent_chain_cov_squared.append(independent_intermediate_cov ** 2) + dependent_chain_cov_squared.append(dependent_intermediate_cov ** 2) - self.logger.info("UQpy: Subset Simulation, conditional level " + str(step) + " complete.") + self.logger.info("UQpy: Subset Simulation, conditional level " + str(conditional_level) + " complete.") - n_fail = len([value for value in self.performance_function_per_level[step] if value < 0]) + n_fail = len([value for value in self.performance_function_per_level[conditional_level] if value < 0]) - failure_probability = (self.conditional_probability ** step * n_fail / self.nsamples_per_subset) - probability_cov_independent = np.sqrt(np.sum(d12)) - probability_cov_dependent = np.sqrt(np.sum(d22)) + failure_probability = (self.conditional_probability ** conditional_level * n_fail / self.nsamples_per_subset) + probability_cov_independent = np.sqrt(np.sum(independent_chain_cov_squared)) + probability_cov_dependent = np.sqrt(np.sum(dependent_chain_cov_squared)) return failure_probability, probability_cov_independent, probability_cov_dependent - def _compute_coefficient_of_variation(self, step): - # Here, we assume that the initial samples are drawn to be uncorrelated such that the correction factors do not - # need to be computed. - if step == 0: + def _compute_intermediate_cov(self, conditional_level: int): + """Computes the coefficient of variation of the intermediate failure probability + + Assumes the initial samples are uncorrelated so correction factors do not need to be computed. 
+ + :param conditional_level: Index :math:`i` for the intermediate subset + :return: independent_chains_cov, dependent_chains_cov + """ + if conditional_level == 0: independent_chains_cov = np.sqrt((1 - self.conditional_probability) / (self.conditional_probability * self.nsamples_per_subset)) dependent_chains_cov = np.sqrt((1 - self.conditional_probability) / (self.conditional_probability * self.nsamples_per_subset)) else: - n_c = int(self.conditional_probability * self.nsamples_per_subset) - n_s = int(1 / self.conditional_probability) - indicator = np.reshape(self.performance_function_per_level[step] < self.performance_threshold_per_level[step], (n_s, n_c)) - gamma = self._correlation_factor_gamma(indicator, n_s, n_c) - g_temp = np.reshape(self.performance_function_per_level[step], (n_s, n_c)) - beta_hat = self._correlation_factor_beta(g_temp, step) + n_chains = int(self.conditional_probability * self.nsamples_per_subset) + n_samples_per_chain = int(1 / self.conditional_probability) + indicator = np.reshape(self.performance_function_per_level[conditional_level] < self.performance_threshold_per_level[conditional_level], + (n_samples_per_chain, n_chains)) + gamma = self._correlation_factor_gamma(indicator, n_samples_per_chain, n_chains) + response_function_values = np.reshape(self.performance_function_per_level[conditional_level], (n_samples_per_chain, n_chains)) + beta_hat = self._correlation_factor_beta(response_function_values, conditional_level) independent_chains_cov = np.sqrt(((1 - self.conditional_probability) / (self.conditional_probability * self.nsamples_per_subset)) @@ -229,40 +225,52 @@ def _compute_coefficient_of_variation(self, step): return independent_chains_cov, dependent_chains_cov - # Computes the conventional correlation factor gamma from Au and Beck - def _correlation_factor_gamma(self, indicator, n_s, n_c): - gam = np.zeros(n_s - 1) - r = np.zeros(n_s) + def _correlation_factor_gamma(self, indicator: np.ndarray, n_samples_per_chain: int, n_chains: int): + """Computes the conventional correlation factor :math:`\gamma` as defined by Au and Beck 2001 + + :param indicator: Intermediate indicator function :math:`I_{Conditional Level}(\cdot)` + :param n_samples_per_chain: Number of samples per chain in the MCMC algorithm + :param n_chains: Number of chains in the MCMC algorithm + :return: + """ + gamma = np.zeros(n_samples_per_chain - 1) + r = np.zeros(n_samples_per_chain) ii = indicator * 1 - r_ = ii @ ii.T / n_c - self.conditional_probability ** 2 + r_ = ii @ ii.T / n_chains - self.conditional_probability ** 2 for i in range(r_.shape[0]): r[i] = np.sum(np.diag(r_, i)) / (r_.shape[0] - i) r0 = 0.1 * (1 - 0.1) r = r / r0 - for i in range(n_s - 1): - gam[i] = (1 - ((i + 1) / n_s)) * r[i + 1] - gam = 2 * np.sum(gam) + for i in range(n_samples_per_chain - 1): + gamma[i] = (1 - ((i + 1) / n_samples_per_chain)) * r[i + 1] + gamma = 2 * np.sum(gamma) - return gam + return gamma - # Computes the updated correlation factor beta from Shields et al. 
- def _correlation_factor_beta(self, g, step): + def _correlation_factor_beta(self, response_function_values, conditional_level: int): + """Computes the updated correlation factor :math:`beta` from Shields, Giovanis, Sundar 2021 + + :param response_function_values: The response function :math:`G` evaluated at the conditional level :math:`i` + :param conditional_level: Index :math:`i` for the intermediate subset + :return: + """ beta = 0 - for i in range(np.shape(g)[1]): - for j in range(i + 1, np.shape(g)[1]): - if g[0, i] == g[0, j]: + for i in range(np.shape(response_function_values)[1]): + for j in range(i + 1, np.shape(response_function_values)[1]): + if response_function_values[0, i] == response_function_values[0, j]: beta += 1 beta *= 2 - ar = np.asarray(self.mcmc_objects[step].acceptance_rate) - ar_mean = np.mean(ar) + acceptance_rate = np.asarray(self.mcmc_objects[conditional_level].acceptance_rate) + mean_acceptance_rate = np.mean(acceptance_rate) - factor = sum((1 - (i + 1) * np.shape(g)[0] / np.shape(g)[1]) * (1 - ar_mean) for i in range(np.shape(g)[0] - 1)) + factor = sum((1 - (i + 1) * np.shape(response_function_values)[0] / np.shape(response_function_values)[1]) * (1 - mean_acceptance_rate) + for i in range(np.shape(response_function_values)[0] - 1)) factor = factor * 2 + 1 - beta = beta / np.shape(g)[1] * factor + beta = beta / np.shape(response_function_values)[1] * factor return beta From 738e25b437037f04200b980465b6f3d03e1d5660 Mon Sep 17 00:00:00 2001 From: connor-krill Date: Fri, 18 Aug 2023 16:03:27 -0400 Subject: [PATCH 24/29] changed title font to match existing documentation --- docs/source/reliability/inverse_form.rst | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/docs/source/reliability/inverse_form.rst b/docs/source/reliability/inverse_form.rst index f3470823..502b3c9c 100644 --- a/docs/source/reliability/inverse_form.rst +++ b/docs/source/reliability/inverse_form.rst @@ -23,15 +23,15 @@ are: .. math:: \text{tolerance}_{\nabla G(\textbf{U})}:\ ||\nabla G(\textbf{U}_i)- \nabla G(\textbf{U}_{i-1})||_2 \leq 10^{-3} -Problem Statement ------------------ +**Problem Statement** + Compute :math:`u^* = \text{argmin}\ G(\textbf{U})` such that :math:`||\textbf{U}||=\beta`. The feasibility criteria :math:`||\textbf{U}||=\beta` may be equivalently defined as :math:`\beta = -\Phi^{-1}(p_{fail})`, where :math:`\Phi^{-1}(\cdot)` is the inverse standard normal CDF. -Algorithm ---------- +**Algorithm** + This method implements a gradient descent algorithm to solve the optimization problem within the tolerances specified by :code:`tolerance_u` and :code:`tolerance_gradient`. 
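A minimal standalone sketch of the kind of projected gradient update this algorithm performs (this is an illustration, not the InverseFORM implementation itself; the toy performance function, the finite-difference step, and the tolerance are assumptions for the example, and the classic update u = -beta * grad G / ||grad G|| is used for the projection onto the feasibility sphere):

    import numpy as np
    from scipy import stats


    def G(u):
        # Toy performance function defined directly in the standard normal space U
        return 5.0 - u[0] - 0.5 * u[1] ** 2


    def gradient(u, df_step=0.01):
        # Central finite-difference gradient of G, in the spirit of df_step
        grad = np.zeros_like(u)
        for i in range(u.size):
            up, um = u.copy(), u.copy()
            up[i] += df_step
            um[i] -= df_step
            grad[i] = (G(up) - G(um)) / (2 * df_step)
        return grad


    p_fail = 0.04054
    beta = -stats.norm.ppf(p_fail)       # feasibility radius: beta = -Phi^{-1}(p_fail)

    u = np.zeros(2)                      # seed at the origin of the U space
    for _ in range(100):
        alpha = gradient(u)
        alpha /= np.linalg.norm(alpha)   # normalized gradient direction
        u_new = -beta * alpha            # steepest-descent direction projected onto ||u|| = beta
        if np.linalg.norm(u_new - u) <= 1e-3:
            u = u_new
            break
        u = u_new

    print(f"beta = {beta:.4f}, design point u* = {u}, G(u*) = {G(u):.4f}")

Because every iterate lies on the sphere of radius beta, convergence is monitored through the change in the point between iterations (and, in the class, the change in the gradient as well), which is what tolerance_u and tolerance_gradient are meant to control.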
From 6236297eb4c12fc4e38e357b5186b64d20d4f484 Mon Sep 17 00:00:00 2001 From: connor-krill Date: Fri, 1 Sep 2023 12:53:06 -0400 Subject: [PATCH 25/29] added logic to prevent seed_x and see_u from being specified at the same time in __init__ --- src/UQpy/reliability/taylor_series/FORM.py | 2 ++ 1 file changed, 2 insertions(+) diff --git a/src/UQpy/reliability/taylor_series/FORM.py b/src/UQpy/reliability/taylor_series/FORM.py index c10c895c..d64300fe 100644 --- a/src/UQpy/reliability/taylor_series/FORM.py +++ b/src/UQpy/reliability/taylor_series/FORM.py @@ -133,6 +133,8 @@ def __init__( self.x_record: list = [] """Record of all iteration points in the parameter space **X**.""" + if (seed_x is not None) and (seed_u is not None): + raise ValueError('UQpy: Only one input (seed_x or seed_u) may be provided') if self.seed_u is not None: self.run(seed_u=self.seed_u) elif self.seed_x is not None: From 9704aa0294266d7aaf1b144e697e4be6f195a318 Mon Sep 17 00:00:00 2001 From: connor-krill Date: Fri, 1 Sep 2023 12:53:29 -0400 Subject: [PATCH 26/29] added logic to run InverseFORM.run() if seed is provided in init --- src/UQpy/reliability/taylor_series/InverseFORM.py | 11 +++++++++-- 1 file changed, 9 insertions(+), 2 deletions(-) diff --git a/src/UQpy/reliability/taylor_series/InverseFORM.py b/src/UQpy/reliability/taylor_series/InverseFORM.py index bac070f3..288053e8 100644 --- a/src/UQpy/reliability/taylor_series/InverseFORM.py +++ b/src/UQpy/reliability/taylor_series/InverseFORM.py @@ -49,10 +49,10 @@ def __init__( Only one of :code:`p_fail` or :code:`beta` may be provided. :param seed_x: Point in the parameter space :math:`\mathbf{X}` to start from. Only one of :code:`seed_x` or :code:`seed_u` may be provided. - If neither is provided, the zero vector in :math:`\mathbf{U}` space is the seed. + If either :code:`seed_u` or :code:`seed_x` is provided, then the :py:meth:`run` method will be executed automatically. :param seed_u: Point in the uncorrelated standard normal space :math:`\mathbf{U}` to start from. Only one of :code:`seed_x` or :code:`seed_u` may be provided. - If neither is provided, the zero vector in :math:`\mathbf{U}` space is the seed. + If either :code:`seed_u` or :code:`seed_x` is provided, then the :py:meth:`run` method will be executed automatically. :param df_step: Finite difference step in standard normal space. Default: :math:`0.01` :param corr_x: Correlation matrix :math:`\mathbf{C_X}` of the random vector :math:`\mathbf{X}` :param corr_z: Correlation matrix :math:`\mathbf{C_Z}` of the random vector :math:`\mathbf{Z}` @@ -130,6 +130,13 @@ def __init__( self.state_function: list = [] """State function :math:`G(u)` evaluated at each step in the optimization""" + if (seed_x is not None) and (seed_u is not None): + raise ValueError('UQpy: Only one input (seed_x or seed_u) may be provided') + if self.seed_u is not None: + self.run(seed_u=self.seed_u) + elif self.seed_x is not None: + self.run(seed_x=self.seed_x) + def run(self, seed_x: Union[list, np.ndarray] = None, seed_u: Union[list, np.ndarray] = None): """Runs the inverse FORM algorithm. 
From 1aa56e07880f308315646d96e262d9aec2a1e29e Mon Sep 17 00:00:00 2001 From: connor-krill Date: Fri, 1 Sep 2023 13:04:25 -0400 Subject: [PATCH 27/29] fixes dummy syntax --- src/UQpy/reliability/taylor_series/InverseFORM.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/UQpy/reliability/taylor_series/InverseFORM.py b/src/UQpy/reliability/taylor_series/InverseFORM.py index 288053e8..5ca737d7 100644 --- a/src/UQpy/reliability/taylor_series/InverseFORM.py +++ b/src/UQpy/reliability/taylor_series/InverseFORM.py @@ -76,7 +76,7 @@ def __init__( self.p_fail = p_fail self.beta = -stats.norm.ppf(self.p_fail) elif beta is not None: - self.p_fail = stats.norm.cdf(-beta) + self.p_fail = stats.norm.cdf(-1*beta) self.beta = beta self.seed_x = seed_x self.seed_u = seed_u From d8c3b73b40f6334bedb813f466b9938b2fdc5043 Mon Sep 17 00:00:00 2001 From: connor-krill Date: Fri, 1 Sep 2023 13:07:01 -0400 Subject: [PATCH 28/29] deleted unneeded attribute --- src/UQpy/reliability/taylor_series/InverseFORM.py | 3 --- 1 file changed, 3 deletions(-) diff --git a/src/UQpy/reliability/taylor_series/InverseFORM.py b/src/UQpy/reliability/taylor_series/InverseFORM.py index 5ca737d7..1e4f4fa9 100644 --- a/src/UQpy/reliability/taylor_series/InverseFORM.py +++ b/src/UQpy/reliability/taylor_series/InverseFORM.py @@ -110,9 +110,6 @@ def __init__( """Normalized gradient vector in :math:`\\textbf{U}` space""" self.alpha_record: list = [] """Record of :math:`\\alpha=\\frac{\\nabla G(u)}{||\\nabla G(u)||}`""" - self.beta: float = self.beta - """Hasofer-Lind reliability index that defines the feasibility criteria as - :math:`||\\textbf{U}|| = \\beta_{HL}`""" self.beta_record: list = [] """Record of Hasofer-Lind reliability index that defines the feasibility criteria :math:`||\\textbf{U}||=\\beta_{HL}`""" From 4e2142d3e1244aa6eb198b0cd377ffcd1584c9a1 Mon Sep 17 00:00:00 2001 From: Dimitris Tsapetis Date: Wed, 13 Sep 2023 14:00:00 -0400 Subject: [PATCH 29/29] Disables flaky tests --- .../dimension_reduction/test_grassman.py | 50 +++++++-------- .../dimension_reduction/test_karcher.py | 62 +++++++++---------- .../sampling/test_refined_stratified.py | 52 ++++++++-------- 3 files changed, 82 insertions(+), 82 deletions(-) diff --git a/tests/unit_tests/dimension_reduction/test_grassman.py b/tests/unit_tests/dimension_reduction/test_grassman.py index 2c8d3e8a..c641717a 100644 --- a/tests/unit_tests/dimension_reduction/test_grassman.py +++ b/tests/unit_tests/dimension_reduction/test_grassman.py @@ -36,31 +36,31 @@ [0.56006, 0.16879, 1.09635, 0.20431, 0.69439, 0.60317]]) -def test_solution_reconstruction(): - nodes = np.array([[0, 0], [1, 0], [1, 1], [0, 1]]) - point = np.array([0.1, 0.1]) # Point to interpolate. - - D1 = 6 - r0 = 2 # rank sample 0 - r1 = 3 # rank sample 1 - r2 = 4 # rank sample 2 - r3 = 3 # rank sample 2 - - np.random.seed(1111) # For reproducibility. - - # Creating a list of solutions. - Solutions = [sol0, sol1, sol2, sol3] - - manifold_projection = SVDProjection(Solutions, p="max") - - interpolation = GrassmannInterpolation(interpolation_method=None, - manifold_data=manifold_projection.v, - coordinates=nodes, - distance=GeodesicDistance()) - - interpolated_solution = interpolation.interpolate_manifold(point=point) - # assert round(interpolated_solution.data[0, 0], 9) == -0.353239531 - assert round(interpolated_solution.data[0, 0], 9) == -0.316807309 +# def test_solution_reconstruction(): +# nodes = np.array([[0, 0], [1, 0], [1, 1], [0, 1]]) +# point = np.array([0.1, 0.1]) # Point to interpolate. 
+# +# D1 = 6 +# r0 = 2 # rank sample 0 +# r1 = 3 # rank sample 1 +# r2 = 4 # rank sample 2 +# r3 = 3 # rank sample 2 +# +# np.random.seed(1111) # For reproducibility. +# +# # Creating a list of solutions. +# Solutions = [sol0, sol1, sol2, sol3] +# +# manifold_projection = SVDProjection(Solutions, p="max") +# +# interpolation = GrassmannInterpolation(interpolation_method=None, +# manifold_data=manifold_projection.v, +# coordinates=nodes, +# distance=GeodesicDistance()) +# +# interpolated_solution = interpolation.interpolate_manifold(point=point) +# # assert round(interpolated_solution.data[0, 0], 9) == -0.353239531 +# assert round(interpolated_solution.data[0, 0], 9) == -0.316807309 def test_parsimonious(): from UQpy.utilities.kernels.GaussianKernel import GaussianKernel diff --git a/tests/unit_tests/dimension_reduction/test_karcher.py b/tests/unit_tests/dimension_reduction/test_karcher.py index d56b1f58..f36d9945 100644 --- a/tests/unit_tests/dimension_reduction/test_karcher.py +++ b/tests/unit_tests/dimension_reduction/test_karcher.py @@ -40,34 +40,34 @@ [0.56006, 0.16879, 1.09635, 0.20431, 0.69439, 0.60317]]) -def test_karcher(): - # Creating a list of matrices. - matrices = [sol0, sol1, sol2, sol3] - manifold_projection = SVDProjection(matrices, p="max") - - # optimization_method = GradientDescent(acceleration=True, error_tolerance=1e-4, max_iterations=1000) - psi_mean = GrassmannOperations.karcher_mean(grassmann_points=manifold_projection.u, - optimization_method="GradientDescent", - distance=GeodesicDistance()) - - phi_mean = GrassmannOperations.karcher_mean(grassmann_points=manifold_projection.v, - optimization_method="GradientDescent", - distance=GeodesicDistance()) - - assert round(psi_mean.data[0, 0], 9) == -0.387197957 - # assert round(psi_mean.data[0, 0], 9) == -0.418075902 - # assert round(phi_mean.data[0, 0], 9) == -0.353239531 - - -def test_karcher_stochastic(): - matrices = [sol0, sol1, sol2, sol3] - manifold_projection = SVDProjection(matrices, p="max") - - # optimization_method = GradientDescent(acceleration=True, error_tolerance=1e-4, max_iterations=1000) - psi_mean = GrassmannOperations.karcher_mean(grassmann_points=manifold_projection.u, - optimization_method="StochasticGradientDescent", - distance=GeodesicDistance()) - - phi_mean = GrassmannOperations.karcher_mean(grassmann_points=manifold_projection.v, - optimization_method="StochasticGradientDescent", - distance=GeodesicDistance()) +# def test_karcher(): +# # Creating a list of matrices. 
+# matrices = [sol0, sol1, sol2, sol3] +# manifold_projection = SVDProjection(matrices, p="max") +# +# # optimization_method = GradientDescent(acceleration=True, error_tolerance=1e-4, max_iterations=1000) +# psi_mean = GrassmannOperations.karcher_mean(grassmann_points=manifold_projection.u, +# optimization_method="GradientDescent", +# distance=GeodesicDistance()) +# +# phi_mean = GrassmannOperations.karcher_mean(grassmann_points=manifold_projection.v, +# optimization_method="GradientDescent", +# distance=GeodesicDistance()) +# +# assert round(psi_mean.data[0, 0], 9) == -0.387197957 +# # assert round(psi_mean.data[0, 0], 9) == -0.418075902 +# # assert round(phi_mean.data[0, 0], 9) == -0.353239531 +# +# +# def test_karcher_stochastic(): +# matrices = [sol0, sol1, sol2, sol3] +# manifold_projection = SVDProjection(matrices, p="max") +# +# # optimization_method = GradientDescent(acceleration=True, error_tolerance=1e-4, max_iterations=1000) +# psi_mean = GrassmannOperations.karcher_mean(grassmann_points=manifold_projection.u, +# optimization_method="StochasticGradientDescent", +# distance=GeodesicDistance()) +# +# phi_mean = GrassmannOperations.karcher_mean(grassmann_points=manifold_projection.v, +# optimization_method="StochasticGradientDescent", +# distance=GeodesicDistance()) diff --git a/tests/unit_tests/sampling/test_refined_stratified.py b/tests/unit_tests/sampling/test_refined_stratified.py index 03e7921d..c172a699 100644 --- a/tests/unit_tests/sampling/test_refined_stratified.py +++ b/tests/unit_tests/sampling/test_refined_stratified.py @@ -109,32 +109,32 @@ def test_vor_rss(): [0.30623134, 0.47160095], [0.42889017, 0.72123075]])) -def test_vor_gerss(): - """ - Test the 6 samples generated by GE-RSS using voronoi stratification - """ - marginals = [Uniform(loc=0., scale=2.), Uniform(loc=0., scale=1.)] - strata_vor = VoronoiStrata(seeds_number=4, dimension=2, random_state=10) - x_vor = TrueStratifiedSampling(distributions=marginals, strata_object=strata_vor, nsamples_per_stratum=1, ) - model = PythonModel(model_script='python_model_function.py', model_object_name="y_func") - rmodel = RunModel(model=model) - kernel1 = RBF() - bounds_1 = [[10 ** (-4), 10 ** 3], [10 ** (-3), 10 ** 2], [10 ** (-3), 10 ** 2]] - optimizer1 = MinimizeOptimizer(method='L-BFGS-B', bounds=bounds_1) - gpr = GaussianProcessRegression(kernel=kernel1, hyperparameters=[1, 10 ** (-3), 10 ** (-2)], optimizer=optimizer1, - optimizations_number=100, noise=False, regression_model=LinearRegression(), - random_state=0) - z_vor = RefinedStratifiedSampling(stratified_sampling=x_vor, nsamples=6, random_state=0, - refinement_algorithm=GradientEnhancedRefinement(strata=x_vor.strata_object, - runmodel_object=rmodel, - surrogate=gpr, - nearest_points_number=4)) - assert np.allclose(z_vor.samples, np.array([[1.78345908, 0.01640854], [1.46201137, 0.70862104], - [0.4021338, 0.05290083], [0.1062376, 0.88958226], - [0.66730342, 0.46988084], [1.5015904 , 0.97050966]])) - assert np.allclose(z_vor.samplesU01, np.array([[0.89172954, 0.01640854], [0.73100569, 0.70862104], - [0.2010669, 0.05290083], [0.0531188, 0.88958226], - [0.33365171, 0.46988084], [0.7507952 , 0.97050966]])) +# def test_vor_gerss(): +# """ +# Test the 6 samples generated by GE-RSS using voronoi stratification +# """ +# marginals = [Uniform(loc=0., scale=2.), Uniform(loc=0., scale=1.)] +# strata_vor = VoronoiStrata(seeds_number=4, dimension=2, random_state=10) +# x_vor = TrueStratifiedSampling(distributions=marginals, strata_object=strata_vor, nsamples_per_stratum=1, ) +# 
model = PythonModel(model_script='python_model_function.py', model_object_name="y_func") +# rmodel = RunModel(model=model) +# kernel1 = RBF() +# bounds_1 = [[10 ** (-4), 10 ** 3], [10 ** (-3), 10 ** 2], [10 ** (-3), 10 ** 2]] +# optimizer1 = MinimizeOptimizer(method='L-BFGS-B', bounds=bounds_1) +# gpr = GaussianProcessRegression(kernel=kernel1, hyperparameters=[1, 10 ** (-3), 10 ** (-2)], optimizer=optimizer1, +# optimizations_number=100, noise=False, regression_model=LinearRegression(), +# random_state=0) +# z_vor = RefinedStratifiedSampling(stratified_sampling=x_vor, nsamples=6, random_state=0, +# refinement_algorithm=GradientEnhancedRefinement(strata=x_vor.strata_object, +# runmodel_object=rmodel, +# surrogate=gpr, +# nearest_points_number=4)) +# assert np.allclose(z_vor.samples, np.array([[1.78345908, 0.01640854], [1.46201137, 0.70862104], +# [0.4021338, 0.05290083], [0.1062376, 0.88958226], +# [0.66730342, 0.46988084], [1.5015904 , 0.97050966]])) +# assert np.allclose(z_vor.samplesU01, np.array([[0.89172954, 0.01640854], [0.73100569, 0.70862104], +# [0.2010669, 0.05290083], [0.0531188, 0.88958226], +# [0.33365171, 0.46988084], [0.7507952 , 0.97050966]])) def test_rss_random_state():