diff --git a/README.md b/README.md index b6fa94232..bba807333 100644 --- a/README.md +++ b/README.md @@ -46,7 +46,7 @@ pip install git+https://github.com/SMTOrg/smt.git@master ``` # Usage -For examples demonstrating how to use SMT, you can take a look at the [tutorial notebook](tutorial/SMT_Tutorial.ipynb) or go to the 'smt/examples' folder. +For examples demonstrating how to use SMT, you can take a look at the [tutorial notebooks](https://github.com/SMTorg/smt/tree/master/tutorial#readme) or go to the 'smt/examples' folder. # Documentation [Documentation of Surrogate Modeling Toolbox](http://smt.readthedocs.io/en/stable). diff --git a/doc/_src_docs/applications.rst b/doc/_src_docs/applications.rst index 2a3845727..148514cdc 100644 --- a/doc/_src_docs/applications.rst +++ b/doc/_src_docs/applications.rst @@ -8,12 +8,14 @@ Three methods are available: :maxdepth: 1 :titlesonly: + applications/Mixed_Hier_usage + applications/Mixed_Hier_surr applications/moe applications/vfm applications/mfk applications/mfkpls applications/mfkplsk applications/ego - applications/mixed_integer + The intent is to provide applications of surrogate models in higher level methods. diff --git a/doc/_src_docs/applications.rstx b/doc/_src_docs/applications.rstx index f67a71ebb..eb81c1ecd 100644 --- a/doc/_src_docs/applications.rstx +++ b/doc/_src_docs/applications.rstx @@ -8,12 +8,14 @@ Three methods are available: :maxdepth: 1 :titlesonly: + applications/Mixed_Hier_usage + applications/Mixed_Hier_surr applications/moe applications/vfm applications/mfk applications/mfkpls applications/mfkplsk applications/ego - applications/mixed_integer + The intent is to provide applications of surrogate models in higher level methods. diff --git a/doc/_src_docs/applications/mixed_integer.rst b/doc/_src_docs/applications/Mixed_Hier_surr.rst similarity index 54% rename from doc/_src_docs/applications/mixed_integer.rst rename to doc/_src_docs/applications/Mixed_Hier_surr.rst index 5ed5aa7e7..362fcebfc 100644 --- a/doc/_src_docs/applications/mixed_integer.rst +++ b/doc/_src_docs/applications/Mixed_Hier_surr.rst @@ -1,155 +1,4 @@ -.. _Mixed-Integer Sampling and and Variables Types Specifications: - -Mixed-Integer usage (Variables, Sampling and Context) -===================================================== - -SMT provides the ``mixed_integer`` module to adapt existing surrogates to deal with categorical (or enumerate) and ordered variables using continuous relaxation. -For ordered variables, the values are rounded to the nearest values from a provided list. If, instead, bounds are provided, the list will consist of all integers between those bounds. - -The user specifies x feature types through a list of types to be either: - -- ``FLOAT``: a continuous feature, -- ``ORD``: an ordered valued feature, -- or a tuple ``(ENUM, n)`` where n is the number of levels of the catagorical feature (i.e. an enumerate with n values) - -In the case of mixed integer sampling, bounds of each x feature have to be adapted to take into account feature types. While ``FLOAT`` and ``ORD`` feature still have an interval [lower bound, upper bound], the ``ENUM`` features bounds is defined by giving the enumeration/list of possible values (levels). - -For instance, if we have the following ``xtypes``: ``[FLOAT, ORD, (ENUM, 2), (ENUM, 3)]``, a compatible ``xlimits`` could be ``[[0., 4], [-10, 10], ["blue", "red"], ["short", "medium", "long"]]``. - -However, the functioning of ``ORD`` is twofold. 
As previously mentioned, it can be used like [lower bound, upper bound], in this case [0,5] will corresponds to [0,1,2,3,4,5]. But, on the other hand, ``ORD`` can be used as an enumeration/list of possible values (levels), in this case ["0","5","6"] will corresponds to [0,5,6]. - - -Mixed integer sampling method ------------------------------ - -To use a sampling method with mixed integer typed features, the user instanciates a ``MixedIntegerSamplingMethod`` with a given sampling method. -The ``MixedIntegerSamplingMethod`` implements the ``SamplingMethod`` interface and decorates the original sampling method to provide a DOE while conforming to integer and categorical types. - -Example of mixed-integer LHS sampling method -^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ - -.. code-block:: python - - import numpy as np - import matplotlib.pyplot as plt - from matplotlib import colors - - from smt.sampling_methods import LHS - from smt.applications.mixed_integer import ( - FLOAT, - ORD, - ENUM, - MixedIntegerSamplingMethod, - ) - - xtypes = [FLOAT, (ENUM, 2)] - xlimits = [[0.0, 4.0], ["blue", "red"]] - sampling = MixedIntegerSamplingMethod(xtypes, xlimits, LHS, criterion="ese") - - num = 40 - x = sampling(num) - - cmap = colors.ListedColormap(xlimits[1]) - plt.scatter(x[:, 0], np.zeros(num), c=x[:, 1], cmap=cmap) - plt.show() - -.. figure:: mixed_integer_TestMixedInteger_run_mixed_integer_lhs_example.png - :scale: 80 % - :align: center - -Mixed integer context ---------------------- - -the ``MixedIntegerContext`` class helps the user to use mixed integer sampling methods and surrogate models consistently by acting as a factory for those objects given a x specification: (xtypes, xlimits). - - .. autoclass:: smt.applications.mixed_integer.MixedIntegerContext - - .. automethod:: smt.applications.mixed_integer.MixedIntegerContext.__init__ - - .. automethod:: smt.applications.mixed_integer.MixedIntegerContext.build_sampling_method - - .. automethod:: smt.applications.mixed_integer.MixedIntegerContext.build_surrogate_model - - .. automethod:: smt.applications.mixed_integer.MixedIntegerContext.cast_to_discrete_values - - .. automethod:: smt.applications.mixed_integer.MixedIntegerContext.fold_with_enum_index - - .. automethod:: smt.applications.mixed_integer.MixedIntegerContext.unfold_with_enum_mask - - .. automethod:: smt.applications.mixed_integer.MixedIntegerContext.cast_to_mixed_integer - - .. automethod:: smt.applications.mixed_integer.MixedIntegerContext.cast_to_enum_value - -Example of mixed-integer context usage -^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ - -.. 
code-block:: python
-
-   import numpy as np
-   import matplotlib.pyplot as plt
-   from matplotlib import colors
-   from mpl_toolkits.mplot3d import Axes3D
-
-   from smt.surrogate_models import KRG
-   from smt.sampling_methods import LHS, Random
-   from smt.applications.mixed_integer import MixedIntegerContext, FLOAT, ORD, ENUM
-
-   xtypes = [ORD, FLOAT, (ENUM, 4)]
-   xlimits = [[0, 5], [0.0, 4.0], ["blue", "red", "green", "yellow"]]
-
-   def ftest(x):
-       return (x[:, 0] * x[:, 0] + x[:, 1] * x[:, 1]) * (x[:, 2] + 1)
-
-   # context to create consistent DOEs and surrogate
-   mixint = MixedIntegerContext(xtypes, xlimits)
-
-   # DOE for training
-   lhs = mixint.build_sampling_method(LHS, criterion="ese")
-
-   num = mixint.get_unfolded_dimension() * 5
-   print("DOE point nb = {}".format(num))
-   xt = lhs(num)
-   yt = ftest(xt)
-
-   # Surrogate
-   sm = mixint.build_surrogate_model(KRG())
-   sm.set_training_values(xt, yt)
-   sm.train()
-
-   # DOE for validation
-   rand = mixint.build_sampling_method(Random)
-   xv = rand(50)
-   yv = ftest(xv)
-   yp = sm.predict_values(xv)
-
-   plt.plot(yv, yv)
-   plt.plot(yv, yp, "o")
-   plt.xlabel("actual")
-   plt.ylabel("prediction")
-
-   plt.show()
-
-::
-
-   DOE point nb = 30
-   ___________________________________________________________________________
-
-      Evaluation
-
-         # eval points. : 50
-
-      Predicting ...
-      Predicting - done. Time (sec):  0.0000000
-
-      Prediction time/pt. (sec) :  0.0000000
-
-
-.. figure:: mixed_integer_TestMixedInteger_run_mixed_integer_context_example.png
-   :scale: 80 %
-   :align: center
-
-
-.. _Mixed-Integer Surrogates:
+.. _Mixed Integer and hierarchical Surrogates:
 
 Mixed integer surrogate
 =======================
@@ -158,14 +7,16 @@ To use a surrogate with mixed integer constraints, the user instanciates a ``Mix
 The ``MixedIntegerSurrogateModel`` implements the ``SurrogateModel`` interface and decorates the given surrogate while respecting integer and categorical types.
 They are various surrogate models implemented that are described below.
 
-Mixed-Integer Surrogate with Continuous Relaxation
+For Kriging models, several methods to construct the mixed categorical correlation kernel are implemented. As a consequence, the user can instantiate a ``MixedIntegerKrigingModel`` with the chosen kernel.
+
+Mixed Integer Surrogate with Continuous Relaxation
 --------------------------------------------------
 
 For enum variables, as many x features are added as there is enumerated levels for the variables. These new dimensions have [0, 1] bounds and the max of these feature float values will correspond to the choice of one the enum value: this is the so-called "one-hot encoding".
 For instance, for a categorical variable (one feature of x) with three levels ["blue", "red", "green"], 3 continuous float features x0, x1, x2 are created. Thereafter, the value max(x0, x1, x2), for instance, x1, will give "red" as the value for the original categorical feature. Details can be found in [1]_ .
 
-Example of mixed-integer Polynomial (QP) surrogate
+Example of mixed integer Polynomial (QP) surrogate
 ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
 
.. 
code-block:: python @@ -173,19 +24,19 @@ Example of mixed-integer Polynomial (QP) surrogate import numpy as np import matplotlib.pyplot as plt - from smt.surrogate_models import QP - from smt.applications.mixed_integer import MixedIntegerSurrogateModel, ORD + from smt.surrogate_models import QP, XType, XSpecs + from smt.applications.mixed_integer import MixedIntegerSurrogateModel xt = np.array([0.0, 1.0, 2.0, 3.0, 4.0]) yt = np.array([0.0, 1.0, 1.5, 0.5, 1.0]) - # xtypes = [FLOAT, ORD, (ENUM, 3), (ENUM, 2)] - # FLOAT means x1 continuous - # ORD means x2 ordered + # xtypes = [XType.FLOAT, XType.ORD, (ENUM, 3), (ENUM, 2)] + # XType.FLOAT means x1 continuous + # XType.ORD means x2 ordered # (ENUM, 3) means x3, x4 & x5 are 3 levels of the same categorical variable # (ENUM, 2) means x6 & x7 are 2 levels of the same categorical variable - - sm = MixedIntegerSurrogateModel(xtypes=[ORD], xlimits=[[0, 4]], surrogate=QP()) + xspecs = XSpecs(xtypes=[XType.ORD], xlimits=[[0, 4]]) + sm = MixedIntegerSurrogateModel(xspecs=xspecs, surrogate=QP()) sm.set_training_values(xt, yt) sm.train() @@ -214,21 +65,21 @@ Example of mixed-integer Polynomial (QP) surrogate Prediction time/pt. (sec) : 0.0000000 -.. figure:: mixed_integer_TestMixedInteger_run_mixed_integer_qp_example.png +.. figure:: Mixed_Hier_surr_TestMixedInteger_run_mixed_integer_qp_example.png :scale: 80 % :align: center -Mixed-Integer Surrogate with Gower Distance -------------------------------------------- +Mixed Integer Kriging with Gower Distance +----------------------------------------- -Another implemented method is using a basic mixed integer kernel based on the Gower distance between two points. +Another implemented method to tackle mixed integer with Kriging is using a basic mixed integer kernel based on the Gower distance between two points. When constructing the correlation kernel, the distance is redefined as :math:`\Delta= \Delta_{cont} + \Delta_{cat}`, with :math:`\Delta_{cont}` the continuous distance as usual and :math:`\Delta_ {cat}` the categorical distance defined as the number of categorical variables that differs from one point to another. For example, the Gower Distance between ``[1,'red', 'medium']`` and ``[1.2,'red', 'large']`` is :math:`\Delta= 0.2+ (0` ``'red'`` :math:`=` ``'red'`` :math:`+ 1` ``'medium'`` :math:`\neq` ``'large'`` ) :math:`=1.2`. With this distance, a mixed integer kernel can be build. Details can be found in [1]_ . -Example of mixed-integer Gower Distance model +Example of mixed integer Gower Distance model ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ .. 
code-block:: python @@ -236,34 +87,32 @@ Example of mixed-integer Gower Distance model import numpy as np import matplotlib.pyplot as plt - from smt.surrogate_models import KRG, KPLS - from smt.applications.mixed_integer import ( - MixedIntegerSurrogateModel, - ENUM, - ORD, - FLOAT, - GOWER_KERNEL, - ) + from smt.surrogate_models import KRG, XType, XSpecs, MixIntKernelType + from smt.applications.mixed_integer import MixedIntegerKrigingModel xt1 = np.array([[0, 0.0], [0, 2.0], [0, 4.0]]) xt2 = np.array([[1, 0.0], [1, 2.0], [1, 3.0]]) xt3 = np.array([[2, 1.0], [2, 2.0], [2, 4.0]]) xt = np.concatenate((xt1, xt2, xt3), axis=0) - xt[:, 1] = xt[:, 1].astype(np.float) + xt[:, 1] = xt[:, 1].astype(np.float64) yt1 = np.array([0.0, 9.0, 16.0]) yt2 = np.array([0.0, -4, -13.0]) yt3 = np.array([-10, 3, 11.0]) yt = np.concatenate((yt1, yt2, yt3), axis=0) xlimits = [["Blue", "Red", "Green"], [0.0, 4.0]] - xtypes = [(ENUM, 3), FLOAT] + xtypes = [(XType.ENUM, 3), XType.FLOAT] + xspecs = XSpecs(xtypes=xtypes, xlimits=xlimits) # Surrogate - sm = MixedIntegerSurrogateModel( - categorical_kernel=GOWER_KERNEL, - xtypes=xtypes, - xlimits=xlimits, - surrogate=KRG(theta0=[1e-1], corr="squar_exp", n_start=20), + sm = MixedIntegerKrigingModel( + surrogate=KRG( + xspecs=xspecs, + categorical_kernel=MixIntKernelType.GOWER, + theta0=[1e-1], + corr="squar_exp", + n_start=20, + ), ) sm.set_training_values(xt, yt) sm.train() @@ -301,7 +150,7 @@ Example of mixed-integer Gower Distance model fig, axs = plt.subplots(3, figsize=(8, 6)) - axs[0].plot(xt1[:, 1].astype(np.float), yt1, "o", linestyle="None") + axs[0].plot(xt1[:, 1].astype(np.float64), yt1, "o", linestyle="None") axs[0].plot(x_cont, y1, color="Blue") axs[0].fill_between( np.ravel(x_cont), @@ -317,7 +166,7 @@ Example of mixed-integer Gower Distance model bbox_to_anchor=[0, 1], ) axs[1].plot( - xt2[:, 1].astype(np.float), yt2, marker="o", color="r", linestyle="None" + xt2[:, 1].astype(np.float64), yt2, marker="o", color="r", linestyle="None" ) axs[1].plot(x_cont, y2, color="Red") axs[1].fill_between( @@ -334,7 +183,7 @@ Example of mixed-integer Gower Distance model bbox_to_anchor=[0, 1], ) axs[2].plot( - xt3[:, 1].astype(np.float), yt3, marker="o", color="r", linestyle="None" + xt3[:, 1].astype(np.float64), yt3, marker="o", color="r", linestyle="None" ) axs[2].plot(x_cont, y3, color="Green") axs[2].fill_between( @@ -362,9 +211,9 @@ Example of mixed-integer Gower Distance model # eval points. : 100 Predicting ... - Predicting - done. Time (sec): 0.0059862 + Predicting - done. Time (sec): 0.0079916 - Prediction time/pt. (sec) : 0.0000599 + Prediction time/pt. (sec) : 0.0000799 ___________________________________________________________________________ @@ -373,9 +222,9 @@ Example of mixed-integer Gower Distance model # eval points. : 100 Predicting ... - Predicting - done. Time (sec): 0.0049839 + Predicting - done. Time (sec): 0.0069785 - Prediction time/pt. (sec) : 0.0000498 + Prediction time/pt. (sec) : 0.0000698 ___________________________________________________________________________ @@ -384,58 +233,55 @@ Example of mixed-integer Gower Distance model # eval points. : 100 Predicting ... - Predicting - done. Time (sec): 0.0049872 + Predicting - done. Time (sec): 0.0079403 - Prediction time/pt. (sec) : 0.0000499 + Prediction time/pt. (sec) : 0.0000794 -.. figure:: mixed_integer_TestMixedInteger_test_mixed_gower.png +.. 
figure:: Mixed_Hier_surr_TestMixedInteger_run_mixed_gower_example.png :scale: 80 % :align: center +Mixed Integer Kriging with Group Kernel (Homoscedastic Hypersphere) +------------------------------------------------------------------- -Mixed-Integer Surrogate with Group Kernel (Homoscedastic Hypersphere) ---------------------------------------------------------------------- - -This surrogate model consider that the correlation kernel between the levels of a given variable is a symmetric positive definite matrix. The latter matrix is estimated through an hypersphere parametrization depending on several hyperparameters. To finish with, the data correlation matrix is build as the product of the correlation matrices over the various variables. Details can be found in [1]_ . Note that this model is the only one to consider negative correlations between levels ("blue" can be correlated negatively to "red"). +This surrogate model assumes that the correlation kernel between the levels of a given variable is a symmetric positive definite matrix. The latter matrix is estimated through an hypersphere parametrization depending on several hyperparameters. To finish with, the data correlation matrix is build as the product of the correlation matrices over the various variables. Details can be found in [1]_ . Note that this model is the only one to consider negative correlations between levels ("blue" can be correlated negatively to "red"). -Example of mixed-integer Homoscedastic Hypersphere model -^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ +Example of mixed integer Homoscedastic Hypersphere model +^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ .. code-block:: python import numpy as np import matplotlib.pyplot as plt - from smt.surrogate_models import KRG, KPLS - from smt.applications.mixed_integer import ( - MixedIntegerSurrogateModel, - ENUM, - ORD, - FLOAT, - HOMO_HSPHERE_KERNEL, - ) + from smt.surrogate_models import KRG, XType, XSpecs, MixIntKernelType + from smt.applications.mixed_integer import MixedIntegerKrigingModel xt1 = np.array([[0, 0.0], [0, 2.0], [0, 4.0]]) xt2 = np.array([[1, 0.0], [1, 2.0], [1, 3.0]]) xt3 = np.array([[2, 1.0], [2, 2.0], [2, 4.0]]) xt = np.concatenate((xt1, xt2, xt3), axis=0) - xt[:, 1] = xt[:, 1].astype(np.float) + xt[:, 1] = xt[:, 1].astype(np.float64) yt1 = np.array([0.0, 9.0, 16.0]) yt2 = np.array([0.0, -4, -13.0]) yt3 = np.array([-10, 3, 11.0]) yt = np.concatenate((yt1, yt2, yt3), axis=0) xlimits = [["Blue", "Red", "Green"], [0.0, 4.0]] - xtypes = [(ENUM, 3), FLOAT] + xtypes = [(XType.ENUM, 3), XType.FLOAT] + xspecs = XSpecs(xtypes=xtypes, xlimits=xlimits) # Surrogate - sm = MixedIntegerSurrogateModel( - categorical_kernel=HOMO_HSPHERE_KERNEL, - xtypes=xtypes, - xlimits=xlimits, - surrogate=KRG(theta0=[1e-1], corr="squar_exp", n_start=20), + sm = MixedIntegerKrigingModel( + surrogate=KRG( + xspecs=xspecs, + categorical_kernel=MixIntKernelType.HOMO_HSPHERE, + theta0=[1e-1], + corr="squar_exp", + n_start=20, + ), ) sm.set_training_values(xt, yt) sm.train() @@ -473,7 +319,7 @@ Example of mixed-integer Homoscedastic Hypersphere model fig, axs = plt.subplots(3, figsize=(8, 6)) - axs[0].plot(xt1[:, 1].astype(np.float), yt1, "o", linestyle="None") + axs[0].plot(xt1[:, 1].astype(np.float64), yt1, "o", linestyle="None") axs[0].plot(x_cont, y1, color="Blue") axs[0].fill_between( np.ravel(x_cont), @@ -489,7 +335,7 @@ Example of mixed-integer Homoscedastic Hypersphere model bbox_to_anchor=[0, 1], ) axs[1].plot( - xt2[:, 1].astype(np.float), yt2, 
marker="o", color="r", linestyle="None" + xt2[:, 1].astype(np.float64), yt2, marker="o", color="r", linestyle="None" ) axs[1].plot(x_cont, y2, color="Red") axs[1].fill_between( @@ -506,7 +352,7 @@ Example of mixed-integer Homoscedastic Hypersphere model bbox_to_anchor=[0, 1], ) axs[2].plot( - xt3[:, 1].astype(np.float), yt3, marker="o", color="r", linestyle="None" + xt3[:, 1].astype(np.float64), yt3, marker="o", color="r", linestyle="None" ) axs[2].plot(x_cont, y3, color="Green") axs[2].fill_between( @@ -534,9 +380,9 @@ Example of mixed-integer Homoscedastic Hypersphere model # eval points. : 100 Predicting ... - Predicting - done. Time (sec): 0.0069799 + Predicting - done. Time (sec): 0.0089796 - Prediction time/pt. (sec) : 0.0000698 + Prediction time/pt. (sec) : 0.0000898 ___________________________________________________________________________ @@ -545,9 +391,9 @@ Example of mixed-integer Homoscedastic Hypersphere model # eval points. : 100 Predicting ... - Predicting - done. Time (sec): 0.0059888 + Predicting - done. Time (sec): 0.0089800 - Prediction time/pt. (sec) : 0.0000599 + Prediction time/pt. (sec) : 0.0000898 ___________________________________________________________________________ @@ -556,22 +402,22 @@ Example of mixed-integer Homoscedastic Hypersphere model # eval points. : 100 Predicting ... - Predicting - done. Time (sec): 0.0069888 + Predicting - done. Time (sec): 0.0089800 - Prediction time/pt. (sec) : 0.0000699 + Prediction time/pt. (sec) : 0.0000898 -.. figure:: mixed_integer_TestMixedInteger_test_mixed_homo_hyp.png +.. figure:: Mixed_Hier_surr_TestMixedInteger_run_mixed_homo_hyp_example.png :scale: 80 % :align: center -Mixed-Integer Surrogate with Exponential Homoscedastic Hypersphere ------------------------------------------------------------------- +Mixed Integer Kriging with Exponential Homoscedastic Hypersphere +---------------------------------------------------------------- This surrogate model also consider that the correlation kernel between the levels of a given variable is a symmetric positive definite matrix. The latter matrix is estimated through an hypersphere parametrization depending on several hyperparameters. Thereafter, an exponential kernel is applied to the matrix. To finish with, the data correlation matrix is build as the product of the correlation matrices over the various variables. Therefore, this model could not model negative correlation and only works with absolute exponential and Gaussian kernels. Details can be found in [1]_ . -Example of mixed-integer Exponential Homoscedastic Hypersphere model +Example of mixed integer Exponential Homoscedastic Hypersphere model ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ .. 
code-block:: python @@ -579,34 +425,32 @@ Example of mixed-integer Exponential Homoscedastic Hypersphere model import numpy as np import matplotlib.pyplot as plt - from smt.surrogate_models import KRG, KPLS - from smt.applications.mixed_integer import ( - MixedIntegerSurrogateModel, - ENUM, - ORD, - FLOAT, - EXP_HOMO_HSPHERE_KERNEL, - ) + from smt.surrogate_models import KRG, XType, XSpecs, MixIntKernelType + from smt.applications.mixed_integer import MixedIntegerKrigingModel xt1 = np.array([[0, 0.0], [0, 2.0], [0, 4.0]]) xt2 = np.array([[1, 0.0], [1, 2.0], [1, 3.0]]) xt3 = np.array([[2, 1.0], [2, 2.0], [2, 4.0]]) xt = np.concatenate((xt1, xt2, xt3), axis=0) - xt[:, 1] = xt[:, 1].astype(np.float) + xt[:, 1] = xt[:, 1].astype(np.float64) yt1 = np.array([0.0, 9.0, 16.0]) yt2 = np.array([0.0, -4, -13.0]) yt3 = np.array([-10, 3, 11.0]) yt = np.concatenate((yt1, yt2, yt3), axis=0) xlimits = [["Blue", "Red", "Green"], [0.0, 4.0]] - xtypes = [(ENUM, 3), FLOAT] + xtypes = [(XType.ENUM, 3), XType.FLOAT] + xspecs = XSpecs(xtypes=xtypes, xlimits=xlimits) # Surrogate - sm = MixedIntegerSurrogateModel( - categorical_kernel=EXP_HOMO_HSPHERE_KERNEL, - xtypes=xtypes, - xlimits=xlimits, - surrogate=KRG(theta0=[1e-1], corr="squar_exp", n_start=20), + sm = MixedIntegerKrigingModel( + surrogate=KRG( + xspecs=xspecs, + theta0=[1e-1], + corr="squar_exp", + n_start=20, + categorical_kernel=MixIntKernelType.EXP_HOMO_HSPHERE, + ), ) sm.set_training_values(xt, yt) sm.train() @@ -644,7 +488,7 @@ Example of mixed-integer Exponential Homoscedastic Hypersphere model fig, axs = plt.subplots(3, figsize=(8, 6)) - axs[0].plot(xt1[:, 1].astype(np.float), yt1, "o", linestyle="None") + axs[0].plot(xt1[:, 1].astype(np.float64), yt1, "o", linestyle="None") axs[0].plot(x_cont, y1, color="Blue") axs[0].fill_between( np.ravel(x_cont), @@ -660,7 +504,7 @@ Example of mixed-integer Exponential Homoscedastic Hypersphere model bbox_to_anchor=[0, 1], ) axs[1].plot( - xt2[:, 1].astype(np.float), yt2, marker="o", color="r", linestyle="None" + xt2[:, 1].astype(np.float64), yt2, marker="o", color="r", linestyle="None" ) axs[1].plot(x_cont, y2, color="Red") axs[1].fill_between( @@ -677,7 +521,7 @@ Example of mixed-integer Exponential Homoscedastic Hypersphere model bbox_to_anchor=[0, 1], ) axs[2].plot( - xt3[:, 1].astype(np.float), yt3, marker="o", color="r", linestyle="None" + xt3[:, 1].astype(np.float64), yt3, marker="o", color="r", linestyle="None" ) axs[2].plot(x_cont, y3, color="Green") axs[2].fill_between( @@ -705,9 +549,9 @@ Example of mixed-integer Exponential Homoscedastic Hypersphere model # eval points. : 100 Predicting ... - Predicting - done. Time (sec): 0.0063519 + Predicting - done. Time (sec): 0.0089765 - Prediction time/pt. (sec) : 0.0000635 + Prediction time/pt. (sec) : 0.0000898 ___________________________________________________________________________ @@ -716,9 +560,9 @@ Example of mixed-integer Exponential Homoscedastic Hypersphere model # eval points. : 100 Predicting ... - Predicting - done. Time (sec): 0.0069222 + Predicting - done. Time (sec): 0.0099738 - Prediction time/pt. (sec) : 0.0000692 + Prediction time/pt. (sec) : 0.0000997 ___________________________________________________________________________ @@ -727,16 +571,231 @@ Example of mixed-integer Exponential Homoscedastic Hypersphere model # eval points. : 100 Predicting ... - Predicting - done. Time (sec): 0.0060115 + Predicting - done. Time (sec): 0.0089769 - Prediction time/pt. (sec) : 0.0000601 + Prediction time/pt. (sec) : 0.0000898 -.. 
figure:: mixed_integer_TestMixedInteger_test_mixed_homo_gaussian.png +.. figure:: Mixed_Hier_surr_TestMixedInteger_run_mixed_homo_gaussian_example.png :scale: 80 % :align: center +Mixed Integer Kriging with hierarchical variables +------------------------------------------------- + +The class ``XSpecs`` implements the roles, variables and types of the variables. Therefore, by specifying the variables, a ``MixedIntegerKrigingeModel`` for both Hierarchical and mixed categorical variables can be build. More details are given in the usage section. + +Example of mixed integer Kriging with hierarchical variables +^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + +.. code-block:: python + + import numpy as np + from smt.utils.kriging import XSpecs + from smt.applications.mixed_integer import ( + MixedIntegerContext, + MixedIntegerSamplingMethod, + MixedIntegerKrigingModel, + ) + from smt.sampling_methods import LHS + from smt.surrogate_models import ( + KRG, + KPLS, + QP, + XType, + XRole, + MixIntKernelType, + ) + + def f_hv(X): + import numpy as np + + def H(x1, x2, x3, x4, z3, z4, x5, cos_term): + import numpy as np + + h = ( + 53.3108 + + 0.184901 * x1 + - 5.02914 * x1**3 * 10 ** (-6) + + 7.72522 * x1**z3 * 10 ** (-8) + - 0.0870775 * x2 + - 0.106959 * x3 + + 7.98772 * x3**z4 * 10 ** (-6) + + 0.00242482 * x4 + + 1.32851 * x4**3 * 10 ** (-6) + - 0.00146393 * x1 * x2 + - 0.00301588 * x1 * x3 + - 0.00272291 * x1 * x4 + + 0.0017004 * x2 * x3 + + 0.0038428 * x2 * x4 + - 0.000198969 * x3 * x4 + + 1.86025 * x1 * x2 * x3 * 10 ** (-5) + - 1.88719 * x1 * x2 * x4 * 10 ** (-6) + + 2.50923 * x1 * x3 * x4 * 10 ** (-5) + - 5.62199 * x2 * x3 * x4 * 10 ** (-5) + ) + if cos_term: + h += 5.0 * np.cos(2.0 * np.pi * (x5 / 100.0)) - 2.0 + return h + + def f1(x1, x2, z1, z2, z3, z4, x5, cos_term): + c1 = z2 == 0 + c2 = z2 == 1 + c3 = z2 == 2 + + c4 = z3 == 0 + c5 = z3 == 1 + c6 = z3 == 2 + + y = ( + c4 + * ( + c1 * H(x1, x2, 20, 20, z3, z4, x5, cos_term) + + c2 * H(x1, x2, 50, 20, z3, z4, x5, cos_term) + + c3 * H(x1, x2, 80, 20, z3, z4, x5, cos_term) + ) + + c5 + * ( + c1 * H(x1, x2, 20, 50, z3, z4, x5, cos_term) + + c2 * H(x1, x2, 50, 50, z3, z4, x5, cos_term) + + c3 * H(x1, x2, 80, 50, z3, z4, x5, cos_term) + ) + + c6 + * ( + c1 * H(x1, x2, 20, 80, z3, z4, x5, cos_term) + + c2 * H(x1, x2, 50, 80, z3, z4, x5, cos_term) + + c3 * H(x1, x2, 80, 80, z3, z4, x5, cos_term) + ) + ) + return y + + def f2(x1, x2, x3, z2, z3, z4, x5, cos_term): + c1 = z2 == 0 + c2 = z2 == 1 + c3 = z2 == 2 + + y = ( + c1 * H(x1, x2, x3, 20, z3, z4, x5, cos_term) + + c2 * H(x1, x2, x3, 50, z3, z4, x5, cos_term) + + c3 * H(x1, x2, x3, 80, z3, z4, x5, cos_term) + ) + return y + + def f3(x1, x2, x4, z1, z3, z4, x5, cos_term): + c1 = z1 == 0 + c2 = z1 == 1 + c3 = z1 == 2 + + y = ( + c1 * H(x1, x2, 20, x4, z3, z4, x5, cos_term) + + c2 * H(x1, x2, 50, x4, z3, z4, x5, cos_term) + + c3 * H(x1, x2, 80, x4, z3, z4, x5, cos_term) + ) + return y + + y = [] + for x in X: + if x[0] == 0: + y.append( + f1(x[2], x[3], x[7], x[8], x[9], x[10], x[6], cos_term=x[1]) + ) + elif x[0] == 1: + y.append( + f2(x[2], x[3], x[4], x[8], x[9], x[10], x[6], cos_term=x[1]) + ) + elif x[0] == 2: + y.append( + f3(x[2], x[3], x[5], x[7], x[9], x[10], x[6], cos_term=x[1]) + ) + elif x[0] == 3: + y.append( + H(x[2], x[3], x[4], x[5], x[9], x[10], x[6], cos_term=x[1]) + ) + return np.array(y) + + xlimits = [ + ["6,7", "3,7", "4,6", "3,4"], # meta1 ord + [0, 1], # 0 + [0, 100], # 1 + [0, 100], # 2 + [0, 100], # 3 + [0, 100], # 4 + [0, 100], # 5 + [0, 2], # 6 + [0, 2], 
# 7
+            [0, 2],  # 8
+            [0, 2],  # 9
+        ]
+        xroles = [
+            XRole.META,
+            XRole.NEUTRAL,
+            XRole.NEUTRAL,
+            XRole.NEUTRAL,
+            XRole.DECREED,
+            XRole.DECREED,
+            XRole.NEUTRAL,
+            XRole.DECREED,
+            XRole.DECREED,
+            XRole.NEUTRAL,
+            XRole.NEUTRAL,
+        ]
+        # z or x, cos?; x1,x2, x3, x4, x5:cos, z1,z2; exp1,exp2
+
+        xtypes = [
+            (XType.ENUM, 4),
+            XType.ORD,
+            XType.FLOAT,
+            XType.FLOAT,
+            XType.FLOAT,
+            XType.FLOAT,
+            XType.FLOAT,
+            XType.ORD,
+            XType.ORD,
+            XType.ORD,
+            XType.ORD,
+        ]
+        xspecs = XSpecs(xtypes=xtypes, xlimits=xlimits, xroles=xroles)
+        n_doe = 15
+        sampling = MixedIntegerSamplingMethod(
+            LHS, xspecs, criterion="ese", random_state=42
+        )
+        Xt = sampling(n_doe)
+        Yt = f_hv(Xt)
+
+        sm = MixedIntegerKrigingModel(
+            surrogate=KRG(
+                xspecs=xspecs,
+                categorical_kernel=MixIntKernelType.HOMO_HSPHERE,
+                theta0=[1e-2],
+                corr="abs_exp",
+                n_start=5,
+            ),
+        )
+        sm.set_training_values(Xt, Yt)
+        sm.train()
+        y_s = sm.predict_values(Xt)[:, 0]
+        pred_RMSE = np.linalg.norm(y_s - Yt) / len(Yt)
+
+        y_sv = sm.predict_variances(Xt)[:, 0]
+        var_RMSE = np.linalg.norm(y_sv) / len(Yt)
+
+::
+
+   ___________________________________________________________________________
+
+      Evaluation
+
+         # eval points. : 15
+
+      Predicting ...
+      Predicting - done. Time (sec):  0.0159690
+
+      Prediction time/pt. (sec) :  0.0010646
+
+
+
 
 References
 ----------
diff --git a/doc/_src_docs/applications/Mixed_Hier_surr.rstx b/doc/_src_docs/applications/Mixed_Hier_surr.rstx
new file mode 100644
index 000000000..6d4df8e75
--- /dev/null
+++ b/doc/_src_docs/applications/Mixed_Hier_surr.rstx
@@ -0,0 +1,76 @@
+.. _Mixed Integer and hierarchical Surrogates:
+
+Mixed integer surrogate
+=======================
+
+To use a surrogate with mixed integer constraints, the user instantiates a ``MixedIntegerSurrogateModel`` with the given surrogate.
+The ``MixedIntegerSurrogateModel`` implements the ``SurrogateModel`` interface and decorates the given surrogate while respecting integer and categorical types.
+There are various surrogate models implemented, which are described below.
+
+For Kriging models, several methods to construct the mixed categorical correlation kernel are implemented. As a consequence, the user can instantiate a ``MixedIntegerKrigingModel`` with the chosen kernel.
+
+Mixed Integer Surrogate with Continuous Relaxation
+--------------------------------------------------
+
+For enum variables, as many x features are added as there are enumerated levels for the variables. These new dimensions have [0, 1] bounds and the max of these feature float values will correspond to the choice of one of the enum values: this is the so-called "one-hot encoding".
+For instance, for a categorical variable (one feature of x) with three levels ["blue", "red", "green"], 3 continuous float features x0, x1, x2 are created. Thereafter, the value max(x0, x1, x2), for instance x1, will give "red" as the value for the original categorical feature. Details can be found in [1]_ .
+
+
+Example of mixed integer Polynomial (QP) surrogate
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+.. embed-test-print-plot :: smt.applications.tests.test_mixed_integer , TestMixedInteger , run_mixed_integer_qp_example , 80
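+
+To make the decoding step of the continuous relaxation concrete, here is a small illustrative sketch of the one-hot decision rule described above (plain numpy, not an SMT API call):
+
+.. code-block:: python
+
+    import numpy as np
+
+    levels = ["blue", "red", "green"]
+
+    # the single categorical feature is relaxed into 3 continuous
+    # features, each within [0, 1]
+    x_relaxed = np.array([0.2, 0.9, 0.4])
+
+    # the level carrying the largest relaxed value is retained
+    decoded = levels[int(np.argmax(x_relaxed))]
+    print(decoded)  # "red"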
+
+
+Mixed Integer Kriging with Gower Distance
+-----------------------------------------
+
+Another implemented method to tackle mixed integer variables with Kriging uses a basic mixed integer kernel based on the Gower distance between two points.
+When constructing the correlation kernel, the distance is redefined as :math:`\Delta = \Delta_{cont} + \Delta_{cat}`, with :math:`\Delta_{cont}` the continuous distance as usual and :math:`\Delta_{cat}` the categorical distance defined as the number of categorical variables that differ from one point to another.
+
+For example, the Gower Distance between ``[1,'red', 'medium']`` and ``[1.2,'red', 'large']`` is :math:`\Delta = 0.2 + (0` ``'red'`` :math:`=` ``'red'`` :math:`+ 1` ``'medium'`` :math:`\neq` ``'large'`` :math:`) = 1.2`.
+With this distance, a mixed integer kernel can be built. Details can be found in [1]_ .
+
+Example of mixed integer Gower Distance model
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+.. embed-test-print-plot :: smt.applications.tests.test_mixed_integer , TestMixedInteger , run_mixed_gower_example, 80
+
+
+Mixed Integer Kriging with Group Kernel (Homoscedastic Hypersphere)
+-------------------------------------------------------------------
+
+This surrogate model assumes that the correlation kernel between the levels of a given variable is a symmetric positive definite matrix. The latter matrix is estimated through a hypersphere parametrization depending on several hyperparameters. Finally, the data correlation matrix is built as the product of the correlation matrices over the various variables. Details can be found in [1]_ . Note that this model is the only one to consider negative correlations between levels ("blue" can be correlated negatively to "red").
+
+Example of mixed integer Homoscedastic Hypersphere model
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+.. embed-test-print-plot :: smt.applications.tests.test_mixed_integer , TestMixedInteger , run_mixed_homo_hyp_example, 80
+
+
+Mixed Integer Kriging with Exponential Homoscedastic Hypersphere
+----------------------------------------------------------------
+
+This surrogate model also considers the correlation kernel between the levels of a given variable to be a symmetric positive definite matrix, estimated through a hypersphere parametrization depending on several hyperparameters. Thereafter, an exponential kernel is applied to the matrix. Finally, the data correlation matrix is built as the product of the correlation matrices over the various variables. Therefore, this model cannot capture negative correlations, and it only works with absolute exponential and Gaussian kernels. Details can be found in [1]_ .
+
+Example of mixed integer Exponential Homoscedastic Hypersphere model
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+.. embed-test-print-plot :: smt.applications.tests.test_mixed_integer , TestMixedInteger , run_mixed_homo_gaussian_example, 80
+
+
+Mixed Integer Kriging with hierarchical variables
+-------------------------------------------------
+
+The ``XSpecs`` class gathers the types, limits and roles of the variables. Therefore, by specifying the variable roles, a ``MixedIntegerKrigingModel`` handling both hierarchical and mixed categorical variables can be built. More details are given in the usage section.
+
+Example of mixed integer Kriging with hierarchical variables
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+.. embed-test-print :: smt.applications.tests.test_mixed_integer , TestMixedInteger , run_hierarchical_variables_Goldstein
+
+
+References
+----------
+
+.. [1] Saves, P. and Diouane, Y. and Bartoli, N. and Lefebvre, T. and Morlier, J. (2022). A general square exponential kernel to handle mixed-categorical variables for Gaussian process. AIAA Aviation 2022 Forum.
diff --git a/doc/_src_docs/applications/Mixed_Hier_surr_TestMixedInteger_run_hierarchical_variables_Goldstein.png b/doc/_src_docs/applications/Mixed_Hier_surr_TestMixedInteger_run_hierarchical_variables_Goldstein.png
new file mode 100644
index 000000000..cb9e1b8c2
Binary files /dev/null and b/doc/_src_docs/applications/Mixed_Hier_surr_TestMixedInteger_run_hierarchical_variables_Goldstein.png differ
diff --git a/doc/_src_docs/applications/mixed_integer_TestMixedInteger_test_mixed_gower.png b/doc/_src_docs/applications/Mixed_Hier_surr_TestMixedInteger_run_mixed_gower_example.png
similarity index 100%
rename from doc/_src_docs/applications/mixed_integer_TestMixedInteger_test_mixed_gower.png
rename to doc/_src_docs/applications/Mixed_Hier_surr_TestMixedInteger_run_mixed_gower_example.png
diff --git a/doc/_src_docs/applications/mixed_integer_TestMixedInteger_test_mixed_homo_gaussian.png b/doc/_src_docs/applications/Mixed_Hier_surr_TestMixedInteger_run_mixed_homo_gaussian_example.png
similarity index 100%
rename from doc/_src_docs/applications/mixed_integer_TestMixedInteger_test_mixed_homo_gaussian.png
rename to doc/_src_docs/applications/Mixed_Hier_surr_TestMixedInteger_run_mixed_homo_gaussian_example.png
diff --git a/doc/_src_docs/applications/mixed_integer_TestMixedInteger_test_mixed_homo_hyp.png b/doc/_src_docs/applications/Mixed_Hier_surr_TestMixedInteger_run_mixed_homo_hyp_example.png
similarity index 100%
rename from doc/_src_docs/applications/mixed_integer_TestMixedInteger_test_mixed_homo_hyp.png
rename to doc/_src_docs/applications/Mixed_Hier_surr_TestMixedInteger_run_mixed_homo_hyp_example.png
diff --git a/doc/_src_docs/applications/mixed_integer_TestMixedInteger_run_mixed_integer_qp_example.png b/doc/_src_docs/applications/Mixed_Hier_surr_TestMixedInteger_run_mixed_integer_qp_example.png
similarity index 100%
rename from doc/_src_docs/applications/mixed_integer_TestMixedInteger_run_mixed_integer_qp_example.png
rename to doc/_src_docs/applications/Mixed_Hier_surr_TestMixedInteger_run_mixed_integer_qp_example.png
diff --git a/doc/_src_docs/applications/Mixed_Hier_usage.rst b/doc/_src_docs/applications/Mixed_Hier_usage.rst
new file mode 100644
index 000000000..ca44c230a
--- /dev/null
+++ b/doc/_src_docs/applications/Mixed_Hier_usage.rst
@@ -0,0 +1,183 @@
+.. _Mixed Integer and Hierarchical Variables Types Specifications:
+
+Mixed Integer and Hierarchical usage (Variables, Sampling and Context)
+======================================================================
+
+Mixed integer variables types
+-----------------------------
+
+SMT provides the ``mixed_integer`` module to adapt existing surrogates to deal with categorical (or enumerated) and ordered integer variables using continuous relaxation.
+For ordered variables, the values are rounded to the nearest values from a provided list. If, instead, only lower and upper bounds are provided, the list of all possible values will consist of the integer values between those bounds.
+
+The user specifies x feature types through a list of types (illustrated in the sketch below) to be either:
+
+- ``FLOAT``: a continuous feature,
+- ``ORD``: an ordered valued feature,
+- or a tuple ``(ENUM, n)`` where n is the number of levels of the categorical feature (i.e. an enumerate with n values)
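+
+A minimal sketch of such a declaration with the ``XSpecs`` container (the values are the same ones discussed in the paragraphs below):
+
+.. code-block:: python
+
+    from smt.surrogate_models import XType, XSpecs
+
+    # one continuous, one ordered (given by its bounds), one 2-level and
+    # one 3-level categorical feature
+    xtypes = [XType.FLOAT, XType.ORD, (XType.ENUM, 2), (XType.ENUM, 3)]
+    xlimits = [
+        [0.0, 4.0],  # FLOAT: [lower bound, upper bound]
+        [-10, 10],  # ORD: bounds, i.e. the integers -10, -9, ..., 10
+        ["blue", "red"],  # ENUM: list of levels
+        ["short", "medium", "long"],  # ENUM: list of levels
+    ]
+    xspecs = XSpecs(xtypes=xtypes, xlimits=xlimits)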
+
+In the case of mixed integer sampling, the bounds of each x feature have to be adapted to take into account the feature types. While ``FLOAT`` and ``ORD`` features still have an interval [lower bound, upper bound], the bounds of an ``ENUM`` feature are defined by giving the enumeration/list of possible values (levels).
+
+For instance, if we have the following ``xtypes``: ``[FLOAT, ORD, (ENUM, 2), (ENUM, 3)]``, a compatible ``xlimits`` could be ``[[0., 4], [-10, 10], ["blue", "red"], ["short", "medium", "long"]]``.
+
+However, the behavior of ``ORD`` is twofold. As previously mentioned, it can be used with [lower bound, upper bound]: in this case [0,5] will correspond to [0,1,2,3,4,5]. On the other hand, ``ORD`` can be given an enumeration/list of possible values (levels): in this case ["0","5","6"] will correspond to [0,5,6]. Note that these ordered values should be string representations of integers. Details can be found in [1]_ .
+
+Hierarchical variables roles
+----------------------------
+
+The ``mixed_integer`` module uses the framework of Audet et al. [2]_ to manage both mixed variables and hierarchical variables. We distinguish dimensional (or meta) variables, a special type of variables that may affect the dimension of the problem and decide whether some other, decreed, variables are included or excluded. The variable-size problem can also include neutral variables that are always included and active.
+
+The user specifies x feature roles through a list of roles amongst:
+
+- ``META``: a dimensional feature,
+- ``DECREED``: an ordered or continuous decreed feature, either included or excluded in the variable-size problem,
+- ``NEUTRAL``: a neutral feature, part of the fixed-size problem
+
+Note that we do not consider decreed categorical variables: decreed variables are always continuous or ordered.
+
+Mixed and hierarchical specifications
+-------------------------------------
+
+The ``XSpecs`` class gathers the types, limits and roles of the variables as follows.
+
+ .. autoclass:: smt.utils.kriging.XSpecs
+
+Mixed integer sampling method
+-----------------------------
+
+To use a sampling method with mixed integer typed features, the user instantiates a ``MixedIntegerSamplingMethod`` with a given sampling method.
+The ``MixedIntegerSamplingMethod`` implements the ``SamplingMethod`` interface and decorates the original sampling method to provide a DOE while conforming to integer and categorical types.
+
+Example of mixed integer LHS sampling method
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+.. code-block:: python
+
+    import numpy as np
+    import matplotlib.pyplot as plt
+    from matplotlib import colors
+
+    from smt.sampling_methods import LHS
+    from smt.surrogate_models import XType, XSpecs
+    from smt.applications.mixed_integer import MixedIntegerSamplingMethod
+
+    xtypes = [XType.FLOAT, (XType.ENUM, 2)]
+    xlimits = [[0.0, 4.0], ["blue", "red"]]
+    xspecs = XSpecs(xtypes=xtypes, xlimits=xlimits)
+
+    sampling = MixedIntegerSamplingMethod(LHS, xspecs, criterion="ese")
+
+    num = 40
+    x = sampling(num)
+
+    cmap = colors.ListedColormap(xlimits[1])
+    plt.scatter(x[:, 0], np.zeros(num), c=x[:, 1], cmap=cmap)
+    plt.show()
+
+.. figure:: Mixed_Hier_usage_TestMixedInteger_run_mixed_integer_lhs_example.png
+   :scale: 80 %
+   :align: center
+
+Mixed integer context
+---------------------
+
+The ``MixedIntegerContext`` class helps the user to use mixed integer sampling methods and surrogate models consistently by acting as a factory for those objects given an x specification (xtypes, xlimits).
+
+ .. autoclass:: smt.applications.mixed_integer.MixedIntegerContext
+
+ .. automethod:: smt.applications.mixed_integer.MixedIntegerContext.__init__
+
+ .. automethod:: smt.applications.mixed_integer.MixedIntegerContext.build_sampling_method
+
+ .. automethod:: smt.applications.mixed_integer.MixedIntegerContext.build_surrogate_model
+
+ .. automethod:: smt.applications.mixed_integer.MixedIntegerContext.cast_to_discrete_values
+
+ .. automethod:: smt.applications.mixed_integer.MixedIntegerContext.fold_with_enum_index
+
+ .. automethod:: smt.applications.mixed_integer.MixedIntegerContext.unfold_with_enum_mask
+
+ .. automethod:: smt.applications.mixed_integer.MixedIntegerContext.cast_to_mixed_integer
+
+ .. automethod:: smt.applications.mixed_integer.MixedIntegerContext.cast_to_enum_value
+
+Example of mixed integer context usage
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+.. code-block:: python
+
+    import numpy as np
+    import matplotlib.pyplot as plt
+    from matplotlib import colors
+    from mpl_toolkits.mplot3d import Axes3D
+
+    from smt.sampling_methods import LHS, Random
+    from smt.surrogate_models import KRG, XType, XSpecs
+    from smt.applications.mixed_integer import MixedIntegerContext
+
+    xtypes = [XType.ORD, XType.FLOAT, (XType.ENUM, 4)]
+    xlimits = [[0, 5], [0.0, 4.0], ["blue", "red", "green", "yellow"]]
+    xspecs = XSpecs(xtypes=xtypes, xlimits=xlimits)
+
+    def ftest(x):
+        return (x[:, 0] * x[:, 0] + x[:, 1] * x[:, 1]) * (x[:, 2] + 1)
+
+    # context to create consistent DOEs and surrogate
+    mixint = MixedIntegerContext(xspecs=xspecs)
+
+    # DOE for training
+    lhs = mixint.build_sampling_method(LHS, criterion="ese")
+
+    num = mixint.get_unfolded_dimension() * 5
+    print("DOE point nb = {}".format(num))
+    xt = lhs(num)
+    yt = ftest(xt)
+
+    # Surrogate
+    sm = mixint.build_kriging_model(KRG())
+    sm.set_training_values(xt, yt)
+    sm.train()
+
+    # DOE for validation
+    rand = mixint.build_sampling_method(Random)
+    xv = rand(50)
+    yv = ftest(xv)
+    yp = sm.predict_values(xv)
+
+    plt.plot(yv, yv)
+    plt.plot(yv, yp, "o")
+    plt.xlabel("actual")
+    plt.ylabel("prediction")
+
+    plt.show()
+
+::
+
+   DOE point nb = 15
+   ___________________________________________________________________________
+
+      Evaluation
+
+         # eval points. : 50
+
+      Predicting ...
+      Predicting - done. Time (sec):  0.0009973
+
+      Prediction time/pt. (sec) :  0.0000199
+
+
+.. figure:: Mixed_Hier_usage_TestMixedInteger_run_mixed_integer_context_example.png
+   :scale: 80 %
+   :align: center
+
+References
+----------
+
+.. [1] Saves, P. and Diouane, Y. and Bartoli, N. and Lefebvre, T. and Morlier, J. (2022). A general square exponential kernel to handle mixed-categorical variables for Gaussian process. AIAA Aviation 2022 Forum.
+
+.. [2] Audet, C., Hallé-Hannan, E. and Le Digabel, S. A General Mathematical Framework for Constrained Mixed-variable Blackbox Optimization Problems with Meta and Categorical Variables. Oper. Res. Forum 4, 12 (2023).
diff --git a/doc/_src_docs/applications/Mixed_Hier_usage.rstx b/doc/_src_docs/applications/Mixed_Hier_usage.rstx
new file mode 100644
index 000000000..d1aacee51
--- /dev/null
+++ b/doc/_src_docs/applications/Mixed_Hier_usage.rstx
@@ -0,0 +1,94 @@
+.. _Mixed Integer and Hierarchical Variables Types Specifications:
+
+Mixed Integer and Hierarchical usage (Variables, Sampling and Context)
+======================================================================
+
+Mixed integer variables types
+-----------------------------
+
+SMT provides the ``mixed_integer`` module to adapt existing surrogates to deal with categorical (or enumerated) and ordered integer variables using continuous relaxation.
+For ordered variables, the values are rounded to the nearest values from a provided list. If, instead, only lower and upper bounds are provided, the list of all possible values will consist of the integer values between those bounds.
+
+The user specifies x feature types through a list of types to be either:
+
+- ``FLOAT``: a continuous feature,
+- ``ORD``: an ordered valued feature,
+- or a tuple ``(ENUM, n)`` where n is the number of levels of the categorical feature (i.e. an enumerate with n values)
+
+In the case of mixed integer sampling, the bounds of each x feature have to be adapted to take into account the feature types. While ``FLOAT`` and ``ORD`` features still have an interval [lower bound, upper bound], the bounds of an ``ENUM`` feature are defined by giving the enumeration/list of possible values (levels).
+
+For instance, if we have the following ``xtypes``: ``[FLOAT, ORD, (ENUM, 2), (ENUM, 3)]``, a compatible ``xlimits`` could be ``[[0., 4], [-10, 10], ["blue", "red"], ["short", "medium", "long"]]``.
+
+However, the behavior of ``ORD`` is twofold. As previously mentioned, it can be used with [lower bound, upper bound]: in this case [0,5] will correspond to [0,1,2,3,4,5]. On the other hand, ``ORD`` can be given an enumeration/list of possible values (levels): in this case ["0","5","6"] will correspond to [0,5,6]. Note that these ordered values should be string representations of integers. Details can be found in [1]_ .
+
+Hierarchical variables roles
+----------------------------
+
+The ``mixed_integer`` module uses the framework of Audet et al. [2]_ to manage both mixed variables and hierarchical variables. We distinguish dimensional (or meta) variables, a special type of variables that may affect the dimension of the problem and decide whether some other, decreed, variables are included or excluded. The variable-size problem can also include neutral variables that are always included and active.
+
+The user specifies x feature roles through a list of roles amongst (see the sketch after this list):
+
+- ``META``: a dimensional feature,
+- ``DECREED``: an ordered or continuous decreed feature, either included or excluded in the variable-size problem,
+- ``NEUTRAL``: a neutral feature, part of the fixed-size problem
+
+Note that we do not consider decreed categorical variables: decreed variables are always continuous or ordered.
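+
+To make the roles concrete, here is a minimal, illustrative sketch (the toy problem and variable values are hypothetical, not from SMT) of a specification where a meta variable decides whether a decreed continuous variable is active:
+
+.. code-block:: python
+
+    from smt.surrogate_models import XType, XRole
+    from smt.utils.kriging import XSpecs
+
+    # z is a 2-level meta variable; x1 is always active (neutral); x2 is
+    # decreed, i.e. only present for some values of z
+    xtypes = [(XType.ENUM, 2), XType.FLOAT, XType.FLOAT]
+    xlimits = [["config_a", "config_b"], [0.0, 1.0], [0.0, 1.0]]
+    xroles = [XRole.META, XRole.NEUTRAL, XRole.DECREED]
+
+    xspecs = XSpecs(xtypes=xtypes, xlimits=xlimits, xroles=xroles)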
+
+Mixed and hierarchical specifications
+-------------------------------------
+
+The ``XSpecs`` class gathers the types, limits and roles of the variables as follows.
+
+ .. autoclass:: smt.utils.kriging.XSpecs
+
+Mixed integer sampling method
+-----------------------------
+
+To use a sampling method with mixed integer typed features, the user instantiates a ``MixedIntegerSamplingMethod`` with a given sampling method.
+The ``MixedIntegerSamplingMethod`` implements the ``SamplingMethod`` interface and decorates the original sampling method to provide a DOE while conforming to integer and categorical types.
+
+Example of mixed integer LHS sampling method
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+.. embed-test-print-plot :: smt.applications.tests.test_mixed_integer , TestMixedInteger , run_mixed_integer_lhs_example , 80
+
+Mixed integer context
+---------------------
+
+The ``MixedIntegerContext`` class helps the user to use mixed integer sampling methods and surrogate models consistently by acting as a factory for those objects given an x specification (xtypes, xlimits).
+
+ .. autoclass:: smt.applications.mixed_integer.MixedIntegerContext
+
+ .. automethod:: smt.applications.mixed_integer.MixedIntegerContext.__init__
+
+ .. automethod:: smt.applications.mixed_integer.MixedIntegerContext.build_sampling_method
+
+ .. automethod:: smt.applications.mixed_integer.MixedIntegerContext.build_surrogate_model
+
+ .. automethod:: smt.applications.mixed_integer.MixedIntegerContext.cast_to_discrete_values
+
+ .. automethod:: smt.applications.mixed_integer.MixedIntegerContext.fold_with_enum_index
+
+ .. automethod:: smt.applications.mixed_integer.MixedIntegerContext.unfold_with_enum_mask
+
+ .. automethod:: smt.applications.mixed_integer.MixedIntegerContext.cast_to_mixed_integer
+
+ .. automethod:: smt.applications.mixed_integer.MixedIntegerContext.cast_to_enum_value
+
+Example of mixed integer context usage
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+.. embed-test-print-plot :: smt.applications.tests.test_mixed_integer , TestMixedInteger , run_mixed_integer_context_example , 80
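+
+The casting and folding helpers listed above convert between the folded representation (one column per variable, as in ``xlimits``) and the unfolded one-hot representation used by the continuous relaxation. The following is a minimal sketch, assuming ``cast_to_mixed_integer`` accepts the DOE array returned by the sampler; the exact signatures should be checked against the API reference above, and the printed values are illustrative:
+
+.. code-block:: python
+
+    from smt.sampling_methods import LHS
+    from smt.surrogate_models import XType, XSpecs
+    from smt.applications.mixed_integer import MixedIntegerContext
+
+    # same specification as in the context example above
+    xspecs = XSpecs(
+        xtypes=[XType.ORD, XType.FLOAT, (XType.ENUM, 4)],
+        xlimits=[[0, 5], [0.0, 4.0], ["blue", "red", "green", "yellow"]],
+    )
+    mixint = MixedIntegerContext(xspecs=xspecs)
+
+    # 1 (ord) + 1 (float) + 4 (one-hot) columns in the unfolded space
+    print(mixint.get_unfolded_dimension())  # 6
+
+    lhs = mixint.build_sampling_method(LHS, criterion="ese")
+    x = lhs(5)  # folded DOE: the enum column holds the level index
+
+    # assumed usage: map folded samples back to typed values,
+    # e.g. [3, 1.5, "green"]
+    print(mixint.cast_to_mixed_integer(x))
+
+References
+----------
+
+.. [1] Saves, P. and Diouane, Y. and Bartoli, N. and Lefebvre, T. and Morlier, J. (2022). A general square exponential kernel to handle mixed-categorical variables for Gaussian process. AIAA Aviation 2022 Forum.
+
+.. [2] Audet, C., Hallé-Hannan, E. and Le Digabel, S. A General Mathematical Framework for Constrained Mixed-variable Blackbox Optimization Problems with Meta and Categorical Variables. Oper. Res. Forum 4, 12 (2023).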
diff --git a/doc/_src_docs/applications/Mixed_Hier_usage_TestMixedInteger_run_mixed_integer_context_example.png b/doc/_src_docs/applications/Mixed_Hier_usage_TestMixedInteger_run_mixed_integer_context_example.png new file mode 100644 index 000000000..36122104a Binary files /dev/null and b/doc/_src_docs/applications/Mixed_Hier_usage_TestMixedInteger_run_mixed_integer_context_example.png differ diff --git a/doc/_src_docs/applications/Mixed_Hier_usage_TestMixedInteger_run_mixed_integer_lhs_example.png b/doc/_src_docs/applications/Mixed_Hier_usage_TestMixedInteger_run_mixed_integer_lhs_example.png new file mode 100644 index 000000000..4ebdf8e01 Binary files /dev/null and b/doc/_src_docs/applications/Mixed_Hier_usage_TestMixedInteger_run_mixed_integer_lhs_example.png differ diff --git a/doc/_src_docs/applications/ego.rst b/doc/_src_docs/applications/ego.rst index 3a60e75bb..09282c790 100644 --- a/doc/_src_docs/applications/ego.rst +++ b/doc/_src_docs/applications/ego.rst @@ -199,6 +199,7 @@ Usage import numpy as np from smt.applications import EGO + from smt.surrogate_models import KRG, XSpecs import matplotlib.pyplot as plt def function_test_1d(x): @@ -212,12 +213,18 @@ Usage n_iter = 6 xlimits = np.array([[0.0, 25.0]]) + xspecs = XSpecs(xlimits=xlimits) xdoe = np.atleast_2d([0, 7, 25]).T n_doe = xdoe.size criterion = "EI" #'EI' or 'SBO' or 'LCB' - ego = EGO(n_iter=n_iter, criterion=criterion, xdoe=xdoe, xlimits=xlimits) + ego = EGO( + n_iter=n_iter, + criterion=criterion, + xdoe=xdoe, + surrogate=KRG(xspecs=xspecs, print_global=False), + ) x_opt, y_opt, _, x_data, y_data = ego.optimize(fun=function_test_1d) print("Minimum in x={:.1f} with f(x)={:.1f}".format(float(x_opt), float(y_opt))) @@ -287,14 +294,10 @@ Usage with parallel options import numpy as np from smt.applications import EGO - from smt.applications.ego import EGO, Evaluator - from smt.sampling_methods import FullFactorial + from smt.applications.ego import Evaluator + from smt.surrogate_models import KRG, XSpecs - import sklearn import matplotlib.pyplot as plt - from matplotlib import colors - from mpl_toolkits.mplot3d import Axes3D - from scipy.stats import norm def function_test_1d(x): # function xsinx @@ -309,6 +312,7 @@ Usage with parallel options n_parallel = 3 n_start = 50 xlimits = np.array([[0.0, 25.0]]) + xspecs = XSpecs(xlimits=xlimits) xdoe = np.atleast_2d([0, 7, 25]).T n_doe = xdoe.size @@ -343,7 +347,7 @@ Usage with parallel options n_iter=n_iter, criterion=criterion, xdoe=xdoe, - xlimits=xlimits, + surrogate=KRG(xspecs=xspecs, print_global=False), n_parallel=n_parallel, qEI=qEI, n_start=n_start, @@ -439,11 +443,11 @@ Usage with mixed variable import numpy as np from smt.applications import EGO - from smt.applications.mixed_integer import ( - MixedIntegerContext, - FLOAT, - ENUM, - ORD, + from smt.applications.mixed_integer import MixedIntegerContext + from smt.surrogate_models import ( + XType, + MixIntKernelType, + XSpecs, ) import matplotlib.pyplot as plt from smt.surrogate_models import KRG @@ -473,17 +477,22 @@ Usage with mixed variable + (x2 + 2 * x3 + 3 * x4) * x6 * 0.95 * x1 + i ) - return y + return y.reshape((-1, 1)) n_iter = 15 - xtypes = [FLOAT, (ENUM, 3), (ENUM, 2), ORD] + xtypes = [XType.FLOAT, (XType.ENUM, 3), (XType.ENUM, 2), XType.ORD] xlimits = np.array( - [[-5, 5], ["red", "green", "blue"], ["square", "circle"], [0, 2]] + [[-5, 5], ["red", "green", "blue"], ["square", "circle"], [0, 2]], + dtype="object", ) + xspecs = XSpecs(xtypes=xtypes, xlimits=xlimits) + criterion = "EI" #'EI' or 'SBO' or 'LCB' 
- qEI = "KB" - sm = KRG(print_global=False) - mixint = MixedIntegerContext(xtypes, xlimits) + qEI = "KBRand" + sm = KRG( + xspecs=xspecs, categorical_kernel=MixIntKernelType.GOWER, print_global=False + ) + mixint = MixedIntegerContext(xspecs) n_doe = 3 sampling = mixint.build_sampling_method(LHS, criterion="ese", random_state=42) xdoe = sampling(n_doe) @@ -494,10 +503,9 @@ Usage with mixed variable criterion=criterion, xdoe=xdoe, ydoe=ydoe, - xtypes=xtypes, - xlimits=xlimits, surrogate=sm, qEI=qEI, + n_parallel=2, random_state=42, ) @@ -523,8 +531,8 @@ Usage with mixed variable :: - Minimum in x=[-5. 2. 1. 1.] with f(x)=-13.2 - Minimum in typed x=[-5.0, 'blue', 'circle', 1] + Minimum in x=[-5. 2. 1. 0.] with f(x)=-14.2 + Minimum in typed x=[-5.0, 'blue', 'circle', 0] .. figure:: ego_TestEGO_run_ego_mixed_integer_example.png :scale: 80 % @@ -580,7 +588,7 @@ Options - ['str'] - Approximated q-EI maximization strategy * - evaluator - - + - - None - ['Evaluator'] - Object used to run function fun to optimize at x points (nsamples, nxdim) @@ -599,11 +607,6 @@ Options - None - ['ndarray'] - Initial doe outputs - * - xlimits - - None - - None - - ['ndarray'] - - Bounds of function fun inputs * - verbose - False - None @@ -614,21 +617,11 @@ Options - None - ['bool'] - Enable the penalization of points that have been already evaluated in EI criterion - * - categorical_kernel - - None - - ['gower_matrix_kernel', 'exponential_homoscedastic_matrix_kernel', 'homoscedastic_matrix_kernel', 'continuous_relaxation_matrix_kernel'] - - None - - The kernel to use for categorical inputs. Only for non continuous Kriging. * - surrogate - - + - - None - ['KRG', 'KPLS', 'KPLSK', 'GEKPLS', 'MGP'] - SMT kriging-based surrogate model used internaly - * - xtypes - - None - - None - - ['list'] - - x type specifications: either FLOAT for continuous, INT for integer or (ENUM n) for categorical doimension with n levels * - random_state - None - None diff --git a/doc/_src_docs/applications/ego_TestEGO_run_ego_example.png b/doc/_src_docs/applications/ego_TestEGO_run_ego_example.png index 969d09c90..5ee5ee52b 100644 Binary files a/doc/_src_docs/applications/ego_TestEGO_run_ego_example.png and b/doc/_src_docs/applications/ego_TestEGO_run_ego_example.png differ diff --git a/doc/_src_docs/applications/ego_TestEGO_run_ego_mixed_integer_example.png b/doc/_src_docs/applications/ego_TestEGO_run_ego_mixed_integer_example.png index 737248775..eb6f31cfa 100644 Binary files a/doc/_src_docs/applications/ego_TestEGO_run_ego_mixed_integer_example.png and b/doc/_src_docs/applications/ego_TestEGO_run_ego_mixed_integer_example.png differ diff --git a/doc/_src_docs/applications/ego_TestEGO_run_ego_parallel_example.png b/doc/_src_docs/applications/ego_TestEGO_run_ego_parallel_example.png index 51eb775b5..b7c9de2ec 100644 Binary files a/doc/_src_docs/applications/ego_TestEGO_run_ego_parallel_example.png and b/doc/_src_docs/applications/ego_TestEGO_run_ego_parallel_example.png differ diff --git a/doc/_src_docs/applications/mfk.rst b/doc/_src_docs/applications/mfk.rst index 01ff9ddf5..5dd368681 100644 --- a/doc/_src_docs/applications/mfk.rst +++ b/doc/_src_docs/applications/mfk.rst @@ -103,7 +103,7 @@ Usage Training Training ... - Training - done. Time (sec): 0.0940485 + Training - done. Time (sec): 0.1007385 ___________________________________________________________________________ Evaluation @@ -122,9 +122,9 @@ Usage # eval points. : 101 Predicting ... - Predicting - done. Time (sec): 0.0010331 + Predicting - done. 
diff --git a/doc/_src_docs/applications/mfk.rst b/doc/_src_docs/applications/mfk.rst
index 01ff9ddf5..5dd368681 100644
--- a/doc/_src_docs/applications/mfk.rst
+++ b/doc/_src_docs/applications/mfk.rst
@@ -103,7 +103,7 @@ Usage
       Training
 
       Training ...
-      Training - done. Time (sec):  0.0940485
+      Training - done. Time (sec):  0.1007385
    ___________________________________________________________________________
 
       Evaluation
@@ -122,9 +122,9 @@ Usage
          # eval points.   : 101
 
       Predicting ...
-      Predicting - done. Time (sec):  0.0010331
+      Predicting - done. Time (sec):  0.0000000
 
-      Prediction time/pt. (sec) :  0.0000102
+      Prediction time/pt. (sec) :  0.0000000
 
 
 .. figure:: mfk_TestMFK_run_mfk_example.png
@@ -181,14 +181,9 @@ Options
     - Correlation function type
   * - categorical_kernel
     - None
-    - ['continuous_relaxation_matrix_kernel', 'gower_matrix_kernel', 'exponential_homoscedastic_matrix_kernel', 'homoscedastic_matrix_kernel']
+    - ['MixIntKernelType.CONT_RELAX_KERNEL', 'MixIntKernelType.GOWER', 'MixIntKernelType.EXP_HOMO_HSPHERE', 'MixIntKernelType.HOMO_HSPHERE']
     - None
     - The kernel to use for categorical inputs. Only for non continuous Kriging
-  * - xtypes
-    - None
-    - None
-    - ['list']
-    - x type specifications: either FLOAT for continuous, INT for integer or (ENUM n) for categorical dimension with n levels
   * - nugget
     - 2.220446049250313e-14
     - None
@@ -234,6 +229,15 @@ Options
     - None
     - ['int']
     - number of optimizer runs (multistart method)
+  * - xspecs
+    - None
+    - None
+    - ['XSpecs']
+    - xspecs: x specifications including
+      xtypes: list of either FLOAT, ORD or (ENUM, n) specs
+      xlimits: array-like bounds of x features
   * - rho_regr
     - constant
     - ['constant', 'linear', 'quadratic']
diff --git a/doc/_src_docs/applications/mfkpls.rst b/doc/_src_docs/applications/mfkpls.rst
index 3e9e3b4ef..211128b1a 100644
--- a/doc/_src_docs/applications/mfkpls.rst
+++ b/doc/_src_docs/applications/mfkpls.rst
@@ -108,7 +108,7 @@ Usage
       Training
 
       Training ...
-      Training - done. Time (sec):  0.1016924
+      Training - done. Time (sec):  0.1017361
    ___________________________________________________________________________
 
       Evaluation
@@ -186,14 +186,9 @@ Options
     - Correlation function type
   * - categorical_kernel
     - None
-    - ['continuous_relaxation_matrix_kernel', 'gower_matrix_kernel', 'exponential_homoscedastic_matrix_kernel', 'homoscedastic_matrix_kernel']
+    - ['MixIntKernelType.CONT_RELAX_KERNEL', 'MixIntKernelType.GOWER', 'MixIntKernelType.EXP_HOMO_HSPHERE', 'MixIntKernelType.HOMO_HSPHERE']
     - None
     - The kernel to use for categorical inputs. Only for non continuous Kriging
-  * - xtypes
-    - None
-    - None
-    - ['list']
-    - x type specifications: either FLOAT for continuous, INT for integer or (ENUM n) for categorical dimension with n levels
   * - nugget
     - 2.220446049250313e-14
     - None
@@ -239,6 +234,15 @@ Options
     - None
     - ['int']
     - number of optimizer runs (multistart method)
+  * - xspecs
+    - None
+    - None
+    - ['XSpecs']
+    - xspecs: x specifications including
+      xtypes: list of either FLOAT, ORD or (ENUM, n) specs
+      xlimits: array-like bounds of x features
   * - rho_regr
     - constant
     - ['constant', 'linear', 'quadratic']
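Across ``MFK`` and its PLS-based variants, the removed ``xtypes`` option and the old string-valued ``categorical_kernel`` names are subsumed by the new ``xspecs`` option and the ``MixIntKernelType`` enumeration. A minimal construction sketch, reusing only the calls that appear in the mixed-variable EGO example earlier in this changeset:

.. code-block:: python

    import numpy as np
    from smt.surrogate_models import KRG, XSpecs, XType, MixIntKernelType

    # one continuous, one 3-level categorical, one 2-level categorical, one ordered variable
    xtypes = [XType.FLOAT, (XType.ENUM, 3), (XType.ENUM, 2), XType.ORD]
    xlimits = np.array(
        [[-5, 5], ["red", "green", "blue"], ["square", "circle"], [0, 2]],
        dtype="object",
    )
    xspecs = XSpecs(xtypes=xtypes, xlimits=xlimits)

    # a kriging model whose categorical dimensions are handled by the Gower kernel
    sm = KRG(
        xspecs=xspecs, categorical_kernel=MixIntKernelType.GOWER, print_global=False
    )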
diff --git a/doc/_src_docs/applications/mfkplsk.rst b/doc/_src_docs/applications/mfkplsk.rst
index c3beeec1e..37bd60c3e 100644
--- a/doc/_src_docs/applications/mfkplsk.rst
+++ b/doc/_src_docs/applications/mfkplsk.rst
@@ -108,7 +108,7 @@ Usage
       Training
 
       Training ...
-      Training - done. Time (sec):  0.1605699
+      Training - done. Time (sec):  0.1601353
    ___________________________________________________________________________
 
       Evaluation
@@ -116,9 +116,9 @@ Usage
          # eval points.   : 101
 
       Predicting ...
-      Predicting - done. Time (sec):  0.0009983
+      Predicting - done. Time (sec):  0.0000000
 
-      Prediction time/pt. (sec) :  0.0000099
+      Prediction time/pt. (sec) :  0.0000000
 
    ___________________________________________________________________________
@@ -186,14 +186,9 @@ Options
     - Correlation function type
   * - categorical_kernel
     - None
-    - ['continuous_relaxation_matrix_kernel', 'gower_matrix_kernel', 'exponential_homoscedastic_matrix_kernel', 'homoscedastic_matrix_kernel']
+    - ['MixIntKernelType.CONT_RELAX_KERNEL', 'MixIntKernelType.GOWER', 'MixIntKernelType.EXP_HOMO_HSPHERE', 'MixIntKernelType.HOMO_HSPHERE']
     - None
     - The kernel to use for categorical inputs. Only for non continuous Kriging
-  * - xtypes
-    - None
-    - None
-    - ['list']
-    - x type specifications: either FLOAT for continuous, INT for integer or (ENUM n) for categorical dimension with n levels
   * - nugget
     - 2.220446049250313e-14
     - None
@@ -239,6 +234,15 @@ Options
     - None
     - ['int']
     - number of optimizer runs (multistart method)
+  * - xspecs
+    - None
+    - None
+    - ['XSpecs']
+    - xspecs: x specifications including
+      xtypes: list of either FLOAT, ORD or (ENUM, n) specs
+      xlimits: array-like bounds of x features
   * - rho_regr
     - constant
     - ['constant', 'linear', 'quadratic']
diff --git a/doc/_src_docs/applications/mfkplsk_TestMFKPLSK_run_mfkplsk_example.png b/doc/_src_docs/applications/mfkplsk_TestMFKPLSK_run_mfkplsk_example.png
index 64bbeeb99..8421e7f9c 100644
Binary files a/doc/_src_docs/applications/mfkplsk_TestMFKPLSK_run_mfkplsk_example.png and b/doc/_src_docs/applications/mfkplsk_TestMFKPLSK_run_mfkplsk_example.png differ
diff --git a/doc/_src_docs/applications/mgp.rst b/doc/_src_docs/applications/mgp.rst
index 0bb96469c..0615eae99 100644
--- a/doc/_src_docs/applications/mgp.rst
+++ b/doc/_src_docs/applications/mgp.rst
@@ -128,7 +128,7 @@ Usage
       Training
 
       Training ...
-      Training - done. Time (sec):  0.9420376
+      Training - done. Time (sec):  1.0323844
 
 .. figure:: mgp_Test_test_mgp.png
    :scale: 80 %
@@ -184,14 +184,9 @@ Options
     - Correlation function type
   * - categorical_kernel
     - None
-    - ['continuous_relaxation_matrix_kernel', 'gower_matrix_kernel', 'exponential_homoscedastic_matrix_kernel', 'homoscedastic_matrix_kernel']
+    - ['MixIntKernelType.CONT_RELAX_KERNEL', 'MixIntKernelType.GOWER', 'MixIntKernelType.EXP_HOMO_HSPHERE', 'MixIntKernelType.HOMO_HSPHERE']
     - None
     - The kernel to use for categorical inputs. Only for non continuous Kriging
-  * - xtypes
-    - None
-    - None
-    - ['list']
-    - x type specifications: either FLOAT for continuous, INT for integer or (ENUM n) for categorical dimension with n levels
   * - nugget
     - 2.220446049250313e-14
     - None
@@ -237,6 +232,15 @@ Options
     - None
     - ['int']
     - number of optimizer runs (multistart method)
+  * - xspecs
+    - None
+    - None
+    - ['XSpecs']
+    - xspecs: x specifications including
+      xtypes: list of either FLOAT, ORD or (ENUM, n) specs
+      xlimits: array-like bounds of x features
   * - n_comp
     - 1
     - None
diff --git a/doc/_src_docs/applications/mixed_integer.rstx b/doc/_src_docs/applications/mixed_integer.rstx
deleted file mode 100644
index a9c1059a5..000000000
--- a/doc/_src_docs/applications/mixed_integer.rstx
+++ /dev/null
@@ -1,125 +0,0 @@
-.. _Mixed-Integer Sampling and Variables Types Specifications:
-
-Mixed-Integer usage (Variables, Sampling and Context)
-=====================================================
-
-SMT provides the ``mixed_integer`` module to adapt existing surrogates to deal with categorical (or enumerated) and ordered variables using continuous relaxation.
-For ordered variables, the values are rounded to the nearest values from a provided list. If, instead, bounds are provided, the list will consist of all integers between those bounds.
-
-The user specifies x feature types through a list of types to be either:
-
-- ``FLOAT``: a continuous feature,
-- ``ORD``: an ordered valued feature,
-- or a tuple ``(ENUM, n)`` where n is the number of levels of the categorical feature (i.e. an enumeration with n values)
-
-In the case of mixed integer sampling, the bounds of each x feature have to be adapted to take the feature types into account. While ``FLOAT`` and ``ORD`` features still have an interval [lower bound, upper bound], the bounds of an ``ENUM`` feature are defined by giving the enumeration/list of possible values (levels).
-
-For instance, if we have the following ``xtypes``: ``[FLOAT, ORD, (ENUM, 2), (ENUM, 3)]``, a compatible ``xlimits`` could be ``[[0., 4], [-10, 10], ["blue", "red"], ["short", "medium", "long"]]``.
-
-However, the functioning of ``ORD`` is twofold. As previously mentioned, it can be used as [lower bound, upper bound]: in this case [0, 5] will correspond to [0, 1, 2, 3, 4, 5]. On the other hand, ``ORD`` can also be used as an enumeration/list of possible values (levels): in this case ["0", "5", "6"] will correspond to [0, 5, 6].
-
-
-Mixed integer sampling method
------------------------------
-
-To use a sampling method with mixed integer typed features, the user instantiates a ``MixedIntegerSamplingMethod`` with a given sampling method.
-The ``MixedIntegerSamplingMethod`` implements the ``SamplingMethod`` interface and decorates the original sampling method to provide a DOE while conforming to integer and categorical types.
-
-Example of mixed-integer LHS sampling method
-^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
-
-.. embed-test-print-plot :: smt.applications.tests.test_mixed_integer , TestMixedInteger , run_mixed_integer_lhs_example , 80
-
-Mixed integer context
----------------------
-
-The ``MixedIntegerContext`` class helps the user to use mixed integer sampling methods and surrogate models consistently, by acting as a factory for those objects given an x specification: (xtypes, xlimits).
-
-  .. autoclass:: smt.applications.mixed_integer.MixedIntegerContext
-
-  .. automethod:: smt.applications.mixed_integer.MixedIntegerContext.__init__
-
-  .. automethod:: smt.applications.mixed_integer.MixedIntegerContext.build_sampling_method
-
-  .. automethod:: smt.applications.mixed_integer.MixedIntegerContext.build_surrogate_model
-
-  .. automethod:: smt.applications.mixed_integer.MixedIntegerContext.cast_to_discrete_values
-
-  .. automethod:: smt.applications.mixed_integer.MixedIntegerContext.fold_with_enum_index
-
-  .. automethod:: smt.applications.mixed_integer.MixedIntegerContext.unfold_with_enum_mask
-
-  .. automethod:: smt.applications.mixed_integer.MixedIntegerContext.cast_to_mixed_integer
-
-  .. automethod:: smt.applications.mixed_integer.MixedIntegerContext.cast_to_enum_value
-
-Example of mixed-integer context usage
-^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
-
-.. embed-test-print-plot :: smt.applications.tests.test_mixed_integer , TestMixedInteger , run_mixed_integer_context_example , 80
-
-
-.. _Mixed-Integer Surrogates:
-
-Mixed integer surrogate
-=======================
-
-To use a surrogate with mixed integer constraints, the user instantiates a ``MixedIntegerSurrogateModel`` with the given surrogate.
-The ``MixedIntegerSurrogateModel`` implements the ``SurrogateModel`` interface and decorates the given surrogate while respecting integer and categorical types.
-Various surrogate models are implemented; they are described below.
-
-Mixed-Integer Surrogate with Continuous Relaxation
---------------------------------------------------
-
-For enum variables, as many x features are added as there are enumerated levels for the variables. These new dimensions have [0, 1] bounds, and the max of these feature float values will correspond to the choice of one of the enum values: this is the so-called "one-hot encoding".
-
-For instance, for a categorical variable (one feature of x) with three levels ["blue", "red", "green"], 3 continuous float features x0, x1, x2 are created. Thereafter, the value max(x0, x1, x2), for instance x1, will give "red" as the value for the original categorical feature. Details can be found in [1]_ .
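As a minimal numpy illustration of the relaxation just described (the level names follow the example above; this sketches the decoding rule, not SMT's API):

.. code-block:: python

    import numpy as np

    # one categorical feature with three levels, relaxed into three
    # continuous features x0, x1, x2, each bounded in [0, 1]
    levels = ["blue", "red", "green"]
    x_relaxed = np.array([0.2, 0.9, 0.4])  # e.g. proposed by a continuous optimizer

    # the largest component selects the level ("one-hot" decoding)
    level = levels[int(np.argmax(x_relaxed))]
    print(level)  # red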
-
-
-Example of mixed-integer Polynomial (QP) surrogate
-^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
-
-.. embed-test-print-plot :: smt.applications.tests.test_mixed_integer , TestMixedInteger , run_mixed_integer_qp_example , 80
-
-
-Mixed-Integer Surrogate with Gower Distance
--------------------------------------------
-
-Another implemented method uses a basic mixed integer kernel based on the Gower distance between two points.
-When constructing the correlation kernel, the distance is redefined as :math:`\Delta = \Delta_{cont} + \Delta_{cat}`, with :math:`\Delta_{cont}` the continuous distance as usual and :math:`\Delta_{cat}` the categorical distance defined as the number of categorical variables that differ from one point to another.
-
-For example, the Gower distance between ``[1, 'red', 'medium']`` and ``[1.2, 'red', 'large']`` is :math:`\Delta = 0.2 + 0` (``'red'`` :math:`=` ``'red'``) :math:`+ 1` (``'medium'`` :math:`\neq` ``'large'``) :math:`= 1.2`.
-With this distance, a mixed integer kernel can be built. Details can be found in [1]_ .
-
-Example of mixed-integer Gower Distance model
-^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
-
-.. embed-test-print-plot :: smt.applications.tests.test_mixed_integer , TestMixedInteger , run_mixed_gower_example, 80
-
-
-
-Mixed-Integer Surrogate with Group Kernel (Homoscedastic Hypersphere)
----------------------------------------------------------------------
-
-This surrogate model considers that the correlation kernel between the levels of a given variable is a symmetric positive definite matrix. The latter matrix is estimated through a hypersphere parametrization depending on several hyperparameters. Finally, the data correlation matrix is built as the product of the correlation matrices over the various variables. Details can be found in [1]_ . Note that this model is the only one to consider negative correlations between levels ("blue" can be correlated negatively to "red").
-
-Example of mixed-integer Homoscedastic Hypersphere model
-^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
-
-.. embed-test-print-plot :: smt.applications.tests.test_mixed_integer , TestMixedInteger , run_mixed_homo_hyp_example, 80
-
-
-Mixed-Integer Surrogate with Exponential Homoscedastic Hypersphere
-------------------------------------------------------------------
-
-This surrogate model also considers that the correlation kernel between the levels of a given variable is a symmetric positive definite matrix. The latter matrix is estimated through a hypersphere parametrization depending on several hyperparameters. Thereafter, an exponential kernel is applied to the matrix. Finally, the data correlation matrix is built as the product of the correlation matrices over the various variables. Therefore, this model cannot model negative correlations and only works with absolute exponential and Gaussian kernels. Details can be found in [1]_ .
-
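Both hypersphere-based models above derive a valid level-correlation matrix from unconstrained angle hyperparameters. A schematic numpy sketch of one standard hypersphere parametrization (illustrative only; SMT's internal conventions may differ):

.. code-block:: python

    import numpy as np

    def hypersphere_corr(theta, n):
        """Build an n x n correlation matrix from n*(n-1)/2 angles in (0, pi)."""
        L = np.zeros((n, n))
        L[0, 0] = 1.0
        idx = 0
        for i in range(1, n):
            prod_sin = 1.0
            for j in range(i):
                L[i, j] = np.cos(theta[idx]) * prod_sin
                prod_sin *= np.sin(theta[idx])
                idx += 1
            L[i, i] = prod_sin  # keeps each row on the unit hypersphere
        return L @ L.T  # symmetric PSD with unit diagonal

    # correlation between the 3 levels of one categorical variable
    R = hypersphere_corr(np.array([np.pi / 3, np.pi / 4, np.pi / 2]), n=3)

Because the cosine terms may be negative, the plain (non-exponential) variant can represent negative correlations between levels, as noted above.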
-Example of mixed-integer Exponential Homoscedastic Hypersphere model
-^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
-
-.. embed-test-print-plot :: smt.applications.tests.test_mixed_integer , TestMixedInteger , run_mixed_homo_gaussian_example, 80
-
-
-References
-----------
-
-.. [1] Saves, P. and Diouane, Y. and Bartoli, N. and Lefebvre, T. and Morlier, J. (2022). A general square exponential kernel to handle mixed-categorical variables for Gaussian process. AIAA Aviation 2022 Forum.
diff --git a/doc/_src_docs/applications/mixed_integer_TestMixedInteger_run_mixed_integer_context_example.png b/doc/_src_docs/applications/mixed_integer_TestMixedInteger_run_mixed_integer_context_example.png
deleted file mode 100644
index 007f17cba..000000000
Binary files a/doc/_src_docs/applications/mixed_integer_TestMixedInteger_run_mixed_integer_context_example.png and /dev/null differ
diff --git a/doc/_src_docs/applications/mixed_integer_TestMixedInteger_run_mixed_integer_lhs_example.png b/doc/_src_docs/applications/mixed_integer_TestMixedInteger_run_mixed_integer_lhs_example.png
deleted file mode 100644
index 5e6bbf0d2..000000000
Binary files a/doc/_src_docs/applications/mixed_integer_TestMixedInteger_run_mixed_integer_lhs_example.png and /dev/null differ
diff --git a/doc/_src_docs/applications/vfm.rst b/doc/_src_docs/applications/vfm.rst
index 82c5cf165..9683f4e5a 100644
--- a/doc/_src_docs/applications/vfm.rst
+++ b/doc/_src_docs/applications/vfm.rst
@@ -118,7 +118,7 @@ Usage
       Training
 
       Training ...
-      Training - done. Time (sec):  0.2912970
+      Training - done. Time (sec):  0.2529881
 
 .. figure:: vfm_TestVFM_run_vfm_example.png
    :scale: 80 %
diff --git a/doc/_src_docs/applications/vfm_TestVFM_run_vfm_example.png b/doc/_src_docs/applications/vfm_TestVFM_run_vfm_example.png
index dfa340950..13e4d2144 100644
Binary files a/doc/_src_docs/applications/vfm_TestVFM_run_vfm_example.png and b/doc/_src_docs/applications/vfm_TestVFM_run_vfm_example.png differ
diff --git a/doc/_src_docs/dev_docs/dev_problems.rst b/doc/_src_docs/dev_docs/dev_problems.rst
index 6f2adffdb..c06d8d450 100644
--- a/doc/_src_docs/dev_docs/dev_problems.rst
+++ b/doc/_src_docs/dev_docs/dev_problems.rst
@@ -1,8 +1,8 @@
-Developer API for benchmarking problems
-=======================================
-
-.. autoclass:: smt.problems.problem.Problem
-
-  .. automethod:: smt.problems.problem.Problem._initialize
-
-  .. automethod:: smt.problems.problem.Problem._evaluate
+Developer API for benchmarking problems
+=======================================
+
+.. autoclass:: smt.problems.problem.Problem
+
+  .. automethod:: smt.problems.problem.Problem._initialize
+
+  .. automethod:: smt.problems.problem.Problem._evaluate
diff --git a/doc/_src_docs/dev_docs/dev_sampling_methods.rst b/doc/_src_docs/dev_docs/dev_sampling_methods.rst
index 0e19ae051..d760c9c3d 100644
--- a/doc/_src_docs/dev_docs/dev_sampling_methods.rst
+++ b/doc/_src_docs/dev_docs/dev_sampling_methods.rst
@@ -1,24 +1,24 @@
-Developer API for sampling methods
-==================================
-
-SamplingMethod
---------------
-
-A base class for all sampling methods in SMT.
-
-.. autoclass:: smt.sampling_methods.sampling_method.SamplingMethod
-
-  .. automethod:: smt.sampling_methods.sampling_method.SamplingMethod._initialize
-
-  .. automethod:: smt.sampling_methods.sampling_method.SamplingMethod._compute
-
-ScaledSamplingMethod
---------------------
-
-Conveniently, if a sampling method generates samples in the [0, 1] hypercube,
-one can inherit from the subclass `ScaledSamplingMethod` which
-automates the scaling from unit hypercube to the input space (i.e. xlimits).
-
-.. autoclass:: smt.sampling_methods.sampling_method.ScaledSamplingMethod
-
-  .. automethod:: smt.sampling_methods.sampling_method.ScaledSamplingMethod._compute
+Developer API for sampling methods
+==================================
+
+SamplingMethod
+--------------
+
+A base class for all sampling methods in SMT.
+
+.. autoclass:: smt.sampling_methods.sampling_method.SamplingMethod
+
+  .. automethod:: smt.sampling_methods.sampling_method.SamplingMethod._initialize
+
+  .. automethod:: smt.sampling_methods.sampling_method.SamplingMethod._compute
+
+ScaledSamplingMethod
+--------------------
+
+Conveniently, if a sampling method generates samples in the [0, 1] hypercube,
+one can inherit from the subclass `ScaledSamplingMethod` which
+automates the scaling from unit hypercube to the input space (i.e. xlimits).
+
+.. autoclass:: smt.sampling_methods.sampling_method.ScaledSamplingMethod
+
+  .. automethod:: smt.sampling_methods.sampling_method.ScaledSamplingMethod._compute
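A minimal sketch of such a subclass (the sampler and its name are hypothetical; only the ``ScaledSamplingMethod`` contract described above, ``_compute`` returning points in the unit hypercube, is assumed):

.. code-block:: python

    import numpy as np
    from smt.sampling_methods.sampling_method import ScaledSamplingMethod


    class UnitUniform(ScaledSamplingMethod):
        # Hypothetical sampler: uniform random points in [0, 1]^nx.
        # ScaledSamplingMethod takes care of mapping them to xlimits.
        def _compute(self, nt):
            nx = self.options["xlimits"].shape[0]
            return np.random.rand(nt, nx)


    sampling = UnitUniform(xlimits=np.array([[0.0, 4.0], [-10.0, 10.0]]))
    x = sampling(20)  # 20 points scaled into the given bounds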
diff --git a/doc/_src_docs/dev_docs/dev_surrogate_models.rst b/doc/_src_docs/dev_docs/dev_surrogate_models.rst
index 9d5cd3cc9..0ce884f18 100644
--- a/doc/_src_docs/dev_docs/dev_surrogate_models.rst
+++ b/doc/_src_docs/dev_docs/dev_surrogate_models.rst
@@ -1,18 +1,18 @@
-.. _surrogate-model-dev-api:
-
-Developer API for surrogate models
-==================================
-
-.. autoclass:: smt.surrogate_models.surrogate_model.SurrogateModel
-
-  .. automethod:: smt.surrogate_models.surrogate_model.SurrogateModel._initialize
-
-  .. automethod:: smt.surrogate_models.surrogate_model.SurrogateModel._train
-
-  .. automethod:: smt.surrogate_models.surrogate_model.SurrogateModel._predict_values
-
-  .. automethod:: smt.surrogate_models.surrogate_model.SurrogateModel._predict_derivatives
-
-  .. automethod:: smt.surrogate_models.surrogate_model.SurrogateModel._predict_output_derivatives
-
-  .. automethod:: smt.surrogate_models.surrogate_model.SurrogateModel._predict_variances
+.. _surrogate-model-dev-api:
+
+Developer API for surrogate models
+==================================
+
+.. autoclass:: smt.surrogate_models.surrogate_model.SurrogateModel
+
+  .. automethod:: smt.surrogate_models.surrogate_model.SurrogateModel._initialize
+
+  .. automethod:: smt.surrogate_models.surrogate_model.SurrogateModel._train
+
+  .. automethod:: smt.surrogate_models.surrogate_model.SurrogateModel._predict_values
+
+  .. automethod:: smt.surrogate_models.surrogate_model.SurrogateModel._predict_derivatives
+
+  .. automethod:: smt.surrogate_models.surrogate_model.SurrogateModel._predict_output_derivatives
+
+  .. automethod:: smt.surrogate_models.surrogate_model.SurrogateModel._predict_variances
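To make these hooks concrete, here is a deliberately naive, hypothetical subclass (the class name and the nearest-neighbor rule are invented for illustration; it assumes the usual ``set_training_values``/``train``/``predict_values`` workflow and that the training set is stored under ``self.training_points[None][0]``):

.. code-block:: python

    import numpy as np
    from smt.surrogate_models.surrogate_model import SurrogateModel


    class NearestNeighbor(SurrogateModel):
        """Hypothetical toy model: predict the output of the closest training point."""

        name = "NearestNeighbor"

        def _initialize(self):
            super(NearestNeighbor, self)._initialize()
            # declare extra options here with self.options.declare(...) if needed

        def _train(self):
            # data passed to set_training_values() ends up in self.training_points
            self.xt, self.yt = self.training_points[None][0]

        def _predict_values(self, x):
            # distance from each query point to each training point
            dist = np.linalg.norm(x[:, None, :] - self.xt[None, :, :], axis=2)
            return self.yt[np.argmin(dist, axis=1)]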
diff --git a/doc/_src_docs/examples/airfoil_parameters/learning_airfoil_parameters.png b/doc/_src_docs/examples/airfoil_parameters/learning_airfoil_parameters.png
index f8a340682..a6e50465a 100644
Binary files a/doc/_src_docs/examples/airfoil_parameters/learning_airfoil_parameters.png and b/doc/_src_docs/examples/airfoil_parameters/learning_airfoil_parameters.png differ
diff --git a/doc/_src_docs/examples/airfoil_parameters/learning_airfoil_parameters.rst b/doc/_src_docs/examples/airfoil_parameters/learning_airfoil_parameters.rst
index 284f056be..765e37c2c 100644
--- a/doc/_src_docs/examples/airfoil_parameters/learning_airfoil_parameters.rst
+++ b/doc/_src_docs/examples/airfoil_parameters/learning_airfoil_parameters.rst
@@ -1,191 +1,191 @@
-Learning Airfoil Parameters
-===========================
-
-This is a tutorial to determine the aerodynamic coefficients of a given airfoil using GENN in SMT (other models could be used as well).
-The obtained surrogate model can be used to give predictions for certain Mach numbers, angles of attack and the aerodynamic coefficients.
-These calculations can be really useful for airfoil shape optimization. The input parameters use the airfoil Camber and Thickness mode shapes.
-
-* Inputs: Airfoil Camber and Thickness mode shapes, Mach, alpha
-* Outputs (options): cd, cl, cm
-
-In this test case, we will be predicting only the Cd coefficient. However, the other databases for the prediction of the
-other terms are available in the same repository. Bouhlel's mSANN uses the information contained in the paper [1]_ to determine
-the airfoil's mode shapes. Moreover, in mSANN a deep neural network is used to predict the Cd parameter of a given parametrized
-airfoil. Therefore, in this tutorial, we reproduce the paper [2]_ using the Gradient-Enhanced Neural Networks (GENN) from SMT.
-
-Briefly explaining how mSANN generates the mode shapes of a given airfoil:
-
-#. Using inverse distance weighting (IDW) to interpolate the surface function of each airfoil.
-#. Then applying singular value decomposition (SVD) to reduce the number of variables that define the airfoil geometry. It includes a total of 14 airfoil modes (seven for camber and seven for thickness).
-#. In total, 16 input variables: two flow conditions, the Mach number (0.3 to 0.6) and the angle of attack (2 degrees to 6 degrees), plus 14 shape coefficients.
-#. The output airfoil aerodynamic force coefficients and their respective gradients are computed using ADflow, which solves the RANS equations with a Spalart-Allmaras turbulence model.
-
-References
-----------
-
-.. [1] Bouhlel, M. A., He, S., & Martins, J. R. (2020). Scalable gradient-enhanced artificial neural networks for airfoil shape design in the subsonic and transonic regimes. Structural and Multidisciplinary Optimization, 61(4), 1363-1376.
-.. [2] Bouhlel, M. A., He, S., and Martins, J. R. R. A., mSANN Model Benchmarks, Mendeley Data, 2019. https://doi.org/10.17632/ngpd634smf.1.
-.. [3] Li, J., Bouhlel, M. A., & Martins, J. R. (2019). Data-based approach for fast airfoil analysis and optimization. AIAA Journal, 57(2), 581-596.
-.. [4] Bouhlel, M. A., & Martins, J. R. (2019). Gradient-enhanced kriging for high-dimensional problems. Engineering with Computers, 35(1), 157-173.
-.. [5] Du, X., He, P., & Martins, J. R. (2021). Rapid airfoil design optimization via neural networks-based parameterization and surrogate modeling. Aerospace Science and Technology, 113, 106701.
-.. [6] University of Michigan, Webfoil, 2021. URL http://webfoil.engin.umich.edu/, accessed on 16 June 2021.
-
-Implementation
---------------
-
-Utilities
-^^^^^^^^^
-
-.. code-block:: python
-
-   import os
-   import numpy as np
-   import csv
-
-   WORKDIR = os.path.dirname(os.path.abspath(__file__))
-
-
-   def load_NACA4412_modeshapes():
-       return np.loadtxt(open(os.path.join(WORKDIR, "modes_NACA4412_ct.txt")))
-
-
-   def load_cd_training_data():
-       with open(os.path.join(WORKDIR, "cd_x_y.csv")) as file:
-           reader = csv.reader(file, delimiter=";")
-           values = np.array(list(reader), dtype=np.float32)
-           dim_values = values.shape
-           x = values[:, : dim_values[1] - 1]
-           y = values[:, -1]
-       with open(os.path.join(WORKDIR, "cd_dy.csv")) as file:
-           reader = csv.reader(file, delimiter=";")
-           dy = np.array(list(reader), dtype=np.float32)
-       return x, y, dy
-
-
-   def plot_predictions(airfoil_modeshapes, Ma, cd_model):
-       import matplotlib
-
-       matplotlib.use("Agg")
-       import matplotlib.pyplot as plt
-
-       # alpha is linearly distributed over the range of -1 to 7 degrees
-       # while Ma is kept constant
-       inputs = np.zeros(shape=(1, 15))
-       inputs[0, :14] = airfoil_modeshapes
-       inputs[0, -1] = Ma
-       inputs = np.tile(inputs, (50, 1))
-
-       alpha = np.atleast_2d([-1 + 0.16 * i for i in range(50)]).T
-
-       inputs = np.concatenate((inputs, alpha), axis=1)
-
-       # Predict Cd
-       cd_pred = cd_model.predict_values(inputs)
-
-       # Load ADflow Cd reference
-       with open(os.path.join(WORKDIR, "NACA4412-ADflow-alpha-cd.csv")) as file:
-           reader = csv.reader(file, delimiter=" ")
-           cd_adflow = np.array(list(reader)[1:], dtype=np.float32)
-
-       plt.plot(alpha, cd_pred)
-       plt.plot(cd_adflow[:, 0], cd_adflow[:, 1])
-       plt.grid(True)
-       plt.legend(["Surrogate", "ADflow"])
-       plt.title("Drag coefficient")
-       plt.xlabel("Alpha")
-       plt.ylabel("Cd")
-       plt.show()
-
-
-Main
-^^^^
-
-.. code-block:: python
-
-   """
-   Predicting Airfoil Aerodynamics through data by Raul Carreira Rufato and Prof. Joseph Morlier
-   """
-
-   import os
-   import numpy as np
-   import csv
-
-   from smt.examples.airfoil_parameters.learning_airfoil_parameters import (
-       load_cd_training_data,
-       load_NACA4412_modeshapes,
-       plot_predictions,
-   )
-   from sklearn.model_selection import train_test_split
-   from smt.surrogate_models.genn import GENN, load_smt_data
-
-   x, y, dy = load_cd_training_data()
-
-   # splitting the dataset
-   x_train, x_test, y_train, y_test, dy_train, dy_test = train_test_split(
-       x, y, dy, train_size=0.8
-   )
-   # building and training the GENN
-   genn = GENN(print_global=False)
-   # learning rate that controls optimizer step size
-   genn.options["alpha"] = 0.001
-   # lambd = 0. = no regularization, lambd > 0 = regularization
-   genn.options["lambd"] = 0.1
-   # gamma = 0. = no grad-enhancement, gamma > 0 = grad-enhancement
-   genn.options["gamma"] = 1.0
-   # number of hidden layers
-   genn.options["deep"] = 2
-   # number of nodes per hidden layer
-   genn.options["wide"] = 6
-   # used to divide data into training batches (use for large data sets)
-   genn.options["mini_batch_size"] = 256
-   # number of passes through data
-   genn.options["num_epochs"] = 5
-   # number of optimizer iterations per mini-batch
-   genn.options["num_iterations"] = 10
-   # print output (or not)
-   genn.options["is_print"] = False
-   # convenience function to read in data that is in SMT format
-   load_smt_data(genn, x_train, y_train, dy_train)
-
-   genn.train()
-
-   ## non-API function to plot training history (to check convergence)
-   # genn.plot_training_history()
-   ## non-API function to check accuracy of regression
-   # genn.goodness_of_fit(x_test, y_test, dy_test)
-
-   # API function to predict values at new (unseen) points
-   y_pred = genn.predict_values(x_test)
-
-   # Now we will use the trained model to make a prediction with a not-learned form.
-   # Example Prediction for NACA4412.
-   # Airfoil mode shapes should be determined according to Bouhlel, M.A., He, S., and Martins,
-   # J.R.R.A., mSANN Model Benchmarks, Mendeley Data, 2019. https://doi.org/10.17632/ngpd634smf.1
-   # Comparison of results with ADflow software for an alpha range from -1 to 7 degrees. Re = 3000000
-   airfoil_modeshapes = load_NACA4412_modeshapes()
-   Ma = 0.3
-   alpha = 0
-
-   # input in neural network is created out of airfoil mode shapes, Mach number and alpha
-   # airfoil_modeshapes: computed mode_shapes of random airfoil geometry with parameterise_airfoil
-   # Ma: desired Mach number for evaluation in range [0.3, 0.6]
-   # alpha: scalar in range [-1, 6]
-   input = np.zeros(shape=(1, 16))
-   input[0, :14] = airfoil_modeshapes
-   input[0, 14] = Ma
-   input[0, -1] = alpha
-
-   # prediction
-   cd_pred = genn.predict_values(input)
-   print("Drag coefficient prediction (cd): ", cd_pred[0, 0])
-
-   plot_predictions(airfoil_modeshapes, Ma, genn)
-
-::
-
-  Drag coefficient prediction (cd):  0.01019668488197359
-
-.. figure:: learning_airfoil_parameters.png
-   :scale: 100 %
-   :align: center
-
+Learning Airfoil Parameters
+===========================
+
+This is a tutorial to determine the aerodynamic coefficients of a given airfoil using GENN in SMT (other models could be used as well).
+The obtained surrogate model can be used to give predictions for certain Mach numbers, angles of attack and the aerodynamic coefficients.
+These calculations can be really useful for airfoil shape optimization. The input parameters use the airfoil Camber and Thickness mode shapes.
+
+* Inputs: Airfoil Camber and Thickness mode shapes, Mach, alpha
+* Outputs (options): cd, cl, cm
+
+In this test case, we will be predicting only the Cd coefficient. However, the other databases for the prediction of the
+other terms are available in the same repository. Bouhlel's mSANN uses the information contained in the paper [1]_ to determine
+the airfoil's mode shapes. Moreover, in mSANN a deep neural network is used to predict the Cd parameter of a given parametrized
+airfoil. Therefore, in this tutorial, we reproduce the paper [2]_ using the Gradient-Enhanced Neural Networks (GENN) from SMT.
+
+Briefly explaining how mSANN generates the mode shapes of a given airfoil:
+
+#. Using inverse distance weighting (IDW) to interpolate the surface function of each airfoil.
+#. Then applying singular value decomposition (SVD) to reduce the number of variables that define the airfoil geometry. It includes a total of 14 airfoil modes (seven for camber and seven for thickness).
+#. In total, 16 input variables: two flow conditions, the Mach number (0.3 to 0.6) and the angle of attack (2 degrees to 6 degrees), plus 14 shape coefficients.
+#. The output airfoil aerodynamic force coefficients and their respective gradients are computed using ADflow, which solves the RANS equations with a Spalart-Allmaras turbulence model.
+
+References
+----------
+
+.. [1] Bouhlel, M. A., He, S., & Martins, J. R. (2020). Scalable gradient-enhanced artificial neural networks for airfoil shape design in the subsonic and transonic regimes. Structural and Multidisciplinary Optimization, 61(4), 1363-1376.
+.. [2] Bouhlel, M. A., He, S., & Martins, J. R. (2019). mSANN Model Benchmarks, Mendeley Data, https://doi.org/10.17632/ngpd634smf.1.
+.. [3] Li, J., Bouhlel, M. A., & Martins, J. R. (2019). Data-based approach for fast airfoil analysis and optimization. AIAA Journal, 57(2), 581-596.
+.. [4] Bouhlel, M. A., & Martins, J. R. (2019). Gradient-enhanced kriging for high-dimensional problems. Engineering with Computers, 35(1), 157-173.
+.. [5] Du, X., He, P., & Martins, J. R. (2021). Rapid airfoil design optimization via neural networks-based parameterization and surrogate modeling. Aerospace Science and Technology, 113, 106701.
+.. [6] University of Michigan, Webfoil, 2021. URL http://webfoil.engin.umich.edu/, accessed on 16 June 2021.
+
+Implementation
+--------------
+
+Utilities
+^^^^^^^^^
+
+.. code-block:: python
+
+   import os
+   import numpy as np
+   import csv
+
+   WORKDIR = os.path.dirname(os.path.abspath(__file__))
+
+
+   def load_NACA4412_modeshapes():
+       return np.loadtxt(open(os.path.join(WORKDIR, "modes_NACA4412_ct.txt")))
+
+
+   def load_cd_training_data():
+       with open(os.path.join(WORKDIR, "cd_x_y.csv")) as file:
+           reader = csv.reader(file, delimiter=";")
+           values = np.array(list(reader), dtype=np.float32)
+           dim_values = values.shape
+           x = values[:, : dim_values[1] - 1]
+           y = values[:, -1]
+       with open(os.path.join(WORKDIR, "cd_dy.csv")) as file:
+           reader = csv.reader(file, delimiter=";")
+           dy = np.array(list(reader), dtype=np.float32)
+       return x, y, dy
+
+
+   def plot_predictions(airfoil_modeshapes, Ma, cd_model):
+       import matplotlib
+
+       matplotlib.use("Agg")
+       import matplotlib.pyplot as plt
+
+       # alpha is linearly distributed over the range of -1 to 7 degrees
+       # while Ma is kept constant
+       inputs = np.zeros(shape=(1, 15))
+       inputs[0, :14] = airfoil_modeshapes
+       inputs[0, -1] = Ma
+       inputs = np.tile(inputs, (50, 1))
+
+       alpha = np.atleast_2d([-1 + 0.16 * i for i in range(50)]).T
+
+       inputs = np.concatenate((inputs, alpha), axis=1)
+
+       # Predict Cd
+       cd_pred = cd_model.predict_values(inputs)
+
+       # Load ADflow Cd reference
+       with open(os.path.join(WORKDIR, "NACA4412-ADflow-alpha-cd.csv")) as file:
+           reader = csv.reader(file, delimiter=" ")
+           cd_adflow = np.array(list(reader)[1:], dtype=np.float32)
+
+       plt.plot(alpha, cd_pred)
+       plt.plot(cd_adflow[:, 0], cd_adflow[:, 1])
+       plt.grid(True)
+       plt.legend(["Surrogate", "ADflow"])
+       plt.title("Drag coefficient")
+       plt.xlabel("Alpha")
+       plt.ylabel("Cd")
+       plt.show()
+
+
+Main
+^^^^
+
+.. code-block:: python
+
+   """
+   Predicting Airfoil Aerodynamics through data by Raul Carreira Rufato and Prof. Joseph Morlier
+   """
+
+   import os
+   import numpy as np
+   import csv
+
+   from smt.examples.airfoil_parameters.learning_airfoil_parameters import (
+       load_cd_training_data,
+       load_NACA4412_modeshapes,
+       plot_predictions,
+   )
+   from sklearn.model_selection import train_test_split
+   from smt.surrogate_models.genn import GENN, load_smt_data
+
+   x, y, dy = load_cd_training_data()
+
+   # splitting the dataset
+   x_train, x_test, y_train, y_test, dy_train, dy_test = train_test_split(
+       x, y, dy, train_size=0.8
+   )
+   # building and training the GENN
+   genn = GENN(print_global=False)
+   # learning rate that controls optimizer step size
+   genn.options["alpha"] = 0.001
+   # lambd = 0. = no regularization, lambd > 0 = regularization
+   genn.options["lambd"] = 0.1
+   # gamma = 0. = no grad-enhancement, gamma > 0 = grad-enhancement
+   genn.options["gamma"] = 1.0
+   # number of hidden layers
+   genn.options["deep"] = 2
+   # number of nodes per hidden layer
+   genn.options["wide"] = 6
+   # used to divide data into training batches (use for large data sets)
+   genn.options["mini_batch_size"] = 256
+   # number of passes through data
+   genn.options["num_epochs"] = 5
+   # number of optimizer iterations per mini-batch
+   genn.options["num_iterations"] = 10
+   # print output (or not)
+   genn.options["is_print"] = False
+   # convenience function to read in data that is in SMT format
+   load_smt_data(genn, x_train, y_train, dy_train)
+
+   genn.train()
+
+   ## non-API function to plot training history (to check convergence)
+   # genn.plot_training_history()
+   ## non-API function to check accuracy of regression
+   # genn.goodness_of_fit(x_test, y_test, dy_test)
+
+   # API function to predict values at new (unseen) points
+   y_pred = genn.predict_values(x_test)
+
+   # Now we will use the trained model to make a prediction with a not-learned form.
+   # Example Prediction for NACA4412.
+   # Airfoil mode shapes should be determined according to Bouhlel, M.A., He, S., and Martins,
+   # J.R.R.A., mSANN Model Benchmarks, Mendeley Data, 2019. https://doi.org/10.17632/ngpd634smf.1
+   # Comparison of results with ADflow software for an alpha range from -1 to 7 degrees. Re = 3000000
+   airfoil_modeshapes = load_NACA4412_modeshapes()
+   Ma = 0.3
+   alpha = 0
+
+   # input in neural network is created out of airfoil mode shapes, Mach number and alpha
+   # airfoil_modeshapes: computed mode_shapes of random airfoil geometry with parameterise_airfoil
+   # Ma: desired Mach number for evaluation in range [0.3, 0.6]
+   # alpha: scalar in range [-1, 6]
+   input = np.zeros(shape=(1, 16))
+   input[0, :14] = airfoil_modeshapes
+   input[0, 14] = Ma
+   input[0, -1] = alpha
+
+   # prediction
+   cd_pred = genn.predict_values(input)
+   print("Drag coefficient prediction (cd): ", cd_pred[0, 0])
+
+   plot_predictions(airfoil_modeshapes, Ma, genn)
+
+::
+
+  Drag coefficient prediction (cd):  0.010558830776885974
+
+.. 
figure:: learning_airfoil_parameters.png + :scale: 100 % + :align: center + diff --git a/doc/_src_docs/examples/b777_engine/b777_engine.png b/doc/_src_docs/examples/b777_engine/b777_engine.png index f3d061369..afc3f0920 100644 Binary files a/doc/_src_docs/examples/b777_engine/b777_engine.png and b/doc/_src_docs/examples/b777_engine/b777_engine.png differ diff --git a/doc/_src_docs/examples/b777_engine/b777_engine.rst b/doc/_src_docs/examples/b777_engine/b777_engine.rst index e04f04c61..e7721a8be 100644 --- a/doc/_src_docs/examples/b777_engine/b777_engine.rst +++ b/doc/_src_docs/examples/b777_engine/b777_engine.rst @@ -1,896 +1,896 @@ -Boeing 777 engine data set -========================== - -.. code-block:: python - - import numpy as np - import os - - - def get_b777_engine(): - this_dir = os.path.split(__file__)[0] - - nt = 12 * 11 * 8 - xt = np.loadtxt(os.path.join(this_dir, "b777_engine_inputs.dat")).reshape((nt, 3)) - yt = np.loadtxt(os.path.join(this_dir, "b777_engine_outputs.dat")).reshape((nt, 2)) - dyt_dxt = np.loadtxt(os.path.join(this_dir, "b777_engine_derivs.dat")).reshape( - (nt, 2, 3) - ) - - xlimits = np.array([[0, 0.9], [0, 15], [0, 1.0]]) - - return xt, yt, dyt_dxt, xlimits - - - def plot_b777_engine(xt, yt, limits, interp): - import numpy as np - import matplotlib - - matplotlib.use("Agg") - import matplotlib.pyplot as plt - - val_M = np.array( - [0.0, 0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.75, 0.8, 0.85, 0.9] - ) # 12 - val_h = np.array( - [0.0, 0.6096, 1.524, 3.048, 4.572, 6.096, 7.62, 9.144, 10.668, 11.8872, 13.1064] - ) # 11 - val_t = np.array([0.05, 0.2, 0.3, 0.4, 0.6, 0.8, 0.9, 1.0]) # 8 - - def get_pts(xt, yt, iy, ind_M=None, ind_h=None, ind_t=None): - eps = 1e-5 - - if ind_M is not None: - M = val_M[ind_M] - keep = abs(xt[:, 0] - M) < eps - xt = xt[keep, :] - yt = yt[keep, :] - if ind_h is not None: - h = val_h[ind_h] - keep = abs(xt[:, 1] - h) < eps - xt = xt[keep, :] - yt = yt[keep, :] - if ind_t is not None: - t = val_t[ind_t] - keep = abs(xt[:, 2] - t) < eps - xt = xt[keep, :] - yt = yt[keep, :] - - if ind_M is None: - data = xt[:, 0], yt[:, iy] - elif ind_h is None: - data = xt[:, 1], yt[:, iy] - elif ind_t is None: - data = xt[:, 2], yt[:, iy] - - if iy == 0: - data = data[0], data[1] / 1e6 - elif iy == 1: - data = data[0], data[1] / 1e-4 - - return data - - num = 100 - x = np.zeros((num, 3)) - lins_M = np.linspace(0.0, 0.9, num) - lins_h = np.linspace(0.0, 13.1064, num) - lins_t = np.linspace(0.05, 1.0, num) - - def get_x(ind_M=None, ind_h=None, ind_t=None): - x = np.zeros((num, 3)) - x[:, 0] = lins_M - x[:, 1] = lins_h - x[:, 2] = lins_t - if ind_M: - x[:, 0] = val_M[ind_M] - if ind_h: - x[:, 1] = val_h[ind_h] - if ind_t: - x[:, 2] = val_t[ind_t] - return x - - nrow = 6 - ncol = 2 - - ind_M_1 = -2 - ind_M_2 = -5 - - ind_t_1 = 1 - ind_t_2 = -1 - - plt.close() - - # -------------------- - - fig, axs = plt.subplots(6, 2, gridspec_kw={"hspace": 0.5}, figsize=(15, 25)) - - axs[0, 0].set_title("M={}".format(val_M[ind_M_1])) - axs[0, 0].set(xlabel="throttle", ylabel="thrust (x 1e6 N)") - - axs[0, 1].set_title("M={}".format(val_M[ind_M_1])) - axs[0, 1].set(xlabel="throttle", ylabel="SFC (x 1e-3 N/N/s)") - - axs[1, 0].set_title("M={}".format(val_M[ind_M_2])) - axs[1, 0].set(xlabel="throttle", ylabel="thrust (x 1e6 N)") - - axs[1, 1].set_title("M={}".format(val_M[ind_M_2])) - axs[1, 1].set(xlabel="throttle", ylabel="SFC (x 1e-3 N/N/s)") - - # -------------------- - - axs[2, 0].set_title("throttle={}".format(val_t[ind_t_1])) - axs[2, 0].set(xlabel="altitude (km)", 
ylabel="thrust (x 1e6 N)") - - axs[2, 1].set_title("throttle={}".format(val_t[ind_t_1])) - axs[2, 1].set(xlabel="altitude (km)", ylabel="SFC (x 1e-3 N/N/s)") - - axs[3, 0].set_title("throttle={}".format(val_t[ind_t_2])) - axs[3, 0].set(xlabel="altitude (km)", ylabel="thrust (x 1e6 N)") - - axs[3, 1].set_title("throttle={}".format(val_t[ind_t_2])) - axs[3, 1].set(xlabel="altitude (km)", ylabel="SFC (x 1e-3 N/N/s)") - - # -------------------- - - axs[4, 0].set_title("throttle={}".format(val_t[ind_t_1])) - axs[4, 0].set(xlabel="Mach number", ylabel="thrust (x 1e6 N)") - - axs[4, 1].set_title("throttle={}".format(val_t[ind_t_1])) - axs[4, 1].set(xlabel="Mach number", ylabel="SFC (x 1e-3 N/N/s)") - - axs[5, 0].set_title("throttle={}".format(val_t[ind_t_2])) - axs[5, 0].set(xlabel="Mach number", ylabel="thrust (x 1e6 N)") - - axs[5, 1].set_title("throttle={}".format(val_t[ind_t_2])) - axs[5, 1].set(xlabel="Mach number", ylabel="SFC (x 1e-3 N/N/s)") - - ind_h_list = [0, 4, 7, 10] - ind_h_list = [4, 7, 10] - - ind_M_list = [0, 3, 6, 11] - ind_M_list = [3, 6, 11] - - colors = ["b", "r", "g", "c", "m"] - - # ----------------------------------------------------------------------------- - - # Throttle slices - for k, ind_h in enumerate(ind_h_list): - ind_M = ind_M_1 - x = get_x(ind_M=ind_M, ind_h=ind_h) - y = interp.predict_values(x) - - xt_, yt_ = get_pts(xt, yt, 0, ind_M=ind_M, ind_h=ind_h) - axs[0, 0].plot(xt_, yt_, "o" + colors[k]) - axs[0, 0].plot(lins_t, y[:, 0] / 1e6, colors[k]) - - xt_, yt_ = get_pts(xt, yt, 1, ind_M=ind_M, ind_h=ind_h) - axs[0, 1].plot(xt_, yt_, "o" + colors[k]) - axs[0, 1].plot(lins_t, y[:, 1] / 1e-4, colors[k]) - - ind_M = ind_M_2 - x = get_x(ind_M=ind_M, ind_h=ind_h) - y = interp.predict_values(x) - - xt_, yt_ = get_pts(xt, yt, 0, ind_M=ind_M, ind_h=ind_h) - axs[1, 0].plot(xt_, yt_, "o" + colors[k]) - axs[1, 0].plot(lins_t, y[:, 0] / 1e6, colors[k]) - - xt_, yt_ = get_pts(xt, yt, 1, ind_M=ind_M, ind_h=ind_h) - axs[1, 1].plot(xt_, yt_, "o" + colors[k]) - axs[1, 1].plot(lins_t, y[:, 1] / 1e-4, colors[k]) - - # ----------------------------------------------------------------------------- - - # Altitude slices - for k, ind_M in enumerate(ind_M_list): - ind_t = ind_t_1 - x = get_x(ind_M=ind_M, ind_t=ind_t) - y = interp.predict_values(x) - - xt_, yt_ = get_pts(xt, yt, 0, ind_M=ind_M, ind_t=ind_t) - axs[2, 0].plot(xt_, yt_, "o" + colors[k]) - axs[2, 0].plot(lins_h, y[:, 0] / 1e6, colors[k]) - - xt_, yt_ = get_pts(xt, yt, 1, ind_M=ind_M, ind_t=ind_t) - axs[2, 1].plot(xt_, yt_, "o" + colors[k]) - axs[2, 1].plot(lins_h, y[:, 1] / 1e-4, colors[k]) - - ind_t = ind_t_2 - x = get_x(ind_M=ind_M, ind_t=ind_t) - y = interp.predict_values(x) - - xt_, yt_ = get_pts(xt, yt, 0, ind_M=ind_M, ind_t=ind_t) - axs[3, 0].plot(xt_, yt_, "o" + colors[k]) - axs[3, 0].plot(lins_h, y[:, 0] / 1e6, colors[k]) - - xt_, yt_ = get_pts(xt, yt, 1, ind_M=ind_M, ind_t=ind_t) - axs[3, 1].plot(xt_, yt_, "o" + colors[k]) - axs[3, 1].plot(lins_h, y[:, 1] / 1e-4, colors[k]) - - # ----------------------------------------------------------------------------- - - # Mach number slices - for k, ind_h in enumerate(ind_h_list): - ind_t = ind_t_1 - x = get_x(ind_t=ind_t, ind_h=ind_h) - y = interp.predict_values(x) - - xt_, yt_ = get_pts(xt, yt, 0, ind_h=ind_h, ind_t=ind_t) - axs[4, 0].plot(xt_, yt_, "o" + colors[k]) - axs[4, 0].plot(lins_M, y[:, 0] / 1e6, colors[k]) - - xt_, yt_ = get_pts(xt, yt, 1, ind_h=ind_h, ind_t=ind_t) - axs[4, 1].plot(xt_, yt_, "o" + colors[k]) - axs[4, 1].plot(lins_M, y[:, 1] / 1e-4, colors[k]) - - 
ind_t = ind_t_2 - x = get_x(ind_t=ind_t, ind_h=ind_h) - y = interp.predict_values(x) - - xt_, yt_ = get_pts(xt, yt, 0, ind_h=ind_h, ind_t=ind_t) - axs[5, 0].plot(xt_, yt_, "o" + colors[k]) - axs[5, 0].plot(lins_M, y[:, 0] / 1e6, colors[k]) - - xt_, yt_ = get_pts(xt, yt, 1, ind_h=ind_h, ind_t=ind_t) - axs[5, 1].plot(xt_, yt_, "o" + colors[k]) - axs[5, 1].plot(lins_M, y[:, 1] / 1e-4, colors[k]) - - # ----------------------------------------------------------------------------- - - for k in range(2): - legend_entries = [] - for ind_h in ind_h_list: - legend_entries.append("h={}".format(val_h[ind_h])) - legend_entries.append("") - - axs[k, 0].legend(legend_entries) - axs[k, 1].legend(legend_entries) - - axs[k + 4, 0].legend(legend_entries) - axs[k + 4, 1].legend(legend_entries) - - legend_entries = [] - for ind_M in ind_M_list: - legend_entries.append("M={}".format(val_M[ind_M])) - legend_entries.append("") - - axs[k + 2, 0].legend(legend_entries) - axs[k + 2, 1].legend(legend_entries) - - plt.show() - - -RMTB ----- - -.. code-block:: python - - from smt.surrogate_models import RMTB - from smt.examples.b777_engine.b777_engine import get_b777_engine, plot_b777_engine - - xt, yt, dyt_dxt, xlimits = get_b777_engine() - - interp = RMTB( - num_ctrl_pts=15, - xlimits=xlimits, - nonlinear_maxiter=20, - approx_order=2, - energy_weight=0e-14, - regularization_weight=0e-18, - extrapolate=True, - ) - interp.set_training_values(xt, yt) - interp.set_training_derivatives(xt, dyt_dxt[:, :, 0], 0) - interp.set_training_derivatives(xt, dyt_dxt[:, :, 1], 1) - interp.set_training_derivatives(xt, dyt_dxt[:, :, 2], 2) - interp.train() - - plot_b777_engine(xt, yt, xlimits, interp) - -:: - - ___________________________________________________________________________ - - RMTB - ___________________________________________________________________________ - - Problem size - - # training points. : 1056 - - ___________________________________________________________________________ - - Training - - Training ... - Pre-computing matrices ... - Computing dof2coeff ... - Computing dof2coeff - done. Time (sec): 0.0000024 - Initializing Hessian ... - Initializing Hessian - done. Time (sec): 0.0004523 - Computing energy terms ... - Computing energy terms - done. Time (sec): 0.2658696 - Computing approximation terms ... - Computing approximation terms - done. Time (sec): 0.0096512 - Pre-computing matrices - done. Time (sec): 0.2760413 - Solving for degrees of freedom ... - Solving initial startup problem (n=3375) ... - Solving for output 0 ... - Iteration (num., iy, grad. norm, func.) : 0 0 4.857178281e+07 2.642628384e+13 - Iteration (num., iy, grad. norm, func.) : 0 0 1.275872076e+05 7.014980388e+09 - Solving for output 0 - done. Time (sec): 0.0893450 - Solving for output 1 ... - Iteration (num., iy, grad. norm, func.) : 0 1 3.711896708e-01 7.697335516e-04 - Iteration (num., iy, grad. norm, func.) : 0 1 1.242232475e-03 3.529813372e-07 - Solving for output 1 - done. Time (sec): 0.0883675 - Solving initial startup problem (n=3375) - done. Time (sec): 0.1778002 - Solving nonlinear problem (n=3375) ... - Solving for output 0 ... - Iteration (num., iy, grad. norm, func.) : 0 0 1.275872076e+05 7.014980388e+09 - Iteration (num., iy, grad. norm, func.) : 0 0 9.288085134e+04 1.953155630e+09 - Iteration (num., iy, grad. norm, func.) : 1 0 4.926070874e+04 5.635278864e+08 - Iteration (num., iy, grad. norm, func.) : 2 0 3.679856063e+04 3.897611903e+08 - Iteration (num., iy, grad. norm, func.) 
: 3 0 3.383510086e+04 3.787123178e+08 - Iteration (num., iy, grad. norm, func.) : 4 0 2.589886846e+04 3.284192662e+08 - Iteration (num., iy, grad. norm, func.) : 5 0 1.845592605e+04 3.023670809e+08 - Iteration (num., iy, grad. norm, func.) : 6 0 1.769372828e+04 2.681329751e+08 - Iteration (num., iy, grad. norm, func.) : 7 0 1.530447070e+04 2.242731783e+08 - Iteration (num., iy, grad. norm, func.) : 8 0 9.881826932e+03 2.023427168e+08 - Iteration (num., iy, grad. norm, func.) : 9 0 1.588522663e+04 1.864877083e+08 - Iteration (num., iy, grad. norm, func.) : 10 0 7.425937050e+03 1.768325991e+08 - Iteration (num., iy, grad. norm, func.) : 11 0 6.171437551e+03 1.679045359e+08 - Iteration (num., iy, grad. norm, func.) : 12 0 1.069917671e+04 1.620825061e+08 - Iteration (num., iy, grad. norm, func.) : 13 0 3.653875897e+03 1.586612281e+08 - Iteration (num., iy, grad. norm, func.) : 14 0 3.162616646e+03 1.574698211e+08 - Iteration (num., iy, grad. norm, func.) : 15 0 3.260390990e+03 1.550613438e+08 - Iteration (num., iy, grad. norm, func.) : 16 0 2.544518278e+03 1.518997468e+08 - Iteration (num., iy, grad. norm, func.) : 17 0 2.438954004e+03 1.497917708e+08 - Iteration (num., iy, grad. norm, func.) : 18 0 2.020600794e+03 1.492028567e+08 - Iteration (num., iy, grad. norm, func.) : 19 0 1.472982211e+03 1.488930748e+08 - Solving for output 0 - done. Time (sec): 1.7447991 - Solving for output 1 ... - Iteration (num., iy, grad. norm, func.) : 0 1 1.242232475e-03 3.529813372e-07 - Iteration (num., iy, grad. norm, func.) : 0 1 3.291537296e-04 6.204977989e-08 - Iteration (num., iy, grad. norm, func.) : 1 1 2.793575829e-04 1.823401328e-08 - Iteration (num., iy, grad. norm, func.) : 2 1 2.067622624e-04 8.324728593e-09 - Iteration (num., iy, grad. norm, func.) : 3 1 1.663771237e-04 7.723689681e-09 - Iteration (num., iy, grad. norm, func.) : 4 1 1.161962999e-04 6.778016439e-09 - Iteration (num., iy, grad. norm, func.) : 5 1 1.410753454e-04 5.166822345e-09 - Iteration (num., iy, grad. norm, func.) : 6 1 4.021446522e-05 2.975869191e-09 - Iteration (num., iy, grad. norm, func.) : 7 1 3.893738412e-05 2.106018741e-09 - Iteration (num., iy, grad. norm, func.) : 8 1 3.462947412e-05 1.806092003e-09 - Iteration (num., iy, grad. norm, func.) : 9 1 3.829940623e-05 1.704500814e-09 - Iteration (num., iy, grad. norm, func.) : 10 1 2.893554551e-05 1.589471783e-09 - Iteration (num., iy, grad. norm, func.) : 11 1 2.455492027e-05 1.434957749e-09 - Iteration (num., iy, grad. norm, func.) : 12 1 1.181000189e-05 1.306210742e-09 - Iteration (num., iy, grad. norm, func.) : 13 1 1.801702275e-05 1.256716689e-09 - Iteration (num., iy, grad. norm, func.) : 14 1 9.540420207e-06 1.228605583e-09 - Iteration (num., iy, grad. norm, func.) : 15 1 8.923156659e-06 1.214274817e-09 - Iteration (num., iy, grad. norm, func.) : 16 1 1.032728857e-05 1.187418654e-09 - Iteration (num., iy, grad. norm, func.) : 17 1 1.197810779e-05 1.156131278e-09 - Iteration (num., iy, grad. norm, func.) : 18 1 5.640649200e-06 1.139675008e-09 - Iteration (num., iy, grad. norm, func.) : 19 1 5.800155030e-06 1.138464239e-09 - Solving for output 1 - done. Time (sec): 1.7479448 - Solving nonlinear problem (n=3375) - done. Time (sec): 3.4928083 - Solving for degrees of freedom - done. Time (sec): 3.6706922 - Training - done. Time (sec): 3.9474902 - ___________________________________________________________________________ - - Evaluation - - # eval points. : 100 - - Predicting ... - Predicting - done. Time (sec): 0.0012105 - - Prediction time/pt. 
(sec) : 0.0000121 - - ___________________________________________________________________________ - - Evaluation - - # eval points. : 100 - - Predicting ... - Predicting - done. Time (sec): 0.0011418 - - Prediction time/pt. (sec) : 0.0000114 - - ___________________________________________________________________________ - - Evaluation - - # eval points. : 100 - - Predicting ... - Predicting - done. Time (sec): 0.0011227 - - Prediction time/pt. (sec) : 0.0000112 - - ___________________________________________________________________________ - - Evaluation - - # eval points. : 100 - - Predicting ... - Predicting - done. Time (sec): 0.0011294 - - Prediction time/pt. (sec) : 0.0000113 - - ___________________________________________________________________________ - - Evaluation - - # eval points. : 100 - - Predicting ... - Predicting - done. Time (sec): 0.0011373 - - Prediction time/pt. (sec) : 0.0000114 - - ___________________________________________________________________________ - - Evaluation - - # eval points. : 100 - - Predicting ... - Predicting - done. Time (sec): 0.0011530 - - Prediction time/pt. (sec) : 0.0000115 - - ___________________________________________________________________________ - - Evaluation - - # eval points. : 100 - - Predicting ... - Predicting - done. Time (sec): 0.0011668 - - Prediction time/pt. (sec) : 0.0000117 - - ___________________________________________________________________________ - - Evaluation - - # eval points. : 100 - - Predicting ... - Predicting - done. Time (sec): 0.0011950 - - Prediction time/pt. (sec) : 0.0000119 - - ___________________________________________________________________________ - - Evaluation - - # eval points. : 100 - - Predicting ... - Predicting - done. Time (sec): 0.0011606 - - Prediction time/pt. (sec) : 0.0000116 - - ___________________________________________________________________________ - - Evaluation - - # eval points. : 100 - - Predicting ... - Predicting - done. Time (sec): 0.0011237 - - Prediction time/pt. (sec) : 0.0000112 - - ___________________________________________________________________________ - - Evaluation - - # eval points. : 100 - - Predicting ... - Predicting - done. Time (sec): 0.0011373 - - Prediction time/pt. (sec) : 0.0000114 - - ___________________________________________________________________________ - - Evaluation - - # eval points. : 100 - - Predicting ... - Predicting - done. Time (sec): 0.0011415 - - Prediction time/pt. (sec) : 0.0000114 - - ___________________________________________________________________________ - - Evaluation - - # eval points. : 100 - - Predicting ... - Predicting - done. Time (sec): 0.0011349 - - Prediction time/pt. (sec) : 0.0000113 - - ___________________________________________________________________________ - - Evaluation - - # eval points. : 100 - - Predicting ... - Predicting - done. Time (sec): 0.0011733 - - Prediction time/pt. (sec) : 0.0000117 - - ___________________________________________________________________________ - - Evaluation - - # eval points. : 100 - - Predicting ... - Predicting - done. Time (sec): 0.0011494 - - Prediction time/pt. (sec) : 0.0000115 - - ___________________________________________________________________________ - - Evaluation - - # eval points. : 100 - - Predicting ... - Predicting - done. Time (sec): 0.0011330 - - Prediction time/pt. (sec) : 0.0000113 - - ___________________________________________________________________________ - - Evaluation - - # eval points. : 100 - - Predicting ... - Predicting - done. 
Time (sec): 0.0011353 - - Prediction time/pt. (sec) : 0.0000114 - - ___________________________________________________________________________ - - Evaluation - - # eval points. : 100 - - Predicting ... - Predicting - done. Time (sec): 0.0011313 - - Prediction time/pt. (sec) : 0.0000113 - - -.. figure:: b777_engine.png - :scale: 60 % - :align: center - -RMTC ----- - -.. code-block:: python - - from smt.surrogate_models import RMTC - from smt.examples.b777_engine.b777_engine import get_b777_engine, plot_b777_engine - - xt, yt, dyt_dxt, xlimits = get_b777_engine() - - interp = RMTC( - num_elements=6, - xlimits=xlimits, - nonlinear_maxiter=20, - approx_order=2, - energy_weight=0.0, - regularization_weight=0.0, - extrapolate=True, - ) - interp.set_training_values(xt, yt) - interp.set_training_derivatives(xt, dyt_dxt[:, :, 0], 0) - interp.set_training_derivatives(xt, dyt_dxt[:, :, 1], 1) - interp.set_training_derivatives(xt, dyt_dxt[:, :, 2], 2) - interp.train() - - plot_b777_engine(xt, yt, xlimits, interp) - -:: - - ___________________________________________________________________________ - - RMTC - ___________________________________________________________________________ - - Problem size - - # training points. : 1056 - - ___________________________________________________________________________ - - Training - - Training ... - Pre-computing matrices ... - Computing dof2coeff ... - Computing dof2coeff - done. Time (sec): 0.0241230 - Initializing Hessian ... - Initializing Hessian - done. Time (sec): 0.0004451 - Computing energy terms ... - Computing energy terms - done. Time (sec): 0.1647394 - Computing approximation terms ... - Computing approximation terms - done. Time (sec): 0.0804243 - Pre-computing matrices - done. Time (sec): 0.2698176 - Solving for degrees of freedom ... - Solving initial startup problem (n=2744) ... - Solving for output 0 ... - Iteration (num., iy, grad. norm, func.) : 0 0 7.864862172e+07 2.642628384e+13 - Iteration (num., iy, grad. norm, func.) : 0 0 2.083097286e+05 2.064477927e+09 - Solving for output 0 - done. Time (sec): 0.1904421 - Solving for output 1 ... - Iteration (num., iy, grad. norm, func.) : 0 1 8.095040141e-01 7.697335516e-04 - Iteration (num., iy, grad. norm, func.) : 0 1 1.246264808e-03 1.322534258e-07 - Solving for output 1 - done. Time (sec): 0.1890645 - Solving initial startup problem (n=2744) - done. Time (sec): 0.3795958 - Solving nonlinear problem (n=2744) ... - Solving for output 0 ... - Iteration (num., iy, grad. norm, func.) : 0 0 2.083097286e+05 2.064477927e+09 - Iteration (num., iy, grad. norm, func.) : 0 0 2.921093469e+04 4.215575599e+08 - Iteration (num., iy, grad. norm, func.) : 1 0 1.624892907e+04 3.528031953e+08 - Iteration (num., iy, grad. norm, func.) : 2 0 2.265072322e+04 3.503556515e+08 - Iteration (num., iy, grad. norm, func.) : 3 0 1.094482934e+04 3.373018101e+08 - Iteration (num., iy, grad. norm, func.) : 4 0 4.467562615e+03 3.327042553e+08 - Iteration (num., iy, grad. norm, func.) : 5 0 4.726704301e+03 3.320707375e+08 - Iteration (num., iy, grad. norm, func.) : 6 0 3.053207732e+03 3.312934476e+08 - Iteration (num., iy, grad. norm, func.) : 7 0 2.770622919e+03 3.307231558e+08 - Iteration (num., iy, grad. norm, func.) : 8 0 1.263294650e+03 3.304665608e+08 - Iteration (num., iy, grad. norm, func.) : 9 0 2.036122391e+03 3.303510963e+08 - Iteration (num., iy, grad. norm, func.) : 10 0 9.662280290e+02 3.302094351e+08 - Iteration (num., iy, grad. norm, func.) : 11 0 1.595601766e+03 3.301317782e+08 - Iteration (num., iy, grad. 
norm, func.) : 12 0 6.853346133e+02 3.300060973e+08 - Iteration (num., iy, grad. norm, func.) : 13 0 7.941751018e+02 3.299070838e+08 - Iteration (num., iy, grad. norm, func.) : 14 0 5.327508408e+02 3.298430938e+08 - Iteration (num., iy, grad. norm, func.) : 15 0 6.686481516e+02 3.298390706e+08 - Iteration (num., iy, grad. norm, func.) : 16 0 4.278757016e+02 3.298282583e+08 - Iteration (num., iy, grad. norm, func.) : 17 0 1.024292501e+03 3.298140054e+08 - Iteration (num., iy, grad. norm, func.) : 18 0 3.840733792e+02 3.297950995e+08 - Iteration (num., iy, grad. norm, func.) : 19 0 2.237764944e+02 3.297923893e+08 - Solving for output 0 - done. Time (sec): 3.8310549 - Solving for output 1 ... - Iteration (num., iy, grad. norm, func.) : 0 1 1.246264808e-03 1.322534258e-07 - Iteration (num., iy, grad. norm, func.) : 0 1 4.125228033e-04 9.564646442e-09 - Iteration (num., iy, grad. norm, func.) : 1 1 2.809791486e-04 7.886584381e-09 - Iteration (num., iy, grad. norm, func.) : 2 1 2.227459244e-04 6.103691176e-09 - Iteration (num., iy, grad. norm, func.) : 3 1 1.165820010e-04 4.322015707e-09 - Iteration (num., iy, grad. norm, func.) : 4 1 7.944000757e-05 4.069136622e-09 - Iteration (num., iy, grad. norm, func.) : 5 1 6.086550528e-05 3.750833482e-09 - Iteration (num., iy, grad. norm, func.) : 6 1 4.597318407e-05 3.367615953e-09 - Iteration (num., iy, grad. norm, func.) : 7 1 3.570441158e-05 3.208671412e-09 - Iteration (num., iy, grad. norm, func.) : 8 1 4.355301841e-05 3.127337695e-09 - Iteration (num., iy, grad. norm, func.) : 9 1 2.486877560e-05 3.067459784e-09 - Iteration (num., iy, grad. norm, func.) : 10 1 2.640078765e-05 3.044941197e-09 - Iteration (num., iy, grad. norm, func.) : 11 1 3.485205621e-05 3.030522947e-09 - Iteration (num., iy, grad. norm, func.) : 12 1 1.829552973e-05 3.001917844e-09 - Iteration (num., iy, grad. norm, func.) : 13 1 2.370540548e-05 2.986897120e-09 - Iteration (num., iy, grad. norm, func.) : 14 1 1.845750980e-05 2.959996700e-09 - Iteration (num., iy, grad. norm, func.) : 15 1 1.453401168e-05 2.936225153e-09 - Iteration (num., iy, grad. norm, func.) : 16 1 8.584570383e-06 2.924598280e-09 - Iteration (num., iy, grad. norm, func.) : 17 1 7.167158781e-06 2.923628026e-09 - Iteration (num., iy, grad. norm, func.) : 18 1 5.666657239e-06 2.922637642e-09 - Iteration (num., iy, grad. norm, func.) : 19 1 1.075729147e-05 2.920150371e-09 - Solving for output 1 - done. Time (sec): 3.7937956 - Solving nonlinear problem (n=2744) - done. Time (sec): 7.6249423 - Solving for degrees of freedom - done. Time (sec): 8.0046196 - Training - done. Time (sec): 8.2772305 - ___________________________________________________________________________ - - Evaluation - - # eval points. : 100 - - Predicting ... - Predicting - done. Time (sec): 0.0028212 - - Prediction time/pt. (sec) : 0.0000282 - - ___________________________________________________________________________ - - Evaluation - - # eval points. : 100 - - Predicting ... - Predicting - done. Time (sec): 0.0027299 - - Prediction time/pt. (sec) : 0.0000273 - - ___________________________________________________________________________ - - Evaluation - - # eval points. : 100 - - Predicting ... - Predicting - done. Time (sec): 0.0027378 - - Prediction time/pt. (sec) : 0.0000274 - - ___________________________________________________________________________ - - Evaluation - - # eval points. : 100 - - Predicting ... - Predicting - done. Time (sec): 0.0027292 - - Prediction time/pt. 
(sec) : 0.0000273
-
-   ...(13 further near-identical evaluation blocks omitted)...
-
-   ___________________________________________________________________________
-
-    Evaluation
-
-       # eval points. : 100
-
-    Predicting ...
-    Predicting - done. Time (sec): 0.0027480
-
-    Prediction time/pt. (sec) : 0.0000275
-
-
-.. figure:: b777_engine.png
-   :scale: 60 %
-   :align: center
+Boeing 777 engine data set
+==========================
+
+.. 
code-block:: python + + import numpy as np + import os + + + def get_b777_engine(): + this_dir = os.path.split(__file__)[0] + + nt = 12 * 11 * 8 + xt = np.loadtxt(os.path.join(this_dir, "b777_engine_inputs.dat")).reshape((nt, 3)) + yt = np.loadtxt(os.path.join(this_dir, "b777_engine_outputs.dat")).reshape((nt, 2)) + dyt_dxt = np.loadtxt(os.path.join(this_dir, "b777_engine_derivs.dat")).reshape( + (nt, 2, 3) + ) + + xlimits = np.array([[0, 0.9], [0, 15], [0, 1.0]]) + + return xt, yt, dyt_dxt, xlimits + + + def plot_b777_engine(xt, yt, limits, interp): + import numpy as np + import matplotlib + + matplotlib.use("Agg") + import matplotlib.pyplot as plt + + val_M = np.array( + [0.0, 0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.75, 0.8, 0.85, 0.9] + ) # 12 + val_h = np.array( + [0.0, 0.6096, 1.524, 3.048, 4.572, 6.096, 7.62, 9.144, 10.668, 11.8872, 13.1064] + ) # 11 + val_t = np.array([0.05, 0.2, 0.3, 0.4, 0.6, 0.8, 0.9, 1.0]) # 8 + + def get_pts(xt, yt, iy, ind_M=None, ind_h=None, ind_t=None): + eps = 1e-5 + + if ind_M is not None: + M = val_M[ind_M] + keep = abs(xt[:, 0] - M) < eps + xt = xt[keep, :] + yt = yt[keep, :] + if ind_h is not None: + h = val_h[ind_h] + keep = abs(xt[:, 1] - h) < eps + xt = xt[keep, :] + yt = yt[keep, :] + if ind_t is not None: + t = val_t[ind_t] + keep = abs(xt[:, 2] - t) < eps + xt = xt[keep, :] + yt = yt[keep, :] + + if ind_M is None: + data = xt[:, 0], yt[:, iy] + elif ind_h is None: + data = xt[:, 1], yt[:, iy] + elif ind_t is None: + data = xt[:, 2], yt[:, iy] + + if iy == 0: + data = data[0], data[1] / 1e6 + elif iy == 1: + data = data[0], data[1] / 1e-4 + + return data + + num = 100 + x = np.zeros((num, 3)) + lins_M = np.linspace(0.0, 0.9, num) + lins_h = np.linspace(0.0, 13.1064, num) + lins_t = np.linspace(0.05, 1.0, num) + + def get_x(ind_M=None, ind_h=None, ind_t=None): + x = np.zeros((num, 3)) + x[:, 0] = lins_M + x[:, 1] = lins_h + x[:, 2] = lins_t + if ind_M: + x[:, 0] = val_M[ind_M] + if ind_h: + x[:, 1] = val_h[ind_h] + if ind_t: + x[:, 2] = val_t[ind_t] + return x + + nrow = 6 + ncol = 2 + + ind_M_1 = -2 + ind_M_2 = -5 + + ind_t_1 = 1 + ind_t_2 = -1 + + plt.close() + + # -------------------- + + fig, axs = plt.subplots(6, 2, gridspec_kw={"hspace": 0.5}, figsize=(15, 25)) + + axs[0, 0].set_title("M={}".format(val_M[ind_M_1])) + axs[0, 0].set(xlabel="throttle", ylabel="thrust (x 1e6 N)") + + axs[0, 1].set_title("M={}".format(val_M[ind_M_1])) + axs[0, 1].set(xlabel="throttle", ylabel="SFC (x 1e-3 N/N/s)") + + axs[1, 0].set_title("M={}".format(val_M[ind_M_2])) + axs[1, 0].set(xlabel="throttle", ylabel="thrust (x 1e6 N)") + + axs[1, 1].set_title("M={}".format(val_M[ind_M_2])) + axs[1, 1].set(xlabel="throttle", ylabel="SFC (x 1e-3 N/N/s)") + + # -------------------- + + axs[2, 0].set_title("throttle={}".format(val_t[ind_t_1])) + axs[2, 0].set(xlabel="altitude (km)", ylabel="thrust (x 1e6 N)") + + axs[2, 1].set_title("throttle={}".format(val_t[ind_t_1])) + axs[2, 1].set(xlabel="altitude (km)", ylabel="SFC (x 1e-3 N/N/s)") + + axs[3, 0].set_title("throttle={}".format(val_t[ind_t_2])) + axs[3, 0].set(xlabel="altitude (km)", ylabel="thrust (x 1e6 N)") + + axs[3, 1].set_title("throttle={}".format(val_t[ind_t_2])) + axs[3, 1].set(xlabel="altitude (km)", ylabel="SFC (x 1e-3 N/N/s)") + + # -------------------- + + axs[4, 0].set_title("throttle={}".format(val_t[ind_t_1])) + axs[4, 0].set(xlabel="Mach number", ylabel="thrust (x 1e6 N)") + + axs[4, 1].set_title("throttle={}".format(val_t[ind_t_1])) + axs[4, 1].set(xlabel="Mach number", ylabel="SFC (x 1e-3 N/N/s)") + + 
axs[5, 0].set_title("throttle={}".format(val_t[ind_t_2])) + axs[5, 0].set(xlabel="Mach number", ylabel="thrust (x 1e6 N)") + + axs[5, 1].set_title("throttle={}".format(val_t[ind_t_2])) + axs[5, 1].set(xlabel="Mach number", ylabel="SFC (x 1e-3 N/N/s)") + + ind_h_list = [0, 4, 7, 10] + ind_h_list = [4, 7, 10] + + ind_M_list = [0, 3, 6, 11] + ind_M_list = [3, 6, 11] + + colors = ["b", "r", "g", "c", "m"] + + # ----------------------------------------------------------------------------- + + # Throttle slices + for k, ind_h in enumerate(ind_h_list): + ind_M = ind_M_1 + x = get_x(ind_M=ind_M, ind_h=ind_h) + y = interp.predict_values(x) + + xt_, yt_ = get_pts(xt, yt, 0, ind_M=ind_M, ind_h=ind_h) + axs[0, 0].plot(xt_, yt_, "o" + colors[k]) + axs[0, 0].plot(lins_t, y[:, 0] / 1e6, colors[k]) + + xt_, yt_ = get_pts(xt, yt, 1, ind_M=ind_M, ind_h=ind_h) + axs[0, 1].plot(xt_, yt_, "o" + colors[k]) + axs[0, 1].plot(lins_t, y[:, 1] / 1e-4, colors[k]) + + ind_M = ind_M_2 + x = get_x(ind_M=ind_M, ind_h=ind_h) + y = interp.predict_values(x) + + xt_, yt_ = get_pts(xt, yt, 0, ind_M=ind_M, ind_h=ind_h) + axs[1, 0].plot(xt_, yt_, "o" + colors[k]) + axs[1, 0].plot(lins_t, y[:, 0] / 1e6, colors[k]) + + xt_, yt_ = get_pts(xt, yt, 1, ind_M=ind_M, ind_h=ind_h) + axs[1, 1].plot(xt_, yt_, "o" + colors[k]) + axs[1, 1].plot(lins_t, y[:, 1] / 1e-4, colors[k]) + + # ----------------------------------------------------------------------------- + + # Altitude slices + for k, ind_M in enumerate(ind_M_list): + ind_t = ind_t_1 + x = get_x(ind_M=ind_M, ind_t=ind_t) + y = interp.predict_values(x) + + xt_, yt_ = get_pts(xt, yt, 0, ind_M=ind_M, ind_t=ind_t) + axs[2, 0].plot(xt_, yt_, "o" + colors[k]) + axs[2, 0].plot(lins_h, y[:, 0] / 1e6, colors[k]) + + xt_, yt_ = get_pts(xt, yt, 1, ind_M=ind_M, ind_t=ind_t) + axs[2, 1].plot(xt_, yt_, "o" + colors[k]) + axs[2, 1].plot(lins_h, y[:, 1] / 1e-4, colors[k]) + + ind_t = ind_t_2 + x = get_x(ind_M=ind_M, ind_t=ind_t) + y = interp.predict_values(x) + + xt_, yt_ = get_pts(xt, yt, 0, ind_M=ind_M, ind_t=ind_t) + axs[3, 0].plot(xt_, yt_, "o" + colors[k]) + axs[3, 0].plot(lins_h, y[:, 0] / 1e6, colors[k]) + + xt_, yt_ = get_pts(xt, yt, 1, ind_M=ind_M, ind_t=ind_t) + axs[3, 1].plot(xt_, yt_, "o" + colors[k]) + axs[3, 1].plot(lins_h, y[:, 1] / 1e-4, colors[k]) + + # ----------------------------------------------------------------------------- + + # Mach number slices + for k, ind_h in enumerate(ind_h_list): + ind_t = ind_t_1 + x = get_x(ind_t=ind_t, ind_h=ind_h) + y = interp.predict_values(x) + + xt_, yt_ = get_pts(xt, yt, 0, ind_h=ind_h, ind_t=ind_t) + axs[4, 0].plot(xt_, yt_, "o" + colors[k]) + axs[4, 0].plot(lins_M, y[:, 0] / 1e6, colors[k]) + + xt_, yt_ = get_pts(xt, yt, 1, ind_h=ind_h, ind_t=ind_t) + axs[4, 1].plot(xt_, yt_, "o" + colors[k]) + axs[4, 1].plot(lins_M, y[:, 1] / 1e-4, colors[k]) + + ind_t = ind_t_2 + x = get_x(ind_t=ind_t, ind_h=ind_h) + y = interp.predict_values(x) + + xt_, yt_ = get_pts(xt, yt, 0, ind_h=ind_h, ind_t=ind_t) + axs[5, 0].plot(xt_, yt_, "o" + colors[k]) + axs[5, 0].plot(lins_M, y[:, 0] / 1e6, colors[k]) + + xt_, yt_ = get_pts(xt, yt, 1, ind_h=ind_h, ind_t=ind_t) + axs[5, 1].plot(xt_, yt_, "o" + colors[k]) + axs[5, 1].plot(lins_M, y[:, 1] / 1e-4, colors[k]) + + # ----------------------------------------------------------------------------- + + for k in range(2): + legend_entries = [] + for ind_h in ind_h_list: + legend_entries.append("h={}".format(val_h[ind_h])) + legend_entries.append("") + + axs[k, 0].legend(legend_entries) + axs[k, 1].legend(legend_entries) + 
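+        # rows 4-5 (the Mach-number sweeps above) also draw one curve per
+        # altitude, so they reuse the same legend entries as rows 0-1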
+ axs[k + 4, 0].legend(legend_entries) + axs[k + 4, 1].legend(legend_entries) + + legend_entries = [] + for ind_M in ind_M_list: + legend_entries.append("M={}".format(val_M[ind_M])) + legend_entries.append("") + + axs[k + 2, 0].legend(legend_entries) + axs[k + 2, 1].legend(legend_entries) + + plt.show() + + +RMTB +---- + +.. code-block:: python + + from smt.surrogate_models import RMTB + from smt.examples.b777_engine.b777_engine import get_b777_engine, plot_b777_engine + + xt, yt, dyt_dxt, xlimits = get_b777_engine() + + interp = RMTB( + num_ctrl_pts=15, + xlimits=xlimits, + nonlinear_maxiter=20, + approx_order=2, + energy_weight=0e-14, + regularization_weight=0e-18, + extrapolate=True, + ) + interp.set_training_values(xt, yt) + interp.set_training_derivatives(xt, dyt_dxt[:, :, 0], 0) + interp.set_training_derivatives(xt, dyt_dxt[:, :, 1], 1) + interp.set_training_derivatives(xt, dyt_dxt[:, :, 2], 2) + interp.train() + + plot_b777_engine(xt, yt, xlimits, interp) + +:: + + ___________________________________________________________________________ + + RMTB + ___________________________________________________________________________ + + Problem size + + # training points. : 1056 + + ___________________________________________________________________________ + + Training + + Training ... + Pre-computing matrices ... + Computing dof2coeff ... + Computing dof2coeff - done. Time (sec): 0.0000000 + Initializing Hessian ... + Initializing Hessian - done. Time (sec): 0.0009980 + Computing energy terms ... + Computing energy terms - done. Time (sec): 0.1937890 + Computing approximation terms ... + Computing approximation terms - done. Time (sec): 0.0059414 + Pre-computing matrices - done. Time (sec): 0.2007284 + Solving for degrees of freedom ... + Solving initial startup problem (n=3375) ... + Solving for output 0 ... + Iteration (num., iy, grad. norm, func.) : 0 0 4.857178281e+07 2.642628384e+13 + Iteration (num., iy, grad. norm, func.) : 0 0 1.364349733e+05 7.002441710e+09 + Solving for output 0 - done. Time (sec): 0.0641978 + Solving for output 1 ... + Iteration (num., iy, grad. norm, func.) : 0 1 3.711896708e-01 7.697335516e-04 + Iteration (num., iy, grad. norm, func.) : 0 1 1.384257034e-03 3.512467641e-07 + Solving for output 1 - done. Time (sec): 0.0610621 + Solving initial startup problem (n=3375) - done. Time (sec): 0.1252599 + Solving nonlinear problem (n=3375) ... + Solving for output 0 ... + Iteration (num., iy, grad. norm, func.) : 0 0 1.364349733e+05 7.002441710e+09 + Iteration (num., iy, grad. norm, func.) : 0 0 7.401682427e+04 1.956585489e+09 + Iteration (num., iy, grad. norm, func.) : 1 0 4.640761309e+04 5.653768085e+08 + Iteration (num., iy, grad. norm, func.) : 2 0 3.726949662e+04 3.860194807e+08 + Iteration (num., iy, grad. norm, func.) : 3 0 3.244331543e+04 3.735217325e+08 + Iteration (num., iy, grad. norm, func.) : 4 0 2.356309977e+04 3.232040667e+08 + Iteration (num., iy, grad. norm, func.) : 5 0 1.896770441e+04 2.970854602e+08 + Iteration (num., iy, grad. norm, func.) : 6 0 1.168979712e+04 2.643923864e+08 + Iteration (num., iy, grad. norm, func.) : 7 0 1.199133401e+04 2.223771115e+08 + Iteration (num., iy, grad. norm, func.) : 8 0 9.363877631e+03 2.013234589e+08 + Iteration (num., iy, grad. norm, func.) : 9 0 9.544160641e+03 1.861724031e+08 + Iteration (num., iy, grad. norm, func.) : 10 0 9.458916793e+03 1.762819815e+08 + Iteration (num., iy, grad. norm, func.) : 11 0 4.152198214e+03 1.661887141e+08 + Iteration (num., iy, grad. norm, func.) 
: 12 0 8.359804107e+03 1.619868009e+08 + Iteration (num., iy, grad. norm, func.) : 13 0 2.678073894e+03 1.599839425e+08 + Iteration (num., iy, grad. norm, func.) : 14 0 2.301049932e+03 1.583627245e+08 + Iteration (num., iy, grad. norm, func.) : 15 0 3.127472449e+03 1.554361115e+08 + Iteration (num., iy, grad. norm, func.) : 16 0 2.879195835e+03 1.516054749e+08 + Iteration (num., iy, grad. norm, func.) : 17 0 1.583184160e+03 1.493412967e+08 + Iteration (num., iy, grad. norm, func.) : 18 0 2.202973513e+03 1.492035778e+08 + Iteration (num., iy, grad. norm, func.) : 19 0 1.397841194e+03 1.489828558e+08 + Solving for output 0 - done. Time (sec): 1.2642355 + Solving for output 1 ... + Iteration (num., iy, grad. norm, func.) : 0 1 1.384257034e-03 3.512467641e-07 + Iteration (num., iy, grad. norm, func.) : 0 1 3.575138262e-04 6.166597300e-08 + Iteration (num., iy, grad. norm, func.) : 1 1 3.156992731e-04 1.817140551e-08 + Iteration (num., iy, grad. norm, func.) : 2 1 2.070220585e-04 8.504635606e-09 + Iteration (num., iy, grad. norm, func.) : 3 1 1.711558893e-04 7.824284644e-09 + Iteration (num., iy, grad. norm, func.) : 4 1 1.147466159e-04 6.729973912e-09 + Iteration (num., iy, grad. norm, func.) : 5 1 1.033293877e-04 5.063463186e-09 + Iteration (num., iy, grad. norm, func.) : 6 1 5.272698157e-05 2.929839938e-09 + Iteration (num., iy, grad. norm, func.) : 7 1 4.894442104e-05 2.071717930e-09 + Iteration (num., iy, grad. norm, func.) : 8 1 2.850823295e-05 1.797321609e-09 + Iteration (num., iy, grad. norm, func.) : 9 1 2.566163204e-05 1.713105879e-09 + Iteration (num., iy, grad. norm, func.) : 10 1 2.728118053e-05 1.606498899e-09 + Iteration (num., iy, grad. norm, func.) : 11 1 2.407731298e-05 1.439553327e-09 + Iteration (num., iy, grad. norm, func.) : 12 1 1.588414550e-05 1.302254672e-09 + Iteration (num., iy, grad. norm, func.) : 13 1 1.941516089e-05 1.258276496e-09 + Iteration (num., iy, grad. norm, func.) : 14 1 1.159190980e-05 1.239434907e-09 + Iteration (num., iy, grad. norm, func.) : 15 1 1.872674427e-05 1.235569556e-09 + Iteration (num., iy, grad. norm, func.) : 16 1 1.169536710e-05 1.206341167e-09 + Iteration (num., iy, grad. norm, func.) : 17 1 1.005666171e-05 1.172498758e-09 + Iteration (num., iy, grad. norm, func.) : 18 1 4.240888944e-06 1.143928197e-09 + Iteration (num., iy, grad. norm, func.) : 19 1 4.653082813e-06 1.142989811e-09 + Solving for output 1 - done. Time (sec): 1.2653127 + Solving nonlinear problem (n=3375) - done. Time (sec): 2.5295482 + Solving for degrees of freedom - done. Time (sec): 2.6548080 + Training - done. Time (sec): 2.8555365 + ___________________________________________________________________________ + + Evaluation + + # eval points. : 100 + + Predicting ... + Predicting - done. Time (sec): 0.0009975 + + Prediction time/pt. (sec) : 0.0000100 + + ___________________________________________________________________________ + + Evaluation + + # eval points. : 100 + + Predicting ... + Predicting - done. Time (sec): 0.0000000 + + Prediction time/pt. (sec) : 0.0000000 + + ___________________________________________________________________________ + + Evaluation + + # eval points. : 100 + + Predicting ... + Predicting - done. Time (sec): 0.0000000 + + Prediction time/pt. (sec) : 0.0000000 + + ___________________________________________________________________________ + + Evaluation + + # eval points. : 100 + + Predicting ... + Predicting - done. Time (sec): 0.0009975 + + Prediction time/pt. 
(sec) : 0.0000100
+
+   ...(13 further near-identical evaluation blocks omitted)...
+
+   ___________________________________________________________________________
+
+    Evaluation
+
+       # eval points. : 100
+
+    Predicting ...
+    Predicting - done. Time (sec): 0.0009966
+
+    Prediction time/pt. (sec) : 0.0000100
+
+
+.. figure:: b777_engine.png
+   :scale: 60 %
+   :align: center
+
+RMTC
+----
+
+.. 
code-block:: python + + from smt.surrogate_models import RMTC + from smt.examples.b777_engine.b777_engine import get_b777_engine, plot_b777_engine + + xt, yt, dyt_dxt, xlimits = get_b777_engine() + + interp = RMTC( + num_elements=6, + xlimits=xlimits, + nonlinear_maxiter=20, + approx_order=2, + energy_weight=0.0, + regularization_weight=0.0, + extrapolate=True, + ) + interp.set_training_values(xt, yt) + interp.set_training_derivatives(xt, dyt_dxt[:, :, 0], 0) + interp.set_training_derivatives(xt, dyt_dxt[:, :, 1], 1) + interp.set_training_derivatives(xt, dyt_dxt[:, :, 2], 2) + interp.train() + + plot_b777_engine(xt, yt, xlimits, interp) + +:: + + ___________________________________________________________________________ + + RMTC + ___________________________________________________________________________ + + Problem size + + # training points. : 1056 + + ___________________________________________________________________________ + + Training + + Training ... + Pre-computing matrices ... + Computing dof2coeff ... + Computing dof2coeff - done. Time (sec): 0.0253110 + Initializing Hessian ... + Initializing Hessian - done. Time (sec): 0.0009971 + Computing energy terms ... + Computing energy terms - done. Time (sec): 0.1351936 + Computing approximation terms ... + Computing approximation terms - done. Time (sec): 0.0579875 + Pre-computing matrices - done. Time (sec): 0.2194891 + Solving for degrees of freedom ... + Solving initial startup problem (n=2744) ... + Solving for output 0 ... + Iteration (num., iy, grad. norm, func.) : 0 0 7.864862172e+07 2.642628384e+13 + Iteration (num., iy, grad. norm, func.) : 0 0 2.301220784e+05 2.043089744e+09 + Solving for output 0 - done. Time (sec): 0.1790462 + Solving for output 1 ... + Iteration (num., iy, grad. norm, func.) : 0 1 8.095040141e-01 7.697335516e-04 + Iteration (num., iy, grad. norm, func.) : 0 1 1.247766422e-03 1.322502818e-07 + Solving for output 1 - done. Time (sec): 0.1666119 + Solving initial startup problem (n=2744) - done. Time (sec): 0.3456581 + Solving nonlinear problem (n=2744) ... + Solving for output 0 ... + Iteration (num., iy, grad. norm, func.) : 0 0 2.301220784e+05 2.043089744e+09 + Iteration (num., iy, grad. norm, func.) : 0 0 2.853768988e+04 4.204382514e+08 + Iteration (num., iy, grad. norm, func.) : 1 0 1.600292441e+04 3.528182269e+08 + Iteration (num., iy, grad. norm, func.) : 2 0 2.192959017e+04 3.499245939e+08 + Iteration (num., iy, grad. norm, func.) : 3 0 8.908430676e+03 3.371333491e+08 + Iteration (num., iy, grad. norm, func.) : 4 0 4.826696294e+03 3.326895469e+08 + Iteration (num., iy, grad. norm, func.) : 5 0 4.466377088e+03 3.320607428e+08 + Iteration (num., iy, grad. norm, func.) : 6 0 2.811936973e+03 3.312893629e+08 + Iteration (num., iy, grad. norm, func.) : 7 0 1.939207818e+03 3.307236804e+08 + Iteration (num., iy, grad. norm, func.) : 8 0 1.606864853e+03 3.304685748e+08 + Iteration (num., iy, grad. norm, func.) : 9 0 1.876454015e+03 3.303459940e+08 + Iteration (num., iy, grad. norm, func.) : 10 0 1.381228599e+03 3.302005814e+08 + Iteration (num., iy, grad. norm, func.) : 11 0 1.427468675e+03 3.301258329e+08 + Iteration (num., iy, grad. norm, func.) : 12 0 8.863567115e+02 3.300062354e+08 + Iteration (num., iy, grad. norm, func.) : 13 0 8.708862351e+02 3.299010976e+08 + Iteration (num., iy, grad. norm, func.) : 14 0 4.801718324e+02 3.298332669e+08 + Iteration (num., iy, grad. norm, func.) : 15 0 4.188928791e+02 3.298207697e+08 + Iteration (num., iy, grad. norm, func.) 
: 16 0 4.809479966e+02 3.298126567e+08 + Iteration (num., iy, grad. norm, func.) : 17 0 7.635186662e+02 3.298073881e+08 + Iteration (num., iy, grad. norm, func.) : 18 0 4.534280606e+02 3.298003186e+08 + Iteration (num., iy, grad. norm, func.) : 19 0 3.973724388e+02 3.297955330e+08 + Solving for output 0 - done. Time (sec): 2.9330854 + Solving for output 1 ... + Iteration (num., iy, grad. norm, func.) : 0 1 1.247766422e-03 1.322502818e-07 + Iteration (num., iy, grad. norm, func.) : 0 1 3.967550240e-04 9.514176202e-09 + Iteration (num., iy, grad. norm, func.) : 1 1 3.022974213e-04 7.902718497e-09 + Iteration (num., iy, grad. norm, func.) : 2 1 2.940967866e-04 6.064696564e-09 + Iteration (num., iy, grad. norm, func.) : 3 1 9.190272069e-05 4.306045763e-09 + Iteration (num., iy, grad. norm, func.) : 4 1 9.362879272e-05 4.066039150e-09 + Iteration (num., iy, grad. norm, func.) : 5 1 7.167971812e-05 3.747268947e-09 + Iteration (num., iy, grad. norm, func.) : 6 1 4.524808243e-05 3.367699068e-09 + Iteration (num., iy, grad. norm, func.) : 7 1 3.853416937e-05 3.209181099e-09 + Iteration (num., iy, grad. norm, func.) : 8 1 4.232980316e-05 3.129247089e-09 + Iteration (num., iy, grad. norm, func.) : 9 1 3.190371873e-05 3.067320241e-09 + Iteration (num., iy, grad. norm, func.) : 10 1 1.974177570e-05 3.040505234e-09 + Iteration (num., iy, grad. norm, func.) : 11 1 2.881369844e-05 3.034137061e-09 + Iteration (num., iy, grad. norm, func.) : 12 1 1.436660531e-05 3.012771286e-09 + Iteration (num., iy, grad. norm, func.) : 13 1 1.788606605e-05 2.992655580e-09 + Iteration (num., iy, grad. norm, func.) : 14 1 1.152719843e-05 2.958698604e-09 + Iteration (num., iy, grad. norm, func.) : 15 1 1.156807011e-05 2.937664628e-09 + Iteration (num., iy, grad. norm, func.) : 16 1 8.045689579e-06 2.928032775e-09 + Iteration (num., iy, grad. norm, func.) : 17 1 1.163004012e-05 2.926867367e-09 + Iteration (num., iy, grad. norm, func.) : 18 1 8.598124448e-06 2.924036478e-09 + Iteration (num., iy, grad. norm, func.) : 19 1 6.696045882e-06 2.923305126e-09 + Solving for output 1 - done. Time (sec): 2.9247584 + Solving nonlinear problem (n=2744) - done. Time (sec): 5.8578439 + Solving for degrees of freedom - done. Time (sec): 6.2035019 + Training - done. Time (sec): 6.4249947 + ___________________________________________________________________________ + + Evaluation + + # eval points. : 100 + + Predicting ... + Predicting - done. Time (sec): 0.0019631 + + Prediction time/pt. (sec) : 0.0000196 + + ___________________________________________________________________________ + + Evaluation + + # eval points. : 100 + + Predicting ... + Predicting - done. Time (sec): 0.0009973 + + Prediction time/pt. (sec) : 0.0000100 + + ___________________________________________________________________________ + + Evaluation + + # eval points. : 100 + + Predicting ... + Predicting - done. Time (sec): 0.0009983 + + Prediction time/pt. (sec) : 0.0000100 + + ___________________________________________________________________________ + + Evaluation + + # eval points. : 100 + + Predicting ... + Predicting - done. Time (sec): 0.0009999 + + Prediction time/pt. (sec) : 0.0000100 + + ___________________________________________________________________________ + + Evaluation + + # eval points. : 100 + + Predicting ... + Predicting - done. Time (sec): 0.0020270 + + Prediction time/pt. (sec) : 0.0000203 + + ___________________________________________________________________________ + + Evaluation + + # eval points. : 100 + + Predicting ... + Predicting - done. 
Time (sec): 0.0019937
+
+    Prediction time/pt. (sec) : 0.0000199
+
+   ...(11 further near-identical evaluation blocks omitted)...
+
+   ___________________________________________________________________________
+
+    Evaluation
+
+       # eval points. : 100
+
+    Predicting ...
+    Predicting - done. Time (sec): 0.0020037
+
+    Prediction time/pt. (sec) : 0.0000200
+
+
+.. figure:: b777_engine.png
+   :scale: 60 %
+   :align: center
diff --git a/doc/_src_docs/examples/one_D_step/ex_1d_step.png b/doc/_src_docs/examples/one_D_step/ex_1d_step.png
index ee21ccd4f..de4367449 100644
Binary files a/doc/_src_docs/examples/one_D_step/ex_1d_step.png and b/doc/_src_docs/examples/one_D_step/ex_1d_step.png differ
diff --git a/doc/_src_docs/examples/one_D_step/ex_1d_step.rst b/doc/_src_docs/examples/one_D_step/ex_1d_step.rst
index e6bebf148..f2f93e81d 100644
--- a/doc/_src_docs/examples/one_D_step/ex_1d_step.rst
+++ b/doc/_src_docs/examples/one_D_step/ex_1d_step.rst
@@ -1,286 +1,281 @@
-1-D step-like data set
-======================
-
-.. 
code-block:: python - - import numpy as np - - - def get_one_d_step(): - xt = np.array( - [ - 0.0000, - 0.4000, - 0.6000, - 0.7000, - 0.7500, - 0.7750, - 0.8000, - 0.8500, - 0.8750, - 0.9000, - 0.9250, - 0.9500, - 0.9750, - 1.0000, - 1.0250, - 1.0500, - 1.1000, - 1.2000, - 1.3000, - 1.4000, - 1.6000, - 1.8000, - 2.0000, - ], - dtype=np.float64, - ) - yt = np.array( - [ - 0.0130, - 0.0130, - 0.0130, - 0.0130, - 0.0130, - 0.0130, - 0.0130, - 0.0132, - 0.0135, - 0.0140, - 0.0162, - 0.0230, - 0.0275, - 0.0310, - 0.0344, - 0.0366, - 0.0396, - 0.0410, - 0.0403, - 0.0390, - 0.0360, - 0.0350, - 0.0345, - ], - dtype=np.float64, - ) - - xlimits = np.array([[0.0, 2.0]]) - - return xt, yt, xlimits - - - def plot_one_d_step(xt, yt, limits, interp): - import numpy as np - import matplotlib - - matplotlib.use("Agg") - import matplotlib.pyplot as plt - - num = 500 - x = np.linspace(0.0, 2.0, num) - y = interp.predict_values(x)[:, 0] - - plt.plot(x, y) - plt.plot(xt, yt, "o") - plt.xlabel("x") - plt.ylabel("y") - plt.show() - - -RMTB ----- - -.. code-block:: python - - from smt.surrogate_models import RMTB - from smt.examples.one_D_step.one_D_step import get_one_d_step, plot_one_d_step - - xt, yt, xlimits = get_one_d_step() - - interp = RMTB( - num_ctrl_pts=100, - xlimits=xlimits, - nonlinear_maxiter=20, - solver_tolerance=1e-16, - energy_weight=1e-14, - regularization_weight=0.0, - ) - interp.set_training_values(xt, yt) - interp.train() - - plot_one_d_step(xt, yt, xlimits, interp) - -:: - - ___________________________________________________________________________ - - RMTB - ___________________________________________________________________________ - - Problem size - - # training points. : 23 - - ___________________________________________________________________________ - - Training - - Training ... - Pre-computing matrices ... - Computing dof2coeff ... - Computing dof2coeff - done. Time (sec): 0.0000000 - Initializing Hessian ... - Initializing Hessian - done. Time (sec): 0.0000000 - Computing energy terms ... - Computing energy terms - done. Time (sec): 0.0000000 - Computing approximation terms ... - Computing approximation terms - done. Time (sec): 0.0000000 - Pre-computing matrices - done. Time (sec): 0.0000000 - Solving for degrees of freedom ... - Solving initial startup problem (n=100) ... - Solving for output 0 ... - Iteration (num., iy, grad. norm, func.) : 0 0 1.032652876e-01 8.436300000e-03 - Iteration (num., iy, grad. norm, func.) : 0 0 8.326567782e-09 2.218506146e-13 - Solving for output 0 - done. Time (sec): 0.0070736 - Solving initial startup problem (n=100) - done. Time (sec): 0.0070736 - Solving nonlinear problem (n=100) ... - Solving for output 0 ... - Iteration (num., iy, grad. norm, func.) : 0 0 1.550397729e-11 2.217742297e-13 - Iteration (num., iy, grad. norm, func.) : 0 0 1.400133688e-11 2.190130776e-13 - Iteration (num., iy, grad. norm, func.) : 1 0 4.569917417e-10 1.398657411e-13 - Iteration (num., iy, grad. norm, func.) : 2 0 3.273418041e-10 9.582645418e-14 - Iteration (num., iy, grad. norm, func.) : 3 0 9.631253690e-11 2.487556028e-14 - Iteration (num., iy, grad. norm, func.) : 4 0 2.807786097e-11 1.154934069e-14 - Iteration (num., iy, grad. norm, func.) : 5 0 1.047652622e-11 9.424623014e-15 - Iteration (num., iy, grad. norm, func.) : 6 0 2.796406609e-12 8.629430946e-15 - Iteration (num., iy, grad. norm, func.) : 7 0 2.503249902e-12 8.611872251e-15 - Iteration (num., iy, grad. norm, func.) : 8 0 1.673758712e-12 8.544715841e-15 - Iteration (num., iy, grad. norm, func.) 
: 9 0 4.321920620e-13 8.467209450e-15 - Iteration (num., iy, grad. norm, func.) : 10 0 1.206983452e-13 8.455862293e-15 - Iteration (num., iy, grad. norm, func.) : 11 0 3.366638988e-14 8.453930122e-15 - Iteration (num., iy, grad. norm, func.) : 12 0 1.432594106e-14 8.453696373e-15 - Iteration (num., iy, grad. norm, func.) : 13 0 1.419395614e-14 8.453592635e-15 - Iteration (num., iy, grad. norm, func.) : 14 0 3.778253812e-15 8.453316574e-15 - Iteration (num., iy, grad. norm, func.) : 15 0 1.065786022e-15 8.453276042e-15 - Iteration (num., iy, grad. norm, func.) : 16 0 2.072128988e-15 8.453275135e-15 - Iteration (num., iy, grad. norm, func.) : 17 0 1.842351695e-16 8.453270514e-15 - Iteration (num., iy, grad. norm, func.) : 18 0 1.015886357e-16 8.453270452e-15 - Iteration (num., iy, grad. norm, func.) : 19 0 1.015887329e-16 8.453270452e-15 - Solving for output 0 - done. Time (sec): 0.1002092 - Solving nonlinear problem (n=100) - done. Time (sec): 0.1002092 - Solving for degrees of freedom - done. Time (sec): 0.1072829 - Training - done. Time (sec): 0.1072829 - ___________________________________________________________________________ - - Evaluation - - # eval points. : 500 - - Predicting ... - Predicting - done. Time (sec): 0.0000000 - - Prediction time/pt. (sec) : 0.0000000 - - -.. figure:: ex_1d_step.png - :scale: 80 % - :align: center - -RMTC ----- - -.. code-block:: python - - from smt.surrogate_models import RMTC - from smt.examples.one_D_step.one_D_step import get_one_d_step, plot_one_d_step - - xt, yt, xlimits = get_one_d_step() - - interp = RMTC( - num_elements=40, - xlimits=xlimits, - nonlinear_maxiter=20, - solver_tolerance=1e-16, - energy_weight=1e-14, - regularization_weight=0.0, - ) - interp.set_training_values(xt, yt) - interp.train() - - plot_one_d_step(xt, yt, xlimits, interp) - -:: - - ___________________________________________________________________________ - - RMTC - ___________________________________________________________________________ - - Problem size - - # training points. : 23 - - ___________________________________________________________________________ - - Training - - Training ... - Pre-computing matrices ... - Computing dof2coeff ... - Computing dof2coeff - done. Time (sec): 0.0000000 - Initializing Hessian ... - Initializing Hessian - done. Time (sec): 0.0000000 - Computing energy terms ... - Computing energy terms - done. Time (sec): 0.0000000 - Computing approximation terms ... - Computing approximation terms - done. Time (sec): 0.0000000 - Pre-computing matrices - done. Time (sec): 0.0000000 - Solving for degrees of freedom ... - Solving initial startup problem (n=82) ... - Solving for output 0 ... - Iteration (num., iy, grad. norm, func.) : 0 0 1.470849329e-01 8.436300000e-03 - Iteration (num., iy, grad. norm, func.) : 0 0 1.271524727e-11 2.493686417e-14 - Solving for output 0 - done. Time (sec): 0.0080578 - Solving initial startup problem (n=82) - done. Time (sec): 0.0080578 - Solving nonlinear problem (n=82) ... - Solving for output 0 ... - Iteration (num., iy, grad. norm, func.) : 0 0 7.484146458e-12 2.493686273e-14 - Iteration (num., iy, grad. norm, func.) : 0 0 9.032463140e-12 2.483319826e-14 - Iteration (num., iy, grad. norm, func.) : 1 0 8.723372989e-11 2.393675636e-14 - Iteration (num., iy, grad. norm, func.) : 2 0 4.783883236e-11 1.793850937e-14 - Iteration (num., iy, grad. norm, func.) : 3 0 4.678916694e-11 1.785317983e-14 - Iteration (num., iy, grad. norm, func.) : 4 0 1.297955451e-11 1.193038054e-14 - Iteration (num., iy, grad. norm, func.) 
: 5 0 3.942464065e-12 1.121509131e-14 - Iteration (num., iy, grad. norm, func.) : 6 0 8.384726431e-13 1.110564189e-14 - Iteration (num., iy, grad. norm, func.) : 7 0 2.581741267e-13 1.109374227e-14 - Iteration (num., iy, grad. norm, func.) : 8 0 7.635918060e-14 1.109026987e-14 - Iteration (num., iy, grad. norm, func.) : 9 0 2.106298788e-14 1.108953137e-14 - Iteration (num., iy, grad. norm, func.) : 10 0 5.042586986e-15 1.108941658e-14 - Iteration (num., iy, grad. norm, func.) : 11 0 8.730387249e-16 1.108940427e-14 - Iteration (num., iy, grad. norm, func.) : 12 0 1.188005043e-16 1.108940347e-14 - Iteration (num., iy, grad. norm, func.) : 13 0 2.828378041e-16 1.108940346e-14 - Iteration (num., iy, grad. norm, func.) : 14 0 2.828383946e-16 1.108940346e-14 - Iteration (num., iy, grad. norm, func.) : 15 0 2.828383946e-16 1.108940346e-14 - Iteration (num., iy, grad. norm, func.) : 16 0 2.828383946e-16 1.108940346e-14 - Iteration (num., iy, grad. norm, func.) : 17 0 2.828383946e-16 1.108940346e-14 - Iteration (num., iy, grad. norm, func.) : 18 0 2.828383946e-16 1.108940346e-14 - Iteration (num., iy, grad. norm, func.) : 19 0 2.828383946e-16 1.108940346e-14 - Solving for output 0 - done. Time (sec): 0.0748911 - Solving nonlinear problem (n=82) - done. Time (sec): 0.0748911 - Solving for degrees of freedom - done. Time (sec): 0.0829489 - Training - done. Time (sec): 0.0829489 - ___________________________________________________________________________ - - Evaluation - - # eval points. : 500 - - Predicting ... - Predicting - done. Time (sec): 0.0000000 - - Prediction time/pt. (sec) : 0.0000000 - - -.. figure:: ex_1d_step.png - :scale: 80 % - :align: center +1-D step-like data set +====================== + +.. code-block:: python + + import numpy as np + + + def get_one_d_step(): + xt = np.array( + [ + 0.0000, + 0.4000, + 0.6000, + 0.7000, + 0.7500, + 0.7750, + 0.8000, + 0.8500, + 0.8750, + 0.9000, + 0.9250, + 0.9500, + 0.9750, + 1.0000, + 1.0250, + 1.0500, + 1.1000, + 1.2000, + 1.3000, + 1.4000, + 1.6000, + 1.8000, + 2.0000, + ], + dtype=np.float64, + ) + yt = np.array( + [ + 0.0130, + 0.0130, + 0.0130, + 0.0130, + 0.0130, + 0.0130, + 0.0130, + 0.0132, + 0.0135, + 0.0140, + 0.0162, + 0.0230, + 0.0275, + 0.0310, + 0.0344, + 0.0366, + 0.0396, + 0.0410, + 0.0403, + 0.0390, + 0.0360, + 0.0350, + 0.0345, + ], + dtype=np.float64, + ) + + xlimits = np.array([[0.0, 2.0]]) + + return xt, yt, xlimits + + + def plot_one_d_step(xt, yt, limits, interp): + import numpy as np + import matplotlib + + matplotlib.use("Agg") + import matplotlib.pyplot as plt + + num = 500 + x = np.linspace(0.0, 2.0, num) + y = interp.predict_values(x)[:, 0] + + plt.plot(x, y) + plt.plot(xt, yt, "o") + plt.xlabel("x") + plt.ylabel("y") + plt.show() + + +RMTB +---- + +.. code-block:: python + + from smt.surrogate_models import RMTB + from smt.examples.one_D_step.one_D_step import get_one_d_step, plot_one_d_step + + xt, yt, xlimits = get_one_d_step() + + interp = RMTB( + num_ctrl_pts=100, + xlimits=xlimits, + nonlinear_maxiter=20, + solver_tolerance=1e-16, + energy_weight=1e-14, + regularization_weight=0.0, + ) + interp.set_training_values(xt, yt) + interp.train() + + plot_one_d_step(xt, yt, xlimits, interp) + +:: + + ___________________________________________________________________________ + + RMTB + ___________________________________________________________________________ + + Problem size + + # training points. : 23 + + ___________________________________________________________________________ + + Training + + Training ... 
+ Pre-computing matrices ... + Computing dof2coeff ... + Computing dof2coeff - done. Time (sec): 0.0000000 + Initializing Hessian ... + Initializing Hessian - done. Time (sec): 0.0000000 + Computing energy terms ... + Computing energy terms - done. Time (sec): 0.0019946 + Computing approximation terms ... + Computing approximation terms - done. Time (sec): 0.0000000 + Pre-computing matrices - done. Time (sec): 0.0019946 + Solving for degrees of freedom ... + Solving initial startup problem (n=100) ... + Solving for output 0 ... + Iteration (num., iy, grad. norm, func.) : 0 0 1.032652876e-01 8.436300000e-03 + Iteration (num., iy, grad. norm, func.) : 0 0 8.879126107e-10 2.217748918e-13 + Solving for output 0 - done. Time (sec): 0.0049868 + Solving initial startup problem (n=100) - done. Time (sec): 0.0049868 + Solving nonlinear problem (n=100) ... + Solving for output 0 ... + Iteration (num., iy, grad. norm, func.) : 0 0 1.552681635e-11 2.217739998e-13 + Iteration (num., iy, grad. norm, func.) : 0 0 1.400336329e-11 2.190101681e-13 + Iteration (num., iy, grad. norm, func.) : 1 0 4.614334315e-10 1.420996036e-13 + Iteration (num., iy, grad. norm, func.) : 2 0 3.386385427e-10 9.957696091e-14 + Iteration (num., iy, grad. norm, func.) : 3 0 9.965548477e-11 2.559472351e-14 + Iteration (num., iy, grad. norm, func.) : 4 0 3.028292057e-11 1.206316456e-14 + Iteration (num., iy, grad. norm, func.) : 5 0 8.399776244e-12 9.089294075e-15 + Iteration (num., iy, grad. norm, func.) : 6 0 2.074830115e-12 8.557170853e-15 + Iteration (num., iy, grad. norm, func.) : 7 0 3.817538099e-13 8.464326104e-15 + Iteration (num., iy, grad. norm, func.) : 8 0 2.920095323e-13 8.459776827e-15 + Iteration (num., iy, grad. norm, func.) : 9 0 7.721236909e-14 8.454758697e-15 + Iteration (num., iy, grad. norm, func.) : 10 0 2.417042968e-14 8.453876676e-15 + Iteration (num., iy, grad. norm, func.) : 11 0 2.213769117e-14 8.453844066e-15 + Iteration (num., iy, grad. norm, func.) : 12 0 7.168882034e-15 8.453415843e-15 + Iteration (num., iy, grad. norm, func.) : 13 0 1.322758762e-15 8.453276708e-15 + Iteration (num., iy, grad. norm, func.) : 14 0 1.086918935e-15 8.453275361e-15 + Iteration (num., iy, grad. norm, func.) : 15 0 9.562581577e-16 8.453275241e-15 + Iteration (num., iy, grad. norm, func.) : 16 0 8.144965006e-16 8.453273757e-15 + Iteration (num., iy, grad. norm, func.) : 17 0 6.113930019e-16 8.453272411e-15 + Iteration (num., iy, grad. norm, func.) : 18 0 1.911917523e-16 8.453270747e-15 + Iteration (num., iy, grad. norm, func.) : 19 0 1.931585657e-16 8.453270703e-15 + Solving for output 0 - done. Time (sec): 0.0997744 + Solving nonlinear problem (n=100) - done. Time (sec): 0.0997744 + Solving for degrees of freedom - done. Time (sec): 0.1047611 + Training - done. Time (sec): 0.1067557 + ___________________________________________________________________________ + + Evaluation + + # eval points. : 500 + + Predicting ... + Predicting - done. Time (sec): 0.0000000 + + Prediction time/pt. (sec) : 0.0000000 + + +.. figure:: ex_1d_step.png + :scale: 80 % + :align: center + +RMTC +---- + +.. 
code-block:: python + + from smt.surrogate_models import RMTC + from smt.examples.one_D_step.one_D_step import get_one_d_step, plot_one_d_step + + xt, yt, xlimits = get_one_d_step() + + interp = RMTC( + num_elements=40, + xlimits=xlimits, + nonlinear_maxiter=20, + solver_tolerance=1e-16, + energy_weight=1e-14, + regularization_weight=0.0, + ) + interp.set_training_values(xt, yt) + interp.train() + + plot_one_d_step(xt, yt, xlimits, interp) + +:: + + ___________________________________________________________________________ + + RMTC + ___________________________________________________________________________ + + Problem size + + # training points. : 23 + + ___________________________________________________________________________ + + Training + + Training ... + Pre-computing matrices ... + Computing dof2coeff ... + Computing dof2coeff - done. Time (sec): 0.0009971 + Initializing Hessian ... + Initializing Hessian - done. Time (sec): 0.0000000 + Computing energy terms ... + Computing energy terms - done. Time (sec): 0.0010304 + Computing approximation terms ... + Computing approximation terms - done. Time (sec): 0.0009646 + Pre-computing matrices - done. Time (sec): 0.0029922 + Solving for degrees of freedom ... + Solving initial startup problem (n=82) ... + Solving for output 0 ... + Iteration (num., iy, grad. norm, func.) : 0 0 1.470849329e-01 8.436300000e-03 + Iteration (num., iy, grad. norm, func.) : 0 0 4.031960886e-12 2.493686471e-14 + Solving for output 0 - done. Time (sec): 0.0050130 + Solving initial startup problem (n=82) - done. Time (sec): 0.0050130 + Solving nonlinear problem (n=82) ... + Solving for output 0 ... + Iteration (num., iy, grad. norm, func.) : 0 0 7.484146480e-12 2.493686331e-14 + Iteration (num., iy, grad. norm, func.) : 0 0 9.032461435e-12 2.483319871e-14 + Iteration (num., iy, grad. norm, func.) : 1 0 8.676384412e-11 2.388494401e-14 + Iteration (num., iy, grad. norm, func.) : 2 0 6.755496606e-11 1.951826538e-14 + Iteration (num., iy, grad. norm, func.) : 3 0 4.611359154e-11 1.521578957e-14 + Iteration (num., iy, grad. norm, func.) : 4 0 9.560517650e-12 1.149409997e-14 + Iteration (num., iy, grad. norm, func.) : 5 0 5.356117813e-12 1.129031598e-14 + Iteration (num., iy, grad. norm, func.) : 6 0 8.365028192e-13 1.110518657e-14 + Iteration (num., iy, grad. norm, func.) : 7 0 1.740075688e-13 1.109155907e-14 + Iteration (num., iy, grad. norm, func.) : 8 0 1.554156778e-13 1.109124706e-14 + Iteration (num., iy, grad. norm, func.) : 9 0 1.497358635e-13 1.109091009e-14 + Iteration (num., iy, grad. norm, func.) : 10 0 3.279226021e-14 1.108962650e-14 + Iteration (num., iy, grad. norm, func.) : 11 0 1.384472847e-14 1.108945872e-14 + Iteration (num., iy, grad. norm, func.) : 12 0 2.898607110e-15 1.108940698e-14 + Iteration (num., iy, grad. norm, func.) : 13 0 3.214675952e-16 1.108940346e-14 + Iteration (num., iy, grad. norm, func.) : 14 0 1.039690103e-17 1.108940340e-14 + Solving for output 0 - done. Time (sec): 0.0778186 + Solving nonlinear problem (n=82) - done. Time (sec): 0.0778186 + Solving for degrees of freedom - done. Time (sec): 0.0828316 + Training - done. Time (sec): 0.0858238 + ___________________________________________________________________________ + + Evaluation + + # eval points. : 500 + + Predicting ... + Predicting - done. Time (sec): 0.0009973 + + Prediction time/pt. (sec) : 0.0000020 + + +.. 
figure:: ex_1d_step.png + :scale: 80 % + :align: center diff --git a/doc/_src_docs/examples/rans_crm_wing/rans_crm_wing.png b/doc/_src_docs/examples/rans_crm_wing/rans_crm_wing.png index 4d6c93a9d..41ce1b640 100644 Binary files a/doc/_src_docs/examples/rans_crm_wing/rans_crm_wing.png and b/doc/_src_docs/examples/rans_crm_wing/rans_crm_wing.png differ diff --git a/doc/_src_docs/examples/rans_crm_wing/rans_crm_wing.rst b/doc/_src_docs/examples/rans_crm_wing/rans_crm_wing.rst index 07ee5f562..152cad7b0 100644 --- a/doc/_src_docs/examples/rans_crm_wing/rans_crm_wing.rst +++ b/doc/_src_docs/examples/rans_crm_wing/rans_crm_wing.rst @@ -1,1136 +1,1143 @@ -RANS CRM wing 2-D data set -========================== - -.. code-block:: python - - import numpy as np - - - raw = np.array( - [ - [ - 2.000000000000000000e00, - 4.500000000000000111e-01, - 1.536799999999999972e-02, - 3.674239999999999728e-01, - 5.592279999999999474e-01, - -1.258039999999999992e-01, - -1.248699999999999984e-02, - ], - [ - 3.500000000000000000e00, - 4.500000000000000111e-01, - 1.985100000000000059e-02, - 4.904470000000000218e-01, - 7.574600000000000222e-01, - -1.615260000000000029e-01, - 8.987000000000000197e-03, - ], - [ - 5.000000000000000000e00, - 4.500000000000000111e-01, - 2.571000000000000021e-02, - 6.109189999999999898e-01, - 9.497949999999999449e-01, - -1.954619999999999969e-01, - 4.090900000000000092e-02, - ], - [ - 6.500000000000000000e00, - 4.500000000000000111e-01, - 3.304200000000000192e-02, - 7.266120000000000356e-01, - 1.131138999999999895e00, - -2.255890000000000117e-01, - 8.185399999999999621e-02, - ], - [ - 8.000000000000000000e00, - 4.500000000000000111e-01, - 4.318999999999999923e-02, - 8.247250000000000414e-01, - 1.271487000000000034e00, - -2.397040000000000004e-01, - 1.217659999999999992e-01, - ], - [ - 0.000000000000000000e00, - 5.799999999999999600e-01, - 1.136200000000000057e-02, - 2.048760000000000026e-01, - 2.950280000000000125e-01, - -7.882100000000000217e-02, - -2.280099999999999835e-02, - ], - [ - 1.500000000000000000e00, - 5.799999999999999600e-01, - 1.426000000000000011e-02, - 3.375619999999999732e-01, - 5.114130000000000065e-01, - -1.189420000000000061e-01, - -1.588200000000000028e-02, - ], - [ - 3.000000000000000000e00, - 5.799999999999999600e-01, - 1.866400000000000003e-02, - 4.687450000000000228e-01, - 7.240400000000000169e-01, - -1.577669999999999906e-01, - 3.099999999999999891e-03, - ], - [ - 4.500000000000000000e00, - 5.799999999999999600e-01, - 2.461999999999999952e-02, - 5.976639999999999731e-01, - 9.311709999999999710e-01, - -1.944160000000000055e-01, - 3.357500000000000068e-02, - ], - [ - 6.000000000000000000e00, - 5.799999999999999600e-01, - 3.280700000000000283e-02, - 7.142249999999999988e-01, - 1.111707999999999918e00, - -2.205870000000000053e-01, - 7.151699999999999724e-02, - ], - [ - 0.000000000000000000e00, - 6.800000000000000488e-01, - 1.138800000000000055e-02, - 2.099310000000000065e-01, - 3.032230000000000203e-01, - -8.187899999999999345e-02, - -2.172699999999999979e-02, - ], - [ - 1.500000000000000000e00, - 6.800000000000000488e-01, - 1.458699999999999927e-02, - 3.518569999999999753e-01, - 5.356630000000000003e-01, - -1.257649999999999879e-01, - -1.444800000000000077e-02, - ], - [ - 3.000000000000000000e00, - 6.800000000000000488e-01, - 1.952800000000000022e-02, - 4.924879999999999813e-01, - 7.644769999999999621e-01, - -1.678040000000000087e-01, - 6.023999999999999841e-03, - ], - [ - 4.500000000000000000e00, - 6.800000000000000488e-01, - 2.666699999999999973e-02, - 
6.270339999999999803e-01, - 9.801630000000000065e-01, - -2.035240000000000105e-01, - 3.810000000000000192e-02, - ], - [ - 6.000000000000000000e00, - 6.800000000000000488e-01, - 3.891800000000000120e-02, - 7.172730000000000494e-01, - 1.097855999999999943e00, - -2.014620000000000022e-01, - 6.640000000000000069e-02, - ], - [ - 0.000000000000000000e00, - 7.500000000000000000e-01, - 1.150699999999999987e-02, - 2.149069999999999869e-01, - 3.115740000000000176e-01, - -8.498999999999999611e-02, - -2.057700000000000154e-02, - ], - [ - 1.250000000000000000e00, - 7.500000000000000000e-01, - 1.432600000000000019e-02, - 3.415969999999999840e-01, - 5.199390000000000400e-01, - -1.251009999999999900e-01, - -1.515400000000000080e-02, - ], - [ - 2.500000000000000000e00, - 7.500000000000000000e-01, - 1.856000000000000011e-02, - 4.677589999999999804e-01, - 7.262499999999999512e-01, - -1.635169999999999957e-01, - 3.989999999999999949e-04, - ], - [ - 3.750000000000000000e00, - 7.500000000000000000e-01, - 2.472399999999999945e-02, - 5.911459999999999493e-01, - 9.254930000000000101e-01, - -1.966150000000000120e-01, - 2.524900000000000061e-02, - ], - [ - 5.000000000000000000e00, - 7.500000000000000000e-01, - 3.506800000000000195e-02, - 7.047809999999999908e-01, - 1.097736000000000045e00, - -2.143069999999999975e-01, - 5.321300000000000335e-02, - ], - [ - 0.000000000000000000e00, - 8.000000000000000444e-01, - 1.168499999999999921e-02, - 2.196390000000000009e-01, - 3.197160000000000002e-01, - -8.798200000000000465e-02, - -1.926999999999999894e-02, - ], - [ - 1.250000000000000000e00, - 8.000000000000000444e-01, - 1.481599999999999931e-02, - 3.553939999999999877e-01, - 5.435950000000000504e-01, - -1.317419999999999980e-01, - -1.345599999999999921e-02, - ], - [ - 2.500000000000000000e00, - 8.000000000000000444e-01, - 1.968999999999999917e-02, - 4.918299999999999894e-01, - 7.669930000000000359e-01, - -1.728079999999999894e-01, - 3.756999999999999923e-03, - ], - [ - 3.750000000000000000e00, - 8.000000000000000444e-01, - 2.785599999999999882e-02, - 6.324319999999999942e-01, - 9.919249999999999456e-01, - -2.077100000000000057e-01, - 3.159800000000000109e-02, - ], - [ - 5.000000000000000000e00, - 8.000000000000000444e-01, - 4.394300000000000289e-02, - 7.650689999999999991e-01, - 1.188355999999999968e00, - -2.332680000000000031e-01, - 5.645000000000000018e-02, - ], - [ - 0.000000000000000000e00, - 8.299999999999999600e-01, - 1.186100000000000002e-02, - 2.232899999999999885e-01, - 3.261100000000000110e-01, - -9.028400000000000314e-02, - -1.806500000000000120e-02, - ], - [ - 1.000000000000000000e00, - 8.299999999999999600e-01, - 1.444900000000000004e-02, - 3.383419999999999761e-01, - 5.161710000000000464e-01, - -1.279530000000000112e-01, - -1.402400000000000001e-02, - ], - [ - 2.000000000000000000e00, - 8.299999999999999600e-01, - 1.836799999999999891e-02, - 4.554270000000000262e-01, - 7.082190000000000429e-01, - -1.642339999999999911e-01, - -1.793000000000000106e-03, - ], - [ - 3.000000000000000000e00, - 8.299999999999999600e-01, - 2.466899999999999996e-02, - 5.798410000000000508e-01, - 9.088819999999999677e-01, - -2.004589999999999983e-01, - 1.892900000000000138e-02, - ], - [ - 4.000000000000000000e00, - 8.299999999999999600e-01, - 3.700400000000000217e-02, - 7.012720000000000065e-01, - 1.097366000000000064e00, - -2.362420000000000075e-01, - 3.750699999999999867e-02, - ], - [ - 0.000000000000000000e00, - 8.599999999999999867e-01, - 1.224300000000000041e-02, - 2.278100000000000125e-01, - 3.342720000000000136e-01, - 
-9.307600000000000595e-02, - -1.608400000000000107e-02, - ], - [ - 1.000000000000000000e00, - 8.599999999999999867e-01, - 1.540700000000000056e-02, - 3.551839999999999997e-01, - 5.433130000000000459e-01, - -1.364730000000000110e-01, - -1.162200000000000039e-02, - ], - [ - 2.000000000000000000e00, - 8.599999999999999867e-01, - 2.122699999999999934e-02, - 4.854620000000000046e-01, - 7.552919999999999634e-01, - -1.817850000000000021e-01, - 1.070999999999999903e-03, - ], - [ - 3.000000000000000000e00, - 8.599999999999999867e-01, - 3.178899999999999781e-02, - 6.081849999999999756e-01, - 9.510380000000000500e-01, - -2.252020000000000133e-01, - 1.540799999999999982e-02, - ], - [ - 4.000000000000000000e00, - 8.599999999999999867e-01, - 4.744199999999999806e-02, - 6.846989999999999466e-01, - 1.042564000000000046e00, - -2.333600000000000119e-01, - 2.035400000000000056e-02, - ], - ] - ) - - - def get_rans_crm_wing(): - # data structure: - # alpha, mach, cd, cl, cmx, cmy, cmz - - deg2rad = np.pi / 180.0 - - xt = np.array(raw[:, 0:2]) - yt = np.array(raw[:, 2:4]) - xlimits = np.array([[-3.0, 10.0], [0.4, 0.90]]) - - xt[:, 0] *= deg2rad - xlimits[0, :] *= deg2rad - - return xt, yt, xlimits - - - def plot_rans_crm_wing(xt, yt, limits, interp): - import numpy as np - import matplotlib - - matplotlib.use("Agg") - import matplotlib.pyplot as plt - - rad2deg = 180.0 / np.pi - - num = 500 - num_a = 50 - num_M = 50 - - x = np.zeros((num, 2)) - colors = ["b", "g", "r", "c", "m", "k", "y"] - - nrow = 3 - ncol = 2 - - plt.close() - fig, axs = plt.subplots(3, 2, figsize=(15, 15)) - - # ----------------------------------------------------------------------------- - - mach_numbers = [0.45, 0.68, 0.80, 0.86] - legend_entries = [] - - alpha_sweep = np.linspace(0.0, 8.0, num) - - for ind, mach in enumerate(mach_numbers): - x[:, 0] = alpha_sweep / rad2deg - x[:, 1] = mach - CD = interp.predict_values(x)[:, 0] - CL = interp.predict_values(x)[:, 1] - - mask = np.abs(xt[:, 1] - mach) < 1e-10 - axs[0, 0].plot(xt[mask, 0] * rad2deg, yt[mask, 0], "o" + colors[ind]) - axs[0, 0].plot(alpha_sweep, CD, colors[ind]) - - mask = np.abs(xt[:, 1] - mach) < 1e-10 - axs[0, 1].plot(xt[mask, 0] * rad2deg, yt[mask, 1], "o" + colors[ind]) - axs[0, 1].plot(alpha_sweep, CL, colors[ind]) - - legend_entries.append("M={}".format(mach)) - legend_entries.append("exact") - - axs[0, 0].set(xlabel="alpha (deg)", ylabel="CD") - axs[0, 0].legend(legend_entries) - - axs[0, 1].set(xlabel="alpha (deg)", ylabel="CL") - axs[0, 1].legend(legend_entries) - - # ----------------------------------------------------------------------------- - - alphas = [2.0, 4.0, 6.0] - legend_entries = [] - - mach_sweep = np.linspace(0.45, 0.86, num) - - for ind, alpha in enumerate(alphas): - x[:, 0] = alpha / rad2deg - x[:, 1] = mach_sweep - CD = interp.predict_values(x)[:, 0] - CL = interp.predict_values(x)[:, 1] - - axs[1, 0].plot(mach_sweep, CD, colors[ind]) - axs[1, 1].plot(mach_sweep, CL, colors[ind]) - - legend_entries.append("alpha={}".format(alpha)) - - axs[1, 0].set(xlabel="Mach number", ylabel="CD") - axs[1, 0].legend(legend_entries) - - axs[1, 1].set(xlabel="Mach number", ylabel="CL") - axs[1, 1].legend(legend_entries) - - # ----------------------------------------------------------------------------- - - x = np.zeros((num_a, num_M, 2)) - x[:, :, 0] = np.outer(np.linspace(0.0, 8.0, num_a), np.ones(num_M)) / rad2deg - x[:, :, 1] = np.outer(np.ones(num_a), np.linspace(0.45, 0.86, num_M)) - CD = interp.predict_values(x.reshape((num_a * num_M, 2)))[:, 0].reshape( - 
(num_a, num_M) - ) - CL = interp.predict_values(x.reshape((num_a * num_M, 2)))[:, 1].reshape( - (num_a, num_M) - ) - - axs[2, 0].plot(xt[:, 1], xt[:, 0] * rad2deg, "o") - axs[2, 0].contour(x[:, :, 1], x[:, :, 0] * rad2deg, CD, 20) - pcm1 = axs[2, 0].pcolormesh( - x[:, :, 1], - x[:, :, 0] * rad2deg, - CD, - cmap=plt.get_cmap("rainbow"), - shading="auto", - ) - fig.colorbar(pcm1, ax=axs[2, 0]) - axs[2, 0].set(xlabel="Mach number", ylabel="alpha (deg)") - axs[2, 0].set_title("CD") - - axs[2, 1].plot(xt[:, 1], xt[:, 0] * rad2deg, "o") - axs[2, 1].contour(x[:, :, 1], x[:, :, 0] * rad2deg, CL, 20) - pcm2 = axs[2, 1].pcolormesh( - x[:, :, 1], - x[:, :, 0] * rad2deg, - CL, - cmap=plt.get_cmap("rainbow"), - shading="auto", - ) - fig.colorbar(pcm2, ax=axs[2, 1]) - axs[2, 1].set(xlabel="Mach number", ylabel="alpha (deg)") - axs[2, 1].set_title("CL") - - plt.show() - - -RMTB ----- - -.. code-block:: python - - from smt.surrogate_models import RMTB - from smt.examples.rans_crm_wing.rans_crm_wing import ( - get_rans_crm_wing, - plot_rans_crm_wing, - ) - - xt, yt, xlimits = get_rans_crm_wing() - - interp = RMTB( - num_ctrl_pts=20, xlimits=xlimits, nonlinear_maxiter=100, energy_weight=1e-12 - ) - interp.set_training_values(xt, yt) - interp.train() - - plot_rans_crm_wing(xt, yt, xlimits, interp) - -:: - - ___________________________________________________________________________ - - RMTB - ___________________________________________________________________________ - - Problem size - - # training points. : 35 - - ___________________________________________________________________________ - - Training - - Training ... - Pre-computing matrices ... - Computing dof2coeff ... - Computing dof2coeff - done. Time (sec): 0.0000029 - Initializing Hessian ... - Initializing Hessian - done. Time (sec): 0.0004156 - Computing energy terms ... - Computing energy terms - done. Time (sec): 0.0048113 - Computing approximation terms ... - Computing approximation terms - done. Time (sec): 0.0004985 - Pre-computing matrices - done. Time (sec): 0.0057843 - Solving for degrees of freedom ... - Solving initial startup problem (n=400) ... - Solving for output 0 ... - Iteration (num., iy, grad. norm, func.) : 0 0 9.429150220e-02 1.114942861e-02 - Iteration (num., iy, grad. norm, func.) : 0 0 2.918130789e-08 1.793051131e-10 - Solving for output 0 - done. Time (sec): 0.0118134 - Solving for output 1 ... - Iteration (num., iy, grad. norm, func.) : 0 1 1.955493282e+00 4.799845498e+00 - Iteration (num., iy, grad. norm, func.) : 0 1 5.170087671e-07 4.567684873e-08 - Solving for output 1 - done. Time (sec): 0.0118327 - Solving initial startup problem (n=400) - done. Time (sec): 0.0237074 - Solving nonlinear problem (n=400) ... - Solving for output 0 ... - Iteration (num., iy, grad. norm, func.) : 0 0 6.652554858e-09 1.793037268e-10 - Iteration (num., iy, grad. norm, func.) : 0 0 5.849425817e-09 1.703967811e-10 - Iteration (num., iy, grad. norm, func.) : 1 0 3.040889961e-08 1.037059796e-10 - Iteration (num., iy, grad. norm, func.) : 2 0 1.131130625e-08 2.516658925e-11 - Iteration (num., iy, grad. norm, func.) : 3 0 3.676281227e-09 1.068748129e-11 - Iteration (num., iy, grad. norm, func.) : 4 0 1.693073866e-09 8.718637681e-12 - Iteration (num., iy, grad. norm, func.) : 5 0 4.750369588e-10 7.256757966e-12 - Iteration (num., iy, grad. norm, func.) : 6 0 1.296763803e-10 6.493601670e-12 - Iteration (num., iy, grad. norm, func.) : 7 0 2.115784946e-11 6.259321776e-12 - Iteration (num., iy, grad. norm, func.) 
: 8 0 1.708994783e-11 6.257525829e-12 - Iteration (num., iy, grad. norm, func.) : 9 0 1.081396773e-11 6.257149274e-12 - Iteration (num., iy, grad. norm, func.) : 10 0 9.638642402e-12 6.257140895e-12 - Iteration (num., iy, grad. norm, func.) : 11 0 3.994179550e-12 6.256201039e-12 - Iteration (num., iy, grad. norm, func.) : 12 0 8.744785667e-13 6.255722389e-12 - Solving for output 0 - done. Time (sec): 0.1522686 - Solving for output 1 ... - Iteration (num., iy, grad. norm, func.) : 0 1 9.729143239e-08 4.567639526e-08 - Iteration (num., iy, grad. norm, func.) : 0 1 9.337808969e-08 4.538209916e-08 - Iteration (num., iy, grad. norm, func.) : 1 1 2.904323831e-06 3.250090785e-08 - Iteration (num., iy, grad. norm, func.) : 2 1 8.666318505e-07 4.690442846e-09 - Iteration (num., iy, grad. norm, func.) : 3 1 2.631077034e-07 1.907895208e-09 - Iteration (num., iy, grad. norm, func.) : 4 1 2.396633275e-07 1.714777771e-09 - Iteration (num., iy, grad. norm, func.) : 5 1 7.155568916e-08 5.427480540e-10 - Iteration (num., iy, grad. norm, func.) : 6 1 4.410601247e-08 4.618989947e-10 - Iteration (num., iy, grad. norm, func.) : 7 1 1.321312805e-08 4.212892044e-10 - Iteration (num., iy, grad. norm, func.) : 8 1 4.159525680e-09 3.621523966e-10 - Iteration (num., iy, grad. norm, func.) : 9 1 1.402444629e-09 3.033096767e-10 - Iteration (num., iy, grad. norm, func.) : 10 1 4.159734096e-10 2.740841148e-10 - Iteration (num., iy, grad. norm, func.) : 11 1 3.192171188e-10 2.719205528e-10 - Iteration (num., iy, grad. norm, func.) : 12 1 2.638366090e-10 2.718125940e-10 - Iteration (num., iy, grad. norm, func.) : 13 1 2.313891231e-10 2.718046144e-10 - Iteration (num., iy, grad. norm, func.) : 14 1 3.078632547e-10 2.717648760e-10 - Iteration (num., iy, grad. norm, func.) : 15 1 6.249570784e-11 2.715224474e-10 - Iteration (num., iy, grad. norm, func.) : 16 1 2.182565742e-11 2.713898108e-10 - Iteration (num., iy, grad. norm, func.) : 17 1 2.000325125e-11 2.713793312e-10 - Iteration (num., iy, grad. norm, func.) : 18 1 3.787829768e-11 2.713695682e-10 - Iteration (num., iy, grad. norm, func.) : 19 1 2.299532376e-11 2.713647656e-10 - Iteration (num., iy, grad. norm, func.) : 20 1 1.141532357e-11 2.713512118e-10 - Iteration (num., iy, grad. norm, func.) : 21 1 8.049776291e-12 2.713465706e-10 - Iteration (num., iy, grad. norm, func.) : 22 1 8.049707586e-12 2.713465706e-10 - Iteration (num., iy, grad. norm, func.) : 23 1 7.428044113e-12 2.713462233e-10 - Iteration (num., iy, grad. norm, func.) : 24 1 2.569718283e-12 2.713453914e-10 - Iteration (num., iy, grad. norm, func.) : 25 1 2.679080974e-12 2.713451341e-10 - Iteration (num., iy, grad. norm, func.) : 26 1 1.689724418e-12 2.713451284e-10 - Iteration (num., iy, grad. norm, func.) : 27 1 2.508507000e-12 2.713450420e-10 - Iteration (num., iy, grad. norm, func.) : 28 1 6.623578524e-13 2.713449735e-10 - Solving for output 1 - done. Time (sec): 0.3338523 - Solving nonlinear problem (n=400) - done. Time (sec): 0.4861753 - Solving for degrees of freedom - done. Time (sec): 0.5099332 - Training - done. Time (sec): 0.5160646 - ___________________________________________________________________________ - - Evaluation - - # eval points. : 500 - - Predicting ... - Predicting - done. Time (sec): 0.0005705 - - Prediction time/pt. (sec) : 0.0000011 - - ___________________________________________________________________________ - - Evaluation - - # eval points. : 500 - - Predicting ... - Predicting - done. Time (sec): 0.0005317 - - Prediction time/pt. 
(sec) : 0.0000011 - - ___________________________________________________________________________ - - Evaluation - - # eval points. : 500 - - Predicting ... - Predicting - done. Time (sec): 0.0006011 - - Prediction time/pt. (sec) : 0.0000012 - - ___________________________________________________________________________ - - Evaluation - - # eval points. : 500 - - Predicting ... - Predicting - done. Time (sec): 0.0005288 - - Prediction time/pt. (sec) : 0.0000011 - - ___________________________________________________________________________ - - Evaluation - - # eval points. : 500 - - Predicting ... - Predicting - done. Time (sec): 0.0005684 - - Prediction time/pt. (sec) : 0.0000011 - - ___________________________________________________________________________ - - Evaluation - - # eval points. : 500 - - Predicting ... - Predicting - done. Time (sec): 0.0005345 - - Prediction time/pt. (sec) : 0.0000011 - - ___________________________________________________________________________ - - Evaluation - - # eval points. : 500 - - Predicting ... - Predicting - done. Time (sec): 0.0005624 - - Prediction time/pt. (sec) : 0.0000011 - - ___________________________________________________________________________ - - Evaluation - - # eval points. : 500 - - Predicting ... - Predicting - done. Time (sec): 0.0005307 - - Prediction time/pt. (sec) : 0.0000011 - - ___________________________________________________________________________ - - Evaluation - - # eval points. : 500 - - Predicting ... - Predicting - done. Time (sec): 0.0005679 - - Prediction time/pt. (sec) : 0.0000011 - - ___________________________________________________________________________ - - Evaluation - - # eval points. : 500 - - Predicting ... - Predicting - done. Time (sec): 0.0005186 - - Prediction time/pt. (sec) : 0.0000010 - - ___________________________________________________________________________ - - Evaluation - - # eval points. : 500 - - Predicting ... - Predicting - done. Time (sec): 0.0005636 - - Prediction time/pt. (sec) : 0.0000011 - - ___________________________________________________________________________ - - Evaluation - - # eval points. : 500 - - Predicting ... - Predicting - done. Time (sec): 0.0005219 - - Prediction time/pt. (sec) : 0.0000010 - - ___________________________________________________________________________ - - Evaluation - - # eval points. : 500 - - Predicting ... - Predicting - done. Time (sec): 0.0005553 - - Prediction time/pt. (sec) : 0.0000011 - - ___________________________________________________________________________ - - Evaluation - - # eval points. : 500 - - Predicting ... - Predicting - done. Time (sec): 0.0005410 - - Prediction time/pt. (sec) : 0.0000011 - - ___________________________________________________________________________ - - Evaluation - - # eval points. : 2500 - - Predicting ... - Predicting - done. Time (sec): 0.0014277 - - Prediction time/pt. (sec) : 0.0000006 - - ___________________________________________________________________________ - - Evaluation - - # eval points. : 2500 - - Predicting ... - Predicting - done. Time (sec): 0.0013411 - - Prediction time/pt. (sec) : 0.0000005 - - -.. figure:: rans_crm_wing.png - :scale: 60 % - :align: center - -RMTC ----- - -.. 
code-block:: python - - from smt.surrogate_models import RMTC - from smt.examples.rans_crm_wing.rans_crm_wing import ( - get_rans_crm_wing, - plot_rans_crm_wing, - ) - - xt, yt, xlimits = get_rans_crm_wing() - - interp = RMTC( - num_elements=20, xlimits=xlimits, nonlinear_maxiter=100, energy_weight=1e-10 - ) - interp.set_training_values(xt, yt) - interp.train() - - plot_rans_crm_wing(xt, yt, xlimits, interp) - -:: - - ___________________________________________________________________________ - - RMTC - ___________________________________________________________________________ - - Problem size - - # training points. : 35 - - ___________________________________________________________________________ - - Training - - Training ... - Pre-computing matrices ... - Computing dof2coeff ... - Computing dof2coeff - done. Time (sec): 0.0031140 - Initializing Hessian ... - Initializing Hessian - done. Time (sec): 0.0003836 - Computing energy terms ... - Computing energy terms - done. Time (sec): 0.0130773 - Computing approximation terms ... - Computing approximation terms - done. Time (sec): 0.0009263 - Pre-computing matrices - done. Time (sec): 0.0175605 - Solving for degrees of freedom ... - Solving initial startup problem (n=1764) ... - Solving for output 0 ... - Iteration (num., iy, grad. norm, func.) : 0 0 1.279175539e-01 1.114942861e-02 - Iteration (num., iy, grad. norm, func.) : 0 0 1.499206775e-05 2.184788477e-08 - Solving for output 0 - done. Time (sec): 0.0277364 - Solving for output 1 ... - Iteration (num., iy, grad. norm, func.) : 0 1 2.653045755e+00 4.799845498e+00 - Iteration (num., iy, grad. norm, func.) : 0 1 2.441435303e-04 6.147506677e-06 - Solving for output 1 - done. Time (sec): 0.0274818 - Solving initial startup problem (n=1764) - done. Time (sec): 0.0552840 - Solving nonlinear problem (n=1764) ... - Solving for output 0 ... - Iteration (num., iy, grad. norm, func.) : 0 0 8.076561564e-07 2.166293139e-08 - Iteration (num., iy, grad. norm, func.) : 0 0 9.142058108e-07 1.723176225e-08 - Iteration (num., iy, grad. norm, func.) : 1 0 3.513935845e-07 3.252602993e-09 - Iteration (num., iy, grad. norm, func.) : 2 0 1.168214743e-07 1.046685700e-09 - Iteration (num., iy, grad. norm, func.) : 3 0 6.326509912e-08 5.331049254e-10 - Iteration (num., iy, grad. norm, func.) : 4 0 3.418239776e-08 4.111805755e-10 - Iteration (num., iy, grad. norm, func.) : 5 0 2.256295052e-08 3.761245235e-10 - Iteration (num., iy, grad. norm, func.) : 6 0 2.010448646e-08 3.714924187e-10 - Iteration (num., iy, grad. norm, func.) : 7 0 1.940154325e-08 3.710813074e-10 - Iteration (num., iy, grad. norm, func.) : 8 0 1.337930855e-08 3.606311245e-10 - Iteration (num., iy, grad. norm, func.) : 9 0 1.491946708e-08 3.420900947e-10 - Iteration (num., iy, grad. norm, func.) : 10 0 7.041005685e-09 3.069403043e-10 - Iteration (num., iy, grad. norm, func.) : 11 0 2.260877881e-09 2.895211131e-10 - Iteration (num., iy, grad. norm, func.) : 12 0 1.905111198e-09 2.891987822e-10 - Iteration (num., iy, grad. norm, func.) : 13 0 2.373905281e-09 2.889797702e-10 - Iteration (num., iy, grad. norm, func.) : 14 0 1.300032595e-09 2.879068517e-10 - Iteration (num., iy, grad. norm, func.) : 15 0 1.623847751e-09 2.871275978e-10 - Iteration (num., iy, grad. norm, func.) : 16 0 5.285589253e-10 2.868398442e-10 - Iteration (num., iy, grad. norm, func.) : 17 0 7.922712640e-10 2.868375443e-10 - Iteration (num., iy, grad. norm, func.) : 18 0 5.888225744e-10 2.868045063e-10 - Iteration (num., iy, grad. norm, func.) 
: 19 0 1.132370943e-09 2.867143548e-10 - Iteration (num., iy, grad. norm, func.) : 20 0 3.604109310e-10 2.866142544e-10 - Iteration (num., iy, grad. norm, func.) : 21 0 3.554534000e-10 2.866064690e-10 - Iteration (num., iy, grad. norm, func.) : 22 0 5.366990360e-10 2.866053737e-10 - Iteration (num., iy, grad. norm, func.) : 23 0 6.289675866e-10 2.865891836e-10 - Iteration (num., iy, grad. norm, func.) : 24 0 4.378955146e-10 2.865489267e-10 - Iteration (num., iy, grad. norm, func.) : 25 0 1.379311550e-10 2.865102796e-10 - Iteration (num., iy, grad. norm, func.) : 26 0 1.165470713e-10 2.865092134e-10 - Iteration (num., iy, grad. norm, func.) : 27 0 1.464146840e-10 2.865071791e-10 - Iteration (num., iy, grad. norm, func.) : 28 0 2.140809048e-10 2.865056805e-10 - Iteration (num., iy, grad. norm, func.) : 29 0 1.779605991e-10 2.865037521e-10 - Iteration (num., iy, grad. norm, func.) : 30 0 1.353338337e-10 2.865006425e-10 - Iteration (num., iy, grad. norm, func.) : 31 0 1.573426382e-10 2.865006039e-10 - Iteration (num., iy, grad. norm, func.) : 32 0 1.327424268e-10 2.864992021e-10 - Iteration (num., iy, grad. norm, func.) : 33 0 1.054787324e-10 2.864971098e-10 - Iteration (num., iy, grad. norm, func.) : 34 0 6.138718620e-11 2.864947948e-10 - Iteration (num., iy, grad. norm, func.) : 35 0 6.123819197e-11 2.864945453e-10 - Iteration (num., iy, grad. norm, func.) : 36 0 5.048750440e-11 2.864943135e-10 - Iteration (num., iy, grad. norm, func.) : 37 0 9.400031192e-11 2.864939967e-10 - Iteration (num., iy, grad. norm, func.) : 38 0 4.371914921e-11 2.864935099e-10 - Iteration (num., iy, grad. norm, func.) : 39 0 5.958027127e-11 2.864932592e-10 - Iteration (num., iy, grad. norm, func.) : 40 0 2.998739184e-11 2.864930129e-10 - Iteration (num., iy, grad. norm, func.) : 41 0 4.778589257e-11 2.864930058e-10 - Iteration (num., iy, grad. norm, func.) : 42 0 3.013371687e-11 2.864929464e-10 - Iteration (num., iy, grad. norm, func.) : 43 0 3.926946635e-11 2.864928617e-10 - Iteration (num., iy, grad. norm, func.) : 44 0 1.860388074e-11 2.864927282e-10 - Iteration (num., iy, grad. norm, func.) : 45 0 3.509312961e-11 2.864925126e-10 - Iteration (num., iy, grad. norm, func.) : 46 0 5.523999039e-12 2.864924653e-10 - Iteration (num., iy, grad. norm, func.) : 47 0 5.523975380e-12 2.864924653e-10 - Iteration (num., iy, grad. norm, func.) : 48 0 5.523975230e-12 2.864924653e-10 - Iteration (num., iy, grad. norm, func.) : 49 0 1.265174122e-11 2.864924601e-10 - Iteration (num., iy, grad. norm, func.) : 50 0 4.186351647e-12 2.864924394e-10 - Iteration (num., iy, grad. norm, func.) : 51 0 1.052835390e-11 2.864924247e-10 - Iteration (num., iy, grad. norm, func.) : 52 0 2.137695426e-12 2.864924196e-10 - Iteration (num., iy, grad. norm, func.) : 53 0 2.134309686e-12 2.864924196e-10 - Iteration (num., iy, grad. norm, func.) : 54 0 2.093960395e-12 2.864924196e-10 - Iteration (num., iy, grad. norm, func.) : 55 0 3.157577132e-12 2.864924188e-10 - Iteration (num., iy, grad. norm, func.) : 56 0 1.482896255e-12 2.864924177e-10 - Iteration (num., iy, grad. norm, func.) : 57 0 3.358318539e-12 2.864924169e-10 - Iteration (num., iy, grad. norm, func.) : 58 0 1.046351705e-12 2.864924160e-10 - Iteration (num., iy, grad. norm, func.) : 59 0 7.979468294e-13 2.864924160e-10 - Solving for output 0 - done. Time (sec): 1.5348873 - Solving for output 1 ... - Iteration (num., iy, grad. norm, func.) : 0 1 1.342743255e-05 6.111600925e-06 - Iteration (num., iy, grad. norm, func.) : 0 1 1.302307595e-05 5.880442538e-06 - Iteration (num., iy, grad. 
norm, func.) : 1 1 1.363985006e-05 7.781714954e-07 - Iteration (num., iy, grad. norm, func.) : 2 1 1.316050272e-05 2.746010755e-07 - Iteration (num., iy, grad. norm, func.) : 3 1 4.268967712e-06 1.033367573e-07 - Iteration (num., iy, grad. norm, func.) : 4 1 3.219792715e-06 6.703442433e-08 - Iteration (num., iy, grad. norm, func.) : 5 1 2.039238895e-06 4.211149536e-08 - Iteration (num., iy, grad. norm, func.) : 6 1 6.033889795e-07 2.515999581e-08 - Iteration (num., iy, grad. norm, func.) : 7 1 4.726317346e-07 2.471919997e-08 - Iteration (num., iy, grad. norm, func.) : 8 1 3.696022676e-07 2.449846324e-08 - Iteration (num., iy, grad. norm, func.) : 9 1 2.488052165e-07 2.070463385e-08 - Iteration (num., iy, grad. norm, func.) : 10 1 8.459610012e-08 1.660646850e-08 - Iteration (num., iy, grad. norm, func.) : 11 1 5.658202980e-08 1.500183477e-08 - Iteration (num., iy, grad. norm, func.) : 12 1 3.804107541e-08 1.481787787e-08 - Iteration (num., iy, grad. norm, func.) : 13 1 3.468856986e-08 1.480601481e-08 - Iteration (num., iy, grad. norm, func.) : 14 1 2.832164294e-08 1.477977984e-08 - Iteration (num., iy, grad. norm, func.) : 15 1 3.791982085e-08 1.464243575e-08 - Iteration (num., iy, grad. norm, func.) : 16 1 5.401835358e-09 1.448701556e-08 - Iteration (num., iy, grad. norm, func.) : 17 1 6.403115896e-09 1.448630627e-08 - Iteration (num., iy, grad. norm, func.) : 18 1 6.951789243e-09 1.448477870e-08 - Iteration (num., iy, grad. norm, func.) : 19 1 6.270760852e-09 1.448358882e-08 - Iteration (num., iy, grad. norm, func.) : 20 1 1.238561409e-08 1.447833451e-08 - Iteration (num., iy, grad. norm, func.) : 21 1 2.463512479e-09 1.446909735e-08 - Iteration (num., iy, grad. norm, func.) : 22 1 4.051882686e-09 1.446825445e-08 - Iteration (num., iy, grad. norm, func.) : 23 1 3.421367398e-09 1.446789229e-08 - Iteration (num., iy, grad. norm, func.) : 24 1 3.514206340e-09 1.446641231e-08 - Iteration (num., iy, grad. norm, func.) : 25 1 1.892268440e-09 1.446502108e-08 - Iteration (num., iy, grad. norm, func.) : 26 1 1.591286939e-09 1.446489562e-08 - Iteration (num., iy, grad. norm, func.) : 27 1 1.843657461e-09 1.446464627e-08 - Iteration (num., iy, grad. norm, func.) : 28 1 1.367852603e-09 1.446432383e-08 - Iteration (num., iy, grad. norm, func.) : 29 1 1.279521953e-09 1.446413608e-08 - Iteration (num., iy, grad. norm, func.) : 30 1 1.273827484e-09 1.446399608e-08 - Iteration (num., iy, grad. norm, func.) : 31 1 1.069805193e-09 1.446389193e-08 - Iteration (num., iy, grad. norm, func.) : 32 1 7.203325145e-10 1.446383478e-08 - Iteration (num., iy, grad. norm, func.) : 33 1 1.092512543e-09 1.446377574e-08 - Iteration (num., iy, grad. norm, func.) : 34 1 4.178765181e-10 1.446367228e-08 - Iteration (num., iy, grad. norm, func.) : 35 1 4.695642060e-10 1.446365013e-08 - Iteration (num., iy, grad. norm, func.) : 36 1 4.325042578e-10 1.446364130e-08 - Iteration (num., iy, grad. norm, func.) : 37 1 5.822577984e-10 1.446363420e-08 - Iteration (num., iy, grad. norm, func.) : 38 1 3.233691142e-10 1.446362135e-08 - Iteration (num., iy, grad. norm, func.) : 39 1 4.820930105e-10 1.446360509e-08 - Iteration (num., iy, grad. norm, func.) : 40 1 1.744254280e-10 1.446358617e-08 - Iteration (num., iy, grad. norm, func.) : 41 1 2.456939862e-10 1.446357670e-08 - Iteration (num., iy, grad. norm, func.) : 42 1 1.238702485e-10 1.446357147e-08 - Iteration (num., iy, grad. norm, func.) : 43 1 1.543359477e-10 1.446356991e-08 - Iteration (num., iy, grad. norm, func.) : 44 1 1.277822190e-10 1.446356861e-08 - Iteration (num., iy, grad. 
norm, func.) : 45 1 1.668908981e-10 1.446356678e-08 - Iteration (num., iy, grad. norm, func.) : 46 1 9.442876131e-11 1.446356489e-08 - Iteration (num., iy, grad. norm, func.) : 47 1 1.282415478e-10 1.446356272e-08 - Iteration (num., iy, grad. norm, func.) : 48 1 3.329918973e-11 1.446356055e-08 - Iteration (num., iy, grad. norm, func.) : 49 1 3.252153760e-11 1.446356044e-08 - Iteration (num., iy, grad. norm, func.) : 50 1 3.862246500e-11 1.446356030e-08 - Iteration (num., iy, grad. norm, func.) : 51 1 5.495626102e-11 1.446356009e-08 - Iteration (num., iy, grad. norm, func.) : 52 1 4.880391139e-11 1.446355966e-08 - Iteration (num., iy, grad. norm, func.) : 53 1 1.034429247e-11 1.446355931e-08 - Iteration (num., iy, grad. norm, func.) : 54 1 8.689390856e-12 1.446355931e-08 - Iteration (num., iy, grad. norm, func.) : 55 1 1.416075314e-11 1.446355930e-08 - Iteration (num., iy, grad. norm, func.) : 56 1 1.783053026e-11 1.446355927e-08 - Iteration (num., iy, grad. norm, func.) : 57 1 1.729488623e-11 1.446355926e-08 - Iteration (num., iy, grad. norm, func.) : 58 1 2.040960803e-11 1.446355923e-08 - Iteration (num., iy, grad. norm, func.) : 59 1 1.407883626e-11 1.446355921e-08 - Iteration (num., iy, grad. norm, func.) : 60 1 9.517528113e-12 1.446355919e-08 - Iteration (num., iy, grad. norm, func.) : 61 1 8.451941968e-12 1.446355917e-08 - Iteration (num., iy, grad. norm, func.) : 62 1 5.420196283e-12 1.446355916e-08 - Iteration (num., iy, grad. norm, func.) : 63 1 5.295256023e-12 1.446355916e-08 - Iteration (num., iy, grad. norm, func.) : 64 1 4.120626843e-12 1.446355916e-08 - Iteration (num., iy, grad. norm, func.) : 65 1 7.434819577e-12 1.446355915e-08 - Iteration (num., iy, grad. norm, func.) : 66 1 2.550829551e-12 1.446355915e-08 - Iteration (num., iy, grad. norm, func.) : 67 1 2.999511018e-12 1.446355915e-08 - Iteration (num., iy, grad. norm, func.) : 68 1 2.740742587e-12 1.446355915e-08 - Iteration (num., iy, grad. norm, func.) : 69 1 1.682896306e-12 1.446355915e-08 - Iteration (num., iy, grad. norm, func.) : 70 1 1.535618333e-12 1.446355915e-08 - Iteration (num., iy, grad. norm, func.) : 71 1 1.955032007e-12 1.446355915e-08 - Iteration (num., iy, grad. norm, func.) : 72 1 1.417431703e-12 1.446355915e-08 - Iteration (num., iy, grad. norm, func.) : 73 1 1.860560236e-12 1.446355915e-08 - Iteration (num., iy, grad. norm, func.) : 74 1 6.174723151e-13 1.446355915e-08 - Solving for output 1 - done. Time (sec): 1.8742981 - Solving nonlinear problem (n=1764) - done. Time (sec): 3.4092407 - Solving for degrees of freedom - done. Time (sec): 3.4645884 - Training - done. Time (sec): 3.4828000 - ___________________________________________________________________________ - - Evaluation - - # eval points. : 500 - - Predicting ... - Predicting - done. Time (sec): 0.0009661 - - Prediction time/pt. (sec) : 0.0000019 - - ___________________________________________________________________________ - - Evaluation - - # eval points. : 500 - - Predicting ... - Predicting - done. Time (sec): 0.0008864 - - Prediction time/pt. (sec) : 0.0000018 - - ___________________________________________________________________________ - - Evaluation - - # eval points. : 500 - - Predicting ... - Predicting - done. Time (sec): 0.0010107 - - Prediction time/pt. (sec) : 0.0000020 - - ___________________________________________________________________________ - - Evaluation - - # eval points. : 500 - - Predicting ... - Predicting - done. Time (sec): 0.0009747 - - Prediction time/pt. 
(sec) : 0.0000019 - - ___________________________________________________________________________ - - Evaluation - - # eval points. : 500 - - Predicting ... - Predicting - done. Time (sec): 0.0009539 - - Prediction time/pt. (sec) : 0.0000019 - - ___________________________________________________________________________ - - Evaluation - - # eval points. : 500 - - Predicting ... - Predicting - done. Time (sec): 0.0009573 - - Prediction time/pt. (sec) : 0.0000019 - - ___________________________________________________________________________ - - Evaluation - - # eval points. : 500 - - Predicting ... - Predicting - done. Time (sec): 0.0010092 - - Prediction time/pt. (sec) : 0.0000020 - - ___________________________________________________________________________ - - Evaluation - - # eval points. : 500 - - Predicting ... - Predicting - done. Time (sec): 0.0009434 - - Prediction time/pt. (sec) : 0.0000019 - - ___________________________________________________________________________ - - Evaluation - - # eval points. : 500 - - Predicting ... - Predicting - done. Time (sec): 0.0009990 - - Prediction time/pt. (sec) : 0.0000020 - - ___________________________________________________________________________ - - Evaluation - - # eval points. : 500 - - Predicting ... - Predicting - done. Time (sec): 0.0009410 - - Prediction time/pt. (sec) : 0.0000019 - - ___________________________________________________________________________ - - Evaluation - - # eval points. : 500 - - Predicting ... - Predicting - done. Time (sec): 0.0009904 - - Prediction time/pt. (sec) : 0.0000020 - - ___________________________________________________________________________ - - Evaluation - - # eval points. : 500 - - Predicting ... - Predicting - done. Time (sec): 0.0009341 - - Prediction time/pt. (sec) : 0.0000019 - - ___________________________________________________________________________ - - Evaluation - - # eval points. : 500 - - Predicting ... - Predicting - done. Time (sec): 0.0009837 - - Prediction time/pt. (sec) : 0.0000020 - - ___________________________________________________________________________ - - Evaluation - - # eval points. : 500 - - Predicting ... - Predicting - done. Time (sec): 0.0009243 - - Prediction time/pt. (sec) : 0.0000018 - - ___________________________________________________________________________ - - Evaluation - - # eval points. : 2500 - - Predicting ... - Predicting - done. Time (sec): 0.0036161 - - Prediction time/pt. (sec) : 0.0000014 - - ___________________________________________________________________________ - - Evaluation - - # eval points. : 2500 - - Predicting ... - Predicting - done. Time (sec): 0.0034854 - - Prediction time/pt. (sec) : 0.0000014 - - -.. figure:: rans_crm_wing.png - :scale: 60 % - :align: center +RANS CRM wing 2-D data set +========================== + +.. 
code-block:: python + + import numpy as np + + + raw = np.array( + [ + [ + 2.000000000000000000e00, + 4.500000000000000111e-01, + 1.536799999999999972e-02, + 3.674239999999999728e-01, + 5.592279999999999474e-01, + -1.258039999999999992e-01, + -1.248699999999999984e-02, + ], + [ + 3.500000000000000000e00, + 4.500000000000000111e-01, + 1.985100000000000059e-02, + 4.904470000000000218e-01, + 7.574600000000000222e-01, + -1.615260000000000029e-01, + 8.987000000000000197e-03, + ], + [ + 5.000000000000000000e00, + 4.500000000000000111e-01, + 2.571000000000000021e-02, + 6.109189999999999898e-01, + 9.497949999999999449e-01, + -1.954619999999999969e-01, + 4.090900000000000092e-02, + ], + [ + 6.500000000000000000e00, + 4.500000000000000111e-01, + 3.304200000000000192e-02, + 7.266120000000000356e-01, + 1.131138999999999895e00, + -2.255890000000000117e-01, + 8.185399999999999621e-02, + ], + [ + 8.000000000000000000e00, + 4.500000000000000111e-01, + 4.318999999999999923e-02, + 8.247250000000000414e-01, + 1.271487000000000034e00, + -2.397040000000000004e-01, + 1.217659999999999992e-01, + ], + [ + 0.000000000000000000e00, + 5.799999999999999600e-01, + 1.136200000000000057e-02, + 2.048760000000000026e-01, + 2.950280000000000125e-01, + -7.882100000000000217e-02, + -2.280099999999999835e-02, + ], + [ + 1.500000000000000000e00, + 5.799999999999999600e-01, + 1.426000000000000011e-02, + 3.375619999999999732e-01, + 5.114130000000000065e-01, + -1.189420000000000061e-01, + -1.588200000000000028e-02, + ], + [ + 3.000000000000000000e00, + 5.799999999999999600e-01, + 1.866400000000000003e-02, + 4.687450000000000228e-01, + 7.240400000000000169e-01, + -1.577669999999999906e-01, + 3.099999999999999891e-03, + ], + [ + 4.500000000000000000e00, + 5.799999999999999600e-01, + 2.461999999999999952e-02, + 5.976639999999999731e-01, + 9.311709999999999710e-01, + -1.944160000000000055e-01, + 3.357500000000000068e-02, + ], + [ + 6.000000000000000000e00, + 5.799999999999999600e-01, + 3.280700000000000283e-02, + 7.142249999999999988e-01, + 1.111707999999999918e00, + -2.205870000000000053e-01, + 7.151699999999999724e-02, + ], + [ + 0.000000000000000000e00, + 6.800000000000000488e-01, + 1.138800000000000055e-02, + 2.099310000000000065e-01, + 3.032230000000000203e-01, + -8.187899999999999345e-02, + -2.172699999999999979e-02, + ], + [ + 1.500000000000000000e00, + 6.800000000000000488e-01, + 1.458699999999999927e-02, + 3.518569999999999753e-01, + 5.356630000000000003e-01, + -1.257649999999999879e-01, + -1.444800000000000077e-02, + ], + [ + 3.000000000000000000e00, + 6.800000000000000488e-01, + 1.952800000000000022e-02, + 4.924879999999999813e-01, + 7.644769999999999621e-01, + -1.678040000000000087e-01, + 6.023999999999999841e-03, + ], + [ + 4.500000000000000000e00, + 6.800000000000000488e-01, + 2.666699999999999973e-02, + 6.270339999999999803e-01, + 9.801630000000000065e-01, + -2.035240000000000105e-01, + 3.810000000000000192e-02, + ], + [ + 6.000000000000000000e00, + 6.800000000000000488e-01, + 3.891800000000000120e-02, + 7.172730000000000494e-01, + 1.097855999999999943e00, + -2.014620000000000022e-01, + 6.640000000000000069e-02, + ], + [ + 0.000000000000000000e00, + 7.500000000000000000e-01, + 1.150699999999999987e-02, + 2.149069999999999869e-01, + 3.115740000000000176e-01, + -8.498999999999999611e-02, + -2.057700000000000154e-02, + ], + [ + 1.250000000000000000e00, + 7.500000000000000000e-01, + 1.432600000000000019e-02, + 3.415969999999999840e-01, + 5.199390000000000400e-01, + -1.251009999999999900e-01, + -1.515400000000000080e-02, + 
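+            # each row: alpha (deg), Mach, CD, CL, CMx, CMy, CMz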
], + [ + 2.500000000000000000e00, + 7.500000000000000000e-01, + 1.856000000000000011e-02, + 4.677589999999999804e-01, + 7.262499999999999512e-01, + -1.635169999999999957e-01, + 3.989999999999999949e-04, + ], + [ + 3.750000000000000000e00, + 7.500000000000000000e-01, + 2.472399999999999945e-02, + 5.911459999999999493e-01, + 9.254930000000000101e-01, + -1.966150000000000120e-01, + 2.524900000000000061e-02, + ], + [ + 5.000000000000000000e00, + 7.500000000000000000e-01, + 3.506800000000000195e-02, + 7.047809999999999908e-01, + 1.097736000000000045e00, + -2.143069999999999975e-01, + 5.321300000000000335e-02, + ], + [ + 0.000000000000000000e00, + 8.000000000000000444e-01, + 1.168499999999999921e-02, + 2.196390000000000009e-01, + 3.197160000000000002e-01, + -8.798200000000000465e-02, + -1.926999999999999894e-02, + ], + [ + 1.250000000000000000e00, + 8.000000000000000444e-01, + 1.481599999999999931e-02, + 3.553939999999999877e-01, + 5.435950000000000504e-01, + -1.317419999999999980e-01, + -1.345599999999999921e-02, + ], + [ + 2.500000000000000000e00, + 8.000000000000000444e-01, + 1.968999999999999917e-02, + 4.918299999999999894e-01, + 7.669930000000000359e-01, + -1.728079999999999894e-01, + 3.756999999999999923e-03, + ], + [ + 3.750000000000000000e00, + 8.000000000000000444e-01, + 2.785599999999999882e-02, + 6.324319999999999942e-01, + 9.919249999999999456e-01, + -2.077100000000000057e-01, + 3.159800000000000109e-02, + ], + [ + 5.000000000000000000e00, + 8.000000000000000444e-01, + 4.394300000000000289e-02, + 7.650689999999999991e-01, + 1.188355999999999968e00, + -2.332680000000000031e-01, + 5.645000000000000018e-02, + ], + [ + 0.000000000000000000e00, + 8.299999999999999600e-01, + 1.186100000000000002e-02, + 2.232899999999999885e-01, + 3.261100000000000110e-01, + -9.028400000000000314e-02, + -1.806500000000000120e-02, + ], + [ + 1.000000000000000000e00, + 8.299999999999999600e-01, + 1.444900000000000004e-02, + 3.383419999999999761e-01, + 5.161710000000000464e-01, + -1.279530000000000112e-01, + -1.402400000000000001e-02, + ], + [ + 2.000000000000000000e00, + 8.299999999999999600e-01, + 1.836799999999999891e-02, + 4.554270000000000262e-01, + 7.082190000000000429e-01, + -1.642339999999999911e-01, + -1.793000000000000106e-03, + ], + [ + 3.000000000000000000e00, + 8.299999999999999600e-01, + 2.466899999999999996e-02, + 5.798410000000000508e-01, + 9.088819999999999677e-01, + -2.004589999999999983e-01, + 1.892900000000000138e-02, + ], + [ + 4.000000000000000000e00, + 8.299999999999999600e-01, + 3.700400000000000217e-02, + 7.012720000000000065e-01, + 1.097366000000000064e00, + -2.362420000000000075e-01, + 3.750699999999999867e-02, + ], + [ + 0.000000000000000000e00, + 8.599999999999999867e-01, + 1.224300000000000041e-02, + 2.278100000000000125e-01, + 3.342720000000000136e-01, + -9.307600000000000595e-02, + -1.608400000000000107e-02, + ], + [ + 1.000000000000000000e00, + 8.599999999999999867e-01, + 1.540700000000000056e-02, + 3.551839999999999997e-01, + 5.433130000000000459e-01, + -1.364730000000000110e-01, + -1.162200000000000039e-02, + ], + [ + 2.000000000000000000e00, + 8.599999999999999867e-01, + 2.122699999999999934e-02, + 4.854620000000000046e-01, + 7.552919999999999634e-01, + -1.817850000000000021e-01, + 1.070999999999999903e-03, + ], + [ + 3.000000000000000000e00, + 8.599999999999999867e-01, + 3.178899999999999781e-02, + 6.081849999999999756e-01, + 9.510380000000000500e-01, + -2.252020000000000133e-01, + 1.540799999999999982e-02, + ], + [ + 4.000000000000000000e00, + 8.599999999999999867e-01, + 
4.744199999999999806e-02, + 6.846989999999999466e-01, + 1.042564000000000046e00, + -2.333600000000000119e-01, + 2.035400000000000056e-02, + ], + ] + ) + + + def get_rans_crm_wing(): + # data structure: + # alpha, mach, cd, cl, cmx, cmy, cmz + + deg2rad = np.pi / 180.0 + + xt = np.array(raw[:, 0:2]) + yt = np.array(raw[:, 2:4]) + xlimits = np.array([[-3.0, 10.0], [0.4, 0.90]]) + + xt[:, 0] *= deg2rad + xlimits[0, :] *= deg2rad + + return xt, yt, xlimits + + + def plot_rans_crm_wing(xt, yt, limits, interp): + import numpy as np + import matplotlib + + matplotlib.use("Agg") + import matplotlib.pyplot as plt + + rad2deg = 180.0 / np.pi + + num = 500 + num_a = 50 + num_M = 50 + + x = np.zeros((num, 2)) + colors = ["b", "g", "r", "c", "m", "k", "y"] + + nrow = 3 + ncol = 2 + + plt.close() + fig, axs = plt.subplots(3, 2, figsize=(15, 15)) + + # ----------------------------------------------------------------------------- + + mach_numbers = [0.45, 0.68, 0.80, 0.86] + legend_entries = [] + + alpha_sweep = np.linspace(0.0, 8.0, num) + + for ind, mach in enumerate(mach_numbers): + x[:, 0] = alpha_sweep / rad2deg + x[:, 1] = mach + CD = interp.predict_values(x)[:, 0] + CL = interp.predict_values(x)[:, 1] + + mask = np.abs(xt[:, 1] - mach) < 1e-10 + axs[0, 0].plot(xt[mask, 0] * rad2deg, yt[mask, 0], "o" + colors[ind]) + axs[0, 0].plot(alpha_sweep, CD, colors[ind]) + + mask = np.abs(xt[:, 1] - mach) < 1e-10 + axs[0, 1].plot(xt[mask, 0] * rad2deg, yt[mask, 1], "o" + colors[ind]) + axs[0, 1].plot(alpha_sweep, CL, colors[ind]) + + legend_entries.append("M={}".format(mach)) + legend_entries.append("exact") + + axs[0, 0].set(xlabel="alpha (deg)", ylabel="CD") + axs[0, 0].legend(legend_entries) + + axs[0, 1].set(xlabel="alpha (deg)", ylabel="CL") + axs[0, 1].legend(legend_entries) + + # ----------------------------------------------------------------------------- + + alphas = [2.0, 4.0, 6.0] + legend_entries = [] + + mach_sweep = np.linspace(0.45, 0.86, num) + + for ind, alpha in enumerate(alphas): + x[:, 0] = alpha / rad2deg + x[:, 1] = mach_sweep + CD = interp.predict_values(x)[:, 0] + CL = interp.predict_values(x)[:, 1] + + axs[1, 0].plot(mach_sweep, CD, colors[ind]) + axs[1, 1].plot(mach_sweep, CL, colors[ind]) + + legend_entries.append("alpha={}".format(alpha)) + + axs[1, 0].set(xlabel="Mach number", ylabel="CD") + axs[1, 0].legend(legend_entries) + + axs[1, 1].set(xlabel="Mach number", ylabel="CL") + axs[1, 1].legend(legend_entries) + + # ----------------------------------------------------------------------------- + + x = np.zeros((num_a, num_M, 2)) + x[:, :, 0] = np.outer(np.linspace(0.0, 8.0, num_a), np.ones(num_M)) / rad2deg + x[:, :, 1] = np.outer(np.ones(num_a), np.linspace(0.45, 0.86, num_M)) + CD = interp.predict_values(x.reshape((num_a * num_M, 2)))[:, 0].reshape( + (num_a, num_M) + ) + CL = interp.predict_values(x.reshape((num_a * num_M, 2)))[:, 1].reshape( + (num_a, num_M) + ) + + axs[2, 0].plot(xt[:, 1], xt[:, 0] * rad2deg, "o") + axs[2, 0].contour(x[:, :, 1], x[:, :, 0] * rad2deg, CD, 20) + pcm1 = axs[2, 0].pcolormesh( + x[:, :, 1], + x[:, :, 0] * rad2deg, + CD, + cmap=plt.get_cmap("rainbow"), + shading="auto", + ) + fig.colorbar(pcm1, ax=axs[2, 0]) + axs[2, 0].set(xlabel="Mach number", ylabel="alpha (deg)") + axs[2, 0].set_title("CD") + + axs[2, 1].plot(xt[:, 1], xt[:, 0] * rad2deg, "o") + axs[2, 1].contour(x[:, :, 1], x[:, :, 0] * rad2deg, CL, 20) + pcm2 = axs[2, 1].pcolormesh( + x[:, :, 1], + x[:, :, 0] * rad2deg, + CL, + cmap=plt.get_cmap("rainbow"), + shading="auto", + ) + 
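+        # attach a colorbar to the CL map and label the Mach/alpha axes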
fig.colorbar(pcm2, ax=axs[2, 1]) + axs[2, 1].set(xlabel="Mach number", ylabel="alpha (deg)") + axs[2, 1].set_title("CL") + + plt.show() + + +RMTB +---- + +.. code-block:: python + + from smt.surrogate_models import RMTB + from smt.examples.rans_crm_wing.rans_crm_wing import ( + get_rans_crm_wing, + plot_rans_crm_wing, + ) + + xt, yt, xlimits = get_rans_crm_wing() + + interp = RMTB( + num_ctrl_pts=20, xlimits=xlimits, nonlinear_maxiter=100, energy_weight=1e-12 + ) + interp.set_training_values(xt, yt) + interp.train() + + plot_rans_crm_wing(xt, yt, xlimits, interp) + +:: + + ___________________________________________________________________________ + + RMTB + ___________________________________________________________________________ + + Problem size + + # training points. : 35 + + ___________________________________________________________________________ + + Training + + Training ... + Pre-computing matrices ... + Computing dof2coeff ... + Computing dof2coeff - done. Time (sec): 0.0000000 + Initializing Hessian ... + Initializing Hessian - done. Time (sec): 0.0000000 + Computing energy terms ... + Computing energy terms - done. Time (sec): 0.0040228 + Computing approximation terms ... + Computing approximation terms - done. Time (sec): 0.0000000 + Pre-computing matrices - done. Time (sec): 0.0040228 + Solving for degrees of freedom ... + Solving initial startup problem (n=400) ... + Solving for output 0 ... + Iteration (num., iy, grad. norm, func.) : 0 0 9.429150220e-02 1.114942861e-02 + Iteration (num., iy, grad. norm, func.) : 0 0 1.143986917e-08 1.793039631e-10 + Solving for output 0 - done. Time (sec): 0.0069823 + Solving for output 1 ... + Iteration (num., iy, grad. norm, func.) : 0 1 1.955493282e+00 4.799845498e+00 + Iteration (num., iy, grad. norm, func.) : 0 1 2.384072909e-06 4.568551517e-08 + Solving for output 1 - done. Time (sec): 0.0069814 + Solving initial startup problem (n=400) - done. Time (sec): 0.0139637 + Solving nonlinear problem (n=400) ... + Solving for output 0 ... + Iteration (num., iy, grad. norm, func.) : 0 0 6.652690767e-09 1.793037175e-10 + Iteration (num., iy, grad. norm, func.) : 0 0 5.849579371e-09 1.703954904e-10 + Iteration (num., iy, grad. norm, func.) : 1 0 3.029765479e-08 1.034424518e-10 + Iteration (num., iy, grad. norm, func.) : 2 0 1.126327726e-08 2.505953287e-11 + Iteration (num., iy, grad. norm, func.) : 3 0 3.684480315e-09 1.065597406e-11 + Iteration (num., iy, grad. norm, func.) : 4 0 2.264648657e-09 9.297031284e-12 + Iteration (num., iy, grad. norm, func.) : 5 0 6.433274344e-10 7.375855307e-12 + Iteration (num., iy, grad. norm, func.) : 6 0 1.745403314e-10 6.524960110e-12 + Iteration (num., iy, grad. norm, func.) : 7 0 3.515164760e-11 6.261432455e-12 + Iteration (num., iy, grad. norm, func.) : 8 0 2.311171583e-11 6.261269938e-12 + Iteration (num., iy, grad. norm, func.) : 9 0 1.659125824e-11 6.260501115e-12 + Iteration (num., iy, grad. norm, func.) : 10 0 1.285972581e-11 6.260095232e-12 + Iteration (num., iy, grad. norm, func.) : 11 0 2.948840801e-12 6.256556241e-12 + Iteration (num., iy, grad. norm, func.) : 12 0 4.853416906e-13 6.255686534e-12 + Solving for output 0 - done. Time (sec): 0.0907333 + Solving for output 1 ... + Iteration (num., iy, grad. norm, func.) : 0 1 9.721474920e-08 4.567635024e-08 + Iteration (num., iy, grad. norm, func.) : 0 1 9.329075021e-08 4.538184815e-08 + Iteration (num., iy, grad. norm, func.) : 1 1 2.915771512e-06 3.263822593e-08 + Iteration (num., iy, grad. norm, func.) 
: 2 1 8.640091715e-07 4.653851041e-09 + Iteration (num., iy, grad. norm, func.) : 3 1 3.744485513e-07 2.548911362e-09 + Iteration (num., iy, grad. norm, func.) : 4 1 3.391955543e-07 2.376502583e-09 + Iteration (num., iy, grad. norm, func.) : 5 1 1.016715187e-07 7.621065834e-10 + Iteration (num., iy, grad. norm, func.) : 6 1 2.973196096e-08 5.068032616e-10 + Iteration (num., iy, grad. norm, func.) : 7 1 1.726322996e-08 4.692354715e-10 + Iteration (num., iy, grad. norm, func.) : 8 1 5.115932969e-09 3.869684142e-10 + Iteration (num., iy, grad. norm, func.) : 9 1 1.424825099e-09 2.978612739e-10 + Iteration (num., iy, grad. norm, func.) : 10 1 3.388061716e-10 2.720847561e-10 + Iteration (num., iy, grad. norm, func.) : 11 1 3.085067403e-10 2.720573550e-10 + Iteration (num., iy, grad. norm, func.) : 12 1 1.850842452e-10 2.719821212e-10 + Iteration (num., iy, grad. norm, func.) : 13 1 1.873073210e-10 2.717815229e-10 + Iteration (num., iy, grad. norm, func.) : 14 1 2.846101886e-11 2.714550183e-10 + Iteration (num., iy, grad. norm, func.) : 15 1 6.763872715e-11 2.714377475e-10 + Iteration (num., iy, grad. norm, func.) : 16 1 2.942258822e-11 2.714091442e-10 + Iteration (num., iy, grad. norm, func.) : 17 1 2.345315177e-11 2.713812224e-10 + Iteration (num., iy, grad. norm, func.) : 18 1 7.043230003e-11 2.713685462e-10 + Iteration (num., iy, grad. norm, func.) : 19 1 1.992995922e-11 2.713580756e-10 + Iteration (num., iy, grad. norm, func.) : 20 1 7.780956057e-12 2.713512268e-10 + Iteration (num., iy, grad. norm, func.) : 21 1 2.639523471e-11 2.713496139e-10 + Iteration (num., iy, grad. norm, func.) : 22 1 7.530467475e-12 2.713478995e-10 + Iteration (num., iy, grad. norm, func.) : 23 1 8.808167765e-12 2.713470106e-10 + Iteration (num., iy, grad. norm, func.) : 24 1 3.650499393e-12 2.713457227e-10 + Iteration (num., iy, grad. norm, func.) : 25 1 4.098006342e-12 2.713453909e-10 + Iteration (num., iy, grad. norm, func.) : 26 1 2.122843484e-12 2.713452860e-10 + Iteration (num., iy, grad. norm, func.) : 27 1 4.686426717e-12 2.713452133e-10 + Iteration (num., iy, grad. norm, func.) : 28 1 7.792791774e-13 2.713450380e-10 + Solving for output 1 - done. Time (sec): 0.2094588 + Solving nonlinear problem (n=400) - done. Time (sec): 0.3001921 + Solving for degrees of freedom - done. Time (sec): 0.3141558 + Training - done. Time (sec): 0.3181787 + ___________________________________________________________________________ + + Evaluation + + # eval points. : 500 + + Predicting ... + Predicting - done. Time (sec): 0.0009985 + + Prediction time/pt. (sec) : 0.0000020 + + ___________________________________________________________________________ + + Evaluation + + # eval points. : 500 + + Predicting ... + Predicting - done. Time (sec): 0.0009997 + + Prediction time/pt. (sec) : 0.0000020 + + ___________________________________________________________________________ + + Evaluation + + # eval points. : 500 + + Predicting ... + Predicting - done. Time (sec): 0.0000000 + + Prediction time/pt. (sec) : 0.0000000 + + ___________________________________________________________________________ + + Evaluation + + # eval points. : 500 + + Predicting ... + Predicting - done. Time (sec): 0.0000000 + + Prediction time/pt. (sec) : 0.0000000 + + ___________________________________________________________________________ + + Evaluation + + # eval points. : 500 + + Predicting ... + Predicting - done. Time (sec): 0.0009973 + + Prediction time/pt. 
(sec) : 0.0000020 + + ___________________________________________________________________________ + + Evaluation + + # eval points. : 500 + + Predicting ... + Predicting - done. Time (sec): 0.0000000 + + Prediction time/pt. (sec) : 0.0000000 + + ___________________________________________________________________________ + + Evaluation + + # eval points. : 500 + + Predicting ... + Predicting - done. Time (sec): 0.0000000 + + Prediction time/pt. (sec) : 0.0000000 + + ___________________________________________________________________________ + + Evaluation + + # eval points. : 500 + + Predicting ... + Predicting - done. Time (sec): 0.0009987 + + Prediction time/pt. (sec) : 0.0000020 + + ___________________________________________________________________________ + + Evaluation + + # eval points. : 500 + + Predicting ... + Predicting - done. Time (sec): 0.0000000 + + Prediction time/pt. (sec) : 0.0000000 + + ___________________________________________________________________________ + + Evaluation + + # eval points. : 500 + + Predicting ... + Predicting - done. Time (sec): 0.0009973 + + Prediction time/pt. (sec) : 0.0000020 + + ___________________________________________________________________________ + + Evaluation + + # eval points. : 500 + + Predicting ... + Predicting - done. Time (sec): 0.0000000 + + Prediction time/pt. (sec) : 0.0000000 + + ___________________________________________________________________________ + + Evaluation + + # eval points. : 500 + + Predicting ... + Predicting - done. Time (sec): 0.0009973 + + Prediction time/pt. (sec) : 0.0000020 + + ___________________________________________________________________________ + + Evaluation + + # eval points. : 500 + + Predicting ... + Predicting - done. Time (sec): 0.0000000 + + Prediction time/pt. (sec) : 0.0000000 + + ___________________________________________________________________________ + + Evaluation + + # eval points. : 500 + + Predicting ... + Predicting - done. Time (sec): 0.0000000 + + Prediction time/pt. (sec) : 0.0000000 + + ___________________________________________________________________________ + + Evaluation + + # eval points. : 2500 + + Predicting ... + Predicting - done. Time (sec): 0.0009975 + + Prediction time/pt. (sec) : 0.0000004 + + ___________________________________________________________________________ + + Evaluation + + # eval points. : 2500 + + Predicting ... + Predicting - done. Time (sec): 0.0009973 + + Prediction time/pt. (sec) : 0.0000004 + + +.. figure:: rans_crm_wing.png + :scale: 60 % + :align: center + +RMTC +---- + +.. code-block:: python + + from smt.surrogate_models import RMTC + from smt.examples.rans_crm_wing.rans_crm_wing import ( + get_rans_crm_wing, + plot_rans_crm_wing, + ) + + xt, yt, xlimits = get_rans_crm_wing() + + interp = RMTC( + num_elements=20, xlimits=xlimits, nonlinear_maxiter=100, energy_weight=1e-10 + ) + interp.set_training_values(xt, yt) + interp.train() + + plot_rans_crm_wing(xt, yt, xlimits, interp) + +:: + + ___________________________________________________________________________ + + RMTC + ___________________________________________________________________________ + + Problem size + + # training points. : 35 + + ___________________________________________________________________________ + + Training + + Training ... + Pre-computing matrices ... + Computing dof2coeff ... + Computing dof2coeff - done. Time (sec): 0.0029919 + Initializing Hessian ... + Initializing Hessian - done. Time (sec): 0.0000000 + Computing energy terms ... 
+      Computing energy terms - done. Time (sec): 0.0080123
+      Computing approximation terms ...
+      Computing approximation terms - done. Time (sec): 0.0010080
+   Pre-computing matrices - done. Time (sec): 0.0120122
+   Solving for degrees of freedom ...
+      Solving initial startup problem (n=1764) ...
+         Solving for output 0 ...
+            Iteration (num., iy, grad. norm, func.) :   0   0 1.279175539e-01 1.114942861e-02
+            Iteration (num., iy, grad. norm, func.) :   0   0 1.892260075e-05 2.158606140e-08
+         Solving for output 0 - done. Time (sec): 0.0159523
+         Solving for output 1 ...
+            Iteration (num., iy, grad. norm, func.) :   0   1 2.653045755e+00 4.799845498e+00
+            Iteration (num., iy, grad. norm, func.) :   0   1 2.577030681e-04 6.438878057e-06
+         Solving for output 1 - done. Time (sec): 0.0175278
+      Solving initial startup problem (n=1764) - done. Time (sec): 0.0334802
+      Solving nonlinear problem (n=1764) ...
+         Solving for output 0 ...
+            Iteration (num., iy, grad. norm, func.) :   0   0 7.702060163e-07 2.130719039e-08
+            [66 intermediate iterations omitted; grad. norm decreases from ~1e-07 to ~1e-12]
+            Iteration (num., iy, grad. norm, func.) :  66   0 9.729856732e-13 2.864924158e-10
+         Solving for output 0 - done. Time (sec): 1.1064093
+         Solving for output 1 ...
+            Iteration (num., iy, grad. norm, func.) :   0   1 1.314155074e-05 6.384202420e-06
+            [74 intermediate iterations omitted; grad. norm decreases from ~1e-05 to ~1e-12]
+            Iteration (num., iy, grad. norm, func.) :  74   1 9.847431611e-13 1.446355915e-08
+         Solving for output 1 - done. Time (sec): 1.2501106
+      Solving nonlinear problem (n=1764) - done. Time (sec): 2.3565199
+   Solving for degrees of freedom - done. Time (sec): 2.3900001
+   Training - done. Time (sec): 2.4030080
+   ___________________________________________________________________________
+
+    Evaluation
+
+       # eval points. : 500
+
+       Predicting ...
+       Predicting - done. Time (sec): 0.0009954
+
+       Prediction time/pt. (sec) :  0.0000020
+
+   [13 further identical 500-point evaluation blocks omitted; prediction times range from 0.0000000 to 0.0010078 sec]
+   ___________________________________________________________________________
+
+    Evaluation
+
+       # eval points. : 2500
+
+       Predicting ...
+       Predicting - done. Time (sec): 0.0009947
+
+       Prediction time/pt. (sec) :  0.0000004
+
+   ___________________________________________________________________________
+
+    Evaluation
+
+       # eval points. : 2500
+
+       Predicting ...
+       Predicting - done. Time (sec): 0.0019960
+
+       Prediction time/pt. (sec) :  0.0000008
+
+
+.. figure:: rans_crm_wing.png
+   :scale: 60 %
+   :align: center
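+
+The log above comes from training an RMT-family surrogate on the RANS CRM wing data set. A minimal sketch of such a run is given below; the data helper and the option values are illustrative assumptions, not necessarily the exact settings behind this output.
+
+.. code-block:: python
+
+    import numpy as np
+
+    from smt.surrogate_models import RMTB
+    from smt.examples.rans_crm_wing.rans_crm_wing import get_rans_crm_wing
+
+    # Training data: (alpha, Mach) inputs and (C_D, C_L) outputs.
+    xt, yt, xlimits = get_rans_crm_wing()
+
+    # Illustrative option values for a B-spline-based RMT surrogate.
+    interp = RMTB(
+        num_ctrl_pts=20,
+        xlimits=xlimits,
+        nonlinear_maxiter=100,
+        energy_weight=1e-12,
+    )
+    interp.set_training_values(xt, yt)
+    interp.train()
+
+    # Evaluate on a sweep over the first input at a fixed Mach number.
+    num = 500
+    x = np.zeros((num, 2))
+    x[:, 0] = np.linspace(xlimits[0, 0], xlimits[0, 1], num)
+    x[:, 1] = 0.5 * (xlimits[1, 0] + xlimits[1, 1])
+    y = interp.predict_values(x)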
diff --git a/doc/_src_docs/problems/branin.rst b/doc/_src_docs/problems/branin.rst
index 2e1e2edcb..c54703cf9 100644
--- a/doc/_src_docs/problems/branin.rst
+++ b/doc/_src_docs/problems/branin.rst
@@ -1,85 +1,85 @@
-Branin function
-===================
-
-.. math ::
-   f(x) = (x_2 - \frac{5.1}{4\pi^2}x_1^{2} + \frac{5}{\pi}x_1 - 6)^2 + 10(1-\frac{1}{8\pi})\cos(x_1) + 10,
-
-where
-:math:`x = (x_1, x_2)`
-with
-:math:`-5\leq x_1 \leq 10, 0 \leq x_2 \leq 15`
-
-The Branin function has three global minima:
-
-:math:`f(x^*) = 0.397887`,
-at
-:math:`x^* = (-\pi, 12.275), (\pi, 2.275)` and :math:`(9.42478, 2.475)`
-
-Usage
------
-
-.. code-block:: python
-
-    import numpy as np
-    import matplotlib.pyplot as plt
-
-    from smt.problems import Branin
-
-    ndim = 2
-    problem = Branin(ndim=ndim)
-
-    num = 100
-    x = np.ones((num, ndim))
-    x[:, 0] = np.linspace(-5.0, 10.0, num)
-    x[:, 1] = np.linspace(0.0, 15.0, num)
-    y = problem(x)
-
-    yd = np.empty((num, ndim))
-    for i in range(ndim):
-        yd[:, i] = problem(x, kx=i).flatten()
-
-    print(y.shape)
-    print(yd.shape)
-
-    plt.plot(x[:, 0], y[:, 0])
-    plt.xlabel("x")
-    plt.ylabel("y")
-    plt.show()
-
-::
-
-   (100, 1)
-   (100, 2)
-
-.. figure:: branin_Test_test_branin.png
-   :scale: 80 %
-   :align: center
-
-Options
--------
-
-.. list-table:: List of options
-   :header-rows: 1
-   :widths: 15, 10, 20, 20, 30
-   :stub-columns: 0
-
-   * - Option
-     - Default
-     - Acceptable values
-     - Acceptable types
-     - Description
-   * - ndim
-     - 2
-     - [2]
-     - ['int']
-     -
-   * - return_complex
-     - False
-     - None
-     - ['bool']
-     -
-   * - name
-     - Branin
-     - None
-     - ['str']
-     -
+Branin function
+===================
+
+.. math ::
+   f(x) = (x_2 - \frac{5.1}{4\pi^2}x_1^{2} + \frac{5}{\pi}x_1 - 6)^2 + 10(1-\frac{1}{8\pi})\cos(x_1) + 10,
+
+where
+:math:`x = (x_1, x_2)`
+with
+:math:`-5\leq x_1 \leq 10, 0 \leq x_2 \leq 15`
+
+The Branin function has three global minima:
+
+:math:`f(x^*) = 0.397887`,
+at
+:math:`x^* = (-\pi, 12.275), (\pi, 2.275)` and :math:`(9.42478, 2.475)`
+
+Usage
+-----
+
+.. code-block:: python
+
+    import numpy as np
+    import matplotlib.pyplot as plt
+
+    from smt.problems import Branin
+
+    ndim = 2
+    problem = Branin(ndim=ndim)
+
+    num = 100
+    x = np.ones((num, ndim))
+    x[:, 0] = np.linspace(-5.0, 10.0, num)
+    x[:, 1] = np.linspace(0.0, 15.0, num)
+    y = problem(x)
+
+    yd = np.empty((num, ndim))
+    for i in range(ndim):
+        yd[:, i] = problem(x, kx=i).flatten()
+
+    print(y.shape)
+    print(yd.shape)
+
+    plt.plot(x[:, 0], y[:, 0])
+    plt.xlabel("x")
+    plt.ylabel("y")
+    plt.show()
+
+::
+
+   (100, 1)
+   (100, 2)
+
+.. figure:: branin_Test_test_branin.png
+   :scale: 80 %
+   :align: center
+
+Options
+-------
+
+.. list-table:: List of options
+   :header-rows: 1
+   :widths: 15, 10, 20, 20, 30
+   :stub-columns: 0
+
+   * - Option
+     - Default
+     - Acceptable values
+     - Acceptable types
+     - Description
+   * - ndim
+     - 2
+     - [2]
+     - ['int']
+     -
+   * - return_complex
+     - False
+     - None
+     - ['bool']
+     -
+   * - name
+     - Branin
+     - None
+     - ['str']
+     -
diff --git a/doc/_src_docs/problems/branin_Test_test_branin.png b/doc/_src_docs/problems/branin_Test_test_branin.png
index 1821f2384..0dbc106f0 100644
Binary files a/doc/_src_docs/problems/branin_Test_test_branin.png and b/doc/_src_docs/problems/branin_Test_test_branin.png differ
diff --git a/doc/_src_docs/problems/cantileverbeam.rst b/doc/_src_docs/problems/cantileverbeam.rst
index 9cdda8f4f..dc5b7841b 100644
--- a/doc/_src_docs/problems/cantileverbeam.rst
+++ b/doc/_src_docs/problems/cantileverbeam.rst
@@ -1,87 +1,87 @@
-Cantilever beam function
-========================
-
-.. math ::
-   \frac{50}{600}\sum\limits_{i=1}^{17}\left[\frac{12}{b_ih_i^3}\left(\left(\sum\limits_{j=i}^{17}l_j\right)^3-\left(\sum\limits_{j=i+1}^{17}l_j\right)^3\right)\right],
-
-:math:`b_i\in [0.01,0.05], \quad h_i\in[0.3,0.65], \quad l_i\in[0.5,1].`
-
-Usage
------
-
-..
code-block:: python - - import numpy as np - import matplotlib.pyplot as plt - - from smt.problems import CantileverBeam - - ndim = 3 - problem = CantileverBeam(ndim=ndim) - - num = 100 - x = np.ones((num, ndim)) - x[:, 0] = np.linspace(0.01, 0.05, num) - x[:, 1] = 0.5 - x[:, 2] = 0.5 - y = problem(x) - - yd = np.empty((num, ndim)) - for i in range(ndim): - yd[:, i] = problem(x, kx=i).flatten() - - print(y.shape) - print(yd.shape) - - plt.plot(x[:, 0], y[:, 0]) - plt.xlabel("x") - plt.ylabel("y") - plt.show() - -:: - - (100, 1) - (100, 3) - -.. figure:: cantileverbeam_Test_test_cantilever_beam.png - :scale: 80 % - :align: center - -Options -------- - -.. list-table:: List of options - :header-rows: 1 - :widths: 15, 10, 20, 20, 30 - :stub-columns: 0 - - * - Option - - Default - - Acceptable values - - Acceptable types - - Description - * - ndim - - 3 - - None - - ['int'] - - - * - return_complex - - False - - None - - ['bool'] - - - * - name - - CantileverBeam - - None - - ['str'] - - - * - P - - 50000.0 - - None - - ['int', 'float'] - - Tip load (50 kN) - * - E - - 200000000000.0 - - None - - ['int', 'float'] - - Modulus of elast. (200 GPa) +Cantilever beam function +======================== + +.. math :: + \frac{50}{600}\sum\limits_{i=1}^{17}\left[\frac{12}{b_ih_i^3}\left(\left(\sum\limits_{j=i}^{17}l_j\right)^3-\left(\sum\limits_{j=i+1}^{17}l_j\right)^3\right)\right], + +:math:`b_i\in [0.01,0.05], \quad h_i\in[0.3,0.65], \quad l_i\in[0.5,1].` + +Usage +----- + +.. code-block:: python + + import numpy as np + import matplotlib.pyplot as plt + + from smt.problems import CantileverBeam + + ndim = 3 + problem = CantileverBeam(ndim=ndim) + + num = 100 + x = np.ones((num, ndim)) + x[:, 0] = np.linspace(0.01, 0.05, num) + x[:, 1] = 0.5 + x[:, 2] = 0.5 + y = problem(x) + + yd = np.empty((num, ndim)) + for i in range(ndim): + yd[:, i] = problem(x, kx=i).flatten() + + print(y.shape) + print(yd.shape) + + plt.plot(x[:, 0], y[:, 0]) + plt.xlabel("x") + plt.ylabel("y") + plt.show() + +:: + + (100, 1) + (100, 3) + +.. figure:: cantileverbeam_Test_test_cantilever_beam.png + :scale: 80 % + :align: center + +Options +------- + +.. list-table:: List of options + :header-rows: 1 + :widths: 15, 10, 20, 20, 30 + :stub-columns: 0 + + * - Option + - Default + - Acceptable values + - Acceptable types + - Description + * - ndim + - 3 + - None + - ['int'] + - + * - return_complex + - False + - None + - ['bool'] + - + * - name + - CantileverBeam + - None + - ['str'] + - + * - P + - 50000.0 + - None + - ['int', 'float'] + - Tip load (50 kN) + * - E + - 200000000000.0 + - None + - ['int', 'float'] + - Modulus of elast. (200 GPa) diff --git a/doc/_src_docs/problems/cantileverbeam_Test_test_cantilever_beam.png b/doc/_src_docs/problems/cantileverbeam_Test_test_cantilever_beam.png index d0dff137d..3630ff1e2 100644 Binary files a/doc/_src_docs/problems/cantileverbeam_Test_test_cantilever_beam.png and b/doc/_src_docs/problems/cantileverbeam_Test_test_cantilever_beam.png differ diff --git a/doc/_src_docs/problems/lp_norm.rst b/doc/_src_docs/problems/lp_norm.rst index 521d4eef9..aa110531e 100644 --- a/doc/_src_docs/problems/lp_norm.rst +++ b/doc/_src_docs/problems/lp_norm.rst @@ -1,79 +1,79 @@ -Lp norm function -=================== - -.. math :: - f(x) = \left \| x \right \|_p = \sqrt[p]{\sum\limits_{i}^{nx}\left | x_i \right |^p}, - -Usage ------ - -.. 
code-block:: python - - import numpy as np - import matplotlib.pyplot as plt - - from smt.problems import LpNorm - - ndim = 2 - problem = LpNorm(ndim=ndim, order=2) - - num = 100 - x = np.ones((num, ndim)) - x[:, 0] = np.linspace(-1.0, 1.0, num) - x[:, 1] = np.linspace(-1.0, 1.0, num) - y = problem(x) - - yd = np.empty((num, ndim)) - for i in range(ndim): - yd[:, i] = problem(x, kx=i).flatten() - - print(y.shape) - print(yd.shape) - - plt.plot(x[:, 0], y[:, 0]) - plt.xlabel("x") - plt.ylabel("y") - plt.show() - -:: - - (100, 1) - (100, 2) - -.. figure:: lp_norm_Test_test_lp_norm.png - :scale: 80 % - :align: center - -Options -------- - -.. list-table:: List of options - :header-rows: 1 - :widths: 15, 10, 20, 20, 30 - :stub-columns: 0 - - * - Option - - Default - - Acceptable values - - Acceptable types - - Description - * - ndim - - 1 - - None - - ['int'] - - - * - return_complex - - False - - None - - ['bool'] - - - * - order - - 2 - - None - - ['int'] - - - * - name - - LpNorm - - None - - ['str'] - - +Lp norm function +=================== + +.. math :: + f(x) = \left \| x \right \|_p = \sqrt[p]{\sum\limits_{i}^{nx}\left | x_i \right |^p}, + +Usage +----- + +.. code-block:: python + + import numpy as np + import matplotlib.pyplot as plt + + from smt.problems import LpNorm + + ndim = 2 + problem = LpNorm(ndim=ndim, order=2) + + num = 100 + x = np.ones((num, ndim)) + x[:, 0] = np.linspace(-1.0, 1.0, num) + x[:, 1] = np.linspace(-1.0, 1.0, num) + y = problem(x) + + yd = np.empty((num, ndim)) + for i in range(ndim): + yd[:, i] = problem(x, kx=i).flatten() + + print(y.shape) + print(yd.shape) + + plt.plot(x[:, 0], y[:, 0]) + plt.xlabel("x") + plt.ylabel("y") + plt.show() + +:: + + (100, 1) + (100, 2) + +.. figure:: lp_norm_Test_test_lp_norm.png + :scale: 80 % + :align: center + +Options +------- + +.. list-table:: List of options + :header-rows: 1 + :widths: 15, 10, 20, 20, 30 + :stub-columns: 0 + + * - Option + - Default + - Acceptable values + - Acceptable types + - Description + * - ndim + - 1 + - None + - ['int'] + - + * - return_complex + - False + - None + - ['bool'] + - + * - order + - 2 + - None + - ['int'] + - + * - name + - LpNorm + - None + - ['str'] + - diff --git a/doc/_src_docs/problems/lp_norm_Test_test_lp_norm.png b/doc/_src_docs/problems/lp_norm_Test_test_lp_norm.png index fbbb2b282..4f678ac53 100644 Binary files a/doc/_src_docs/problems/lp_norm_Test_test_lp_norm.png and b/doc/_src_docs/problems/lp_norm_Test_test_lp_norm.png differ diff --git a/doc/_src_docs/problems/robotarm.rst b/doc/_src_docs/problems/robotarm.rst index 3a4bde674..060c00f29 100644 --- a/doc/_src_docs/problems/robotarm.rst +++ b/doc/_src_docs/problems/robotarm.rst @@ -1,75 +1,75 @@ -Robot arm function -================== - -.. math :: - \sqrt{\left(\sum\limits_{i=1}^4L_i\cos\left(\sum\limits_{j=1}^i\theta_j\right)\right)^2+\left(\sum\limits_{i=1}^4L_i\sin\left(\sum\limits_{j=1}^i\theta_j\right) - \right)^2},\quad L_i \in [0,1] \quad \text{for}\quad i=1,\dotsc,4 \quad \text{and}\quad \theta_j \in [0,2\pi]\quad\text{for}\quad j=1,\dotsc,4. - -Usage ------ - -.. 
code-block:: python - - import numpy as np - import matplotlib.pyplot as plt - - from smt.problems import RobotArm - - ndim = 2 - problem = RobotArm(ndim=ndim) - - num = 100 - x = np.ones((num, ndim)) - x[:, 0] = np.linspace(0.0, 1.0, num) - x[:, 1] = np.pi - y = problem(x) - - yd = np.empty((num, ndim)) - for i in range(ndim): - yd[:, i] = problem(x, kx=i).flatten() - - print(y.shape) - print(yd.shape) - - plt.plot(x[:, 0], y[:, 0]) - plt.xlabel("x") - plt.ylabel("y") - plt.show() - -:: - - (100, 1) - (100, 2) - -.. figure:: robotarm_Test_test_robot_arm.png - :scale: 80 % - :align: center - -Options -------- - -.. list-table:: List of options - :header-rows: 1 - :widths: 15, 10, 20, 20, 30 - :stub-columns: 0 - - * - Option - - Default - - Acceptable values - - Acceptable types - - Description - * - ndim - - 2 - - None - - ['int'] - - - * - return_complex - - False - - None - - ['bool'] - - - * - name - - RobotArm - - None - - ['str'] - - +Robot arm function +================== + +.. math :: + \sqrt{\left(\sum\limits_{i=1}^4L_i\cos\left(\sum\limits_{j=1}^i\theta_j\right)\right)^2+\left(\sum\limits_{i=1}^4L_i\sin\left(\sum\limits_{j=1}^i\theta_j\right) + \right)^2},\quad L_i \in [0,1] \quad \text{for}\quad i=1,\dotsc,4 \quad \text{and}\quad \theta_j \in [0,2\pi]\quad\text{for}\quad j=1,\dotsc,4. + +Usage +----- + +.. code-block:: python + + import numpy as np + import matplotlib.pyplot as plt + + from smt.problems import RobotArm + + ndim = 2 + problem = RobotArm(ndim=ndim) + + num = 100 + x = np.ones((num, ndim)) + x[:, 0] = np.linspace(0.0, 1.0, num) + x[:, 1] = np.pi + y = problem(x) + + yd = np.empty((num, ndim)) + for i in range(ndim): + yd[:, i] = problem(x, kx=i).flatten() + + print(y.shape) + print(yd.shape) + + plt.plot(x[:, 0], y[:, 0]) + plt.xlabel("x") + plt.ylabel("y") + plt.show() + +:: + + (100, 1) + (100, 2) + +.. figure:: robotarm_Test_test_robot_arm.png + :scale: 80 % + :align: center + +Options +------- + +.. list-table:: List of options + :header-rows: 1 + :widths: 15, 10, 20, 20, 30 + :stub-columns: 0 + + * - Option + - Default + - Acceptable values + - Acceptable types + - Description + * - ndim + - 2 + - None + - ['int'] + - + * - return_complex + - False + - None + - ['bool'] + - + * - name + - RobotArm + - None + - ['str'] + - diff --git a/doc/_src_docs/problems/robotarm_Test_test_robot_arm.png b/doc/_src_docs/problems/robotarm_Test_test_robot_arm.png index a7df9680b..9b00db483 100644 Binary files a/doc/_src_docs/problems/robotarm_Test_test_robot_arm.png and b/doc/_src_docs/problems/robotarm_Test_test_robot_arm.png differ diff --git a/doc/_src_docs/problems/rosenbrock.rst b/doc/_src_docs/problems/rosenbrock.rst index 2aea19ed2..079914e29 100644 --- a/doc/_src_docs/problems/rosenbrock.rst +++ b/doc/_src_docs/problems/rosenbrock.rst @@ -1,74 +1,74 @@ -Rosenbrock function -=================== - -.. math :: - \sum\limits_{i=1}^{nx-1}\left[(x_{i+1}-x_i^2)^2+(x_i-1)^2\right],\quad-2\leq x_i\leq 2,\quad\text{ for }i=1,\ldots,nx. - -Usage ------ - -.. code-block:: python - - import numpy as np - import matplotlib.pyplot as plt - - from smt.problems import Rosenbrock - - ndim = 2 - problem = Rosenbrock(ndim=ndim) - - num = 100 - x = np.ones((num, ndim)) - x[:, 0] = np.linspace(-2, 2.0, num) - x[:, 1] = 0.0 - y = problem(x) - - yd = np.empty((num, ndim)) - for i in range(ndim): - yd[:, i] = problem(x, kx=i).flatten() - - print(y.shape) - print(yd.shape) - - plt.plot(x[:, 0], y[:, 0]) - plt.xlabel("x") - plt.ylabel("y") - plt.show() - -:: - - (100, 1) - (100, 2) - -.. 
figure:: rosenbrock_Test_test_rosenbrock.png - :scale: 80 % - :align: center - -Options -------- - -.. list-table:: List of options - :header-rows: 1 - :widths: 15, 10, 20, 20, 30 - :stub-columns: 0 - - * - Option - - Default - - Acceptable values - - Acceptable types - - Description - * - ndim - - 1 - - None - - ['int'] - - - * - return_complex - - False - - None - - ['bool'] - - - * - name - - Rosenbrock - - None - - ['str'] - - +Rosenbrock function +=================== + +.. math :: + \sum\limits_{i=1}^{nx-1}\left[(x_{i+1}-x_i^2)^2+(x_i-1)^2\right],\quad-2\leq x_i\leq 2,\quad\text{ for }i=1,\ldots,nx. + +Usage +----- + +.. code-block:: python + + import numpy as np + import matplotlib.pyplot as plt + + from smt.problems import Rosenbrock + + ndim = 2 + problem = Rosenbrock(ndim=ndim) + + num = 100 + x = np.ones((num, ndim)) + x[:, 0] = np.linspace(-2, 2.0, num) + x[:, 1] = 0.0 + y = problem(x) + + yd = np.empty((num, ndim)) + for i in range(ndim): + yd[:, i] = problem(x, kx=i).flatten() + + print(y.shape) + print(yd.shape) + + plt.plot(x[:, 0], y[:, 0]) + plt.xlabel("x") + plt.ylabel("y") + plt.show() + +:: + + (100, 1) + (100, 2) + +.. figure:: rosenbrock_Test_test_rosenbrock.png + :scale: 80 % + :align: center + +Options +------- + +.. list-table:: List of options + :header-rows: 1 + :widths: 15, 10, 20, 20, 30 + :stub-columns: 0 + + * - Option + - Default + - Acceptable values + - Acceptable types + - Description + * - ndim + - 1 + - None + - ['int'] + - + * - return_complex + - False + - None + - ['bool'] + - + * - name + - Rosenbrock + - None + - ['str'] + - diff --git a/doc/_src_docs/problems/rosenbrock_Test_test_rosenbrock.png b/doc/_src_docs/problems/rosenbrock_Test_test_rosenbrock.png index a131be3d2..6caeddbf7 100644 Binary files a/doc/_src_docs/problems/rosenbrock_Test_test_rosenbrock.png and b/doc/_src_docs/problems/rosenbrock_Test_test_rosenbrock.png differ diff --git a/doc/_src_docs/problems/sphere.rst b/doc/_src_docs/problems/sphere.rst index e406b72c7..01b633b19 100644 --- a/doc/_src_docs/problems/sphere.rst +++ b/doc/_src_docs/problems/sphere.rst @@ -1,74 +1,74 @@ -Sphere function -=============== - -.. math :: - \sum\limits_{i=1}^{nx}x_i^2,\quad-10\leq x_i\leq 10,\quad\text{ for }i=1,\ldots,nx. - -Usage ------ - -.. code-block:: python - - import numpy as np - import matplotlib.pyplot as plt - - from smt.problems import Sphere - - ndim = 2 - problem = Sphere(ndim=ndim) - - num = 100 - x = np.ones((num, ndim)) - x[:, 0] = np.linspace(-10, 10.0, num) - x[:, 1] = 0.0 - y = problem(x) - - yd = np.empty((num, ndim)) - for i in range(ndim): - yd[:, i] = problem(x, kx=i).flatten() - - print(y.shape) - print(yd.shape) - - plt.plot(x[:, 0], y[:, 0]) - plt.xlabel("x") - plt.ylabel("y") - plt.show() - -:: - - (100, 1) - (100, 2) - -.. figure:: sphere_Test_test_sphere.png - :scale: 80 % - :align: center - -Options -------- - -.. list-table:: List of options - :header-rows: 1 - :widths: 15, 10, 20, 20, 30 - :stub-columns: 0 - - * - Option - - Default - - Acceptable values - - Acceptable types - - Description - * - ndim - - 1 - - None - - ['int'] - - - * - return_complex - - False - - None - - ['bool'] - - - * - name - - Sphere - - None - - ['str'] - - +Sphere function +=============== + +.. math :: + \sum\limits_{i=1}^{nx}x_i^2,\quad-10\leq x_i\leq 10,\quad\text{ for }i=1,\ldots,nx. + +Usage +----- + +.. 
code-block:: python + + import numpy as np + import matplotlib.pyplot as plt + + from smt.problems import Sphere + + ndim = 2 + problem = Sphere(ndim=ndim) + + num = 100 + x = np.ones((num, ndim)) + x[:, 0] = np.linspace(-10, 10.0, num) + x[:, 1] = 0.0 + y = problem(x) + + yd = np.empty((num, ndim)) + for i in range(ndim): + yd[:, i] = problem(x, kx=i).flatten() + + print(y.shape) + print(yd.shape) + + plt.plot(x[:, 0], y[:, 0]) + plt.xlabel("x") + plt.ylabel("y") + plt.show() + +:: + + (100, 1) + (100, 2) + +.. figure:: sphere_Test_test_sphere.png + :scale: 80 % + :align: center + +Options +------- + +.. list-table:: List of options + :header-rows: 1 + :widths: 15, 10, 20, 20, 30 + :stub-columns: 0 + + * - Option + - Default + - Acceptable values + - Acceptable types + - Description + * - ndim + - 1 + - None + - ['int'] + - + * - return_complex + - False + - None + - ['bool'] + - + * - name + - Sphere + - None + - ['str'] + - diff --git a/doc/_src_docs/problems/sphere_Test_test_sphere.png b/doc/_src_docs/problems/sphere_Test_test_sphere.png index 5177600d8..7b7d93147 100644 Binary files a/doc/_src_docs/problems/sphere_Test_test_sphere.png and b/doc/_src_docs/problems/sphere_Test_test_sphere.png differ diff --git a/doc/_src_docs/problems/tensorproduct.rst b/doc/_src_docs/problems/tensorproduct.rst index 593b583c8..57d30c414 100644 --- a/doc/_src_docs/problems/tensorproduct.rst +++ b/doc/_src_docs/problems/tensorproduct.rst @@ -1,101 +1,101 @@ -Tensor-product function -======================= - -.. rubric :: cos - -.. math :: - \prod\limits_{i=1}^{nx}\cos(a\pi x_i),\quad-1\leq x_i\leq 1,\quad\text{ for }i=1,\ldots,nx. - -.. rubric :: exp - -.. math :: - \prod\limits_{i=1}^{nx}\exp(x_i),\quad-1\leq x_i\leq 1,\quad\text{ for }i=1,\ldots,nx. - -.. rubric :: tanh - -.. math :: - \prod\limits_{i=1}^{nx}\tanh(x_i),\quad-1\leq x_i\leq 1,\quad\text{ for }i=1,\ldots,nx. - -.. rubric :: gaussian - -.. math :: - \prod\limits_{i=1}^{nx}\exp(-2 x_i^2),\quad-1\leq x_i\leq 1,\quad\text{ for }i=1,\ldots,nx. - -Usage ------ - -.. code-block:: python - - import numpy as np - import matplotlib.pyplot as plt - - from smt.problems import TensorProduct - - ndim = 2 - problem = TensorProduct(ndim=ndim, func="cos") - - num = 100 - x = np.ones((num, ndim)) - x[:, 0] = np.linspace(-1, 1.0, num) - x[:, 1] = 0.0 - y = problem(x) - - yd = np.empty((num, ndim)) - for i in range(ndim): - yd[:, i] = problem(x, kx=i).flatten() - - print(y.shape) - print(yd.shape) - - plt.plot(x[:, 0], y[:, 0]) - plt.xlabel("x") - plt.ylabel("y") - plt.show() - -:: - - (100, 1) - (100, 2) - -.. figure:: tensorproduct_Test_test_tensor_product.png - :scale: 80 % - :align: center - -Options -------- - -.. list-table:: List of options - :header-rows: 1 - :widths: 15, 10, 20, 20, 30 - :stub-columns: 0 - - * - Option - - Default - - Acceptable values - - Acceptable types - - Description - * - ndim - - 1 - - None - - ['int'] - - - * - return_complex - - False - - None - - ['bool'] - - - * - name - - TP - - None - - ['str'] - - - * - func - - None - - ['cos', 'exp', 'tanh', 'gaussian'] - - None - - - * - width - - 1.0 - - None - - ['float', 'int'] - - +Tensor-product function +======================= + +.. rubric :: cos + +.. math :: + \prod\limits_{i=1}^{nx}\cos(a\pi x_i),\quad-1\leq x_i\leq 1,\quad\text{ for }i=1,\ldots,nx. + +.. rubric :: exp + +.. math :: + \prod\limits_{i=1}^{nx}\exp(x_i),\quad-1\leq x_i\leq 1,\quad\text{ for }i=1,\ldots,nx. + +.. rubric :: tanh + +.. 
math :: + \prod\limits_{i=1}^{nx}\tanh(x_i),\quad-1\leq x_i\leq 1,\quad\text{ for }i=1,\ldots,nx. + +.. rubric :: gaussian + +.. math :: + \prod\limits_{i=1}^{nx}\exp(-2 x_i^2),\quad-1\leq x_i\leq 1,\quad\text{ for }i=1,\ldots,nx. + +Usage +----- + +.. code-block:: python + + import numpy as np + import matplotlib.pyplot as plt + + from smt.problems import TensorProduct + + ndim = 2 + problem = TensorProduct(ndim=ndim, func="cos") + + num = 100 + x = np.ones((num, ndim)) + x[:, 0] = np.linspace(-1, 1.0, num) + x[:, 1] = 0.0 + y = problem(x) + + yd = np.empty((num, ndim)) + for i in range(ndim): + yd[:, i] = problem(x, kx=i).flatten() + + print(y.shape) + print(yd.shape) + + plt.plot(x[:, 0], y[:, 0]) + plt.xlabel("x") + plt.ylabel("y") + plt.show() + +:: + + (100, 1) + (100, 2) + +.. figure:: tensorproduct_Test_test_tensor_product.png + :scale: 80 % + :align: center + +Options +------- + +.. list-table:: List of options + :header-rows: 1 + :widths: 15, 10, 20, 20, 30 + :stub-columns: 0 + + * - Option + - Default + - Acceptable values + - Acceptable types + - Description + * - ndim + - 1 + - None + - ['int'] + - + * - return_complex + - False + - None + - ['bool'] + - + * - name + - TP + - None + - ['str'] + - + * - func + - None + - ['cos', 'exp', 'tanh', 'gaussian'] + - None + - + * - width + - 1.0 + - None + - ['float', 'int'] + - diff --git a/doc/_src_docs/problems/tensorproduct_Test_test_tensor_product.png b/doc/_src_docs/problems/tensorproduct_Test_test_tensor_product.png index 4bcb576f5..445fc0ad0 100644 Binary files a/doc/_src_docs/problems/tensorproduct_Test_test_tensor_product.png and b/doc/_src_docs/problems/tensorproduct_Test_test_tensor_product.png differ diff --git a/doc/_src_docs/problems/torsionvibration.rst b/doc/_src_docs/problems/torsionvibration.rst index 4fd0304a5..0069908be 100644 --- a/doc/_src_docs/problems/torsionvibration.rst +++ b/doc/_src_docs/problems/torsionvibration.rst @@ -1,85 +1,85 @@ -Torsion vibration function -========================== - -.. math :: - \frac{1}{2\pi}\sqrt{\frac{-b-\sqrt{b^2-4ac}}{2a}}, - -where -:math:`K_i=\frac{\pi G_id_i}{32L_i},\quad M_j=\frac{\rho_j \pi t_jD_j}{4g},\quad J_j=0.5M_j\frac{D_j}{2},\quad a = 1,\quad b=-\left(\frac{K_1+K2}{J_1}+\frac{K_2+K3}{J_2}\right),\quad c=\frac{K_1K_2+K_2K_3+K_3K_1}{J_1J_2},\quad \text{for}\quad d_1\in [1.8,2.2],\quad L_1\in[9,11],\quad G_1\in [105300000,128700000],` -:math:`d_2\in [1.638,2.002],\quad L_2\in[10.8,13.2],\quad G_2\in[5580000,6820000],\quad d_3\in[2.025,2.475],\quad L_3\in[7.2,8.8],\quad G_3\in[3510000,4290000],\quad D_1\in[10.8,13.2],\quad t_1\in[2.7,3.3],` -:math:`\rho_1\in[0.252,0.308], \quad D_2\in[12.6,15.4],\quad t_2\in[3.6,4.4],\quad\rho_1\in[0.09,0.11].` - -Usage ------ - -.. code-block:: python - - import numpy as np - import matplotlib.pyplot as plt - - from smt.problems import TorsionVibration - - ndim = 15 - problem = TorsionVibration(ndim=ndim) - - num = 100 - x = np.ones((num, ndim)) - for i in range(ndim): - x[:, i] = 0.5 * (problem.xlimits[i, 0] + problem.xlimits[i, 1]) - x[:, 0] = np.linspace(1.8, 2.2, num) - y = problem(x) - - yd = np.empty((num, ndim)) - for i in range(ndim): - yd[:, i] = problem(x, kx=i).flatten() - - print(y.shape) - print(yd.shape) - - plt.plot(x[:, 0], y[:, 0]) - plt.xlabel("x") - plt.ylabel("y") - plt.show() - -:: - - (100, 1) - (100, 15) - -.. figure:: torsionvibration_Test_test_torsion_vibration.png - :scale: 80 % - :align: center - -Options -------- - -.. 
list-table:: List of options
-   :header-rows: 1
-   :widths: 15, 10, 20, 20, 30
-   :stub-columns: 0
-
-   * - Option
-     - Default
-     - Acceptable values
-     - Acceptable types
-     - Description
-   * - ndim
-     - 1
-     - None
-     - ['int']
-     -
-   * - return_complex
-     - False
-     - None
-     - ['bool']
-     -
-   * - name
-     - TorsionVibration
-     - None
-     - ['str']
-     -
-   * - use_FD
-     - False
-     - None
-     - ['bool']
-     -
+Torsion vibration function
+==========================
+
+.. math ::
+   \frac{1}{2\pi}\sqrt{\frac{-b-\sqrt{b^2-4ac}}{2a}},
+
+where
+:math:`K_i=\frac{\pi G_id_i}{32L_i},\quad M_j=\frac{\rho_j \pi t_jD_j}{4g},\quad J_j=0.5M_j\frac{D_j}{2},\quad a = 1,\quad b=-\left(\frac{K_1+K_2}{J_1}+\frac{K_2+K_3}{J_2}\right),\quad c=\frac{K_1K_2+K_2K_3+K_3K_1}{J_1J_2},\quad \text{for}\quad d_1\in [1.8,2.2],\quad L_1\in[9,11],\quad G_1\in [105300000,128700000],`
+:math:`d_2\in [1.638,2.002],\quad L_2\in[10.8,13.2],\quad G_2\in[5580000,6820000],\quad d_3\in[2.025,2.475],\quad L_3\in[7.2,8.8],\quad G_3\in[3510000,4290000],\quad D_1\in[10.8,13.2],\quad t_1\in[2.7,3.3],`
+:math:`\rho_1\in[0.252,0.308], \quad D_2\in[12.6,15.4],\quad t_2\in[3.6,4.4],\quad\rho_2\in[0.09,0.11].`
+
+Usage
+-----
+
+.. code-block:: python
+
+    import numpy as np
+    import matplotlib.pyplot as plt
+
+    from smt.problems import TorsionVibration
+
+    ndim = 15
+    problem = TorsionVibration(ndim=ndim)
+
+    num = 100
+    x = np.ones((num, ndim))
+    for i in range(ndim):
+        x[:, i] = 0.5 * (problem.xlimits[i, 0] + problem.xlimits[i, 1])
+    x[:, 0] = np.linspace(1.8, 2.2, num)
+    y = problem(x)
+
+    yd = np.empty((num, ndim))
+    for i in range(ndim):
+        yd[:, i] = problem(x, kx=i).flatten()
+
+    print(y.shape)
+    print(yd.shape)
+
+    plt.plot(x[:, 0], y[:, 0])
+    plt.xlabel("x")
+    plt.ylabel("y")
+    plt.show()
+
+::
+
+   (100, 1)
+   (100, 15)
+
+.. figure:: torsionvibration_Test_test_torsion_vibration.png
+   :scale: 80 %
+   :align: center
+
+Options
+-------
+
+.. list-table:: List of options
+   :header-rows: 1
+   :widths: 15, 10, 20, 20, 30
+   :stub-columns: 0
+
+   * - Option
+     - Default
+     - Acceptable values
+     - Acceptable types
+     - Description
+   * - ndim
+     - 1
+     - None
+     - ['int']
+     -
+   * - return_complex
+     - False
+     - None
+     - ['bool']
+     -
+   * - name
+     - TorsionVibration
+     - None
+     - ['str']
+     -
+   * - use_FD
+     - False
+     - None
+     - ['bool']
+     -
diff --git a/doc/_src_docs/problems/torsionvibration_Test_test_torsion_vibration.png b/doc/_src_docs/problems/torsionvibration_Test_test_torsion_vibration.png
index adc78de6e..7faa9ebda 100644
Binary files a/doc/_src_docs/problems/torsionvibration_Test_test_torsion_vibration.png and b/doc/_src_docs/problems/torsionvibration_Test_test_torsion_vibration.png differ
diff --git a/doc/_src_docs/problems/waterflow.rst b/doc/_src_docs/problems/waterflow.rst
index 7c3f9d6b8..12578fb5c 100644
--- a/doc/_src_docs/problems/waterflow.rst
+++ b/doc/_src_docs/problems/waterflow.rst
@@ -1,82 +1,82 @@
-Water flow function
-===================
-
-.. math ::
-   \frac{2\pi T_u\left(H_u-H_l\right)}{\ln\left(\frac{r}{r_w}\right)\left[1+\frac{2LT_u}{\ln\left(\frac{r}{r_w}\right)r_w^2K_w}+\frac{T_u}{T_l}\right]},
-
-:math:`0.05\leq r_w\leq 0.15,\quad 100\leq r\leq 50000,\quad 63070\leq T_u\leq 115600,\quad 990 \leq H_u \leq 1110, \quad 63.1 \leq T_l \leq 116, \quad 700 \leq H_l \leq 820, \quad 1120 \leq L \leq 1680,\quad \text{and}\quad 9855 \leq K_w \leq 12045.`
-
-Usage
------
-
-..
code-block:: python - - import numpy as np - import matplotlib.pyplot as plt - - from smt.problems import WaterFlow - - ndim = 8 - problem = WaterFlow(ndim=ndim) - - num = 100 - x = np.ones((num, ndim)) - for i in range(ndim): - x[:, i] = 0.5 * (problem.xlimits[i, 0] + problem.xlimits[i, 1]) - x[:, 0] = np.linspace(0.05, 0.15, num) - y = problem(x) - - yd = np.empty((num, ndim)) - for i in range(ndim): - yd[:, i] = problem(x, kx=i).flatten() - - print(y.shape) - print(yd.shape) - - plt.plot(x[:, 0], y[:, 0]) - plt.xlabel("x") - plt.ylabel("y") - plt.show() - -:: - - (100, 1) - (100, 8) - -.. figure:: waterflow_Test_test_water_flow.png - :scale: 80 % - :align: center - -Options -------- - -.. list-table:: List of options - :header-rows: 1 - :widths: 15, 10, 20, 20, 30 - :stub-columns: 0 - - * - Option - - Default - - Acceptable values - - Acceptable types - - Description - * - ndim - - 1 - - None - - ['int'] - - - * - return_complex - - False - - None - - ['bool'] - - - * - name - - WaterFlow - - None - - ['str'] - - - * - use_FD - - False - - None - - ['bool'] - - +Water flow function +=================== + +.. math :: + \frac{2\pi T_u\left(H_u-H_l\right)}{\ln\left(\frac{r}{r_w}\right)\left[1+\frac{2LT_u}{\ln\left(\frac{r}{r_w}\right)r_w^2K_w}+\frac{T_u}{T_l}\right]}, + +:math:`0.05\leq r_w\leq 0.15,\quad 100\leq r\leq 50000,\quad 63070\leq T_u\leq 115600,\quad 990 \leq H_u \leq 1110, \quad 63.1 \leq T_l \leq 116, \quad 700 \leq H_l \leq 820, \quad 1120 \leq L \leq 1680,\quad \text{and}\quad 9855 \leq K_w \leq 12045.` + +Usage +----- + +.. code-block:: python + + import numpy as np + import matplotlib.pyplot as plt + + from smt.problems import WaterFlow + + ndim = 8 + problem = WaterFlow(ndim=ndim) + + num = 100 + x = np.ones((num, ndim)) + for i in range(ndim): + x[:, i] = 0.5 * (problem.xlimits[i, 0] + problem.xlimits[i, 1]) + x[:, 0] = np.linspace(0.05, 0.15, num) + y = problem(x) + + yd = np.empty((num, ndim)) + for i in range(ndim): + yd[:, i] = problem(x, kx=i).flatten() + + print(y.shape) + print(yd.shape) + + plt.plot(x[:, 0], y[:, 0]) + plt.xlabel("x") + plt.ylabel("y") + plt.show() + +:: + + (100, 1) + (100, 8) + +.. figure:: waterflow_Test_test_water_flow.png + :scale: 80 % + :align: center + +Options +------- + +.. list-table:: List of options + :header-rows: 1 + :widths: 15, 10, 20, 20, 30 + :stub-columns: 0 + + * - Option + - Default + - Acceptable values + - Acceptable types + - Description + * - ndim + - 1 + - None + - ['int'] + - + * - return_complex + - False + - None + - ['bool'] + - + * - name + - WaterFlow + - None + - ['str'] + - + * - use_FD + - False + - None + - ['bool'] + - diff --git a/doc/_src_docs/problems/waterflow_Test_test_water_flow.png b/doc/_src_docs/problems/waterflow_Test_test_water_flow.png index 764a1eba3..399a0aec8 100644 Binary files a/doc/_src_docs/problems/waterflow_Test_test_water_flow.png and b/doc/_src_docs/problems/waterflow_Test_test_water_flow.png differ diff --git a/doc/_src_docs/problems/weldedbeam.rst b/doc/_src_docs/problems/weldedbeam.rst index 73f38ed8e..876db5899 100644 --- a/doc/_src_docs/problems/weldedbeam.rst +++ b/doc/_src_docs/problems/weldedbeam.rst @@ -1,83 +1,83 @@ -Welded beam function -==================== - -.. 
math :: - \sqrt{\frac{\tau'^2+\tau''^2+l\tau'\tau''}{\sqrt{0.25\left(l^2+(h+t)^2\right)}}}, - -where -:math:`\tau'=\frac{6000}{\sqrt{2}hl}, \quad\tau''=\frac{6000(14+0.5l)\sqrt{0.25\left(l^2+(h+t)^2\right)}}{2\left[0.707hl\left(\frac{l^2}{12}+0.25(h+t)^2\right)\right]},\quad \text{for}\quad h\in[0.125,1],\quad l,t\in[5,10].` - -Usage ------ - -.. code-block:: python - - import numpy as np - import matplotlib.pyplot as plt - - from smt.problems import WeldedBeam - - ndim = 3 - problem = WeldedBeam(ndim=ndim) - - num = 100 - x = np.ones((num, ndim)) - for i in range(ndim): - x[:, i] = 0.5 * (problem.xlimits[i, 0] + problem.xlimits[i, 1]) - x[:, 0] = np.linspace(5.0, 10.0, num) - y = problem(x) - - yd = np.empty((num, ndim)) - for i in range(ndim): - yd[:, i] = problem(x, kx=i).flatten() - - print(y.shape) - print(yd.shape) - - plt.plot(x[:, 0], y[:, 0]) - plt.xlabel("x") - plt.ylabel("y") - plt.show() - -:: - - (100, 1) - (100, 3) - -.. figure:: weldedbeam_Test_test_welded_beam.png - :scale: 80 % - :align: center - -Options -------- - -.. list-table:: List of options - :header-rows: 1 - :widths: 15, 10, 20, 20, 30 - :stub-columns: 0 - - * - Option - - Default - - Acceptable values - - Acceptable types - - Description - * - ndim - - 1 - - None - - ['int'] - - - * - return_complex - - False - - None - - ['bool'] - - - * - name - - WeldedBeam - - None - - ['str'] - - - * - use_FD - - False - - None - - ['bool'] - - +Welded beam function +==================== + +.. math :: + \sqrt{\frac{\tau'^2+\tau''^2+l\tau'\tau''}{\sqrt{0.25\left(l^2+(h+t)^2\right)}}}, + +where +:math:`\tau'=\frac{6000}{\sqrt{2}hl}, \quad\tau''=\frac{6000(14+0.5l)\sqrt{0.25\left(l^2+(h+t)^2\right)}}{2\left[0.707hl\left(\frac{l^2}{12}+0.25(h+t)^2\right)\right]},\quad \text{for}\quad h\in[0.125,1],\quad l,t\in[5,10].` + +Usage +----- + +.. code-block:: python + + import numpy as np + import matplotlib.pyplot as plt + + from smt.problems import WeldedBeam + + ndim = 3 + problem = WeldedBeam(ndim=ndim) + + num = 100 + x = np.ones((num, ndim)) + for i in range(ndim): + x[:, i] = 0.5 * (problem.xlimits[i, 0] + problem.xlimits[i, 1]) + x[:, 0] = np.linspace(5.0, 10.0, num) + y = problem(x) + + yd = np.empty((num, ndim)) + for i in range(ndim): + yd[:, i] = problem(x, kx=i).flatten() + + print(y.shape) + print(yd.shape) + + plt.plot(x[:, 0], y[:, 0]) + plt.xlabel("x") + plt.ylabel("y") + plt.show() + +:: + + (100, 1) + (100, 3) + +.. figure:: weldedbeam_Test_test_welded_beam.png + :scale: 80 % + :align: center + +Options +------- + +.. 
list-table:: List of options + :header-rows: 1 + :widths: 15, 10, 20, 20, 30 + :stub-columns: 0 + + * - Option + - Default + - Acceptable values + - Acceptable types + - Description + * - ndim + - 1 + - None + - ['int'] + - + * - return_complex + - False + - None + - ['bool'] + - + * - name + - WeldedBeam + - None + - ['str'] + - + * - use_FD + - False + - None + - ['bool'] + - diff --git a/doc/_src_docs/problems/weldedbeam_Test_test_welded_beam.png b/doc/_src_docs/problems/weldedbeam_Test_test_welded_beam.png index 04384a604..5554c841e 100644 Binary files a/doc/_src_docs/problems/weldedbeam_Test_test_welded_beam.png and b/doc/_src_docs/problems/weldedbeam_Test_test_welded_beam.png differ diff --git a/doc/_src_docs/sampling_methods/full_factorial.rst b/doc/_src_docs/sampling_methods/full_factorial.rst index f2d50c59e..ebd5eac19 100644 --- a/doc/_src_docs/sampling_methods/full_factorial.rst +++ b/doc/_src_docs/sampling_methods/full_factorial.rst @@ -1,62 +1,62 @@ -Full-factorial sampling -======================= - -Usage ------ - -.. code-block:: python - - import numpy as np - import matplotlib.pyplot as plt - - from smt.sampling_methods import FullFactorial - - xlimits = np.array([[0.0, 4.0], [0.0, 3.0]]) - sampling = FullFactorial(xlimits=xlimits) - - num = 50 - x = sampling(num) - - print(x.shape) - - plt.plot(x[:, 0], x[:, 1], "o") - plt.xlabel("x") - plt.ylabel("y") - plt.show() - -:: - - (50, 2) - -.. figure:: full_factorial_Test_test_full_factorial.png - :scale: 80 % - :align: center - -Options -------- - -.. list-table:: List of options - :header-rows: 1 - :widths: 15, 10, 20, 20, 30 - :stub-columns: 0 - - * - Option - - Default - - Acceptable values - - Acceptable types - - Description - * - xlimits - - None - - None - - ['ndarray'] - - The interval of the domain in each dimension with shape nx x 2 (required) - * - weights - - None - - None - - ['list', 'ndarray'] - - relative sampling weights for each nx dimensions - * - clip - - False - - None - - ['bool'] - - round number of samples to the sampling number product of each nx dimensions (> asked nt) +Full-factorial sampling +======================= + +Usage +----- + +.. code-block:: python + + import numpy as np + import matplotlib.pyplot as plt + + from smt.sampling_methods import FullFactorial + + xlimits = np.array([[0.0, 4.0], [0.0, 3.0]]) + sampling = FullFactorial(xlimits=xlimits) + + num = 50 + x = sampling(num) + + print(x.shape) + + plt.plot(x[:, 0], x[:, 1], "o") + plt.xlabel("x") + plt.ylabel("y") + plt.show() + +:: + + (50, 2) + +.. figure:: full_factorial_Test_test_full_factorial.png + :scale: 80 % + :align: center + +Options +------- + +.. 
list-table:: List of options
+   :header-rows: 1
+   :widths: 15, 10, 20, 20, 30
+   :stub-columns: 0
+
+   * - Option
+     - Default
+     - Acceptable values
+     - Acceptable types
+     - Description
+   * - xlimits
+     - None
+     - None
+     - ['ndarray']
+     - The interval of the domain in each dimension with shape nx x 2 (required)
+   * - weights
+     - None
+     - None
+     - ['list', 'ndarray']
+     - relative sampling weights for each of the nx dimensions
+   * - clip
+     - False
+     - None
+     - ['bool']
+     - round the number of samples up to the product of the per-dimension sampling numbers (may exceed the requested nt)
diff --git a/doc/_src_docs/sampling_methods/full_factorial_Test_test_full_factorial.png b/doc/_src_docs/sampling_methods/full_factorial_Test_test_full_factorial.png
index be4710348..815293544 100644
Binary files a/doc/_src_docs/sampling_methods/full_factorial_Test_test_full_factorial.png and b/doc/_src_docs/sampling_methods/full_factorial_Test_test_full_factorial.png differ
diff --git a/doc/_src_docs/sampling_methods/lhs.rst b/doc/_src_docs/sampling_methods/lhs.rst
index 075f645eb..06748920f 100644
--- a/doc/_src_docs/sampling_methods/lhs.rst
+++ b/doc/_src_docs/sampling_methods/lhs.rst
@@ -1,79 +1,79 @@
-Latin Hypercube sampling
-========================
-
-The LHS design is a statistical method for generating a quasi-random sampling distribution. It is among the most popular sampling techniques in computer experiments thanks to its simplicity and projection properties with high-dimensional problems. LHS is built as follows: we cut each dimension space, which represents a variable, into n
-sections where n is the number of sampling points, and we put only one point in each section.
-
-The LHS method uses the pyDOE package (Design of Experiments for Python) [1]_. Five criteria for the construction of LHS are implemented in SMT:
-
-- Center the points within the sampling intervals.
-- Maximize the minimum distance between points and place the point in a randomized location within its interval.
-- Maximize the minimum distance between points and center the point within its interval.
-- Minimize the maximum correlation coefficient.
-- Optimize the design using the Enhanced Stochastic Evolutionary algorithm (ESE).
-
-The four first criteria are the same than in pyDOE (for more details, see [1]_). The last criterion, ESE, is implemented by the authors of SMT (more details about such method could be found in [2]_).
-
-.. [1] https://pythonhosted.org/pyDOE/index.html
-
-.. [2] Jin, R. and Chen, W. and Sudjianto, A. (2005), "An efficient algorithm for constructing optimal design of computer experiments." Journal of Statistical Planning and Inference, 134:268-287.
-
-Usage
------
-
-.. code-block:: python
-
-    import numpy as np
-    import matplotlib.pyplot as plt
-
-    from smt.sampling_methods import LHS
-
-    xlimits = np.array([[0.0, 4.0], [0.0, 3.0]])
-    sampling = LHS(xlimits=xlimits)
-
-    num = 50
-    x = sampling(num)
-
-    print(x.shape)
-
-    plt.plot(x[:, 0], x[:, 1], "o")
-    plt.xlabel("x")
-    plt.ylabel("y")
-    plt.show()
-
-::
-
-   (50, 2)
-
-.. figure:: lhs_Test_test_lhs.png
-   :scale: 80 %
-   :align: center
-
-Options
--------
-
-.. list-table:: List of options
-   :header-rows: 1
-   :widths: 15, 10, 20, 20, 30
-   :stub-columns: 0
-
-   * - Option
-     - Default
-     - Acceptable values
-     - Acceptable types
-     - Description
-   * - xlimits
-     - None
-     - None
-     - ['ndarray']
-     - The interval of the domain in each dimension with shape nx x 2 (required)
-   * - criterion
-     - c
-     - ['center', 'maximin', 'centermaximin', 'correlation', 'c', 'm', 'cm', 'corr', 'ese']
-     - ['str']
-     - criterion used to construct the LHS design c, m, cm and corr are abbreviation of center, maximin, centermaximin and correlation, respectively
-   * - random_state
-     - None
-     - None
-     - ['NoneType', 'int', 'RandomState']
-     - Numpy RandomState object or seed number which controls random draws
+Latin Hypercube sampling
+========================
+
+The LHS design is a statistical method for generating a quasi-random sampling distribution. It is among the most popular sampling techniques in computer experiments thanks to its simplicity and its projection properties in high-dimensional problems. LHS is built as follows: each dimension of the space, which represents a variable, is cut into n sections, where n is the number of sampling points, and exactly one point is placed in each section.
+
+The LHS method uses the pyDOE package (Design of Experiments for Python) [1]_. Five criteria for the construction of LHS are implemented in SMT:
+
+- Center the points within the sampling intervals.
+- Maximize the minimum distance between points and place the point in a randomized location within its interval.
+- Maximize the minimum distance between points and center the point within its interval.
+- Minimize the maximum correlation coefficient.
+- Optimize the design using the Enhanced Stochastic Evolutionary algorithm (ESE).
+
+The first four criteria are the same as in pyDOE (for more details, see [1]_). The last criterion, ESE, is implemented by the authors of SMT (more details about this method can be found in [2]_).
+
+.. [1] https://pythonhosted.org/pyDOE/index.html
+
+.. [2] Jin, R. and Chen, W. and Sudjianto, A. (2005), "An efficient algorithm for constructing optimal design of computer experiments." Journal of Statistical Planning and Inference, 134:268-287.
+
+Usage
+-----
+
+.. code-block:: python
+
+    import numpy as np
+    import matplotlib.pyplot as plt
+
+    from smt.sampling_methods import LHS
+
+    xlimits = np.array([[0.0, 4.0], [0.0, 3.0]])
+    sampling = LHS(xlimits=xlimits)
+
+    num = 50
+    x = sampling(num)
+
+    print(x.shape)
+
+    plt.plot(x[:, 0], x[:, 1], "o")
+    plt.xlabel("x")
+    plt.ylabel("y")
+    plt.show()
+
+::
+
+   (50, 2)
+
+.. figure:: lhs_Test_test_lhs.png
+   :scale: 80 %
+   :align: center
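+
+The ``criterion`` and ``random_state`` options listed below control how the design is constructed and make it reproducible. A short sketch (the option values here are illustrative) using the ESE criterion:
+
+.. code-block:: python
+
+    import numpy as np
+
+    from smt.sampling_methods import LHS
+
+    xlimits = np.array([[0.0, 4.0], [0.0, 3.0]])
+    # ESE-optimized design, seeded so that repeated runs give the same points.
+    sampling = LHS(xlimits=xlimits, criterion="ese", random_state=42)
+
+    x = sampling(50)
+    print(x.shape)  # (50, 2)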
+
+Options
+-------
+
+.. list-table:: List of options
+   :header-rows: 1
+   :widths: 15, 10, 20, 20, 30
+   :stub-columns: 0
+
+   * - Option
+     - Default
+     - Acceptable values
+     - Acceptable types
+     - Description
+   * - xlimits
+     - None
+     - None
+     - ['ndarray']
+     - The interval of the domain in each dimension with shape nx x 2 (required)
+   * - criterion
+     - c
+     - ['center', 'maximin', 'centermaximin', 'correlation', 'c', 'm', 'cm', 'corr', 'ese']
+     - ['str']
+     - criterion used to construct the LHS design; c, m, cm and corr are abbreviations of center, maximin, centermaximin and correlation, respectively
+   * - random_state
+     - None
+     - None
+     - ['NoneType', 'int', 'RandomState']
+     - Numpy RandomState object or seed number which controls random draws
diff --git a/doc/_src_docs/sampling_methods/lhs_Test_test_lhs.png b/doc/_src_docs/sampling_methods/lhs_Test_test_lhs.png
index 87a09fe2d..b8741cad4 100644
Binary files a/doc/_src_docs/sampling_methods/lhs_Test_test_lhs.png and b/doc/_src_docs/sampling_methods/lhs_Test_test_lhs.png differ
diff --git a/doc/_src_docs/sampling_methods/random.rst b/doc/_src_docs/sampling_methods/random.rst
index e2ad221da..90aaf11bf 100644
--- a/doc/_src_docs/sampling_methods/random.rst
+++ b/doc/_src_docs/sampling_methods/random.rst
@@ -1,54 +1,54 @@
-Random sampling
-===============
-
-This class creates random samples from a uniform distribution over the design space.
-
-Usage
------
-
-.. code-block:: python
-
-    import numpy as np
-    import matplotlib.pyplot as plt
-
-    from smt.sampling_methods import Random
-
-    xlimits = np.array([[0.0, 4.0], [0.0, 3.0]])
-    sampling = Random(xlimits=xlimits)
-
-    num = 50
-    x = sampling(num)
-
-    print(x.shape)
-
-    plt.plot(x[:, 0], x[:, 1], "o")
-    plt.xlabel("x")
-    plt.ylabel("y")
-    plt.show()
-
-::
-
-   (50, 2)
-
-.. figure:: random_Test_test_random.png
-   :scale: 80 %
-   :align: center
-
-Options
--------
-
-.. list-table:: List of options
-   :header-rows: 1
-   :widths: 15, 10, 20, 20, 30
-   :stub-columns: 0
-
-   * - Option
-     - Default
-     - Acceptable values
-     - Acceptable types
-     - Description
-   * - xlimits
-     - None
-     - None
-     - ['ndarray']
-     - The interval of the domain in each dimension with shape nx x 2 (required)
+Random sampling
+===============
+
+This class creates random samples from a uniform distribution over the design space.
+
+Usage
+-----
+
+.. code-block:: python
+
+    import numpy as np
+    import matplotlib.pyplot as plt
+
+    from smt.sampling_methods import Random
+
+    xlimits = np.array([[0.0, 4.0], [0.0, 3.0]])
+    sampling = Random(xlimits=xlimits)
+
+    num = 50
+    x = sampling(num)
+
+    print(x.shape)
+
+    plt.plot(x[:, 0], x[:, 1], "o")
+    plt.xlabel("x")
+    plt.ylabel("y")
+    plt.show()
+
+::
+
+   (50, 2)
+
+.. figure:: random_Test_test_random.png
+   :scale: 80 %
+   :align: center
+
+Options
+-------
+
+..
list-table:: List of options + :header-rows: 1 + :widths: 15, 10, 20, 20, 30 + :stub-columns: 0 + + * - Option + - Default + - Acceptable values + - Acceptable types + - Description + * - xlimits + - None + - None + - ['ndarray'] + - The interval of the domain in each dimension with shape nx x 2 (required) diff --git a/doc/_src_docs/sampling_methods/random_Test_test_random.png b/doc/_src_docs/sampling_methods/random_Test_test_random.png index 4c885bbd6..2b9131632 100644 Binary files a/doc/_src_docs/sampling_methods/random_Test_test_random.png and b/doc/_src_docs/sampling_methods/random_Test_test_random.png differ diff --git a/doc/_src_docs/sampling_methods_Test_test_random.png b/doc/_src_docs/sampling_methods_Test_test_random.png index 705ea6375..ff80c475d 100644 Binary files a/doc/_src_docs/sampling_methods_Test_test_random.png and b/doc/_src_docs/sampling_methods_Test_test_random.png differ diff --git a/doc/_src_docs/surrogate_models.rst b/doc/_src_docs/surrogate_models.rst index 283ce9ea1..bcde1fbe2 100644 --- a/doc/_src_docs/surrogate_models.rst +++ b/doc/_src_docs/surrogate_models.rst @@ -1,173 +1,173 @@ -Surrogate modeling methods -========================== - -SMT contains the surrogate modeling methods listed below. - -.. toctree:: - :maxdepth: 1 - :titlesonly: - - surrogate_models/rbf - surrogate_models/idw - surrogate_models/rmts - surrogate_models/ls - surrogate_models/qp - surrogate_models/krg - surrogate_models/kpls - surrogate_models/kplsk - surrogate_models/gekpls - surrogate_models/genn - surrogate_models/mgp - - -Usage ------ - -.. code-block:: python - - import numpy as np - import matplotlib.pyplot as plt - - from smt.surrogate_models import RBF - - xt = np.array([0.0, 1.0, 2.0, 3.0, 4.0]) - yt = np.array([0.0, 1.0, 1.5, 0.9, 1.0]) - - sm = RBF(d0=5) - sm.set_training_values(xt, yt) - sm.train() - - num = 100 - x = np.linspace(0.0, 4.0, num) - y = sm.predict_values(x) - - plt.plot(xt, yt, "o") - plt.plot(x, y) - plt.xlabel("x") - plt.ylabel("y") - plt.legend(["Training data", "Prediction"]) - plt.show() - -:: - - ___________________________________________________________________________ - - RBF - ___________________________________________________________________________ - - Problem size - - # training points. : 5 - - ___________________________________________________________________________ - - Training - - Training ... - Initializing linear solver ... - Performing LU fact. (5 x 5 mtx) ... - Performing LU fact. (5 x 5 mtx) - done. Time (sec): 0.0001283 - Initializing linear solver - done. Time (sec): 0.0001543 - Solving linear system (col. 0) ... - Back solving (5 x 5 mtx) ... - Back solving (5 x 5 mtx) - done. Time (sec): 0.0000749 - Solving linear system (col. 0) - done. Time (sec): 0.0000951 - Training - done. Time (sec): 0.0006111 - ___________________________________________________________________________ - - Evaluation - - # eval points. : 100 - - Predicting ... - Predicting - done. Time (sec): 0.0001354 - - Prediction time/pt. (sec) : 0.0000014 - - -.. figure:: surrogate_models_Test_test_rbf.png - :scale: 80 % - :align: center - -SurrogateModel class API ------------------------- - -All surrogate modeling methods implement the following API, though some of the functions in the API are not supported by all methods. - -.. autoclass:: smt.surrogate_models.surrogate_model.SurrogateModel - - .. automethod:: smt.surrogate_models.surrogate_model.SurrogateModel.__init__ - - .. automethod:: smt.surrogate_models.surrogate_model.SurrogateModel.set_training_values - - .. 
automethod:: smt.surrogate_models.surrogate_model.SurrogateModel.set_training_derivatives - - .. automethod:: smt.surrogate_models.surrogate_model.SurrogateModel.train - - .. automethod:: smt.surrogate_models.surrogate_model.SurrogateModel.predict_values - - .. automethod:: smt.surrogate_models.surrogate_model.SurrogateModel.predict_derivatives - - .. automethod:: smt.surrogate_models.surrogate_model.SurrogateModel.predict_output_derivatives - - .. automethod:: smt.surrogate_models.surrogate_model.SurrogateModel.predict_variances - - -How to save and load trained surrogate models ---------------------------------------------- - -The SurrogateModel API does not contain any save/load interface. -Therefore the user has to handle these operations by him/herself. Below some tips to implement save and load. - -For models written in pure Python -^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ - -These operations can be implemented using the `pickle `_ module. - -Saving the model -"""""""""""""""" - -.. code-block:: python - - sm = KRG() - sm.set_training_values(xtrain, ytrain) - sm.train() - - filename = "kriging.pkl" - with open(filename, "wb") as f: - pickle.dump(sm, f) - -Loading the model -""""""""""""""""" -.. code-block:: python - - sm2 = None - filename = "kriging.pkl" - with open(filename, "rb") as f: - sm2 = pickle.load(f) - - -For models written in C++ (namely IDW, RBF, RMTB and RMTC) -^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ - -These models can be cached using their `data_dir` option. Provided the user gives the same training values -the model is not retrained but reloaded from cache directory. So by saving the cache directory and the training data, -one is able to avoid the training cost and reload the model from cached data. - -Saving the model -"""""""""""""""" - -.. code-block:: python - - sm = RBF(data_dir="./cache") - sm.set_training_values(xtrain, ytrain) - sm.train() - -Loading the model -""""""""""""""""" - -.. code-block:: python - - sm2 = RBF(data_dir="./cache") - sm2.set_training_values(xtrain, ytrain) # same training data as above! - sm2.train() # actual training is skipped, cached data model is loaded +Surrogate modeling methods +========================== + +SMT contains the surrogate modeling methods listed below. + +.. toctree:: + :maxdepth: 1 + :titlesonly: + + surrogate_models/rbf + surrogate_models/idw + surrogate_models/rmts + surrogate_models/ls + surrogate_models/qp + surrogate_models/krg + surrogate_models/kpls + surrogate_models/kplsk + surrogate_models/gekpls + surrogate_models/genn + surrogate_models/mgp + + +Usage +----- + +.. code-block:: python + + import numpy as np + import matplotlib.pyplot as plt + + from smt.surrogate_models import RBF + + xt = np.array([0.0, 1.0, 2.0, 3.0, 4.0]) + yt = np.array([0.0, 1.0, 1.5, 0.9, 1.0]) + + sm = RBF(d0=5) + sm.set_training_values(xt, yt) + sm.train() + + num = 100 + x = np.linspace(0.0, 4.0, num) + y = sm.predict_values(x) + + plt.plot(xt, yt, "o") + plt.plot(x, y) + plt.xlabel("x") + plt.ylabel("y") + plt.legend(["Training data", "Prediction"]) + plt.show() + +:: + + ___________________________________________________________________________ + + RBF + ___________________________________________________________________________ + + Problem size + + # training points. : 5 + + ___________________________________________________________________________ + + Training + + Training ... + Initializing linear solver ... + Performing LU fact. (5 x 5 mtx) ... + Performing LU fact. (5 x 5 mtx) - done. 
Time (sec): 0.0005803
+      Initializing linear solver - done. Time (sec): 0.0006382
+         Solving linear system (col. 0) ...
+            Back solving (5 x 5 mtx) ...
+            Back solving (5 x 5 mtx) - done. Time (sec): 0.0000000
+         Solving linear system (col. 0) - done. Time (sec): 0.0000000
+      Training - done. Time (sec): 0.0006382
+   ___________________________________________________________________________
+
+    Evaluation
+
+       # eval points. : 100
+
+    Predicting ...
+    Predicting - done. Time (sec): 0.0000000
+
+    Prediction time/pt. (sec) : 0.0000000
+
+
+.. figure:: surrogate_models_Test_test_rbf.png
+   :scale: 80 %
+   :align: center
+
+SurrogateModel class API
+------------------------
+
+All surrogate modeling methods implement the following API, though some of the functions in the API are not supported by all methods.
+
+.. autoclass:: smt.surrogate_models.surrogate_model.SurrogateModel
+
+   .. automethod:: smt.surrogate_models.surrogate_model.SurrogateModel.__init__
+
+   .. automethod:: smt.surrogate_models.surrogate_model.SurrogateModel.set_training_values
+
+   .. automethod:: smt.surrogate_models.surrogate_model.SurrogateModel.set_training_derivatives
+
+   .. automethod:: smt.surrogate_models.surrogate_model.SurrogateModel.train
+
+   .. automethod:: smt.surrogate_models.surrogate_model.SurrogateModel.predict_values
+
+   .. automethod:: smt.surrogate_models.surrogate_model.SurrogateModel.predict_derivatives
+
+   .. automethod:: smt.surrogate_models.surrogate_model.SurrogateModel.predict_output_derivatives
+
+   .. automethod:: smt.surrogate_models.surrogate_model.SurrogateModel.predict_variances
+
+
+How to save and load trained surrogate models
+---------------------------------------------
+
+The SurrogateModel API does not contain any save/load interface.
+Therefore the user has to handle these operations. Below are some tips on implementing save and load.
+
+For models written in pure Python
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+These operations can be implemented using the `pickle <https://docs.python.org/3/library/pickle.html>`_ module.
+
+Saving the model
+""""""""""""""""
+
+.. code-block:: python
+
+   import pickle
+
+   from smt.surrogate_models import KRG
+
+   sm = KRG()
+   sm.set_training_values(xtrain, ytrain)
+   sm.train()
+
+   filename = "kriging.pkl"
+   with open(filename, "wb") as f:
+       pickle.dump(sm, f)
+
+Loading the model
+"""""""""""""""""
+
+.. code-block:: python
+
+   import pickle
+
+   filename = "kriging.pkl"
+   with open(filename, "rb") as f:
+       sm2 = pickle.load(f)
+
+
+For models written in C++ (namely IDW, RBF, RMTB and RMTC)
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+These models can be cached using their `data_dir` option. Provided the user gives the same training values,
+the model is not retrained but reloaded from the cache directory. So by saving the cache directory and the training data,
+one can avoid the training cost and reload the model from the cached data.
+
+Saving the model
+""""""""""""""""
+
+.. code-block:: python
+
+   sm = RBF(data_dir="./cache")
+   sm.set_training_values(xtrain, ytrain)
+   sm.train()
+
+Loading the model
+"""""""""""""""""
+
+.. code-block:: python
+
+   sm2 = RBF(data_dir="./cache")
+   sm2.set_training_values(xtrain, ytrain)  # same training data as above!
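+   # Editor's note (an assumption based on the data_dir description above,
+   # not an API guarantee): train() below detects the cached data in ./cache
+   # and, because the training values match those used when saving, loads the
+   # cached model instead of refitting.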
+ sm2.train() # actual training is skipped, cached data model is loaded diff --git a/doc/_src_docs/surrogate_models/gekpls.rst b/doc/_src_docs/surrogate_models/gekpls.rst index c4fc330a4..55eb4b9f9 100644 --- a/doc/_src_docs/surrogate_models/gekpls.rst +++ b/doc/_src_docs/surrogate_models/gekpls.rst @@ -39,6 +39,7 @@ Usage from smt.surrogate_models import GEKPLS from smt.problems import Sphere from smt.sampling_methods import LHS + from smt.utils.kriging import XSpecs # Construction of the DOE fun = Sphere(ndim=2) @@ -49,12 +50,12 @@ Usage for i in range(2): yd = fun(xt, kx=i) yt = np.concatenate((yt, yd), axis=1) - + xspecs = XSpecs(xlimits=fun.xlimits) # Build the GEKPLS model n_comp = 2 sm = GEKPLS( + xspecs=xspecs, theta0=[1e-2] * n_comp, - xlimits=fun.xlimits, extra_points=1, print_prediction=False, n_comp=n_comp, @@ -77,7 +78,7 @@ Usage ) fig = plt.figure() - ax = fig.gca(projection="3d") + ax = fig.add_subplot(projection="3d") surf = ax.plot_surface(X, Y, Z) plt.show() @@ -98,7 +99,7 @@ Usage Training Training ... - Training - done. Time (sec): 0.1266608 + Training - done. Time (sec): 0.1515756 .. figure:: gekpls_Test_test_gekpls.png :scale: 80 % @@ -154,14 +155,9 @@ Options - Correlation function type * - categorical_kernel - None - - ['continuous_relaxation_matrix_kernel', 'gower_matrix_kernel', 'exponential_homoscedastic_matrix_kernel', 'homoscedastic_matrix_kernel'] + - [, , , ] - None - The kernel to use for categorical inputs. Only for non continuous Kriging - * - xtypes - - None - - None - - ['list'] - - x type specifications: either FLOAT for continuous, INT for integer or (ENUM n) for categorical dimension with n levels * - nugget - 2.220446049250313e-14 - None @@ -207,6 +203,15 @@ Options - None - ['int'] - number of optimizer runs (multistart method) + * - xspecs + - None + - None + - ['XSpecs'] + - xspecs : x specifications including + xtypes: x types list + x types specification: list of either FLOAT, ORD or (ENUM, n) spec. + xlimits: array-like + bounds of x features * - n_comp - 2 - None @@ -227,11 +232,6 @@ Options - None - ['list'] - Number of components for PLS categorical kernel - * - xlimits - - None - - None - - ['ndarray'] - - Lower/upper bounds in each dimension - ndarray [nx, 2] * - delta_x - 0.0001 - None diff --git a/doc/_src_docs/surrogate_models/gekpls_Test_test_gekpls.png b/doc/_src_docs/surrogate_models/gekpls_Test_test_gekpls.png index 3b3761460..edda0ebb6 100644 Binary files a/doc/_src_docs/surrogate_models/gekpls_Test_test_gekpls.png and b/doc/_src_docs/surrogate_models/gekpls_Test_test_gekpls.png differ diff --git a/doc/_src_docs/surrogate_models/genn.rst b/doc/_src_docs/surrogate_models/genn.rst index abd69916b..c4c14b841 100644 --- a/doc/_src_docs/surrogate_models/genn.rst +++ b/doc/_src_docs/surrogate_models/genn.rst @@ -1,244 +1,244 @@ -GENN -==== - -Gradient-Enhanced Neural Networks (GENN) are fully connected multi-layer perceptrons, whose training process was modified to -account for gradient information. Specifically, the parameters are learned by minimizing the Least Squares Estimator (LSE), -modified to account for partial derivatives. The theory behind the algorithm can be found `here`_, -but suffice it to say that the model is trained in such a way so as to minimize not only the prediction error :math:`y - f(x)` of -the response, but also the prediction error :math:`{dy}/{dx} - f'(x)` of the partial derivatives. 
The chief benefit of gradient-enhancement -is better accuracy with fewer training points, compared to regular neural networks without gradient-enhancement. Note that GENN applies -to regression (single-output or multi-output), but not classification since there is no gradient in that case. The implementation -is fully vectorized and uses Adam optimization, mini-batch, and L2-norm regularization. - -.. _here: https://github.com/SMTorg/smt/blob/master/doc/_src_docs/surrogate_models/genn_theory.pdf - -Limitations ------------ - -Gradient-enhanced methods only apply to the special use-case of computer aided design, where data is generated -synthetically using physics-based computer models, responses are continuous, and their gradient is defined. Furthermore, -gradient enhancement is only beneficial when the cost of obtaining the gradient is not excessive in the first place. -This is often true in computer-aided design with the advent of adjoint design methods for example, but it is not always -the case. The user should therefore carefully weight the benefit of gradient-enhanced methods depending on the application. - -Usage ------ - -.. code-block:: python - - import numpy as np - import matplotlib.pyplot as plt - from smt.surrogate_models.genn import GENN, load_smt_data - - # Training data - lower_bound = -np.pi - upper_bound = np.pi - number_of_training_points = 4 - xt = np.linspace(lower_bound, upper_bound, number_of_training_points) - yt = xt * np.sin(xt) - dyt_dxt = np.sin(xt) + xt * np.cos(xt) - - # Validation data - number_of_validation_points = 30 - xv = np.linspace(lower_bound, upper_bound, number_of_validation_points) - yv = xv * np.sin(xv) - dyv_dxv = np.sin(xv) + xv * np.cos(xv) - - # Truth model - x = np.arange(lower_bound, upper_bound, 0.01) - y = x * np.sin(x) - - # GENN - genn = GENN() - genn.options["alpha"] = 0.1 # learning rate that controls optimizer step size - genn.options["beta1"] = 0.9 # tuning parameter to control ADAM optimization - genn.options["beta2"] = 0.99 # tuning parameter to control ADAM optimization - genn.options[ - "lambd" - ] = 0.1 # lambd = 0. = no regularization, lambd > 0 = regularization - genn.options[ - "gamma" - ] = 1.0 # gamma = 0. 
= no grad-enhancement, gamma > 0 = grad-enhancement - genn.options["deep"] = 2 # number of hidden layers - genn.options["wide"] = 6 # number of nodes per hidden layer - genn.options[ - "mini_batch_size" - ] = 64 # used to divide data into training batches (use for large data sets) - genn.options["num_epochs"] = 20 # number of passes through data - genn.options[ - "num_iterations" - ] = 100 # number of optimizer iterations per mini-batch - genn.options["is_print"] = True # print output (or not) - load_smt_data( - genn, xt, yt, dyt_dxt - ) # convenience function to read in data that is in SMT format - genn.train() # API function to train model - genn.plot_training_history() # non-API function to plot training history (to check convergence) - genn.goodness_of_fit( - xv, yv, dyv_dxv - ) # non-API function to check accuracy of regression - y_pred = genn.predict_values( - x - ) # API function to predict values at new (unseen) points - - # Plot - fig, ax = plt.subplots() - ax.plot(x, y_pred) - ax.plot(x, y, "k--") - ax.plot(xv, yv, "ro") - ax.plot(xt, yt, "k+", mew=3, ms=10) - ax.set(xlabel="x", ylabel="y", title="GENN") - ax.legend(["Predicted", "True", "Test", "Train"]) - plt.show() - -:: - - ___________________________________________________________________________ - - GENN - ___________________________________________________________________________ - - Problem size - - # training points. : 4 - - ___________________________________________________________________________ - - Training - - Training ... - epoch = 0, mini-batch = 0, avg cost = 22.881 - epoch = 1, mini-batch = 0, avg cost = 7.640 - epoch = 2, mini-batch = 0, avg cost = 7.474 - epoch = 3, mini-batch = 0, avg cost = 7.379 - epoch = 4, mini-batch = 0, avg cost = 7.308 - epoch = 5, mini-batch = 0, avg cost = 4.056 - epoch = 6, mini-batch = 0, avg cost = 0.701 - epoch = 7, mini-batch = 0, avg cost = 0.660 - epoch = 8, mini-batch = 0, avg cost = 0.647 - epoch = 9, mini-batch = 0, avg cost = 0.641 - epoch = 10, mini-batch = 0, avg cost = 0.637 - epoch = 11, mini-batch = 0, avg cost = 0.634 - epoch = 12, mini-batch = 0, avg cost = 0.632 - epoch = 13, mini-batch = 0, avg cost = 0.630 - epoch = 14, mini-batch = 0, avg cost = 0.629 - epoch = 15, mini-batch = 0, avg cost = 0.628 - epoch = 16, mini-batch = 0, avg cost = 0.627 - epoch = 17, mini-batch = 0, avg cost = 0.627 - epoch = 18, mini-batch = 0, avg cost = 0.627 - epoch = 19, mini-batch = 0, avg cost = 0.626 - Training - done. Time (sec): 3.2554069 - ___________________________________________________________________________ - - Evaluation - - # eval points. : 629 - - Predicting ... - Predicting - done. Time (sec): 0.0000818 - - Prediction time/pt. (sec) : 0.0000001 - - -.. figure:: genn_Test_test_genn.png - :scale: 80 % - :align: center - -Options -------- - -.. list-table:: List of options - :header-rows: 1 - :widths: 15, 10, 20, 20, 30 - :stub-columns: 0 - - * - Option - - Default - - Acceptable values - - Acceptable types - - Description - * - print_global - - True - - None - - ['bool'] - - Global print toggle. 
If False, all printing is suppressed
-   * - print_training
-     - True
-     - None
-     - ['bool']
-     - Whether to print training information
-   * - print_prediction
-     - True
-     - None
-     - ['bool']
-     - Whether to print prediction information
-   * - print_problem
-     - True
-     - None
-     - ['bool']
-     - Whether to print problem information
-   * - print_solver
-     - True
-     - None
-     - ['bool']
-     - Whether to print solver information
-   * - alpha
-     - 0.5
-     - None
-     - ['int', 'float']
-     - optimizer learning rate
-   * - beta1
-     - 0.9
-     - None
-     - ['int', 'float']
-     - Adam optimizer tuning parameter
-   * - beta2
-     - 0.99
-     - None
-     - ['int', 'float']
-     - Adam optimizer tuning parameter
-   * - lambd
-     - 0.1
-     - None
-     - ['int', 'float']
-     - regularization coefficient
-   * - gamma
-     - 1.0
-     - None
-     - ['int', 'float']
-     - gradient-enhancement coefficient
-   * - deep
-     - 2
-     - None
-     - ['int']
-     - number of hidden layers
-   * - wide
-     - 2
-     - None
-     - ['int']
-     - number of nodes per hidden layer
-   * - mini_batch_size
-     - 64
-     - None
-     - ['int']
-     - split data into batches of specified size
-   * - num_epochs
-     - 10
-     - None
-     - ['int']
-     - number of random passes through the data
-   * - num_iterations
-     - 100
-     - None
-     - ['int']
-     - number of optimizer iterations per mini-batch
-   * - seed
-     - None
-     - None
-     - ['int']
-     - random seed to ensure repeatability of results when desired
-   * - is_print
-     - True
-     - None
-     - ['bool']
-     - print progress (or not)
+GENN
+====
+
+Gradient-Enhanced Neural Networks (GENN) are fully connected multi-layer perceptrons whose training process has been modified to
+account for gradient information. Specifically, the parameters are learned by minimizing the Least Squares Estimator (LSE),
+modified to account for partial derivatives. The theory behind the algorithm can be found `here`_,
+but suffice it to say that the model is trained in such a way as to minimize not only the prediction error :math:`y - f(x)` of
+the response, but also the prediction error :math:`{dy}/{dx} - f'(x)` of the partial derivatives. The chief benefit of gradient-enhancement
+is better accuracy with fewer training points, compared to regular neural networks without gradient-enhancement. Note that GENN applies
+to regression (single-output or multi-output), but not classification, since there is no gradient in that case. The implementation
+is fully vectorized and uses Adam optimization, mini-batching, and L2-norm regularization.
+
+.. _here: https://github.com/SMTorg/smt/blob/master/doc/_src_docs/surrogate_models/genn_theory.pdf
+
+Limitations
+-----------
+
+Gradient-enhanced methods only apply to the special use case of computer-aided design, where data is generated
+synthetically using physics-based computer models, responses are continuous, and their gradient is defined. Furthermore,
+gradient enhancement is only beneficial when the cost of obtaining the gradient is not excessive in the first place.
+This is often true in computer-aided design, for example with the advent of adjoint design methods, but it is not always
+the case. The user should therefore carefully weigh the benefit of gradient-enhanced methods depending on the application.
+
+Usage
+-----
+
+.. 
code-block:: python + + import numpy as np + import matplotlib.pyplot as plt + from smt.surrogate_models.genn import GENN, load_smt_data + + # Training data + lower_bound = -np.pi + upper_bound = np.pi + number_of_training_points = 4 + xt = np.linspace(lower_bound, upper_bound, number_of_training_points) + yt = xt * np.sin(xt) + dyt_dxt = np.sin(xt) + xt * np.cos(xt) + + # Validation data + number_of_validation_points = 30 + xv = np.linspace(lower_bound, upper_bound, number_of_validation_points) + yv = xv * np.sin(xv) + dyv_dxv = np.sin(xv) + xv * np.cos(xv) + + # Truth model + x = np.arange(lower_bound, upper_bound, 0.01) + y = x * np.sin(x) + + # GENN + genn = GENN() + genn.options["alpha"] = 0.1 # learning rate that controls optimizer step size + genn.options["beta1"] = 0.9 # tuning parameter to control ADAM optimization + genn.options["beta2"] = 0.99 # tuning parameter to control ADAM optimization + genn.options[ + "lambd" + ] = 0.1 # lambd = 0. = no regularization, lambd > 0 = regularization + genn.options[ + "gamma" + ] = 1.0 # gamma = 0. = no grad-enhancement, gamma > 0 = grad-enhancement + genn.options["deep"] = 2 # number of hidden layers + genn.options["wide"] = 6 # number of nodes per hidden layer + genn.options[ + "mini_batch_size" + ] = 64 # used to divide data into training batches (use for large data sets) + genn.options["num_epochs"] = 20 # number of passes through data + genn.options[ + "num_iterations" + ] = 100 # number of optimizer iterations per mini-batch + genn.options["is_print"] = True # print output (or not) + load_smt_data( + genn, xt, yt, dyt_dxt + ) # convenience function to read in data that is in SMT format + genn.train() # API function to train model + genn.plot_training_history() # non-API function to plot training history (to check convergence) + genn.goodness_of_fit( + xv, yv, dyv_dxv + ) # non-API function to check accuracy of regression + y_pred = genn.predict_values( + x + ) # API function to predict values at new (unseen) points + + # Plot + fig, ax = plt.subplots() + ax.plot(x, y_pred) + ax.plot(x, y, "k--") + ax.plot(xv, yv, "ro") + ax.plot(xt, yt, "k+", mew=3, ms=10) + ax.set(xlabel="x", ylabel="y", title="GENN") + ax.legend(["Predicted", "True", "Test", "Train"]) + plt.show() + +:: + + ___________________________________________________________________________ + + GENN + ___________________________________________________________________________ + + Problem size + + # training points. : 4 + + ___________________________________________________________________________ + + Training + + Training ... + epoch = 0, mini-batch = 0, avg cost = 19.248 + epoch = 1, mini-batch = 0, avg cost = 1.091 + epoch = 2, mini-batch = 0, avg cost = 0.834 + epoch = 3, mini-batch = 0, avg cost = 0.760 + epoch = 4, mini-batch = 0, avg cost = 0.699 + epoch = 5, mini-batch = 0, avg cost = 0.661 + epoch = 6, mini-batch = 0, avg cost = 0.636 + epoch = 7, mini-batch = 0, avg cost = 0.618 + epoch = 8, mini-batch = 0, avg cost = 0.603 + epoch = 9, mini-batch = 0, avg cost = 0.590 + epoch = 10, mini-batch = 0, avg cost = 0.581 + epoch = 11, mini-batch = 0, avg cost = 0.573 + epoch = 12, mini-batch = 0, avg cost = 0.568 + epoch = 13, mini-batch = 0, avg cost = 0.563 + epoch = 14, mini-batch = 0, avg cost = 0.560 + epoch = 15, mini-batch = 0, avg cost = 0.557 + epoch = 16, mini-batch = 0, avg cost = 0.555 + epoch = 17, mini-batch = 0, avg cost = 0.554 + epoch = 18, mini-batch = 0, avg cost = 0.553 + epoch = 19, mini-batch = 0, avg cost = 0.552 + Training - done. 
Time (sec): 3.6033313 + ___________________________________________________________________________ + + Evaluation + + # eval points. : 629 + + Predicting ... + Predicting - done. Time (sec): 0.0009975 + + Prediction time/pt. (sec) : 0.0000016 + + +.. figure:: genn_Test_test_genn.png + :scale: 80 % + :align: center + +Options +------- + +.. list-table:: List of options + :header-rows: 1 + :widths: 15, 10, 20, 20, 30 + :stub-columns: 0 + + * - Option + - Default + - Acceptable values + - Acceptable types + - Description + * - print_global + - True + - None + - ['bool'] + - Global print toggle. If False, all printing is suppressed + * - print_training + - True + - None + - ['bool'] + - Whether to print training information + * - print_prediction + - True + - None + - ['bool'] + - Whether to print prediction information + * - print_problem + - True + - None + - ['bool'] + - Whether to print problem information + * - print_solver + - True + - None + - ['bool'] + - Whether to print solver information + * - alpha + - 0.5 + - None + - ['int', 'float'] + - optimizer learning rate + * - beta1 + - 0.9 + - None + - ['int', 'float'] + - Adam optimizer tuning parameter + * - beta2 + - 0.99 + - None + - ['int', 'float'] + - Adam optimizer tuning parameter + * - lambd + - 0.1 + - None + - ['int', 'float'] + - regularization coefficient + * - gamma + - 1.0 + - None + - ['int', 'float'] + - gradient-enhancement coefficient + * - deep + - 2 + - None + - ['int'] + - number of hidden layers + * - wide + - 2 + - None + - ['int'] + - number of nodes per hidden layer + * - mini_batch_size + - 64 + - None + - ['int'] + - split data into batches of specified size + * - num_epochs + - 10 + - None + - ['int'] + - number of random passes through the data + * - num_iterations + - 100 + - None + - ['int'] + - number of optimizer iterations per mini-batch + * - seed + - None + - None + - ['int'] + - random seed to ensure repeatability of results when desired + * - is_print + - True + - None + - ['bool'] + - print progress (or not) diff --git a/doc/_src_docs/surrogate_models/genn_Test_test_genn.png b/doc/_src_docs/surrogate_models/genn_Test_test_genn.png index 4a85436f2..aedcbf260 100644 Binary files a/doc/_src_docs/surrogate_models/genn_Test_test_genn.png and b/doc/_src_docs/surrogate_models/genn_Test_test_genn.png differ diff --git a/doc/_src_docs/surrogate_models/idw.rst b/doc/_src_docs/surrogate_models/idw.rst index cf5173d85..02bbb6973 100644 --- a/doc/_src_docs/surrogate_models/idw.rst +++ b/doc/_src_docs/surrogate_models/idw.rst @@ -1,146 +1,146 @@ -Inverse-distance weighting -========================== - -The inverse distance weighting [1]_ (IDW) model is an interpolating method -and the unknown points are calculated with a weighted average of the sampling points. - -The prediction equation for IDW is - -.. math :: - - y = - \left\{ - \begin{array}{ll} - \frac{\sum_i^{nt} \beta(\mathbf{x}, \mathbf{xt}_i) yt_i}{\sum_i^{nt} \beta(\mathbf{x}, \mathbf{xt}_i)}, - & \text{if} \quad \mathbf{x} \neq \mathbf{xt}_i \quad \forall i\\ - yt_i - & \text{if} \quad \mathbf{x} = \mathbf{xt}_i \quad \text{for some} \; i\\ - \end{array} - \right. , - -where -:math:`\mathbf{x} \in \mathbb{R}^{nx}` is the prediction input vector, -:math:`y \in \mathbb{R}` is the prediction output, -:math:`\mathbf{xt}_i \in \mathbb{R}^{nx}` is the input vector for the :math:`i` th training point, -and -:math:`yt_i \in \mathbb{R}` is the output value for the :math:`i` th training point. -The weighting function :math:`\beta` is defined by - -.. 
math :: - - \beta( \mathbf{x}_i , \mathbf{x}_j ) = || \mathbf{x}_i - \mathbf{x}_j ||_2 ^ {-p} , - -where :math:`p` a positive real number, called the power parameter. -This parameter must be strictly greater than 1 for the derivatives to be continuous. - -.. [1] Shepard, D., A Two-dimensional Interpolation Function for Irregularly-spaced Data, Proceedings of the 1968 23rd ACM National Conference, 1968, pp. 517--524. - -Usage ------ - -.. code-block:: python - - import numpy as np - import matplotlib.pyplot as plt - - from smt.surrogate_models import IDW - - xt = np.array([0.0, 1.0, 2.0, 3.0, 4.0]) - yt = np.array([0.0, 1.0, 1.5, 0.9, 1.0]) - - sm = IDW(p=2) - sm.set_training_values(xt, yt) - sm.train() - - num = 100 - x = np.linspace(0.0, 4.0, num) - y = sm.predict_values(x) - - plt.plot(xt, yt, "o") - plt.plot(x, y) - plt.xlabel("x") - plt.ylabel("y") - plt.legend(["Training data", "Prediction"]) - plt.show() - -:: - - ___________________________________________________________________________ - - IDW - ___________________________________________________________________________ - - Problem size - - # training points. : 5 - - ___________________________________________________________________________ - - Training - - Training ... - Training - done. Time (sec): 0.0000000 - ___________________________________________________________________________ - - Evaluation - - # eval points. : 100 - - Predicting ... - Predicting - done. Time (sec): 0.0000000 - - Prediction time/pt. (sec) : 0.0000000 - - -.. figure:: idw_Test_test_idw.png - :scale: 80 % - :align: center - -Options -------- - -.. list-table:: List of options - :header-rows: 1 - :widths: 15, 10, 20, 20, 30 - :stub-columns: 0 - - * - Option - - Default - - Acceptable values - - Acceptable types - - Description - * - print_global - - True - - None - - ['bool'] - - Global print toggle. If False, all printing is suppressed - * - print_training - - True - - None - - ['bool'] - - Whether to print training information - * - print_prediction - - True - - None - - ['bool'] - - Whether to print prediction information - * - print_problem - - True - - None - - ['bool'] - - Whether to print problem information - * - print_solver - - True - - None - - ['bool'] - - Whether to print solver information - * - p - - 2.5 - - None - - ['int', 'float'] - - order of distance norm - * - data_dir - - None - - None - - ['str'] - - Directory for loading / saving cached data; None means do not save or load +Inverse-distance weighting +========================== + +The inverse distance weighting [1]_ (IDW) model is an interpolating method +and the unknown points are calculated with a weighted average of the sampling points. + +The prediction equation for IDW is + +.. math :: + + y = + \left\{ + \begin{array}{ll} + \frac{\sum_i^{nt} \beta(\mathbf{x}, \mathbf{xt}_i) yt_i}{\sum_i^{nt} \beta(\mathbf{x}, \mathbf{xt}_i)}, + & \text{if} \quad \mathbf{x} \neq \mathbf{xt}_i \quad \forall i\\ + yt_i + & \text{if} \quad \mathbf{x} = \mathbf{xt}_i \quad \text{for some} \; i\\ + \end{array} + \right. , + +where +:math:`\mathbf{x} \in \mathbb{R}^{nx}` is the prediction input vector, +:math:`y \in \mathbb{R}` is the prediction output, +:math:`\mathbf{xt}_i \in \mathbb{R}^{nx}` is the input vector for the :math:`i` th training point, +and +:math:`yt_i \in \mathbb{R}` is the output value for the :math:`i` th training point. +The weighting function :math:`\beta` is defined by + +.. 
math ::
+
+  \beta( \mathbf{x}_i , \mathbf{x}_j ) = || \mathbf{x}_i - \mathbf{x}_j ||_2 ^ {-p} ,
+
+where :math:`p` is a positive real number, called the power parameter.
+This parameter must be strictly greater than 1 for the derivatives to be continuous.
+
+.. [1] Shepard, D., A Two-dimensional Interpolation Function for Irregularly-spaced Data, Proceedings of the 1968 23rd ACM National Conference, 1968, pp. 517--524.
+
+Usage
+-----
+
+.. code-block:: python
+
+   import numpy as np
+   import matplotlib.pyplot as plt
+
+   from smt.surrogate_models import IDW
+
+   xt = np.array([0.0, 1.0, 2.0, 3.0, 4.0])
+   yt = np.array([0.0, 1.0, 1.5, 0.9, 1.0])
+
+   sm = IDW(p=2)
+   sm.set_training_values(xt, yt)
+   sm.train()
+
+   num = 100
+   x = np.linspace(0.0, 4.0, num)
+   y = sm.predict_values(x)
+
+   plt.plot(xt, yt, "o")
+   plt.plot(x, y)
+   plt.xlabel("x")
+   plt.ylabel("y")
+   plt.legend(["Training data", "Prediction"])
+   plt.show()
+
+::
+
+   ___________________________________________________________________________
+
+                                      IDW
+   ___________________________________________________________________________
+
+    Problem size
+
+       # training points. : 5
+
+   ___________________________________________________________________________
+
+    Training
+
+       Training ...
+       Training - done. Time (sec): 0.0009980
+   ___________________________________________________________________________
+
+    Evaluation
+
+       # eval points. : 100
+
+    Predicting ...
+    Predicting - done. Time (sec): 0.0000000
+
+    Prediction time/pt. (sec) : 0.0000000
+
+
+.. figure:: idw_Test_test_idw.png
+   :scale: 80 %
+   :align: center
+
+Options
+-------
+
+.. list-table:: List of options
+   :header-rows: 1
+   :widths: 15, 10, 20, 20, 30
+   :stub-columns: 0
+
+   * - Option
+     - Default
+     - Acceptable values
+     - Acceptable types
+     - Description
+   * - print_global
+     - True
+     - None
+     - ['bool']
+     - Global print toggle. If False, all printing is suppressed
+   * - print_training
+     - True
+     - None
+     - ['bool']
+     - Whether to print training information
+   * - print_prediction
+     - True
+     - None
+     - ['bool']
+     - Whether to print prediction information
+   * - print_problem
+     - True
+     - None
+     - ['bool']
+     - Whether to print problem information
+   * - print_solver
+     - True
+     - None
+     - ['bool']
+     - Whether to print solver information
+   * - p
+     - 2.5
+     - None
+     - ['int', 'float']
+     - order of distance norm
+   * - data_dir
+     - None
+     - None
+     - ['str']
+     - Directory for loading / saving cached data; None means do not save or load
diff --git a/doc/_src_docs/surrogate_models/kpls.rst b/doc/_src_docs/surrogate_models/kpls.rst
index f92102c1e..215ec0788 100644
--- a/doc/_src_docs/surrogate_models/kpls.rst
+++ b/doc/_src_docs/surrogate_models/kpls.rst
@@ -83,7 +83,7 @@ Usage
 
    Training
 
       Training ...
-      Training - done. Time (sec): 0.0449092
+      Training - done. Time (sec): 0.0339122
    ___________________________________________________________________________
 
    Evaluation
 
@@ -117,7 +117,6 @@ Usage with an automatic number of components
 
 .. code-block:: python
 
    import numpy as np
-   import matplotlib.pyplot as plt
    from smt.surrogate_models import KPLS
    from smt.problems import TensorProduct
    from smt.sampling_methods import LHS
 
@@ -159,7 +158,7 @@ Usage with an automatic number of components
 
    Training
 
       Training ...
-      Training - done. Time (sec): 3.3513072
+      Training - done. Time (sec): 2.9034190
 
 The model automatically chooses 3 components.
 ___________________________________________________________________________
 
    Evaluation
 
       # eval points. : 1
 
    Predicting ... 
- Predicting - done. Time (sec): 0.0000000 + Predicting - done. Time (sec): 0.0009978 - Prediction time/pt. (sec) : 0.0000000 + Prediction time/pt. (sec) : 0.0009978 [[20.57448753]] [[1073.87724138]] @@ -227,14 +226,9 @@ Options - Correlation function type * - categorical_kernel - None - - ['continuous_relaxation_matrix_kernel', 'gower_matrix_kernel', 'exponential_homoscedastic_matrix_kernel', 'homoscedastic_matrix_kernel'] + - [, , , ] - None - The kernel to use for categorical inputs. Only for non continuous Kriging - * - xtypes - - None - - None - - ['list'] - - x type specifications: either FLOAT for continuous, INT for integer or (ENUM n) for categorical dimension with n levels * - nugget - 2.220446049250313e-14 - None @@ -280,6 +274,15 @@ Options - None - ['int'] - number of optimizer runs (multistart method) + * - xspecs + - None + - None + - ['XSpecs'] + - xspecs : x specifications including + xtypes: x types list + x types specification: list of either FLOAT, ORD or (ENUM, n) spec. + xlimits: array-like + bounds of x features * - n_comp - 1 - None diff --git a/doc/_src_docs/surrogate_models/kplsk.rst b/doc/_src_docs/surrogate_models/kplsk.rst index 98acfaebd..4eb6b8f74 100644 --- a/doc/_src_docs/surrogate_models/kplsk.rst +++ b/doc/_src_docs/surrogate_models/kplsk.rst @@ -85,7 +85,7 @@ Usage Training Training ... - Training - done. Time (sec): 0.0538588 + Training - done. Time (sec): 0.0638351 ___________________________________________________________________________ Evaluation @@ -93,9 +93,9 @@ Usage # eval points. : 100 Predicting ... - Predicting - done. Time (sec): 0.0009999 + Predicting - done. Time (sec): 0.0000000 - Prediction time/pt. (sec) : 0.0000100 + Prediction time/pt. (sec) : 0.0000000 ___________________________________________________________________________ @@ -104,9 +104,9 @@ Usage # eval points. : 5 Predicting ... - Predicting - done. Time (sec): 0.0000000 + Predicting - done. Time (sec): 0.0009973 - Prediction time/pt. (sec) : 0.0000000 + Prediction time/pt. (sec) : 0.0001995 .. figure:: kplsk_Test_test_kplsk.png @@ -163,14 +163,9 @@ Options - Correlation function type * - categorical_kernel - None - - ['gower', 'homoscedastic_gaussian_matrix_kernel', 'full_gaussian_matrix_kernel'] - - ['str'] - - The kernel to use for categorical inputs. Only for non continuous Kriging - * - xtypes - - None + - [, , , ] - None - - ['list'] - - x type specifications: either FLOAT for continuous, INT for integer or (ENUM n) for categorical dimension with n levels + - The kernel to use for categorical inputs. Only for non continuous Kriging * - nugget - 2.220446049250313e-14 - None @@ -216,6 +211,15 @@ Options - None - ['int'] - number of optimizer runs (multistart method) + * - xspecs + - None + - None + - ['XSpecs'] + - xspecs : x specifications including + xtypes: x types list + x types specification: list of either FLOAT, ORD or (ENUM, n) spec. 
+ xlimits: array-like + bounds of x features * - n_comp - 1 - None @@ -231,3 +235,8 @@ Options - None - ['float'] - n_comp evaluation treshold for Wold's R criterion + * - cat_kernel_comps + - None + - None + - ['list'] + - Number of components for PLS categorical kernel diff --git a/doc/_src_docs/surrogate_models/kplsk_Test_test_kplsk.png b/doc/_src_docs/surrogate_models/kplsk_Test_test_kplsk.png index a5827686c..93f8451b2 100644 Binary files a/doc/_src_docs/surrogate_models/kplsk_Test_test_kplsk.png and b/doc/_src_docs/surrogate_models/kplsk_Test_test_kplsk.png differ diff --git a/doc/_src_docs/surrogate_models/krg.rst b/doc/_src_docs/surrogate_models/krg.rst index fa90b7216..ea3797cee 100644 --- a/doc/_src_docs/surrogate_models/krg.rst +++ b/doc/_src_docs/surrogate_models/krg.rst @@ -131,7 +131,7 @@ Example 1 Training Training ... - Training - done. Time (sec): 0.0408301 + Training - done. Time (sec): 0.0359063 ___________________________________________________________________________ Evaluation @@ -150,9 +150,9 @@ Example 1 # eval points. : 5 Predicting ... - Predicting - done. Time (sec): 0.0000000 + Predicting - done. Time (sec): 0.0009661 - Prediction time/pt. (sec) : 0.0000000 + Prediction time/pt. (sec) : 0.0001932 .. figure:: krg_Test_test_krg.png @@ -167,8 +167,9 @@ Example 2 with mixed variables import numpy as np import matplotlib.pyplot as plt - from smt.surrogate_models import KRG - from smt.applications.mixed_integer import MixedIntegerSurrogateModel, ORD + from smt.surrogate_models import KRG, XType + from smt.applications.mixed_integer import MixedIntegerKrigingModel + from smt.utils.kriging import XSpecs xt = np.array([0.0, 2.0, 3.0]) yt = np.array([0.0, 1.5, 0.9]) @@ -178,10 +179,8 @@ Example 2 with mixed variables # ORD means x2 integer # (ENUM, 3) means x3, x4 & x5 are 3 levels of the same categorical variable # (ENUM, 2) means x6 & x7 are 2 levels of the same categorical variable - - sm = MixedIntegerSurrogateModel( - xtypes=[ORD], xlimits=[[0, 4]], surrogate=KRG(theta0=[1e-2]) - ) + xspecs = XSpecs(xtypes=[XType.ORD], xlimits=[[0, 4]]) + sm = MixedIntegerKrigingModel(surrogate=KRG(xspecs=xspecs, theta0=[1e-2])) sm.set_training_values(xt, yt) sm.train() @@ -218,9 +217,9 @@ Example 2 with mixed variables # eval points. : 500 Predicting ... - Predicting - done. Time (sec): 0.0000000 + Predicting - done. Time (sec): 0.0009868 - Prediction time/pt. (sec) : 0.0000000 + Prediction time/pt. (sec) : 0.0000020 .. figure:: krg_Test_test_mixed_int_krg.png @@ -277,14 +276,9 @@ Options - Correlation function type * - categorical_kernel - None - - ['continuous_relaxation_matrix_kernel', 'gower_matrix_kernel', 'exponential_homoscedastic_matrix_kernel', 'homoscedastic_matrix_kernel'] + - [, , , ] - None - The kernel to use for categorical inputs. Only for non continuous Kriging - * - xtypes - - None - - None - - ['list'] - - x type specifications: either FLOAT for continuous, INT for integer or (ENUM n) for categorical dimension with n levels * - nugget - 2.220446049250313e-14 - None @@ -330,3 +324,12 @@ Options - None - ['int'] - number of optimizer runs (multistart method) + * - xspecs + - None + - None + - ['XSpecs'] + - xspecs : x specifications including + xtypes: x types list + x types specification: list of either FLOAT, ORD or (ENUM, n) spec. 
+ xlimits: array-like + bounds of x features diff --git a/doc/_src_docs/surrogate_models/ls.rst b/doc/_src_docs/surrogate_models/ls.rst index 1f15d8a52..2c190a6d9 100644 --- a/doc/_src_docs/surrogate_models/ls.rst +++ b/doc/_src_docs/surrogate_models/ls.rst @@ -57,7 +57,7 @@ Usage Training Training ... - Training - done. Time (sec): 0.0019948 + Training - done. Time (sec): 0.0031133 ___________________________________________________________________________ Evaluation diff --git a/doc/_src_docs/surrogate_models/ls_Test_test_ls.png b/doc/_src_docs/surrogate_models/ls_Test_test_ls.png index a93f21de3..58d603815 100644 Binary files a/doc/_src_docs/surrogate_models/ls_Test_test_ls.png and b/doc/_src_docs/surrogate_models/ls_Test_test_ls.png differ diff --git a/doc/_src_docs/surrogate_models/mgp.rst b/doc/_src_docs/surrogate_models/mgp.rst index 89280f671..7618ab058 100644 --- a/doc/_src_docs/surrogate_models/mgp.rst +++ b/doc/_src_docs/surrogate_models/mgp.rst @@ -128,7 +128,7 @@ Usage Training Training ... - Training - done. Time (sec): 1.0243082 + Training - done. Time (sec): 0.9749014 .. figure:: mgp_Test_test_mgp.png :scale: 80 % @@ -184,14 +184,9 @@ Options - Correlation function type * - categorical_kernel - None - - ['continuous_relaxation_matrix_kernel', 'gower_matrix_kernel', 'exponential_homoscedastic_matrix_kernel', 'homoscedastic_matrix_kernel'] + - [, , , ] - None - The kernel to use for categorical inputs. Only for non continuous Kriging - * - xtypes - - None - - None - - ['list'] - - x type specifications: either FLOAT for continuous, INT for integer or (ENUM n) for categorical dimension with n levels * - nugget - 2.220446049250313e-14 - None @@ -237,6 +232,15 @@ Options - None - ['int'] - number of optimizer runs (multistart method) + * - xspecs + - None + - None + - ['XSpecs'] + - xspecs : x specifications including + xtypes: x types list + x types specification: list of either FLOAT, ORD or (ENUM, n) spec. + xlimits: array-like + bounds of x features * - n_comp - 1 - None diff --git a/doc/_src_docs/surrogate_models/rbf.rst b/doc/_src_docs/surrogate_models/rbf.rst index 92e889118..d5f1caa1a 100644 --- a/doc/_src_docs/surrogate_models/rbf.rst +++ b/doc/_src_docs/surrogate_models/rbf.rst @@ -1,188 +1,188 @@ -Radial basis functions -====================== - -The radial basis function (RBF) surrogate model represents the interpolating function -as a linear combination of basis functions, one for each training point. -RBFs are named as such because the basis functions depend only on -the distance from the prediction point to the training point for the basis function. -The coefficients of the basis functions are computed during the training stage. -RBFs are frequently augmented to global polynomials to capture the general trends. - -The prediction equation for RBFs is - -.. 
math :: - y = \mathbf{p}(\mathbf{x}) \mathbf{w_p} + \sum_i^{nt} \phi(\mathbf{x}, \mathbf{xt}_i) \mathbf{w_r} , - -where -:math:`\mathbf{x} \in \mathbb{R}^{nx}` is the prediction input vector, -:math:`y \in \mathbb{R}` is the prediction output, -:math:`\mathbf{xt}_i \in \mathbb{R}^{nx}` is the input vector for the :math:`i` th training point, -:math:`\mathbf{p}(\mathbf{x}) \in \mathbb{R}^{np}` is the vector mapping the polynomial coefficients to the prediction output, -:math:`\phi(\mathbf{x}, \mathbf{xt}_i) \in \mathbb{R}^{nt}` is the vector mapping the radial basis function coefficients to the prediction output, -:math:`\mathbf{w_p} \in \mathbb{R}^{np}` is the vector of polynomial coefficients, -and -:math:`\mathbf{w_r} \in \mathbb{R}^{nt}` is the vector of radial basis function coefficients. - -The coefficients, :math:`\mathbf{w_p}` and :math:`\mathbf{w_r}`, are computed by solving the follow linear system: - -.. math :: - - \begin{bmatrix} - \phi( \mathbf{xt}_1 , \mathbf{xt}_1 ) & \dots & \phi( \mathbf{xt}_1 , \mathbf{xt}_{nt} ) & \mathbf{p}(\mathbf{xt}_1) ^ T \\ - \vdots & \ddots & \vdots & \vdots \\ - \phi( \mathbf{xt}_{nt} , \mathbf{xt}_1 ) & \dots & \phi( \mathbf{xt}_{nt} , \mathbf{xt}_{nt} ) & \mathbf{p}( \mathbf{xt}_{nt} ) ^ T \\ - \mathbf{p}( \mathbf{xt}_1 ) & \dots & \mathbf{p}( \mathbf{xt}_{nt} ) & \mathbf{0} \\ - \end{bmatrix} - \begin{bmatrix} - \mathbf{w_r}_1 \\ - \vdots \\ - \mathbf{w_r}_{nt} \\ - \mathbf{w_p} \\ - \end{bmatrix} - = - \begin{bmatrix} - yt_1 \\ - \vdots \\ - yt_{nt} \\ - 0 \\ - \end{bmatrix} - -Only Gaussian basis functions are currently implemented. -These are given by: - -.. math :: - - \phi( \mathbf{x}_i , \mathbf{x}_j ) = \exp \left( \frac{|| \mathbf{x}_i - \mathbf{x}_j ||_2 ^ 2}{d0^2} \right) - -Usage ------ - -.. code-block:: python - - import numpy as np - import matplotlib.pyplot as plt - - from smt.surrogate_models import RBF - - xt = np.array([0.0, 1.0, 2.0, 3.0, 4.0]) - yt = np.array([0.0, 1.0, 1.5, 0.9, 1.0]) - - sm = RBF(d0=5) - sm.set_training_values(xt, yt) - sm.train() - - num = 100 - x = np.linspace(0.0, 4.0, num) - y = sm.predict_values(x) - - plt.plot(xt, yt, "o") - plt.plot(x, y) - plt.xlabel("x") - plt.ylabel("y") - plt.legend(["Training data", "Prediction"]) - plt.show() - -:: - - ___________________________________________________________________________ - - RBF - ___________________________________________________________________________ - - Problem size - - # training points. : 5 - - ___________________________________________________________________________ - - Training - - Training ... - Initializing linear solver ... - Performing LU fact. (5 x 5 mtx) ... - Performing LU fact. (5 x 5 mtx) - done. Time (sec): 0.0000000 - Initializing linear solver - done. Time (sec): 0.0000000 - Solving linear system (col. 0) ... - Back solving (5 x 5 mtx) ... - Back solving (5 x 5 mtx) - done. Time (sec): 0.0000000 - Solving linear system (col. 0) - done. Time (sec): 0.0000000 - Training - done. Time (sec): 0.0000000 - ___________________________________________________________________________ - - Evaluation - - # eval points. : 100 - - Predicting ... - Predicting - done. Time (sec): 0.0000000 - - Prediction time/pt. (sec) : 0.0000000 - - -.. figure:: rbf_Test_test_rbf.png - :scale: 80 % - :align: center - -Options -------- - -.. 
list-table:: List of options
-   :header-rows: 1
-   :widths: 15, 10, 20, 20, 30
-   :stub-columns: 0
-
-   * - Option
-     - Default
-     - Acceptable values
-     - Acceptable types
-     - Description
-   * - print_global
-     - True
-     - None
-     - ['bool']
-     - Global print toggle. If False, all printing is suppressed
-   * - print_training
-     - True
-     - None
-     - ['bool']
-     - Whether to print training information
-   * - print_prediction
-     - True
-     - None
-     - ['bool']
-     - Whether to print prediction information
-   * - print_problem
-     - True
-     - None
-     - ['bool']
-     - Whether to print problem information
-   * - print_solver
-     - True
-     - None
-     - ['bool']
-     - Whether to print solver information
-   * - d0
-     - 1.0
-     - None
-     - ['int', 'float', 'list', 'ndarray']
-     - basis function scaling parameter in exp(-d^2 / d0^2)
-   * - poly_degree
-     - -1
-     - [-1, 0, 1]
-     - ['int']
-     - -1 means no global polynomial, 0 means constant, 1 means linear trend
-   * - data_dir
-     - None
-     - None
-     - ['str']
-     - Directory for loading / saving cached data; None means do not save or load
-   * - reg
-     - 1e-10
-     - None
-     - ['int', 'float']
-     - Regularization coeff.
-   * - max_print_depth
-     - 5
-     - None
-     - ['int']
-     - Maximum depth (level of nesting) to print operation descriptions and times
+Radial basis functions
+======================
+
+The radial basis function (RBF) surrogate model represents the interpolating function
+as a linear combination of basis functions, one for each training point.
+RBFs are named as such because the basis functions depend only on
+the distance from the prediction point to the training point associated with each basis function.
+The coefficients of the basis functions are computed during the training stage.
+RBFs are frequently augmented with global polynomials to capture general trends.
+
+The prediction equation for RBFs is
+
+.. math ::
+  y = \mathbf{p}(\mathbf{x}) \mathbf{w_p} + \sum_i^{nt} \phi(\mathbf{x}, \mathbf{xt}_i) \mathbf{w_r} ,
+
+where
+:math:`\mathbf{x} \in \mathbb{R}^{nx}` is the prediction input vector,
+:math:`y \in \mathbb{R}` is the prediction output,
+:math:`\mathbf{xt}_i \in \mathbb{R}^{nx}` is the input vector for the :math:`i` th training point,
+:math:`\mathbf{p}(\mathbf{x}) \in \mathbb{R}^{np}` is the vector mapping the polynomial coefficients to the prediction output,
+:math:`\phi(\mathbf{x}, \mathbf{xt}_i) \in \mathbb{R}^{nt}` is the vector mapping the radial basis function coefficients to the prediction output,
+:math:`\mathbf{w_p} \in \mathbb{R}^{np}` is the vector of polynomial coefficients,
+and
+:math:`\mathbf{w_r} \in \mathbb{R}^{nt}` is the vector of radial basis function coefficients.
+
+The coefficients, :math:`\mathbf{w_p}` and :math:`\mathbf{w_r}`, are computed by solving the following linear system:
+
+.. math ::
+
+  \begin{bmatrix}
+    \phi( \mathbf{xt}_1 , \mathbf{xt}_1 ) & \dots & \phi( \mathbf{xt}_1 , \mathbf{xt}_{nt} ) & \mathbf{p}(\mathbf{xt}_1) ^ T \\
+    \vdots & \ddots & \vdots & \vdots \\
+    \phi( \mathbf{xt}_{nt} , \mathbf{xt}_1 ) & \dots & \phi( \mathbf{xt}_{nt} , \mathbf{xt}_{nt} ) & \mathbf{p}( \mathbf{xt}_{nt} ) ^ T \\
+    \mathbf{p}( \mathbf{xt}_1 ) & \dots & \mathbf{p}( \mathbf{xt}_{nt} ) & \mathbf{0} \\
+  \end{bmatrix}
+  \begin{bmatrix}
+    \mathbf{w_r}_1 \\
+    \vdots \\
+    \mathbf{w_r}_{nt} \\
+    \mathbf{w_p} \\
+  \end{bmatrix}
+  =
+  \begin{bmatrix}
+    yt_1 \\
+    \vdots \\
+    yt_{nt} \\
+    0 \\
+  \end{bmatrix}
+
+Only Gaussian basis functions are currently implemented.
+These are given by:
+
+.. 
math ::
+
+  \phi( \mathbf{x}_i , \mathbf{x}_j ) = \exp \left( - \frac{|| \mathbf{x}_i - \mathbf{x}_j ||_2 ^ 2}{d0^2} \right)
+
+Usage
+-----
+
+.. code-block:: python
+
+   import numpy as np
+   import matplotlib.pyplot as plt
+
+   from smt.surrogate_models import RBF
+
+   xt = np.array([0.0, 1.0, 2.0, 3.0, 4.0])
+   yt = np.array([0.0, 1.0, 1.5, 0.9, 1.0])
+
+   sm = RBF(d0=5)
+   sm.set_training_values(xt, yt)
+   sm.train()
+
+   num = 100
+   x = np.linspace(0.0, 4.0, num)
+   y = sm.predict_values(x)
+
+   plt.plot(xt, yt, "o")
+   plt.plot(x, y)
+   plt.xlabel("x")
+   plt.ylabel("y")
+   plt.legend(["Training data", "Prediction"])
+   plt.show()
+
+::
+
+   ___________________________________________________________________________
+
+                                      RBF
+   ___________________________________________________________________________
+
+    Problem size
+
+       # training points. : 5
+
+   ___________________________________________________________________________
+
+    Training
+
+       Training ...
+          Initializing linear solver ...
+             Performing LU fact. (5 x 5 mtx) ...
+             Performing LU fact. (5 x 5 mtx) - done. Time (sec): 0.0000000
+          Initializing linear solver - done. Time (sec): 0.0000000
+          Solving linear system (col. 0) ...
+             Back solving (5 x 5 mtx) ...
+             Back solving (5 x 5 mtx) - done. Time (sec): 0.0000000
+          Solving linear system (col. 0) - done. Time (sec): 0.0000000
+       Training - done. Time (sec): 0.0000000
+   ___________________________________________________________________________
+
+    Evaluation
+
+       # eval points. : 100
+
+    Predicting ...
+    Predicting - done. Time (sec): 0.0000000
+
+    Prediction time/pt. (sec) : 0.0000000
+
+
+.. figure:: rbf_Test_test_rbf.png
+   :scale: 80 %
+   :align: center
+
+Options
+-------
+
+.. list-table:: List of options
+   :header-rows: 1
+   :widths: 15, 10, 20, 20, 30
+   :stub-columns: 0
+
+   * - Option
+     - Default
+     - Acceptable values
+     - Acceptable types
+     - Description
+   * - print_global
+     - True
+     - None
+     - ['bool']
+     - Global print toggle. If False, all printing is suppressed
+   * - print_training
+     - True
+     - None
+     - ['bool']
+     - Whether to print training information
+   * - print_prediction
+     - True
+     - None
+     - ['bool']
+     - Whether to print prediction information
+   * - print_problem
+     - True
+     - None
+     - ['bool']
+     - Whether to print problem information
+   * - print_solver
+     - True
+     - None
+     - ['bool']
+     - Whether to print solver information
+   * - d0
+     - 1.0
+     - None
+     - ['int', 'float', 'list', 'ndarray']
+     - basis function scaling parameter in exp(-d^2 / d0^2)
+   * - poly_degree
+     - -1
+     - [-1, 0, 1]
+     - ['int']
+     - -1 means no global polynomial, 0 means constant, 1 means linear trend
+   * - data_dir
+     - None
+     - None
+     - ['str']
+     - Directory for loading / saving cached data; None means do not save or load
+   * - reg
+     - 1e-10
+     - None
+     - ['int', 'float']
+     - Regularization coeff.
+   * - max_print_depth
+     - 5
+     - None
+     - ['int']
+     - Maximum depth (level of nesting) to print operation descriptions and times
diff --git a/doc/_src_docs/surrogate_models/rmts.rst b/doc/_src_docs/surrogate_models/rmts.rst
index ebaae2e03..57eefe25d 100644
--- a/doc/_src_docs/surrogate_models/rmts.rst
+++ b/doc/_src_docs/surrogate_models/rmts.rst
@@ -1,526 +1,526 @@
-Regularized minimal-energy tensor-product splines
-=================================================
-
-Regularized minimal-energy tensor-product splines (RMTS) is a type of surrogate model for
-low-dimensional problems with large datasets and where fast prediction is desired. 
-The underlying mathematical functions are tensor-product splines, -which limits RMTS to up to 4-D problems, or 5-D problems in certain cases. -On the other hand, tensor-product splines enable a very fast prediction time -that does not increase with the number of training points. -Unlike other methods like Kriging and radial basis functions, -RMTS is not susceptible to numerical issues when there is a large number of training points -or when there are points that are too close together. - -The prediction equation for RMTS is - -.. math :: - y = \mathbf{F}(\mathbf{x}) \mathbf{w} , - -where -:math:`\mathbf{x} \in \mathbb{R}^{nx}` is the prediction input vector, -:math:`y \in \mathbb{R}` is the prediction output, -:math:`\mathbf{w} \in \mathbb{R}^{nw}` is the vector of spline coefficients, -and -:math:`\mathbf{F}(\mathbf{x}) \in \mathbb{R}^{nw}` is the vector mapping the spline coefficients to the prediction output. - -RMTS computes the coefficients of the splines, :math:`\mathbf{w}`, by solving an energy minimization problem -subject to the conditions that the splines pass through the training points. -This is formulated as an unconstrained optimization problem -where the objective function consists of a term containing the second derivatives of the splines, -another term representing the approximation error for the training points, -and another term for regularization: - -.. math :: - - \begin{array}{r l} - \underset{\mathbf{w}}{\min} & \frac{1}{2} \mathbf{w}^T \mathbf{H} \mathbf{w} - + \frac{1}{2} \beta \mathbf{w}^T \mathbf{w} - \\ - & - + \frac{1}{2} \frac{1}{\alpha} - \sum_i^{nt} \left[ \mathbf{F}(\mathbf{xt}_i) \mathbf{w} - yt_i \right] ^ 2 - \end{array} , - -where -:math:`\mathbf{xt}_i \in \mathbb{R}^{nx}` is the input vector for the :math:`i` th training point, -:math:`yt_i \in \mathbb{R}` is the output value for the :math:`i` th training point, -:math:`\mathbf{H} \in \mathbb{R}^{nw \times nw}` is the matrix containing the second derivatives, -:math:`\mathbf{F}(\mathbf{xt}_i) \in \mathbb{R}^{nw}` is the vector mapping the spline coefficients to the :math:`i` th training output, -and :math:`\alpha` and :math:`\beta` are regularization coefficients. - -In problems with a large number of training points relative to the number of spline coefficients, -the energy minimization term is not necessary; -this term can be zero-ed by setting the reg_cons option to zero. -In problems with a small dataset, the energy minimization is necessary. -When the true function has high curvature, the energy minimization can be counterproductive -in the regions of high curvature. -This can be addressed by increasing the quadratic approximation term to one of higher order, -and using Newton's method to solve the nonlinear system that results. -The nonlinear formulation is given by - -.. math:: - - \begin{array}{r l} - \underset{\mathbf{w}}{\min} & \frac{1}{2} \mathbf{w}^T \mathbf{H} \mathbf{w} - + \frac{1}{2} \beta \mathbf{w}^T \mathbf{w} - \\ - & - + \frac{1}{2} \frac{1}{\alpha} - \sum_i^{nt} \left[ \mathbf{F}(\mathbf{xt}_i) \mathbf{w} - yt_i \right] ^ p - \end{array} - , - -where :math:`p` is the order given by the approx_order option. -The number of Newton iterations can be specified via the :code:`nonlinear_maxiter` option. - -RMTS is implemented in SMT with two choices of splines: - -1. B-splines (RMTB): RMTB uses B-splines with a uniform knot vector in each dimension. 
-The number of B-spline control points and the B-spline order in each dimension are options -that trade off efficiency and precision of the interpolant. - -2. Cubic Hermite splines (RMTC): RMTC divides the domain into tensor-product cubic elements. -For adjacent elements, the values and derivatives are continuous. -The number of elements in each dimension is an option that trades off efficiency and precision. - -In general, RMTB is the better choice when training time is the most important, -while RMTC is the better choice when accuracy of the interpolant is the most important. -More details of these methods are given in [1]_. - -.. [1] Hwang, J. T., & Martins, J. R. (2018). A fast-prediction surrogate model for large datasets. Aerospace Science and Technology, 75, 74-87. - -Usage (RMTB) ------------- - -.. code-block:: python - - import numpy as np - import matplotlib.pyplot as plt - - from smt.surrogate_models import RMTB - - xt = np.array([0.0, 1.0, 2.0, 3.0, 4.0]) - yt = np.array([0.0, 1.0, 1.5, 0.9, 1.0]) - - xlimits = np.array([[0.0, 4.0]]) - - sm = RMTB( - xlimits=xlimits, - order=4, - num_ctrl_pts=20, - energy_weight=1e-15, - regularization_weight=0.0, - ) - sm.set_training_values(xt, yt) - sm.train() - - num = 100 - x = np.linspace(0.0, 4.0, num) - y = sm.predict_values(x) - - plt.plot(xt, yt, "o") - plt.plot(x, y) - plt.xlabel("x") - plt.ylabel("y") - plt.legend(["Training data", "Prediction"]) - plt.show() - -:: - - ___________________________________________________________________________ - - RMTB - ___________________________________________________________________________ - - Problem size - - # training points. : 5 - - ___________________________________________________________________________ - - Training - - Training ... - Pre-computing matrices ... - Computing dof2coeff ... - Computing dof2coeff - done. Time (sec): 0.0000000 - Initializing Hessian ... - Initializing Hessian - done. Time (sec): 0.0000000 - Computing energy terms ... - Computing energy terms - done. Time (sec): 0.0000000 - Computing approximation terms ... - Computing approximation terms - done. Time (sec): 0.0000000 - Pre-computing matrices - done. Time (sec): 0.0000000 - Solving for degrees of freedom ... - Solving initial startup problem (n=20) ... - Solving for output 0 ... - Iteration (num., iy, grad. norm, func.) : 0 0 1.549745600e+00 2.530000000e+00 - Iteration (num., iy, grad. norm, func.) : 0 0 1.395101781e-15 4.464186103e-16 - Solving for output 0 - done. Time (sec): 0.0000000 - Solving initial startup problem (n=20) - done. Time (sec): 0.0000000 - Solving nonlinear problem (n=20) ... - Solving for output 0 ... - Iteration (num., iy, grad. norm, func.) : 0 0 1.531354982e-15 4.464186103e-16 - Solving for output 0 - done. Time (sec): 0.0000000 - Solving nonlinear problem (n=20) - done. Time (sec): 0.0000000 - Solving for degrees of freedom - done. Time (sec): 0.0000000 - Training - done. Time (sec): 0.0000000 - ___________________________________________________________________________ - - Evaluation - - # eval points. : 100 - - Predicting ... - Predicting - done. Time (sec): 0.0100148 - - Prediction time/pt. (sec) : 0.0001001 - - -.. figure:: rmts_Test_test_rmtb.png - :scale: 80 % - :align: center - -Usage (RMTC) ------------- - -.. 
code-block:: python - - import numpy as np - import matplotlib.pyplot as plt - - from smt.surrogate_models import RMTC - - xt = np.array([0.0, 1.0, 2.0, 3.0, 4.0]) - yt = np.array([0.0, 1.0, 1.5, 0.9, 1.0]) - - xlimits = np.array([[0.0, 4.0]]) - - sm = RMTC( - xlimits=xlimits, - num_elements=20, - energy_weight=1e-15, - regularization_weight=0.0, - ) - sm.set_training_values(xt, yt) - sm.train() - - num = 100 - x = np.linspace(0.0, 4.0, num) - y = sm.predict_values(x) - - plt.plot(xt, yt, "o") - plt.plot(x, y) - plt.xlabel("x") - plt.ylabel("y") - plt.legend(["Training data", "Prediction"]) - plt.show() - -:: - - ___________________________________________________________________________ - - RMTC - ___________________________________________________________________________ - - Problem size - - # training points. : 5 - - ___________________________________________________________________________ - - Training - - Training ... - Pre-computing matrices ... - Computing dof2coeff ... - Computing dof2coeff - done. Time (sec): 0.0000000 - Initializing Hessian ... - Initializing Hessian - done. Time (sec): 0.0000000 - Computing energy terms ... - Computing energy terms - done. Time (sec): 0.0020008 - Computing approximation terms ... - Computing approximation terms - done. Time (sec): 0.0000000 - Pre-computing matrices - done. Time (sec): 0.0020008 - Solving for degrees of freedom ... - Solving initial startup problem (n=42) ... - Solving for output 0 ... - Iteration (num., iy, grad. norm, func.) : 0 0 2.249444376e+00 2.530000000e+00 - Iteration (num., iy, grad. norm, func.) : 0 0 2.004900347e-15 4.346868680e-16 - Solving for output 0 - done. Time (sec): 0.0030320 - Solving initial startup problem (n=42) - done. Time (sec): 0.0030320 - Solving nonlinear problem (n=42) ... - Solving for output 0 ... - Iteration (num., iy, grad. norm, func.) : 0 0 2.956393318e-15 4.346868680e-16 - Solving for output 0 - done. Time (sec): 0.0000000 - Solving nonlinear problem (n=42) - done. Time (sec): 0.0000000 - Solving for degrees of freedom - done. Time (sec): 0.0030320 - Training - done. Time (sec): 0.0053463 - ___________________________________________________________________________ - - Evaluation - - # eval points. : 100 - - Predicting ... - Predicting - done. Time (sec): 0.0000000 - - Prediction time/pt. (sec) : 0.0000000 - - -.. figure:: rmts_Test_test_rmtc.png - :scale: 80 % - :align: center - -Options (RMTB) --------------- - -.. list-table:: List of options - :header-rows: 1 - :widths: 15, 10, 20, 20, 30 - :stub-columns: 0 - - * - Option - - Default - - Acceptable values - - Acceptable types - - Description - * - print_global - - True - - None - - ['bool'] - - Global print toggle. If False, all printing is suppressed - * - print_training - - True - - None - - ['bool'] - - Whether to print training information - * - print_prediction - - True - - None - - ['bool'] - - Whether to print prediction information - * - print_problem - - True - - None - - ['bool'] - - Whether to print problem information - * - print_solver - - True - - None - - ['bool'] - - Whether to print solver information - * - xlimits - - None - - None - - ['ndarray'] - - Lower/upper bounds in each dimension - ndarray [nx, 2] - * - smoothness - - 1.0 - - None - - ['Integral', 'float', 'tuple', 'list', 'ndarray'] - - Smoothness parameter in each dimension - length nx. None implies uniform - * - regularization_weight - - 1e-14 - - None - - ['Integral', 'float'] - - Weight of the term penalizing the norm of the spline coefficients. 
This is useful as an alternative to energy minimization when energy minimization makes the training time too long. - * - energy_weight - - 0.0001 - - None - - ['Integral', 'float'] - - The weight of the energy minimization terms - * - extrapolate - - False - - None - - ['bool'] - - Whether to perform linear extrapolation for external evaluation points - * - min_energy - - True - - None - - ['bool'] - - Whether to perform energy minimization - * - approx_order - - 4 - - None - - ['Integral'] - - Exponent in the approximation term - * - solver - - krylov - - ['krylov-dense', 'dense-lu', 'dense-chol', 'lu', 'ilu', 'krylov', 'krylov-lu', 'krylov-mg', 'gs', 'jacobi', 'mg', 'null'] - - ['LinearSolver'] - - Linear solver - * - derivative_solver - - krylov - - ['krylov-dense', 'dense-lu', 'dense-chol', 'lu', 'ilu', 'krylov', 'krylov-lu', 'krylov-mg', 'gs', 'jacobi', 'mg', 'null'] - - ['LinearSolver'] - - Linear solver used for computing output derivatives (dy_dyt) - * - grad_weight - - 0.5 - - None - - ['Integral', 'float'] - - Weight on gradient training data - * - solver_tolerance - - 1e-12 - - None - - ['Integral', 'float'] - - Convergence tolerance for the nonlinear solver - * - nonlinear_maxiter - - 10 - - None - - ['Integral'] - - Maximum number of nonlinear solver iterations - * - line_search - - backtracking - - ['backtracking', 'bracketed', 'quadratic', 'cubic', 'null'] - - ['LineSearch'] - - Line search algorithm - * - save_energy_terms - - False - - None - - ['bool'] - - Whether to cache energy terms in the data_dir directory - * - data_dir - - None - - [None] - - ['str'] - - Directory for loading / saving cached data; None means do not save or load - * - max_print_depth - - 5 - - None - - ['Integral'] - - Maximum depth (level of nesting) to print operation descriptions and times - * - order - - 3 - - None - - ['Integral', 'tuple', 'list', 'ndarray'] - - B-spline order in each dimension - length [nx] - * - num_ctrl_pts - - 15 - - None - - ['Integral', 'tuple', 'list', 'ndarray'] - - # B-spline control points in each dimension - length [nx] - -Options (RMTC) --------------- - -.. list-table:: List of options - :header-rows: 1 - :widths: 15, 10, 20, 20, 30 - :stub-columns: 0 - - * - Option - - Default - - Acceptable values - - Acceptable types - - Description - * - print_global - - True - - None - - ['bool'] - - Global print toggle. If False, all printing is suppressed - * - print_training - - True - - None - - ['bool'] - - Whether to print training information - * - print_prediction - - True - - None - - ['bool'] - - Whether to print prediction information - * - print_problem - - True - - None - - ['bool'] - - Whether to print problem information - * - print_solver - - True - - None - - ['bool'] - - Whether to print solver information - * - xlimits - - None - - None - - ['ndarray'] - - Lower/upper bounds in each dimension - ndarray [nx, 2] - * - smoothness - - 1.0 - - None - - ['Integral', 'float', 'tuple', 'list', 'ndarray'] - - Smoothness parameter in each dimension - length nx. None implies uniform - * - regularization_weight - - 1e-14 - - None - - ['Integral', 'float'] - - Weight of the term penalizing the norm of the spline coefficients. This is useful as an alternative to energy minimization when energy minimization makes the training time too long. 
- * - energy_weight - - 0.0001 - - None - - ['Integral', 'float'] - - The weight of the energy minimization terms - * - extrapolate - - False - - None - - ['bool'] - - Whether to perform linear extrapolation for external evaluation points - * - min_energy - - True - - None - - ['bool'] - - Whether to perform energy minimization - * - approx_order - - 4 - - None - - ['Integral'] - - Exponent in the approximation term - * - solver - - krylov - - ['krylov-dense', 'dense-lu', 'dense-chol', 'lu', 'ilu', 'krylov', 'krylov-lu', 'krylov-mg', 'gs', 'jacobi', 'mg', 'null'] - - ['LinearSolver'] - - Linear solver - * - derivative_solver - - krylov - - ['krylov-dense', 'dense-lu', 'dense-chol', 'lu', 'ilu', 'krylov', 'krylov-lu', 'krylov-mg', 'gs', 'jacobi', 'mg', 'null'] - - ['LinearSolver'] - - Linear solver used for computing output derivatives (dy_dyt) - * - grad_weight - - 0.5 - - None - - ['Integral', 'float'] - - Weight on gradient training data - * - solver_tolerance - - 1e-12 - - None - - ['Integral', 'float'] - - Convergence tolerance for the nonlinear solver - * - nonlinear_maxiter - - 10 - - None - - ['Integral'] - - Maximum number of nonlinear solver iterations - * - line_search - - backtracking - - ['backtracking', 'bracketed', 'quadratic', 'cubic', 'null'] - - ['LineSearch'] - - Line search algorithm - * - save_energy_terms - - False - - None - - ['bool'] - - Whether to cache energy terms in the data_dir directory - * - data_dir - - None - - [None] - - ['str'] - - Directory for loading / saving cached data; None means do not save or load - * - max_print_depth - - 5 - - None - - ['Integral'] - - Maximum depth (level of nesting) to print operation descriptions and times - * - num_elements - - 4 - - None - - ['Integral', 'list', 'ndarray'] - - # elements in each dimension - ndarray [nx] +Regularized minimal-energy tensor-product splines +================================================= + +Regularized minimal-energy tensor-product splines (RMTS) is a type of surrogate model for +low-dimensional problems with large datasets and where fast prediction is desired. +The underlying mathematical functions are tensor-product splines, +which limits RMTS to up to 4-D problems, or 5-D problems in certain cases. +On the other hand, tensor-product splines enable a very fast prediction time +that does not increase with the number of training points. +Unlike other methods like Kriging and radial basis functions, +RMTS is not susceptible to numerical issues when there is a large number of training points +or when there are points that are too close together. + +The prediction equation for RMTS is + +.. math :: + y = \mathbf{F}(\mathbf{x}) \mathbf{w} , + +where +:math:`\mathbf{x} \in \mathbb{R}^{nx}` is the prediction input vector, +:math:`y \in \mathbb{R}` is the prediction output, +:math:`\mathbf{w} \in \mathbb{R}^{nw}` is the vector of spline coefficients, +and +:math:`\mathbf{F}(\mathbf{x}) \in \mathbb{R}^{nw}` is the vector mapping the spline coefficients to the prediction output. + +RMTS computes the coefficients of the splines, :math:`\mathbf{w}`, by solving an energy minimization problem +subject to the conditions that the splines pass through the training points. +This is formulated as an unconstrained optimization problem +where the objective function consists of a term containing the second derivatives of the splines, +another term representing the approximation error for the training points, +and another term for regularization: + +.. 
math ::
+
+  \begin{array}{r l}
+    \underset{\mathbf{w}}{\min} & \frac{1}{2} \mathbf{w}^T \mathbf{H} \mathbf{w}
+    + \frac{1}{2} \beta \mathbf{w}^T \mathbf{w}
+    \\
+    &
+    + \frac{1}{2} \frac{1}{\alpha}
+    \sum_i^{nt} \left[ \mathbf{F}(\mathbf{xt}_i) \mathbf{w} - yt_i \right] ^ 2
+  \end{array} ,
+
+where
+:math:`\mathbf{xt}_i \in \mathbb{R}^{nx}` is the input vector for the :math:`i` th training point,
+:math:`yt_i \in \mathbb{R}` is the output value for the :math:`i` th training point,
+:math:`\mathbf{H} \in \mathbb{R}^{nw \times nw}` is the matrix containing the second derivatives,
+:math:`\mathbf{F}(\mathbf{xt}_i) \in \mathbb{R}^{nw}` is the vector mapping the spline coefficients to the :math:`i` th training output,
+and :math:`\alpha` and :math:`\beta` are regularization coefficients.
+
+In problems with a large number of training points relative to the number of spline coefficients,
+the energy minimization term is not necessary;
+it can be zeroed out by setting the :code:`energy_weight` option to zero
+(alternatively, setting the :code:`min_energy` option to False disables energy minimization altogether).
+In problems with a small dataset, the energy minimization is necessary.
+When the true function has high curvature, the energy minimization can be counterproductive
+in the regions of high curvature.
+This can be addressed by replacing the quadratic approximation term with one of higher order
+and using Newton's method to solve the nonlinear system that results.
+The nonlinear formulation is given by
+
+.. math::
+
+  \begin{array}{r l}
+    \underset{\mathbf{w}}{\min} & \frac{1}{2} \mathbf{w}^T \mathbf{H} \mathbf{w}
+    + \frac{1}{2} \beta \mathbf{w}^T \mathbf{w}
+    \\
+    &
+    + \frac{1}{2} \frac{1}{\alpha}
+    \sum_i^{nt} \left[ \mathbf{F}(\mathbf{xt}_i) \mathbf{w} - yt_i \right] ^ p
+  \end{array} ,
+
+where :math:`p` is the order given by the :code:`approx_order` option.
+The number of Newton iterations can be specified via the :code:`nonlinear_maxiter` option.
+
+RMTS is implemented in SMT with two choices of splines:
+
+1. B-splines (RMTB): RMTB uses B-splines with a uniform knot vector in each dimension.
+The number of B-spline control points and the B-spline order in each dimension are options
+that trade off efficiency and precision of the interpolant.
+
+2. Cubic Hermite splines (RMTC): RMTC divides the domain into tensor-product cubic elements.
+For adjacent elements, the values and derivatives are continuous.
+The number of elements in each dimension is an option that trades off efficiency and precision.
+
+In general, RMTB is the better choice when training time is the most important,
+while RMTC is the better choice when accuracy of the interpolant is the most important.
+More details of these methods are given in [1]_.
+
+.. [1] Hwang, J. T., & Martins, J. R. R. A. (2018). A fast-prediction surrogate model for large datasets. Aerospace Science and Technology, 75, 74-87.
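+
+The effect of these two options can be sketched as follows. This snippet is
+illustrative only (it is not one of the generated examples on this page); it
+reuses the training data of the usage examples below, and the
+:code:`approx_order`, :code:`nonlinear_maxiter`, and :code:`solver_tolerance`
+options are those documented in the tables at the end of this page:
+
+.. code-block:: python
+
+    import numpy as np
+
+    from smt.surrogate_models import RMTB
+
+    xt = np.array([0.0, 1.0, 2.0, 3.0, 4.0])
+    yt = np.array([0.0, 1.0, 1.5, 0.9, 1.0])
+
+    # Raise the exponent p of the approximation term and give the Newton
+    # solver room to converge; both options appear in the tables below.
+    sm = RMTB(
+        xlimits=np.array([[0.0, 4.0]]),
+        approx_order=6,  # exponent p in the approximation term
+        nonlinear_maxiter=20,  # maximum number of Newton iterations
+        solver_tolerance=1e-12,  # convergence tolerance for the nonlinear solver
+        print_global=False,  # suppress training output
+    )
+    sm.set_training_values(xt, yt)
+    sm.train()
+
+    y = sm.predict_values(np.linspace(0.0, 4.0, 100))
+
+With a higher exponent :math:`p`, the largest training-point residuals dominate
+the objective, so the solver works hardest where the fit is currently worst;
+the trade-off is a nonlinear solve in place of a linear one.
+
+Usage (RMTB)
+------------
+
+.. 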
code-block:: python + + import numpy as np + import matplotlib.pyplot as plt + + from smt.surrogate_models import RMTB + + xt = np.array([0.0, 1.0, 2.0, 3.0, 4.0]) + yt = np.array([0.0, 1.0, 1.5, 0.9, 1.0]) + + xlimits = np.array([[0.0, 4.0]]) + + sm = RMTB( + xlimits=xlimits, + order=4, + num_ctrl_pts=20, + energy_weight=1e-15, + regularization_weight=0.0, + ) + sm.set_training_values(xt, yt) + sm.train() + + num = 100 + x = np.linspace(0.0, 4.0, num) + y = sm.predict_values(x) + + plt.plot(xt, yt, "o") + plt.plot(x, y) + plt.xlabel("x") + plt.ylabel("y") + plt.legend(["Training data", "Prediction"]) + plt.show() + +:: + + ___________________________________________________________________________ + + RMTB + ___________________________________________________________________________ + + Problem size + + # training points. : 5 + + ___________________________________________________________________________ + + Training + + Training ... + Pre-computing matrices ... + Computing dof2coeff ... + Computing dof2coeff - done. Time (sec): 0.0000000 + Initializing Hessian ... + Initializing Hessian - done. Time (sec): 0.0000000 + Computing energy terms ... + Computing energy terms - done. Time (sec): 0.0009975 + Computing approximation terms ... + Computing approximation terms - done. Time (sec): 0.0000000 + Pre-computing matrices - done. Time (sec): 0.0009975 + Solving for degrees of freedom ... + Solving initial startup problem (n=20) ... + Solving for output 0 ... + Iteration (num., iy, grad. norm, func.) : 0 0 1.549745600e+00 2.530000000e+00 + Iteration (num., iy, grad. norm, func.) : 0 0 1.339039325e-15 4.464522395e-16 + Solving for output 0 - done. Time (sec): 0.0019948 + Solving initial startup problem (n=20) - done. Time (sec): 0.0019948 + Solving nonlinear problem (n=20) ... + Solving for output 0 ... + Iteration (num., iy, grad. norm, func.) : 0 0 1.533514592e-15 4.464522395e-16 + Solving for output 0 - done. Time (sec): 0.0009975 + Solving nonlinear problem (n=20) - done. Time (sec): 0.0009975 + Solving for degrees of freedom - done. Time (sec): 0.0029924 + Training - done. Time (sec): 0.0049870 + ___________________________________________________________________________ + + Evaluation + + # eval points. : 100 + + Predicting ... + Predicting - done. Time (sec): 0.0000000 + + Prediction time/pt. (sec) : 0.0000000 + + +.. figure:: rmts_Test_test_rmtb.png + :scale: 80 % + :align: center + +Usage (RMTC) +------------ + +.. code-block:: python + + import numpy as np + import matplotlib.pyplot as plt + + from smt.surrogate_models import RMTC + + xt = np.array([0.0, 1.0, 2.0, 3.0, 4.0]) + yt = np.array([0.0, 1.0, 1.5, 0.9, 1.0]) + + xlimits = np.array([[0.0, 4.0]]) + + sm = RMTC( + xlimits=xlimits, + num_elements=20, + energy_weight=1e-15, + regularization_weight=0.0, + ) + sm.set_training_values(xt, yt) + sm.train() + + num = 100 + x = np.linspace(0.0, 4.0, num) + y = sm.predict_values(x) + + plt.plot(xt, yt, "o") + plt.plot(x, y) + plt.xlabel("x") + plt.ylabel("y") + plt.legend(["Training data", "Prediction"]) + plt.show() + +:: + + ___________________________________________________________________________ + + RMTC + ___________________________________________________________________________ + + Problem size + + # training points. : 5 + + ___________________________________________________________________________ + + Training + + Training ... + Pre-computing matrices ... + Computing dof2coeff ... + Computing dof2coeff - done. Time (sec): 0.0000000 + Initializing Hessian ... 
+ Initializing Hessian - done. Time (sec): 0.0009644 + Computing energy terms ... + Computing energy terms - done. Time (sec): 0.0009973 + Computing approximation terms ... + Computing approximation terms - done. Time (sec): 0.0000000 + Pre-computing matrices - done. Time (sec): 0.0019617 + Solving for degrees of freedom ... + Solving initial startup problem (n=42) ... + Solving for output 0 ... + Iteration (num., iy, grad. norm, func.) : 0 0 2.249444376e+00 2.530000000e+00 + Iteration (num., iy, grad. norm, func.) : 0 0 2.031017841e-15 4.346868680e-16 + Solving for output 0 - done. Time (sec): 0.0029922 + Solving initial startup problem (n=42) - done. Time (sec): 0.0029922 + Solving nonlinear problem (n=42) ... + Solving for output 0 ... + Iteration (num., iy, grad. norm, func.) : 0 0 2.956393318e-15 4.346868680e-16 + Solving for output 0 - done. Time (sec): 0.0000000 + Solving nonlinear problem (n=42) - done. Time (sec): 0.0000000 + Solving for degrees of freedom - done. Time (sec): 0.0029922 + Training - done. Time (sec): 0.0059485 + ___________________________________________________________________________ + + Evaluation + + # eval points. : 100 + + Predicting ... + Predicting - done. Time (sec): 0.0000000 + + Prediction time/pt. (sec) : 0.0000000 + + +.. figure:: rmts_Test_test_rmtc.png + :scale: 80 % + :align: center + +Options (RMTB) +-------------- + +.. list-table:: List of options + :header-rows: 1 + :widths: 15, 10, 20, 20, 30 + :stub-columns: 0 + + * - Option + - Default + - Acceptable values + - Acceptable types + - Description + * - print_global + - True + - None + - ['bool'] + - Global print toggle. If False, all printing is suppressed + * - print_training + - True + - None + - ['bool'] + - Whether to print training information + * - print_prediction + - True + - None + - ['bool'] + - Whether to print prediction information + * - print_problem + - True + - None + - ['bool'] + - Whether to print problem information + * - print_solver + - True + - None + - ['bool'] + - Whether to print solver information + * - xlimits + - None + - None + - ['ndarray'] + - Lower/upper bounds in each dimension - ndarray [nx, 2] + * - smoothness + - 1.0 + - None + - ['Integral', 'float', 'tuple', 'list', 'ndarray'] + - Smoothness parameter in each dimension - length nx. None implies uniform + * - regularization_weight + - 1e-14 + - None + - ['Integral', 'float'] + - Weight of the term penalizing the norm of the spline coefficients. This is useful as an alternative to energy minimization when energy minimization makes the training time too long. 
+ * - energy_weight + - 0.0001 + - None + - ['Integral', 'float'] + - The weight of the energy minimization terms + * - extrapolate + - False + - None + - ['bool'] + - Whether to perform linear extrapolation for external evaluation points + * - min_energy + - True + - None + - ['bool'] + - Whether to perform energy minimization + * - approx_order + - 4 + - None + - ['Integral'] + - Exponent in the approximation term + * - solver + - krylov + - ['krylov-dense', 'dense-lu', 'dense-chol', 'lu', 'ilu', 'krylov', 'krylov-lu', 'krylov-mg', 'gs', 'jacobi', 'mg', 'null'] + - ['LinearSolver'] + - Linear solver + * - derivative_solver + - krylov + - ['krylov-dense', 'dense-lu', 'dense-chol', 'lu', 'ilu', 'krylov', 'krylov-lu', 'krylov-mg', 'gs', 'jacobi', 'mg', 'null'] + - ['LinearSolver'] + - Linear solver used for computing output derivatives (dy_dyt) + * - grad_weight + - 0.5 + - None + - ['Integral', 'float'] + - Weight on gradient training data + * - solver_tolerance + - 1e-12 + - None + - ['Integral', 'float'] + - Convergence tolerance for the nonlinear solver + * - nonlinear_maxiter + - 10 + - None + - ['Integral'] + - Maximum number of nonlinear solver iterations + * - line_search + - backtracking + - ['backtracking', 'bracketed', 'quadratic', 'cubic', 'null'] + - ['LineSearch'] + - Line search algorithm + * - save_energy_terms + - False + - None + - ['bool'] + - Whether to cache energy terms in the data_dir directory + * - data_dir + - None + - [None] + - ['str'] + - Directory for loading / saving cached data; None means do not save or load + * - max_print_depth + - 5 + - None + - ['Integral'] + - Maximum depth (level of nesting) to print operation descriptions and times + * - order + - 3 + - None + - ['Integral', 'tuple', 'list', 'ndarray'] + - B-spline order in each dimension - length [nx] + * - num_ctrl_pts + - 15 + - None + - ['Integral', 'tuple', 'list', 'ndarray'] + - # B-spline control points in each dimension - length [nx] + +Options (RMTC) +-------------- + +.. list-table:: List of options + :header-rows: 1 + :widths: 15, 10, 20, 20, 30 + :stub-columns: 0 + + * - Option + - Default + - Acceptable values + - Acceptable types + - Description + * - print_global + - True + - None + - ['bool'] + - Global print toggle. If False, all printing is suppressed + * - print_training + - True + - None + - ['bool'] + - Whether to print training information + * - print_prediction + - True + - None + - ['bool'] + - Whether to print prediction information + * - print_problem + - True + - None + - ['bool'] + - Whether to print problem information + * - print_solver + - True + - None + - ['bool'] + - Whether to print solver information + * - xlimits + - None + - None + - ['ndarray'] + - Lower/upper bounds in each dimension - ndarray [nx, 2] + * - smoothness + - 1.0 + - None + - ['Integral', 'float', 'tuple', 'list', 'ndarray'] + - Smoothness parameter in each dimension - length nx. None implies uniform + * - regularization_weight + - 1e-14 + - None + - ['Integral', 'float'] + - Weight of the term penalizing the norm of the spline coefficients. This is useful as an alternative to energy minimization when energy minimization makes the training time too long. 
+ * - energy_weight + - 0.0001 + - None + - ['Integral', 'float'] + - The weight of the energy minimization terms + * - extrapolate + - False + - None + - ['bool'] + - Whether to perform linear extrapolation for external evaluation points + * - min_energy + - True + - None + - ['bool'] + - Whether to perform energy minimization + * - approx_order + - 4 + - None + - ['Integral'] + - Exponent in the approximation term + * - solver + - krylov + - ['krylov-dense', 'dense-lu', 'dense-chol', 'lu', 'ilu', 'krylov', 'krylov-lu', 'krylov-mg', 'gs', 'jacobi', 'mg', 'null'] + - ['LinearSolver'] + - Linear solver + * - derivative_solver + - krylov + - ['krylov-dense', 'dense-lu', 'dense-chol', 'lu', 'ilu', 'krylov', 'krylov-lu', 'krylov-mg', 'gs', 'jacobi', 'mg', 'null'] + - ['LinearSolver'] + - Linear solver used for computing output derivatives (dy_dyt) + * - grad_weight + - 0.5 + - None + - ['Integral', 'float'] + - Weight on gradient training data + * - solver_tolerance + - 1e-12 + - None + - ['Integral', 'float'] + - Convergence tolerance for the nonlinear solver + * - nonlinear_maxiter + - 10 + - None + - ['Integral'] + - Maximum number of nonlinear solver iterations + * - line_search + - backtracking + - ['backtracking', 'bracketed', 'quadratic', 'cubic', 'null'] + - ['LineSearch'] + - Line search algorithm + * - save_energy_terms + - False + - None + - ['bool'] + - Whether to cache energy terms in the data_dir directory + * - data_dir + - None + - [None] + - ['str'] + - Directory for loading / saving cached data; None means do not save or load + * - max_print_depth + - 5 + - None + - ['Integral'] + - Maximum depth (level of nesting) to print operation descriptions and times + * - num_elements + - 4 + - None + - ['Integral', 'list', 'ndarray'] + - # elements in each dimension - ndarray [nx] diff --git a/doc/_src_docs/surrogate_models/rmts_Test_test_rmtb.png b/doc/_src_docs/surrogate_models/rmts_Test_test_rmtb.png index 82d3aeba7..6680f33cd 100644 Binary files a/doc/_src_docs/surrogate_models/rmts_Test_test_rmtb.png and b/doc/_src_docs/surrogate_models/rmts_Test_test_rmtb.png differ diff --git a/doc/_src_docs/surrogate_models_Test_test_rbf.png b/doc/_src_docs/surrogate_models_Test_test_rbf.png index f7b01f262..da4ef1cdb 100644 Binary files a/doc/_src_docs/surrogate_models_Test_test_rbf.png and b/doc/_src_docs/surrogate_models_Test_test_rbf.png differ diff --git a/doc/conf.py b/doc/conf.py index 5601b7d83..7e2dd550c 100644 --- a/doc/conf.py +++ b/doc/conf.py @@ -95,9 +95,15 @@ # further. For a list of options available for each theme, see the # documentation. # -# html_theme_options = {} -html_logo = "logos.png" +html_theme_options = { + "rightsidebar": False, + "sidebarwidth": 250, + "body_min_width": 1100, + "body_max_width": 1100, +} + +html_logo = "smt_logo.png" # Add any paths that contain custom static files (such as style sheets) here, # relative to this directory. They are copied after the builtin static files, diff --git a/doc/index.rst b/doc/index.rst index fca7bf255..37373a099 100644 --- a/doc/index.rst +++ b/doc/index.rst @@ -1,76 +1,80 @@ -.. SMT documentation master file, created by - sphinx-quickstart on Sun Aug 6 19:36:14 2017. - You can adapt this file completely to your liking, but it should at least - contain the root `toctree` directive. 
- -SMT: Surrogate Modeling Toolbox -------------------------------- - -The surrogate modeling toolbox (SMT) is an open-source Python package consisting of libraries of surrogate modeling methods (e.g., radial basis functions, kriging), sampling methods, and benchmarking problems. -SMT is designed to make it easy for developers to implement new surrogate models in a well-tested and well-document platform, and for users to have a library of surrogate modeling methods with which to use and compare methods. - -The code is available open-source on `GitHub `_. - -Cite us -------- -To cite SMT: M. A. Bouhlel and J. T. Hwang and N. Bartoli and R. Lafage and J. Morlier and J. R. R. A. Martins. - -`A Python surrogate modeling framework with derivatives. Advances in Engineering Software, 2019 `_. - -.. code-block:: none - - @article{SMT2019, - Author = {Mohamed Amine Bouhlel and John T. Hwang and Nathalie Bartoli and Rémi Lafage and Joseph Morlier and Joaquim R. R. A. Martins}, - Journal = {Advances in Engineering Software}, - Title = {A Python surrogate modeling framework with derivatives}, - pages = {102662}, - year = {2019}, - issn = {0965-9978}, - doi = {https://doi.org/10.1016/j.advengsoft.2019.03.005}, - Year = {2019}} - - -Focus on derivatives --------------------- - -SMT is meant to be a general library for surrogate modeling (also known as metamodeling, interpolation, and regression), but its distinguishing characteristic is its focus on derivatives, e.g., to be used for gradient-based optimization. -A surrogate model can be represented mathematically as - -.. math :: - y = f(\mathbf{x}, \mathbf{xt}, \mathbf{yt}), - -where -:math:`\mathbf{xt} \in \mathbb{R}^{nt \times nx}` contains the training inputs, -:math:`\mathbf{yt} \in \mathbb{R}^{nt}` contains the training outputs, -:math:`\mathbf{x} \in \mathbb{R}^{nx}` contains the prediction inputs, -and -:math:`y \in \mathbb{R}` contains the prediction outputs. -There are three types of derivatives of interest in SMT: - -1. Derivatives (:math:`{dy}/{dx}`): derivatives of predicted outputs with respect to the inputs at which the model is evaluated. -2. Training derivatives (:math:`{dyt}/{dxt}`): derivatives of training outputs, given as part of the training data set, e.g., for gradient-enhanced kriging. -3. Output derivatives (:math:`{dy}/{dyt}`): derivatives of predicted outputs with respect to training outputs, representing how the prediction changes if the training outputs change and the surrogate model is re-trained. - -Not all surrogate modeling methods support or are required to support all three types of derivatives; all are optional. - -Documentation contents ----------------------- - -.. toctree:: - :maxdepth: 2 - :titlesonly: - - _src_docs/getting_started - _src_docs/surrogate_models - _src_docs/problems - _src_docs/sampling_methods - _src_docs/examples - _src_docs/applications - _src_docs/dev_docs - - -Indices and tables -================== - -* :ref:`genindex` -* :ref:`search` +.. SMT documentation master file, created by + sphinx-quickstart on Sun Aug 6 19:36:14 2017. + You can adapt this file completely to your liking, but it should at least + contain the root `toctree` directive. + +.. image:: logos_band.png + :height: 120 + :width: 1250 + +SMT: Surrogate Modeling Toolbox +------------------------------- + +The surrogate modeling toolbox (SMT) is an open-source Python package consisting of libraries of surrogate modeling methods (e.g., radial basis functions, kriging), sampling methods, and benchmarking problems. 
+SMT is designed to make it easy for developers to implement new surrogate models in a well-tested and well-documented platform, and for users to have a library of surrogate modeling methods to use and compare.
+
+The code is available open-source on `GitHub <https://github.com/SMTorg/smt>`_.
+
+Cite us
+-------
+To cite SMT: M. A. Bouhlel, J. T. Hwang, N. Bartoli, R. Lafage, J. Morlier, and J. R. R. A. Martins.
+
+`A Python surrogate modeling framework with derivatives. Advances in Engineering Software, 2019 <https://doi.org/10.1016/j.advengsoft.2019.03.005>`_.
+
+.. code-block:: none
+
+    @article{SMT2019,
+        Author = {Mohamed Amine Bouhlel and John T. Hwang and Nathalie Bartoli and Rémi Lafage and Joseph Morlier and Joaquim R. R. A. Martins},
+        Journal = {Advances in Engineering Software},
+        Title = {A Python surrogate modeling framework with derivatives},
+        pages = {102662},
+        issn = {0965-9978},
+        doi = {https://doi.org/10.1016/j.advengsoft.2019.03.005},
+        Year = {2019}}
+
+
+Focus on derivatives
+--------------------
+
+SMT is meant to be a general library for surrogate modeling (also known as metamodeling, interpolation, and regression), but its distinguishing characteristic is its focus on derivatives, e.g., to be used for gradient-based optimization.
+A surrogate model can be represented mathematically as
+
+.. math ::
+  y = f(\mathbf{x}, \mathbf{xt}, \mathbf{yt}),
+
+where
+:math:`\mathbf{xt} \in \mathbb{R}^{nt \times nx}` contains the training inputs,
+:math:`\mathbf{yt} \in \mathbb{R}^{nt}` contains the training outputs,
+:math:`\mathbf{x} \in \mathbb{R}^{nx}` contains the prediction inputs,
+and
+:math:`y \in \mathbb{R}` contains the prediction outputs.
+There are three types of derivatives of interest in SMT:
+
+1. Derivatives (:math:`{dy}/{dx}`): derivatives of predicted outputs with respect to the inputs at which the model is evaluated.
+2. Training derivatives (:math:`{dyt}/{dxt}`): derivatives of training outputs, given as part of the training data set, e.g., for gradient-enhanced kriging.
+3. Output derivatives (:math:`{dy}/{dyt}`): derivatives of predicted outputs with respect to training outputs, representing how the prediction changes if the training outputs change and the surrogate model is re-trained.
+
+Not all surrogate modeling methods support or are required to support all three types of derivatives; all are optional.
+
+Documentation contents
+----------------------
+
+.. toctree::
+   :maxdepth: 2
+   :titlesonly:
+
+   _src_docs/getting_started
+   _src_docs/surrogate_models
+   _src_docs/problems
+   _src_docs/sampling_methods
+   _src_docs/examples
+   _src_docs/applications
+   _src_docs/dev_docs
+
+
+Indices and tables
+==================
+
+* :ref:`genindex`
+* :ref:`search`
diff --git a/doc/index.rstx b/doc/index.rstx
index fca7bf255..dc8401cd8 100644
--- a/doc/index.rstx
+++ b/doc/index.rstx
@@ -3,6 +3,10 @@
    You can adapt this file completely to your liking, but it should at least
    contain the root `toctree` directive.
 
+.. 
image:: logos_band.png + :height: 120 + :width: 1250 + SMT: Surrogate Modeling Toolbox ------------------------------- diff --git a/doc/logos_band.png b/doc/logos_band.png new file mode 100644 index 000000000..9913dbbf0 Binary files /dev/null and b/doc/logos_band.png differ diff --git a/smt/applications/mixed_integer.py b/smt/applications/mixed_integer.py index 153cc8327..469ae946b 100644 --- a/smt/applications/mixed_integer.py +++ b/smt/applications/mixed_integer.py @@ -88,7 +88,7 @@ def __init__( ---------- xspecs : x specifications XSpecs xtypes: x types list - x type specification: list of either FLOAT, ORD or (ENUM, n) spec. + x types specification: list of either FLOAT, ORD or (ENUM, n) spec. xlimits: array-like bounds of x features xroles: x roles list @@ -189,7 +189,7 @@ def __init__( ---------- xspecs : x specifications XSpecs xtypes: x types list - x type specification: list of either FLOAT, ORD or (ENUM, n) spec. + x types specification: list of either FLOAT, ORD or (ENUM, n) spec. xlimits: array-like bounds of x features xroles: x roles list @@ -301,6 +301,8 @@ def __init__(self, xspecs, work_in_folded_space=True): x types specification: list of either FLOAT, ORD or (ENUM, n) spec. xlimits: array-like bounds of x features + xroles: x roles list + x roles specification work_in_folded_space: bool whether x data are in given in folded space (enum indexes) or not (enum masks) categorical_kernel: string diff --git a/smt/applications/tests/test_mixed_integer.py b/smt/applications/tests/test_mixed_integer.py index 854d0eacf..f46f1876a 100644 --- a/smt/applications/tests/test_mixed_integer.py +++ b/smt/applications/tests/test_mixed_integer.py @@ -1,1437 +1,1628 @@ -""" -Created on Tue Oct 12 10:48:01 2021 -@author: psaves -""" - -import unittest -import numpy as np -import matplotlib -import itertools - -matplotlib.use("Agg") - -from smt.utils.kriging import XSpecs - -from smt.applications.mixed_integer import ( - MixedIntegerContext, - MixedIntegerSamplingMethod, - MixedIntegerKrigingModel, -) -from smt.utils.mixed_integer import ( - unfold_xlimits_with_continuous_limits, - fold_with_enum_index, - unfold_with_enum_mask, - compute_unfolded_dimension, - cast_to_enum_value, - cast_to_mixed_integer, - cast_to_discrete_values, - encode_with_enum_index, -) -from smt.problems import Sphere -from smt.sampling_methods import LHS -from smt.surrogate_models import ( - KRG, - KPLS, - QP, - XType, - XRole, - MixIntKernelType, -) - - -class TestMixedInteger(unittest.TestCase): - def test_krg_mixed_3D_INT(self): - xtypes = [XType.FLOAT, (XType.ENUM, 3), XType.ORD] - xlimits = [[-10, 10], ["blue", "red", "green"], [-10, 10]] - xspecs = XSpecs(xtypes=xtypes, xlimits=xlimits) - - mixint = MixedIntegerContext(xspecs=xspecs) - - sm = mixint.build_kriging_model(KRG(print_prediction=False)) - sampling = mixint.build_sampling_method(LHS, criterion="m") - - fun = Sphere(ndim=3) - xt = sampling(20) - yt = fun(xt) - sm.set_training_values(xt, yt) - sm.train() - - eq_check = True - for i in range(xt.shape[0]): - if abs(float(xt[i, :][2]) - int(float(xt[i, :][2]))) > 10e-8: - eq_check = False - if not (xt[i, :][1] == 0 or xt[i, :][1] == 1 or xt[i, :][1] == 2): - eq_check = False - self.assertTrue(eq_check) - - def test_krg_mixed_3D(self): - xtypes = [XType.FLOAT, (XType.ENUM, 3), XType.ORD] - xlimits = [[-10, 10], ["blue", "red", "green"], [-10, 10]] - xspecs = XSpecs(xtypes=xtypes, xlimits=xlimits) - - mixint = MixedIntegerContext(xspecs=xspecs) - - sm = mixint.build_kriging_model(KRG(print_prediction=False)) - 
sampling = mixint.build_sampling_method(LHS, criterion="m") - - fun = Sphere(ndim=3) - xt = sampling(20) - yt = fun(xt) - sm.set_training_values(xt, yt) - sm.train() - - eq_check = True - for i in range(xt.shape[0]): - if abs(float(xt[i, :][2]) - int(float(xt[i, :][2]))) > 10e-8: - eq_check = False - if not (xt[i, :][1] == 0 or xt[i, :][1] == 1 or xt[i, :][1] == 2): - eq_check = False - self.assertTrue(eq_check) - - def test_krg_mixed_3D_bad_regr(self): - xtypes = [XType.FLOAT, (XType.ENUM, 3), XType.ORD] - xlimits = [[-10, 10], ["blue", "red", "green"], [-10, 10]] - xspecs = XSpecs(xtypes=xtypes, xlimits=xlimits) - - mixint = MixedIntegerContext(xspecs=xspecs) - with self.assertRaises(ValueError): - sm = mixint.build_kriging_model(KRG(print_prediction=False, poly="linear")) - - def test_qp_mixed_2D_INT(self): - xtypes = [XType.FLOAT, XType.ORD] - xlimits = [[-10, 10], [-10, 10]] - xspecs = XSpecs(xtypes=xtypes, xlimits=xlimits) - - mixint = MixedIntegerContext(xspecs=xspecs) - sm = mixint.build_surrogate_model(QP(print_prediction=False)) - sampling = mixint.build_sampling_method(LHS, criterion="m") - - fun = Sphere(ndim=2) - xt = sampling(10) - yt = fun(xt) - sm.set_training_values(xt, yt) - sm.train() - - eq_check = True - for i in range(xt.shape[0]): - if abs(float(xt[i, :][1]) - int(float(xt[i, :][1]))) > 10e-8: - eq_check = False - self.assertTrue(eq_check) - - def test_compute_unfolded_dimension(self): - xtypes = [XType.FLOAT, (XType.ENUM, 2)] - self.assertEqual(3, compute_unfolded_dimension(xtypes)) - - def test_unfold_with_enum_mask(self): - xtypes = [XType.FLOAT, (XType.ENUM, 2)] - x = np.array([[1.5, 1], [1.5, 0], [1.5, 1]]) - expected = [[1.5, 0, 1], [1.5, 1, 0], [1.5, 0, 1]] - self.assertListEqual(expected, unfold_with_enum_mask(xtypes, x).tolist()) - - def test_unfold_with_enum_mask_with_enum_first(self): - xtypes = [(XType.ENUM, 2), XType.FLOAT] - x = np.array([[1, 1.5], [0, 1.5], [1, 1.5]]) - expected = [[0, 1, 1.5], [1, 0, 1.5], [0, 1, 1.5]] - self.assertListEqual(expected, unfold_with_enum_mask(xtypes, x).tolist()) - - def test_fold_with_enum_index(self): - xtypes = [XType.FLOAT, (XType.ENUM, 2)] - x = np.array([[1.5, 0, 1], [1.5, 1, 0], [1.5, 0, 1]]) - expected = [[1.5, 1], [1.5, 0], [1.5, 1]] - self.assertListEqual(expected, fold_with_enum_index(xtypes, x).tolist()) - - def test_fold_with_enum_index_with_list(self): - xtypes = [XType.FLOAT, (XType.ENUM, 2)] - expected = [[1.5, 1]] - x = np.array([1.5, 0, 1]) - self.assertListEqual(expected, fold_with_enum_index(xtypes, x).tolist()) - x = [1.5, 0, 1] - self.assertListEqual(expected, fold_with_enum_index(xtypes, x).tolist()) - - def test_cast_to_enum_value(self): - xlimits = [[0.0, 4.0], ["blue", "red"]] - x_col = 1 - enum_indexes = [1, 1, 0, 1, 0] - expected = ["red", "red", "blue", "red", "blue"] - self.assertListEqual(expected, cast_to_enum_value(xlimits, x_col, enum_indexes)) - - def test_unfolded_xlimits_type(self): - xtypes = [XType.FLOAT, (XType.ENUM, 2), (XType.ENUM, 2), XType.ORD] - xlimits = np.array([[-5, 5], ["2", "3"], ["4", "5"], [0, 2]]) - xspecs = XSpecs(xtypes=xtypes, xlimits=xlimits) - sampling = MixedIntegerSamplingMethod(LHS, xspecs, criterion="ese") - doe = sampling(10) - self.assertEqual((10, 4), doe.shape) - - def test_cast_to_mixed_integer(self): - xtypes = [XType.FLOAT, (XType.ENUM, 2), (XType.ENUM, 3), XType.ORD] - xlimits = np.array( - [[-5, 5], ["blue", "red"], ["short", "medium", "long"], [0, 2]], - dtype="object", - ) - xspecs = XSpecs(xtypes=xtypes, xlimits=xlimits) - - x = np.array([1.5, 0, 
2, 1.1]) - self.assertEqual([1.5, "blue", "long", 1], cast_to_mixed_integer(xspecs, x)) - - def test_encode_with_enum_index(self): - xtypes = [XType.FLOAT, (XType.ENUM, 2), (XType.ENUM, 3), XType.ORD] - xlimits = np.array( - [[-5, 5], ["blue", "red"], ["short", "medium", "long"], [0, 2]], - dtype="object", - ) - xspecs = XSpecs(xtypes=xtypes, xlimits=xlimits) - - x = [1.5, "blue", "long", 1] - self.assertEqual( - np.array_equal( - np.array([1.5, 0, 2, 1]), - encode_with_enum_index(xspecs, x), - ), - True, - ) - - def test_unfold_xlimits_with_continuous_limits(self): - xtypes = [XType.FLOAT, (XType.ENUM, 2), (XType.ENUM, 3), XType.ORD] - xlimits = np.array( - [[-5, 5], ["blue", "red"], ["short", "medium", "long"], [0, 2]], - dtype="object", - ) - xspecs = XSpecs(xtypes=xtypes, xlimits=xlimits) - - l = unfold_xlimits_with_continuous_limits(xspecs) - self.assertEqual( - np.array_equal( - [[-5, 5], [0, 1], [0, 1], [0, 1], [0, 1], [0, 1], [0, 2]], - unfold_xlimits_with_continuous_limits(xspecs), - ), - True, - ) - - def test_unfold_xlimits_with_continuous_limits_and_ordinal_values(self): - xtypes = [XType.FLOAT, (XType.ENUM, 2), (XType.ENUM, 3), XType.ORD] - xlimits = np.array( - [[-5, 5], ["blue", "red"], ["short", "medium", "long"], ["0", "3", "4"]], - dtype="object", - ) - xspecs = XSpecs(xtypes=xtypes, xlimits=xlimits) - - l = unfold_xlimits_with_continuous_limits(xspecs) - - self.assertEqual( - np.array_equal( - [[-5, 5], [0, 1], [0, 1], [0, 1], [0, 1], [0, 1], [0, 4]], - unfold_xlimits_with_continuous_limits(xspecs), - ), - True, - ) - - def test_cast_to_discrete_values(self): - xtypes = [XType.FLOAT, (XType.ENUM, 2), (XType.ENUM, 3), XType.ORD] - xlimits = np.array( - [[-5, 5], ["blue", "red"], ["short", "medium", "long"], [0, 4]], - dtype="object", - ) - xspecs = XSpecs(xtypes=xtypes, xlimits=xlimits) - - x = np.array([[2.6, 0.3, 0.5, 0.25, 0.45, 0.85, 3.1]]) - - self.assertEqual( - np.array_equal( - np.array([[2.6, 0, 1, 0, 0, 1, 3]]), - cast_to_discrete_values(xspecs, True, x), - ), - True, - ) - - def test_cast_to_discrete_values_with_smooth_rounding_ordinal_values(self): - xtypes = [XType.FLOAT, (XType.ENUM, 2), (XType.ENUM, 3), XType.ORD] - - x = np.array([[2.6, 0.3, 0.5, 0.25, 0.45, 0.85, 3.1]]) - xlimits = np.array( - [[-5, 5], ["blue", "red"], ["short", "medium", "long"], ["0", "2", "4"]], - dtype="object", - ) - xspecs = XSpecs(xtypes=xtypes, xlimits=xlimits) - - self.assertEqual( - np.array_equal( - np.array([[2.6, 0, 1, 0, 0, 1, 4]]), - cast_to_discrete_values(xspecs, True, x), - ), - True, - ) - - def test_cast_to_discrete_values_with_hard_rounding_ordinal_values(self): - xtypes = [XType.FLOAT, (XType.ENUM, 2), (XType.ENUM, 3), XType.ORD] - - x = np.array([[2.6, 0.3, 0.5, 0.25, 0.45, 0.85, 3.1]]) - xlimits = np.array( - [[-5, 5], ["blue", "red"], ["short", "medium", "long"], ["0", "4"]], - dtype="object", - ) - xspecs = XSpecs(xtypes=xtypes, xlimits=xlimits) - - self.assertEqual( - np.array_equal( - np.array([[2.6, 0, 1, 0, 0, 1, 4]]), - cast_to_discrete_values(xspecs, True, x), - ), - True, - ) - - def test_cast_to_discrete_values_with_non_integer_ordinal_values(self): - xtypes = [XType.FLOAT, (XType.ENUM, 2), (XType.ENUM, 3), XType.ORD] - - x = np.array([[2.6, 0.3, 0.5, 0.25, 0.45, 0.85, 3.1]]) - xlimits = np.array( - [[-5, 5], ["blue", "red"], ["short", "medium", "long"], ["0", "3.5"]], - dtype="object", - ) - xspecs = XSpecs(xtypes=xtypes, xlimits=xlimits) - - self.assertEqual( - np.array_equal( - np.array([[2.6, 0, 1, 0, 0, 1, 3.5]]), - cast_to_discrete_values(xspecs, 
True, x), - ), - True, - ) - - def test_examples(self): - self.run_mixed_integer_lhs_example() - self.run_mixed_integer_qp_example() - self.run_mixed_integer_context_example() - - def run_mixed_integer_lhs_example(self): - import numpy as np - import matplotlib.pyplot as plt - from matplotlib import colors - - from smt.sampling_methods import LHS - from smt.surrogate_models import XType, XSpecs - from smt.applications.mixed_integer import MixedIntegerSamplingMethod - - xtypes = [XType.FLOAT, (XType.ENUM, 2)] - xlimits = [[0.0, 4.0], ["blue", "red"]] - xspecs = XSpecs(xtypes=xtypes, xlimits=xlimits) - - sampling = MixedIntegerSamplingMethod(LHS, xspecs, criterion="ese") - - num = 40 - x = sampling(num) - - cmap = colors.ListedColormap(xlimits[1]) - plt.scatter(x[:, 0], np.zeros(num), c=x[:, 1], cmap=cmap) - plt.show() - - def run_mixed_integer_qp_example(self): - import numpy as np - import matplotlib.pyplot as plt - - from smt.surrogate_models import QP, XType, XSpecs - from smt.applications.mixed_integer import MixedIntegerSurrogateModel - - xt = np.array([0.0, 1.0, 2.0, 3.0, 4.0]) - yt = np.array([0.0, 1.0, 1.5, 0.5, 1.0]) - - # xtypes = [XType.FLOAT, XType.ORD, (ENUM, 3), (ENUM, 2)] - # XType.FLOAT means x1 continuous - # XType.ORD means x2 ordered - # (ENUM, 3) means x3, x4 & x5 are 3 levels of the same categorical variable - # (ENUM, 2) means x6 & x7 are 2 levels of the same categorical variable - xspecs = XSpecs(xtypes=[XType.ORD], xlimits=[[0, 4]]) - sm = MixedIntegerSurrogateModel(xspecs=xspecs, surrogate=QP()) - sm.set_training_values(xt, yt) - sm.train() - - num = 100 - x = np.linspace(0.0, 4.0, num) - y = sm.predict_values(x) - - plt.plot(xt, yt, "o") - plt.plot(x, y) - plt.xlabel("x") - plt.ylabel("y") - plt.legend(["Training data", "Prediction"]) - plt.show() - - def run_mixed_integer_context_example(self): - import numpy as np - import matplotlib.pyplot as plt - from matplotlib import colors - from mpl_toolkits.mplot3d import Axes3D - - from smt.sampling_methods import LHS, Random - from smt.surrogate_models import KRG, XType, XSpecs - from smt.applications.mixed_integer import MixedIntegerContext - - xtypes = [XType.ORD, XType.FLOAT, (XType.ENUM, 4)] - xlimits = [[0, 5], [0.0, 4.0], ["blue", "red", "green", "yellow"]] - xspecs = XSpecs(xtypes=xtypes, xlimits=xlimits) - - def ftest(x): - return (x[:, 0] * x[:, 0] + x[:, 1] * x[:, 1]) * (x[:, 2] + 1) - - # context to create consistent DOEs and surrogate - mixint = MixedIntegerContext(xspecs=xspecs) - - # DOE for training - lhs = mixint.build_sampling_method(LHS, criterion="ese") - - num = mixint.get_unfolded_dimension() * 5 - print("DOE point nb = {}".format(num)) - xt = lhs(num) - yt = ftest(xt) - - # Surrogate - sm = mixint.build_kriging_model(KRG()) - sm.set_training_values(xt, yt) - sm.train() - - # DOE for validation - rand = mixint.build_sampling_method(Random) - xv = rand(50) - yv = ftest(xv) - yp = sm.predict_values(xv) - - plt.plot(yv, yv) - plt.plot(yv, yp, "o") - plt.xlabel("actual") - plt.ylabel("prediction") - - plt.show() - - def test_hierarchical_variables_Goldstein(self): - def H(x1, x2, x3, x4, z3, z4, x5, cos_term): - h = ( - 53.3108 - + 0.184901 * x1 - - 5.02914 * x1**3 * 10 ** (-6) - + 7.72522 * x1**z3 * 10 ** (-8) - - 0.0870775 * x2 - - 0.106959 * x3 - + 7.98772 * x3**z4 * 10 ** (-6) - + 0.00242482 * x4 - + 1.32851 * x4**3 * 10 ** (-6) - - 0.00146393 * x1 * x2 - - 0.00301588 * x1 * x3 - - 0.00272291 * x1 * x4 - + 0.0017004 * x2 * x3 - + 0.0038428 * x2 * x4 - - 0.000198969 * x3 * x4 - + 1.86025 * x1 * x2 * 
x3 * 10 ** (-5) - - 1.88719 * x1 * x2 * x4 * 10 ** (-6) - + 2.50923 * x1 * x3 * x4 * 10 ** (-5) - - 5.62199 * x2 * x3 * x4 * 10 ** (-5) - ) - if cos_term: - h += 5.0 * np.cos(2.0 * np.pi * (x5 / 100.0)) - 2.0 - return h - - def f1(x1, x2, z1, z2, z3, z4, x5, cos_term): - c1 = z2 == 0 - c2 = z2 == 1 - c3 = z2 == 2 - - c4 = z3 == 0 - c5 = z3 == 1 - c6 = z3 == 2 - - y = ( - c4 - * ( - c1 * H(x1, x2, 20, 20, z3, z4, x5, cos_term) - + c2 * H(x1, x2, 50, 20, z3, z4, x5, cos_term) - + c3 * H(x1, x2, 80, 20, z3, z4, x5, cos_term) - ) - + c5 - * ( - c1 * H(x1, x2, 20, 50, z3, z4, x5, cos_term) - + c2 * H(x1, x2, 50, 50, z3, z4, x5, cos_term) - + c3 * H(x1, x2, 80, 50, z3, z4, x5, cos_term) - ) - + c6 - * ( - c1 * H(x1, x2, 20, 80, z3, z4, x5, cos_term) - + c2 * H(x1, x2, 50, 80, z3, z4, x5, cos_term) - + c3 * H(x1, x2, 80, 80, z3, z4, x5, cos_term) - ) - ) - return y - - def f2(x1, x2, x3, z2, z3, z4, x5, cos_term): - c1 = z2 == 0 - c2 = z2 == 1 - c3 = z2 == 2 - - y = ( - c1 * H(x1, x2, x3, 20, z3, z4, x5, cos_term) - + c2 * H(x1, x2, x3, 50, z3, z4, x5, cos_term) - + c3 * H(x1, x2, x3, 80, z3, z4, x5, cos_term) - ) - return y - - def f3(x1, x2, x4, z1, z3, z4, x5, cos_term): - c1 = z1 == 0 - c2 = z1 == 1 - c3 = z1 == 2 - - y = ( - c1 * H(x1, x2, 20, x4, z3, z4, x5, cos_term) - + c2 * H(x1, x2, 50, x4, z3, z4, x5, cos_term) - + c3 * H(x1, x2, 80, x4, z3, z4, x5, cos_term) - ) - return y - - def f_hv(X): - y = [] - for x in X: - if x[0] == 0: - y.append( - f1(x[2], x[3], x[7], x[8], x[9], x[10], x[6], cos_term=x[1]) - ) - elif x[0] == 1: - y.append( - f2(x[2], x[3], x[4], x[8], x[9], x[10], x[6], cos_term=x[1]) - ) - elif x[0] == 2: - y.append( - f3(x[2], x[3], x[5], x[7], x[9], x[10], x[6], cos_term=x[1]) - ) - elif x[0] == 3: - y.append( - H(x[2], x[3], x[4], x[5], x[9], x[10], x[6], cos_term=x[1]) - ) - return np.array(y) - - xlimits = [ - ["6,7", "3,7", "4,6", "3,4"], # meta1 ord - [0, 1], # 0 - [0, 100], # 1 - [0, 100], # 2 - [0, 100], # 3 - [0, 100], # 4 - [0, 100], # 5 - [0, 2], # 6 - [0, 2], # 7 - [0, 2], # 8 - [0, 2], # 9 - ] - xroles = [ - XRole.META, - XRole.NEUTRAL, - XRole.NEUTRAL, - XRole.NEUTRAL, - XRole.DECREED, - XRole.DECREED, - XRole.NEUTRAL, - XRole.DECREED, - XRole.DECREED, - XRole.NEUTRAL, - XRole.NEUTRAL, - ] - # z or x, cos?; x1,x2, x3, x4, x5:cos, z1,z2; exp1,exp2 - - xtypes = [ - (XType.ENUM, 4), - XType.ORD, - XType.FLOAT, - XType.FLOAT, - XType.FLOAT, - XType.FLOAT, - XType.FLOAT, - XType.ORD, - XType.ORD, - XType.ORD, - XType.ORD, - ] - xspecs = XSpecs(xtypes=xtypes, xlimits=xlimits, xroles=xroles) - n_doe = 15 - sampling = MixedIntegerSamplingMethod( - LHS, xspecs, criterion="ese", random_state=42 - ) - Xt = sampling(n_doe) - Yt = f_hv(Xt) - - sm = MixedIntegerKrigingModel( - surrogate=KRG( - xspecs=xspecs, - categorical_kernel=MixIntKernelType.HOMO_HSPHERE, - theta0=[1e-2], - corr="abs_exp", - n_start=5, - ), - ) - sm.set_training_values(Xt, Yt) - sm.train() - y_s = sm.predict_values(Xt)[:, 0] - pred_RMSE = np.linalg.norm(y_s - Yt) / len(Yt) - - y_sv = sm.predict_variances(Xt)[:, 0] - var_RMSE = np.linalg.norm(y_sv) / len(Yt) - self.assertTrue(pred_RMSE < 1e-7) - print("Pred_RMSE", pred_RMSE) - self.assertTrue(var_RMSE < 1e-7) - self.assertTrue( - np.linalg.norm( - sm.predict_values( - np.array( - [ - [0.0, 1.0, 64.0, 4.0, 56.0, 37.0, 35.0, 1.0, 2.0, 1.0, 1.0], - [1.0, 0.0, 31.0, 92.0, 24.0, 3.0, 17.0, 1.0, 2.0, 1.0, 1.0], - [2.0, 1.0, 28.0, 60.0, 77.0, 66.0, 9.0, 0.0, 1.0, 1.0, 1.0], - [ - 3.0, - 1.0, - 50.0, - 40.0, - 99.0, - 35.0, - 51.0, - 2.0, - 1.0, - 1.0, - 2.0, 
- ], - ] - ) - )[:, 0] - - sm.predict_values( - np.array( - [ - [0.0, 1.0, 64.0, 4.0, 6.0, 7.0, 35.0, 1.0, 2.0, 1.0, 1.0], - [ - 1.0, - 0.0, - 31.0, - 92.0, - 24.0, - 30.0, - 17.0, - 0.0, - 2.0, - 1.0, - 1.0, - ], - [2.0, 1.0, 28.0, 60.0, 7.0, 66.0, 9.0, 0.0, 2.0, 1.0, 1.0], - [ - 3.0, - 1.0, - 50.0, - 40.0, - 99.0, - 35.0, - 51.0, - 0.0, - 0.0, - 1.0, - 2.0, - ], - ] - ) - )[:, 0] - ) - < 1e-8 - ) - self.assertTrue( - np.linalg.norm( - sm.predict_values( - np.array( - [[1.0, 0.0, 31.0, 92.0, 24.0, 3.0, 17.0, 1.0, 2.0, 1.0, 1.0]] - ) - ) - - sm.predict_values( - np.array( - [[1.0, 0.0, 31.0, 92.0, 24.0, 3.0, 17.0, 1.0, 1.0, 1.0, 1.0]] - ) - ) - ) - > 1e-8 - ) - - def test_hierarchical_variables_NN(self): - def f_neu(x1, x2, x3, x4): - if x4 == 0: - return 2 * x1 + x2 - 0.5 * x3 - if x4 == 1: - return -x1 + 2 * x2 - 0.5 * x3 - if x4 == 2: - return -x1 + x2 + 0.5 * x3 - - def f1(x1, x2, x3, x4, x5): - return f_neu(x1, x2, x3, x4) + x5**2 - - def f2(x1, x2, x3, x4, x5, x6): - return f_neu(x1, x2, x3, x4) + (x5**2) + 0.3 * x6 - - def f3(x1, x2, x3, x4, x5, x6, x7): - return f_neu(x1, x2, x3, x4) + (x5**2) + 0.3 * x6 - 0.1 * x7**3 - - def f(X): - y = [] - for x in X: - if x[0] == 1: - y.append(f1(x[1], x[2], x[3], x[4], x[5])) - elif x[0] == 2: - y.append(f2(x[1], x[2], x[3], x[4], x[5], x[6])) - elif x[0] == 3: - y.append(f3(x[1], x[2], x[3], x[4], x[5], x[6], x[7])) - return np.array(y) - - xlimits = [ - [1, 3], # meta ord - [-5, -2], - [-5, -1], - ["8", "16", "32", "64", "128", "256"], - ["ReLU", "SELU", "ISRLU"], - [0.0, 5.0], # decreed m=1 - [0.0, 5.0], # decreed m=2 - [0.0, 5.0], # decreed m=3 - ] - xtypes = [ - XType.ORD, - XType.FLOAT, - XType.FLOAT, - XType.ORD, - (XType.ENUM, 3), - XType.ORD, - XType.ORD, - XType.ORD, - ] - xroles = [ - XRole.META, - XRole.NEUTRAL, - XRole.NEUTRAL, - XRole.NEUTRAL, - XRole.NEUTRAL, - XRole.DECREED, - XRole.DECREED, - XRole.DECREED, - ] - xspecs = XSpecs(xtypes=xtypes, xlimits=xlimits, xroles=xroles) - n_doe = 100 - - xspecs_samp = XSpecs(xtypes=xtypes[1:], xlimits=xlimits[1:]) - - sampling = MixedIntegerSamplingMethod( - LHS, xspecs_samp, criterion="ese", random_state=42 - ) - x_cont = sampling(3 * n_doe) - - xdoe1 = np.zeros((n_doe, 6)) - x_cont2 = x_cont[:n_doe, :5] - xdoe1[:, 0] = np.ones(n_doe) - xdoe1[:, 1:] = x_cont2 - ydoe1 = f(xdoe1) - - xdoe1 = np.zeros((n_doe, 8)) - xdoe1[:, 0] = np.ones(n_doe) - xdoe1[:, 1:6] = x_cont2 - - xdoe2 = np.zeros((n_doe, 7)) - x_cont2 = x_cont[n_doe : 2 * n_doe, :6] - xdoe2[:, 0] = 2 * np.ones(n_doe) - xdoe2[:, 1:7] = x_cont2 - ydoe2 = f(xdoe2) - - xdoe2 = np.zeros((n_doe, 8)) - xdoe2[:, 0] = 2 * np.ones(n_doe) - xdoe2[:, 1:7] = x_cont2 - - xdoe3 = np.zeros((n_doe, 8)) - xdoe3[:, 0] = 3 * np.ones(n_doe) - xdoe3[:, 1:] = x_cont[2 * n_doe :, :] - ydoe3 = f(xdoe3) - - Xt = np.concatenate((xdoe1, xdoe2, xdoe3), axis=0) - Yt = np.concatenate((ydoe1, ydoe2, ydoe3), axis=0) - sm = MixedIntegerKrigingModel( - surrogate=KRG( - xspecs=xspecs, - categorical_kernel=MixIntKernelType.HOMO_HSPHERE, - theta0=[1e-2], - corr="abs_exp", - n_start=5, - ), - ) - sm.set_training_values(Xt, Yt) - sm.train() - y_s = sm.predict_values(Xt)[:, 0] - pred_RMSE = np.linalg.norm(y_s - Yt) / len(Yt) - - y_sv = sm.predict_variances(Xt)[:, 0] - var_RMSE = np.linalg.norm(y_sv) / len(Yt) - self.assertTrue(pred_RMSE < 1e-7) - print("Pred_RMSE", pred_RMSE) - self.assertTrue(var_RMSE < 1e-7) - self.assertTrue( - np.linalg.norm( - sm.predict_values( - np.array( - [ - [1, -1, -2, 8, 0, 2, 0, 0], - [2, -1, -2, 16, 1, 2, 1, 0], - [3, -1, -2, 32, 2, 2, 1, 
-2], - ] - ) - )[:, 0] - - sm.predict_values( - np.array( - [ - [1, -1, -2, 8, 0, 2, 10, 10], - [2, -1, -2, 16, 1, 2, 1, 10], - [3, -1, -2, 32, 2, 2, 1, -2], - ] - ) - )[:, 0] - ) - < 1e-8 - ) - self.assertTrue( - np.linalg.norm( - sm.predict_values(np.array([[1, -1, -2, 8, 0, 2, 0, 0]])) - - sm.predict_values(np.array([[1, -1, -2, 8, 0, 12, 10, 10]])) - ) - > 1e-8 - ) - - def test_mixed_gower_2D(self): - xt = np.array([[0, 5], [2, -1], [4, 0.5]]) - yt = np.array([[0.0], [1.0], [1.5]]) - xlimits = [["0.0", "1.0", " 2.0", "3.0", "4.0"], [-5, 5]] - xtypes = [(XType.ENUM, 5), XType.FLOAT] - xspecs = XSpecs(xtypes=xtypes, xlimits=xlimits) - - # Surrogate - sm = MixedIntegerKrigingModel( - surrogate=KRG( - xspecs=xspecs, - theta0=[1e-2], - corr="abs_exp", - categorical_kernel=MixIntKernelType.GOWER, - ), - ) - sm.set_training_values(xt, yt) - sm.train() - - # DOE for validation - x = np.linspace(0, 4, 5) - x2 = np.linspace(-5, 5, 21) - x1 = [] - for element in itertools.product(x, x2): - x1.append(np.array(element)) - x_pred = np.array(x1) - - y = sm.predict_values(x_pred) - yvar = sm.predict_variances(x_pred) - - # prediction are correct on known points - self.assertTrue(np.abs(np.sum(np.array([y[20], y[50], y[95]]) - yt)) < 1e-6) - self.assertTrue(np.abs(np.sum(np.array([yvar[20], yvar[50], yvar[95]]))) < 1e-6) - - self.assertEqual(np.shape(y), (105, 1)) - - def test_mixed_homo_gaussian_2D(self): - xt = np.array([[0, 5], [2, -1], [4, 0.5]]) - yt = np.array([[0.0], [1.0], [1.5]]) - xlimits = [["0.0", "1.0", " 2.0", "3.0", "4.0"], [-5, 5]] - xtypes = [(XType.ENUM, 5), XType.FLOAT] - xspecs = XSpecs(xtypes=xtypes, xlimits=xlimits) - # Surrogate - sm = MixedIntegerKrigingModel( - surrogate=KRG( - xspecs=xspecs, - theta0=[1e-2], - corr="abs_exp", - categorical_kernel=MixIntKernelType.EXP_HOMO_HSPHERE, - ), - ) - sm.set_training_values(xt, yt) - sm.train() - - # DOE for validation - x = np.linspace(0, 4, 5) - x2 = np.linspace(-5, 5, 21) - x1 = [] - for element in itertools.product(x, x2): - x1.append(np.array(element)) - x_pred = np.array(x1) - - y = sm.predict_values(x_pred) - yvar = sm.predict_variances(x_pred) - - # prediction are correct on known points - self.assertTrue(np.abs(np.sum(np.array([y[20], y[50], y[95]]) - yt)) < 1e-6) - self.assertTrue(np.abs(np.sum(np.array([yvar[20], yvar[50], yvar[95]]))) < 1e-6) - - self.assertEqual(np.shape(y), (105, 1)) - - def test_mixed_homo_hyp_2D(self): - xt = np.array([[0, 5], [2, -1], [4, 0.5]]) - yt = np.array([[0.0], [1.0], [1.5]]) - xlimits = [["0.0", "1.0", " 2.0", "3.0", "4.0"], [-5, 5]] - xtypes = [(XType.ENUM, 5), XType.FLOAT] - xspecs = XSpecs(xtypes=xtypes, xlimits=xlimits) - # Surrogate - sm = MixedIntegerKrigingModel( - surrogate=KRG( - xspecs=xspecs, - theta0=[1e-2], - categorical_kernel=MixIntKernelType.HOMO_HSPHERE, - corr="abs_exp", - ), - ) - sm.set_training_values(xt, yt) - sm.train() - - # DOE for validation - x = np.linspace(0, 4, 5) - x2 = np.linspace(-5, 5, 21) - x1 = [] - for element in itertools.product(x, x2): - x1.append(np.array(element)) - x_pred = np.array(x1) - - y = sm.predict_values(x_pred) - yvar = sm.predict_variances(x_pred) - - # prediction are correct on known points - self.assertTrue(np.abs(np.sum(np.array([y[20], y[50], y[95]]) - yt)) < 1e-6) - self.assertTrue(np.abs(np.sum(np.array([yvar[20], yvar[50], yvar[95]]))) < 1e-6) - - self.assertEqual(np.shape(y), (105, 1)) - - def test_mixed_homo_gaussian_3D_PLS(self): - xt = np.array([[0.5, 0, 5], [2, 3, 4], [5, 2, -1], [-2, 4, 0.5]]) - yt = np.array([[0.0], [3], [1.0], 
[1.5]]) - xlimits = [[-5, 5], ["0.0", "1.0", " 2.0", "3.0", "4.0"], [-5, 5]] - xtypes = [XType.FLOAT, (XType.ENUM, 5), XType.FLOAT] - xspecs = XSpecs(xtypes=xtypes, xlimits=xlimits) - # Surrogate - sm = surrogate = KPLS( - xspecs=xspecs, - theta0=[1e-2], - n_comp=1, - categorical_kernel=MixIntKernelType.EXP_HOMO_HSPHERE, - cat_kernel_comps=[3], - corr="squar_exp", - ) - sm.set_training_values(xt, yt) - sm.train() - - # DOE for validation - x = np.linspace(0, 4, 5) - x2 = np.linspace(-5, 5, 21) - x1 = [] - for element in itertools.product(x2, x, x2): - x1.append(np.array(element)) - x_pred = np.array(x1) - - i = 0 - i += 1 - y = sm.predict_values(x_pred) - yvar = sm.predict_variances(x_pred) - - self.assertTrue((np.abs(np.sum(np.array(sm.predict_values(xt) - yt)))) < 1e-6) - self.assertTrue((np.abs(np.sum(np.array(sm.predict_variances(xt) - 0)))) < 1e-6) - - def test_mixed_homo_gaussian_3D_PLS_cate(self): - xt = np.array([[0.5, 0, 5], [2, 3, 4], [5, 2, -1], [-2, 4, 0.5]]) - yt = np.array([[0.0], [3], [1.0], [1.5]]) - xlimits = [[-5, 5], ["0.0", "1.0", " 2.0", "3.0", "4.0"], [-5, 5]] - xtypes = [XType.FLOAT, (XType.ENUM, 5), XType.FLOAT] - xspecs = XSpecs(xtypes=xtypes, xlimits=xlimits) - # Surrogate - sm = KPLS( - xspecs=xspecs, - theta0=[1e-2], - n_comp=2, - corr="abs_exp", - cat_kernel_comps=[3], - categorical_kernel=MixIntKernelType.EXP_HOMO_HSPHERE, - ) - - sm.set_training_values(xt, yt) - sm.train() - - # DOE for validation - x = np.linspace(0, 4, 5) - x2 = np.linspace(-5, 5, 21) - x1 = [] - for element in itertools.product(x2, x, x2): - x1.append(np.array(element)) - x_pred = np.array(x1) - - i = 0 - i += 1 - y = sm.predict_values(x_pred) - yvar = sm.predict_variances(x_pred) - - self.assertTrue((np.abs(np.sum(np.array(sm.predict_values(xt) - yt)))) < 1e-6) - self.assertTrue((np.abs(np.sum(np.array(sm.predict_variances(xt) - 0)))) < 1e-6) - - def test_mixed_homo_hyp_3D_PLS_cate(self): - xt = np.array([[0.5, 0, 5], [2, 3, 4], [5, 2, -1], [-2, 4, 0.5]]) - yt = np.array([[0.0], [3], [1.0], [1.5]]) - xlimits = [[-5, 5], ["0.0", "1.0", " 2.0", "3.0", "4.0"], [-5, 5]] - xtypes = [XType.FLOAT, (XType.ENUM, 5), XType.FLOAT] - xspecs = XSpecs(xtypes=xtypes, xlimits=xlimits) - # Surrogate - sm = MixedIntegerKrigingModel( - surrogate=KPLS( - xspecs=xspecs, - theta0=[1e-2], - n_comp=1, - categorical_kernel=MixIntKernelType.HOMO_HSPHERE, - cat_kernel_comps=[3], - corr="squar_exp", - ), - ) - sm.set_training_values(xt, yt) - sm.train() - - # DOE for validation - x = np.linspace(0, 4, 5) - x2 = np.linspace(-5, 5, 21) - x1 = [] - for element in itertools.product(x2, x, x2): - x1.append(np.array(element)) - x_pred = np.array(x1) - - i = 0 - i += 1 - y = sm.predict_values(x_pred) - yvar = sm.predict_variances(x_pred) - - self.assertTrue((np.abs(np.sum(np.array(sm.predict_values(xt) - yt)))) < 1e-6) - self.assertTrue((np.abs(np.sum(np.array(sm.predict_variances(xt) - 0)))) < 1e-6) - - def test_mixed_homo_gaussian_3D_ord_cate(self): - xt = np.array([[0.5, 0, 5], [2, 3, 4], [5, 2, -1], [-2, 4, 0.5]]) - yt = np.array([[0.0], [3], [1.0], [1.5]]) - xlimits = [ - ["0.0", "1.0", " 2.0", "3.0", "4.0"], - [-5, 5], - ["0.0", "1.0", " 2.0", "3.0"], - ] - xtypes = [(XType.ENUM, 5), XType.ORD, (XType.ENUM, 4)] - xspecs = XSpecs(xtypes=xtypes, xlimits=xlimits) - # Surrogate - sm = MixedIntegerKrigingModel( - surrogate=KPLS( - xspecs=xspecs, - theta0=[1e-2], - n_comp=1, - categorical_kernel=MixIntKernelType.EXP_HOMO_HSPHERE, - cat_kernel_comps=[3, 2], - corr="squar_exp", - ), - ) - sm.set_training_values(xt, yt) - 
sm.train() - - # DOE for validation - x = np.linspace(0, 4, 5) - x2 = np.linspace(-5, 5, 21) - x3 = np.linspace(0, 3, 4) - x1 = [] - for element in itertools.product(x, x2, x3): - x1.append(np.array(element)) - x_pred = np.array(x1) - - y = sm.predict_values(x_pred) - yvar = sm.predict_variances(x_pred) - - # prediction are correct on known points - self.assertTrue((np.abs(np.sum(np.array(sm.predict_values(xt) - yt)) < 1e-6))) - self.assertTrue((np.abs(np.sum(np.array(sm.predict_variances(xt) - 0)) < 1e-6))) - - def test_mixed_gower_3D(self): - xtypes = [XType.FLOAT, XType.ORD, XType.ORD] - xlimits = [[-10, 10], [-10, 10], [-10, 10]] - xspecs = XSpecs(xtypes=xtypes, xlimits=xlimits) - mixint = MixedIntegerContext(xspecs=xspecs) - - sm = mixint.build_kriging_model( - KRG(categorical_kernel=MixIntKernelType.GOWER, print_prediction=False) - ) - sampling = mixint.build_sampling_method(LHS, criterion="m") - - fun = Sphere(ndim=3) - xt = sampling(10) - yt = fun(xt) - sm.set_training_values(xt, yt) - sm.train() - eq_check = True - for i in range(xt.shape[0]): - if abs(float(xt[i, :][1]) - int(float(xt[i, :][1]))) > 10e-8: - eq_check = False - self.assertTrue(eq_check) - - def test_examples(self): - self.run_mixed_gower_example() - self.run_mixed_homo_gaussian_example() - self.run_mixed_homo_hyp_example() - - def run_mixed_gower_example(self): - import numpy as np - import matplotlib.pyplot as plt - - from smt.surrogate_models import KRG, XType, XSpecs, MixIntKernelType - from smt.applications.mixed_integer import MixedIntegerKrigingModel - - xt1 = np.array([[0, 0.0], [0, 2.0], [0, 4.0]]) - xt2 = np.array([[1, 0.0], [1, 2.0], [1, 3.0]]) - xt3 = np.array([[2, 1.0], [2, 2.0], [2, 4.0]]) - - xt = np.concatenate((xt1, xt2, xt3), axis=0) - xt[:, 1] = xt[:, 1].astype(np.float64) - yt1 = np.array([0.0, 9.0, 16.0]) - yt2 = np.array([0.0, -4, -13.0]) - yt3 = np.array([-10, 3, 11.0]) - - yt = np.concatenate((yt1, yt2, yt3), axis=0) - xlimits = [["Blue", "Red", "Green"], [0.0, 4.0]] - xtypes = [(XType.ENUM, 3), XType.FLOAT] - xspecs = XSpecs(xtypes=xtypes, xlimits=xlimits) - # Surrogate - sm = MixedIntegerKrigingModel( - surrogate=KRG( - xspecs=xspecs, - categorical_kernel=MixIntKernelType.GOWER, - theta0=[1e-1], - corr="squar_exp", - n_start=20, - ), - ) - sm.set_training_values(xt, yt) - sm.train() - - # DOE for validation - n = 100 - x_cat1 = [] - x_cat2 = [] - x_cat3 = [] - - for i in range(n): - x_cat1.append(0) - x_cat2.append(1) - x_cat3.append(2) - - x_cont = np.linspace(0.0, 4.0, n) - x1 = np.concatenate( - (np.asarray(x_cat1).reshape(-1, 1), x_cont.reshape(-1, 1)), axis=1 - ) - x2 = np.concatenate( - (np.asarray(x_cat2).reshape(-1, 1), x_cont.reshape(-1, 1)), axis=1 - ) - x3 = np.concatenate( - (np.asarray(x_cat3).reshape(-1, 1), x_cont.reshape(-1, 1)), axis=1 - ) - - y1 = sm.predict_values(x1) - y2 = sm.predict_values(x2) - y3 = sm.predict_values(x3) - - # estimated variance - s2_1 = sm.predict_variances(x1) - s2_2 = sm.predict_variances(x2) - s2_3 = sm.predict_variances(x3) - - fig, axs = plt.subplots(3, figsize=(8, 6)) - - axs[0].plot(xt1[:, 1].astype(np.float64), yt1, "o", linestyle="None") - axs[0].plot(x_cont, y1, color="Blue") - axs[0].fill_between( - np.ravel(x_cont), - np.ravel(y1 - 3 * np.sqrt(s2_1)), - np.ravel(y1 + 3 * np.sqrt(s2_1)), - color="lightgrey", - ) - axs[0].set_xlabel("x") - axs[0].set_ylabel("y") - axs[0].legend( - ["Training data", "Prediction", "Confidence Interval 99%"], - loc="upper left", - bbox_to_anchor=[0, 1], - ) - axs[1].plot( - xt2[:, 1].astype(np.float64), yt2, 
marker="o", color="r", linestyle="None" - ) - axs[1].plot(x_cont, y2, color="Red") - axs[1].fill_between( - np.ravel(x_cont), - np.ravel(y2 - 3 * np.sqrt(s2_2)), - np.ravel(y2 + 3 * np.sqrt(s2_2)), - color="lightgrey", - ) - axs[1].set_xlabel("x") - axs[1].set_ylabel("y") - axs[1].legend( - ["Training data", "Prediction", "Confidence Interval 99%"], - loc="upper left", - bbox_to_anchor=[0, 1], - ) - axs[2].plot( - xt3[:, 1].astype(np.float64), yt3, marker="o", color="r", linestyle="None" - ) - axs[2].plot(x_cont, y3, color="Green") - axs[2].fill_between( - np.ravel(x_cont), - np.ravel(y3 - 3 * np.sqrt(s2_3)), - np.ravel(y3 + 3 * np.sqrt(s2_3)), - color="lightgrey", - ) - axs[2].set_xlabel("x") - axs[2].set_ylabel("y") - axs[2].legend( - ["Training data", "Prediction", "Confidence Interval 99%"], - loc="upper left", - bbox_to_anchor=[0, 1], - ) - plt.tight_layout() - plt.show() - - def run_mixed_homo_gaussian_example(self): - import numpy as np - import matplotlib.pyplot as plt - - from smt.surrogate_models import KRG, XType, XSpecs, MixIntKernelType - from smt.applications.mixed_integer import MixedIntegerKrigingModel - - xt1 = np.array([[0, 0.0], [0, 2.0], [0, 4.0]]) - xt2 = np.array([[1, 0.0], [1, 2.0], [1, 3.0]]) - xt3 = np.array([[2, 1.0], [2, 2.0], [2, 4.0]]) - - xt = np.concatenate((xt1, xt2, xt3), axis=0) - xt[:, 1] = xt[:, 1].astype(np.float64) - yt1 = np.array([0.0, 9.0, 16.0]) - yt2 = np.array([0.0, -4, -13.0]) - yt3 = np.array([-10, 3, 11.0]) - - yt = np.concatenate((yt1, yt2, yt3), axis=0) - xlimits = [["Blue", "Red", "Green"], [0.0, 4.0]] - xtypes = [(XType.ENUM, 3), XType.FLOAT] - xspecs = XSpecs(xtypes=xtypes, xlimits=xlimits) - # Surrogate - sm = MixedIntegerKrigingModel( - surrogate=KRG( - xspecs=xspecs, - theta0=[1e-1], - corr="squar_exp", - n_start=20, - categorical_kernel=MixIntKernelType.EXP_HOMO_HSPHERE, - ), - ) - sm.set_training_values(xt, yt) - sm.train() - - # DOE for validation - n = 100 - x_cat1 = [] - x_cat2 = [] - x_cat3 = [] - - for i in range(n): - x_cat1.append(0) - x_cat2.append(1) - x_cat3.append(2) - - x_cont = np.linspace(0.0, 4.0, n) - x1 = np.concatenate( - (np.asarray(x_cat1).reshape(-1, 1), x_cont.reshape(-1, 1)), axis=1 - ) - x2 = np.concatenate( - (np.asarray(x_cat2).reshape(-1, 1), x_cont.reshape(-1, 1)), axis=1 - ) - x3 = np.concatenate( - (np.asarray(x_cat3).reshape(-1, 1), x_cont.reshape(-1, 1)), axis=1 - ) - - y1 = sm.predict_values(x1) - y2 = sm.predict_values(x2) - y3 = sm.predict_values(x3) - - # estimated variance - s2_1 = sm.predict_variances(x1) - s2_2 = sm.predict_variances(x2) - s2_3 = sm.predict_variances(x3) - - fig, axs = plt.subplots(3, figsize=(8, 6)) - - axs[0].plot(xt1[:, 1].astype(np.float64), yt1, "o", linestyle="None") - axs[0].plot(x_cont, y1, color="Blue") - axs[0].fill_between( - np.ravel(x_cont), - np.ravel(y1 - 3 * np.sqrt(s2_1)), - np.ravel(y1 + 3 * np.sqrt(s2_1)), - color="lightgrey", - ) - axs[0].set_xlabel("x") - axs[0].set_ylabel("y") - axs[0].legend( - ["Training data", "Prediction", "Confidence Interval 99%"], - loc="upper left", - bbox_to_anchor=[0, 1], - ) - axs[1].plot( - xt2[:, 1].astype(np.float64), yt2, marker="o", color="r", linestyle="None" - ) - axs[1].plot(x_cont, y2, color="Red") - axs[1].fill_between( - np.ravel(x_cont), - np.ravel(y2 - 3 * np.sqrt(s2_2)), - np.ravel(y2 + 3 * np.sqrt(s2_2)), - color="lightgrey", - ) - axs[1].set_xlabel("x") - axs[1].set_ylabel("y") - axs[1].legend( - ["Training data", "Prediction", "Confidence Interval 99%"], - loc="upper left", - bbox_to_anchor=[0, 1], - ) - 
axs[2].plot( - xt3[:, 1].astype(np.float64), yt3, marker="o", color="r", linestyle="None" - ) - axs[2].plot(x_cont, y3, color="Green") - axs[2].fill_between( - np.ravel(x_cont), - np.ravel(y3 - 3 * np.sqrt(s2_3)), - np.ravel(y3 + 3 * np.sqrt(s2_3)), - color="lightgrey", - ) - axs[2].set_xlabel("x") - axs[2].set_ylabel("y") - axs[2].legend( - ["Training data", "Prediction", "Confidence Interval 99%"], - loc="upper left", - bbox_to_anchor=[0, 1], - ) - plt.tight_layout() - plt.show() - - def run_mixed_homo_hyp_example(self): - import numpy as np - import matplotlib.pyplot as plt - - from smt.surrogate_models import KRG, XType, XSpecs, MixIntKernelType - from smt.applications.mixed_integer import MixedIntegerKrigingModel - - xt1 = np.array([[0, 0.0], [0, 2.0], [0, 4.0]]) - xt2 = np.array([[1, 0.0], [1, 2.0], [1, 3.0]]) - xt3 = np.array([[2, 1.0], [2, 2.0], [2, 4.0]]) - - xt = np.concatenate((xt1, xt2, xt3), axis=0) - xt[:, 1] = xt[:, 1].astype(np.float64) - yt1 = np.array([0.0, 9.0, 16.0]) - yt2 = np.array([0.0, -4, -13.0]) - yt3 = np.array([-10, 3, 11.0]) - - yt = np.concatenate((yt1, yt2, yt3), axis=0) - xlimits = [["Blue", "Red", "Green"], [0.0, 4.0]] - xtypes = [(XType.ENUM, 3), XType.FLOAT] - xspecs = XSpecs(xtypes=xtypes, xlimits=xlimits) - # Surrogate - sm = MixedIntegerKrigingModel( - surrogate=KRG( - xspecs=xspecs, - categorical_kernel=MixIntKernelType.HOMO_HSPHERE, - theta0=[1e-1], - corr="squar_exp", - n_start=20, - ), - ) - sm.set_training_values(xt, yt) - sm.train() - - # DOE for validation - n = 100 - x_cat1 = [] - x_cat2 = [] - x_cat3 = [] - - for i in range(n): - x_cat1.append(0) - x_cat2.append(1) - x_cat3.append(2) - - x_cont = np.linspace(0.0, 4.0, n) - x1 = np.concatenate( - (np.asarray(x_cat1).reshape(-1, 1), x_cont.reshape(-1, 1)), axis=1 - ) - x2 = np.concatenate( - (np.asarray(x_cat2).reshape(-1, 1), x_cont.reshape(-1, 1)), axis=1 - ) - x3 = np.concatenate( - (np.asarray(x_cat3).reshape(-1, 1), x_cont.reshape(-1, 1)), axis=1 - ) - - y1 = sm.predict_values(x1) - y2 = sm.predict_values(x2) - y3 = sm.predict_values(x3) - - # estimated variance - s2_1 = sm.predict_variances(x1) - s2_2 = sm.predict_variances(x2) - s2_3 = sm.predict_variances(x3) - - fig, axs = plt.subplots(3, figsize=(8, 6)) - - axs[0].plot(xt1[:, 1].astype(np.float64), yt1, "o", linestyle="None") - axs[0].plot(x_cont, y1, color="Blue") - axs[0].fill_between( - np.ravel(x_cont), - np.ravel(y1 - 3 * np.sqrt(s2_1)), - np.ravel(y1 + 3 * np.sqrt(s2_1)), - color="lightgrey", - ) - axs[0].set_xlabel("x") - axs[0].set_ylabel("y") - axs[0].legend( - ["Training data", "Prediction", "Confidence Interval 99%"], - loc="upper left", - bbox_to_anchor=[0, 1], - ) - axs[1].plot( - xt2[:, 1].astype(np.float64), yt2, marker="o", color="r", linestyle="None" - ) - axs[1].plot(x_cont, y2, color="Red") - axs[1].fill_between( - np.ravel(x_cont), - np.ravel(y2 - 3 * np.sqrt(s2_2)), - np.ravel(y2 + 3 * np.sqrt(s2_2)), - color="lightgrey", - ) - axs[1].set_xlabel("x") - axs[1].set_ylabel("y") - axs[1].legend( - ["Training data", "Prediction", "Confidence Interval 99%"], - loc="upper left", - bbox_to_anchor=[0, 1], - ) - axs[2].plot( - xt3[:, 1].astype(np.float64), yt3, marker="o", color="r", linestyle="None" - ) - axs[2].plot(x_cont, y3, color="Green") - axs[2].fill_between( - np.ravel(x_cont), - np.ravel(y3 - 3 * np.sqrt(s2_3)), - np.ravel(y3 + 3 * np.sqrt(s2_3)), - color="lightgrey", - ) - axs[2].set_xlabel("x") - axs[2].set_ylabel("y") - axs[2].legend( - ["Training data", "Prediction", "Confidence Interval 99%"], - loc="upper 
left", - bbox_to_anchor=[0, 1], - ) - plt.tight_layout() - plt.show() - - -if __name__ == "__main__": - TestMixedInteger().run_mixed_integer_context_example() - unittest.main() +""" +Created on Tue Oct 12 10:48:01 2021 +@author: psaves +""" + +import unittest +import numpy as np +import matplotlib +import itertools + +matplotlib.use("Agg") + +from smt.utils.kriging import XSpecs + +from smt.applications.mixed_integer import ( + MixedIntegerContext, + MixedIntegerSamplingMethod, + MixedIntegerKrigingModel, +) +from smt.utils.mixed_integer import ( + unfold_xlimits_with_continuous_limits, + fold_with_enum_index, + unfold_with_enum_mask, + compute_unfolded_dimension, + cast_to_enum_value, + cast_to_mixed_integer, + cast_to_discrete_values, + encode_with_enum_index, +) +from smt.problems import Sphere +from smt.sampling_methods import LHS +from smt.surrogate_models import ( + KRG, + KPLS, + QP, + XType, + XRole, + MixIntKernelType, +) + + +class TestMixedInteger(unittest.TestCase): + def test_krg_mixed_3D_INT(self): + xtypes = [XType.FLOAT, (XType.ENUM, 3), XType.ORD] + xlimits = [[-10, 10], ["blue", "red", "green"], [-10, 10]] + xspecs = XSpecs(xtypes=xtypes, xlimits=xlimits) + + mixint = MixedIntegerContext(xspecs=xspecs) + + sm = mixint.build_kriging_model(KRG(print_prediction=False)) + sampling = mixint.build_sampling_method(LHS, criterion="m") + + fun = Sphere(ndim=3) + xt = sampling(20) + yt = fun(xt) + sm.set_training_values(xt, yt) + sm.train() + + eq_check = True + for i in range(xt.shape[0]): + if abs(float(xt[i, :][2]) - int(float(xt[i, :][2]))) > 10e-8: + eq_check = False + if not (xt[i, :][1] == 0 or xt[i, :][1] == 1 or xt[i, :][1] == 2): + eq_check = False + self.assertTrue(eq_check) + + def test_krg_mixed_3D(self): + xtypes = [XType.FLOAT, (XType.ENUM, 3), XType.ORD] + xlimits = [[-10, 10], ["blue", "red", "green"], [-10, 10]] + xspecs = XSpecs(xtypes=xtypes, xlimits=xlimits) + + mixint = MixedIntegerContext(xspecs=xspecs) + + sm = mixint.build_kriging_model(KRG(print_prediction=False)) + sampling = mixint.build_sampling_method(LHS, criterion="m") + + fun = Sphere(ndim=3) + xt = sampling(20) + yt = fun(xt) + sm.set_training_values(xt, yt) + sm.train() + + eq_check = True + for i in range(xt.shape[0]): + if abs(float(xt[i, :][2]) - int(float(xt[i, :][2]))) > 10e-8: + eq_check = False + if not (xt[i, :][1] == 0 or xt[i, :][1] == 1 or xt[i, :][1] == 2): + eq_check = False + self.assertTrue(eq_check) + + def test_krg_mixed_3D_bad_regr(self): + xtypes = [XType.FLOAT, (XType.ENUM, 3), XType.ORD] + xlimits = [[-10, 10], ["blue", "red", "green"], [-10, 10]] + xspecs = XSpecs(xtypes=xtypes, xlimits=xlimits) + + mixint = MixedIntegerContext(xspecs=xspecs) + with self.assertRaises(ValueError): + sm = mixint.build_kriging_model(KRG(print_prediction=False, poly="linear")) + + def test_qp_mixed_2D_INT(self): + xtypes = [XType.FLOAT, XType.ORD] + xlimits = [[-10, 10], [-10, 10]] + xspecs = XSpecs(xtypes=xtypes, xlimits=xlimits) + + mixint = MixedIntegerContext(xspecs=xspecs) + sm = mixint.build_surrogate_model(QP(print_prediction=False)) + sampling = mixint.build_sampling_method(LHS, criterion="m") + + fun = Sphere(ndim=2) + xt = sampling(10) + yt = fun(xt) + sm.set_training_values(xt, yt) + sm.train() + + eq_check = True + for i in range(xt.shape[0]): + if abs(float(xt[i, :][1]) - int(float(xt[i, :][1]))) > 10e-8: + eq_check = False + self.assertTrue(eq_check) + + def test_compute_unfolded_dimension(self): + xtypes = [XType.FLOAT, (XType.ENUM, 2)] + self.assertEqual(3, 
compute_unfolded_dimension(xtypes))
+
+    def test_unfold_with_enum_mask(self):
+        xtypes = [XType.FLOAT, (XType.ENUM, 2)]
+        x = np.array([[1.5, 1], [1.5, 0], [1.5, 1]])
+        expected = [[1.5, 0, 1], [1.5, 1, 0], [1.5, 0, 1]]
+        self.assertListEqual(expected, unfold_with_enum_mask(xtypes, x).tolist())
+
+    def test_unfold_with_enum_mask_with_enum_first(self):
+        xtypes = [(XType.ENUM, 2), XType.FLOAT]
+        x = np.array([[1, 1.5], [0, 1.5], [1, 1.5]])
+        expected = [[0, 1, 1.5], [1, 0, 1.5], [0, 1, 1.5]]
+        self.assertListEqual(expected, unfold_with_enum_mask(xtypes, x).tolist())
+
+    def test_fold_with_enum_index(self):
+        xtypes = [XType.FLOAT, (XType.ENUM, 2)]
+        x = np.array([[1.5, 0, 1], [1.5, 1, 0], [1.5, 0, 1]])
+        expected = [[1.5, 1], [1.5, 0], [1.5, 1]]
+        self.assertListEqual(expected, fold_with_enum_index(xtypes, x).tolist())
+
+    def test_fold_with_enum_index_with_list(self):
+        xtypes = [XType.FLOAT, (XType.ENUM, 2)]
+        expected = [[1.5, 1]]
+        x = np.array([1.5, 0, 1])
+        self.assertListEqual(expected, fold_with_enum_index(xtypes, x).tolist())
+        x = [1.5, 0, 1]
+        self.assertListEqual(expected, fold_with_enum_index(xtypes, x).tolist())
+
+    def test_cast_to_enum_value(self):
+        xlimits = [[0.0, 4.0], ["blue", "red"]]
+        x_col = 1
+        enum_indexes = [1, 1, 0, 1, 0]
+        expected = ["red", "red", "blue", "red", "blue"]
+        self.assertListEqual(expected, cast_to_enum_value(xlimits, x_col, enum_indexes))
+
+    def test_unfolded_xlimits_type(self):
+        xtypes = [XType.FLOAT, (XType.ENUM, 2), (XType.ENUM, 2), XType.ORD]
+        xlimits = np.array([[-5, 5], ["2", "3"], ["4", "5"], [0, 2]])
+        xspecs = XSpecs(xtypes=xtypes, xlimits=xlimits)
+        sampling = MixedIntegerSamplingMethod(LHS, xspecs, criterion="ese")
+        doe = sampling(10)
+        self.assertEqual((10, 4), doe.shape)
+
+    def test_cast_to_mixed_integer(self):
+        xtypes = [XType.FLOAT, (XType.ENUM, 2), (XType.ENUM, 3), XType.ORD]
+        xlimits = np.array(
+            [[-5, 5], ["blue", "red"], ["short", "medium", "long"], [0, 2]],
+            dtype="object",
+        )
+        xspecs = XSpecs(xtypes=xtypes, xlimits=xlimits)
+
+        x = np.array([1.5, 0, 2, 1.1])
+        self.assertEqual([1.5, "blue", "long", 1], cast_to_mixed_integer(xspecs, x))
+
+    def test_encode_with_enum_index(self):
+        xtypes = [XType.FLOAT, (XType.ENUM, 2), (XType.ENUM, 3), XType.ORD]
+        xlimits = np.array(
+            [[-5, 5], ["blue", "red"], ["short", "medium", "long"], [0, 2]],
+            dtype="object",
+        )
+        xspecs = XSpecs(xtypes=xtypes, xlimits=xlimits)
+
+        x = [1.5, "blue", "long", 1]
+        self.assertEqual(
+            np.array_equal(
+                np.array([1.5, 0, 2, 1]),
+                encode_with_enum_index(xspecs, x),
+            ),
+            True,
+        )
+
+    def test_unfold_xlimits_with_continuous_limits(self):
+        xtypes = [XType.FLOAT, (XType.ENUM, 2), (XType.ENUM, 3), XType.ORD]
+        xlimits = np.array(
+            [[-5, 5], ["blue", "red"], ["short", "medium", "long"], [0, 2]],
+            dtype="object",
+        )
+        xspecs = XSpecs(xtypes=xtypes, xlimits=xlimits)
+
+        self.assertEqual(
+            np.array_equal(
+                [[-5, 5], [0, 1], [0, 1], [0, 1], [0, 1], [0, 1], [0, 2]],
+                unfold_xlimits_with_continuous_limits(xspecs),
+            ),
+            True,
+        )
+
+    def test_unfold_xlimits_with_continuous_limits_and_ordinal_values(self):
+        xtypes = [XType.FLOAT, (XType.ENUM, 2), (XType.ENUM, 3), XType.ORD]
+        xlimits = np.array(
+            [[-5, 5], ["blue", "red"], ["short", "medium", "long"], ["0", "3", "4"]],
+            dtype="object",
+        )
+        xspecs = XSpecs(xtypes=xtypes, xlimits=xlimits)
+
+        self.assertEqual(
+            np.array_equal(
+                [[-5, 5], [0, 1], [0, 1], [0, 1], [0, 1], [0, 1], [0, 4]],
+                
unfold_xlimits_with_continuous_limits(xspecs), + ), + True, + ) + + def test_cast_to_discrete_values(self): + xtypes = [XType.FLOAT, (XType.ENUM, 2), (XType.ENUM, 3), XType.ORD] + xlimits = np.array( + [[-5, 5], ["blue", "red"], ["short", "medium", "long"], [0, 4]], + dtype="object", + ) + xspecs = XSpecs(xtypes=xtypes, xlimits=xlimits) + + x = np.array([[2.6, 0.3, 0.5, 0.25, 0.45, 0.85, 3.1]]) + + self.assertEqual( + np.array_equal( + np.array([[2.6, 0, 1, 0, 0, 1, 3]]), + cast_to_discrete_values(xspecs, True, x), + ), + True, + ) + + def test_cast_to_discrete_values_with_smooth_rounding_ordinal_values(self): + xtypes = [XType.FLOAT, (XType.ENUM, 2), (XType.ENUM, 3), XType.ORD] + + x = np.array([[2.6, 0.3, 0.5, 0.25, 0.45, 0.85, 3.1]]) + xlimits = np.array( + [[-5, 5], ["blue", "red"], ["short", "medium", "long"], ["0", "2", "4"]], + dtype="object", + ) + xspecs = XSpecs(xtypes=xtypes, xlimits=xlimits) + + self.assertEqual( + np.array_equal( + np.array([[2.6, 0, 1, 0, 0, 1, 4]]), + cast_to_discrete_values(xspecs, True, x), + ), + True, + ) + + def test_cast_to_discrete_values_with_hard_rounding_ordinal_values(self): + xtypes = [XType.FLOAT, (XType.ENUM, 2), (XType.ENUM, 3), XType.ORD] + + x = np.array([[2.6, 0.3, 0.5, 0.25, 0.45, 0.85, 3.1]]) + xlimits = np.array( + [[-5, 5], ["blue", "red"], ["short", "medium", "long"], ["0", "4"]], + dtype="object", + ) + xspecs = XSpecs(xtypes=xtypes, xlimits=xlimits) + + self.assertEqual( + np.array_equal( + np.array([[2.6, 0, 1, 0, 0, 1, 4]]), + cast_to_discrete_values(xspecs, True, x), + ), + True, + ) + + def test_cast_to_discrete_values_with_non_integer_ordinal_values(self): + xtypes = [XType.FLOAT, (XType.ENUM, 2), (XType.ENUM, 3), XType.ORD] + + x = np.array([[2.6, 0.3, 0.5, 0.25, 0.45, 0.85, 3.1]]) + xlimits = np.array( + [[-5, 5], ["blue", "red"], ["short", "medium", "long"], ["0", "3.5"]], + dtype="object", + ) + xspecs = XSpecs(xtypes=xtypes, xlimits=xlimits) + + self.assertEqual( + np.array_equal( + np.array([[2.6, 0, 1, 0, 0, 1, 3.5]]), + cast_to_discrete_values(xspecs, True, x), + ), + True, + ) + + def test_examples(self): + self.run_mixed_integer_lhs_example() + self.run_mixed_integer_qp_example() + self.run_mixed_integer_context_example() + + def run_mixed_integer_lhs_example(self): + import numpy as np + import matplotlib.pyplot as plt + from matplotlib import colors + + from smt.sampling_methods import LHS + from smt.surrogate_models import XType, XSpecs + from smt.applications.mixed_integer import MixedIntegerSamplingMethod + + xtypes = [XType.FLOAT, (XType.ENUM, 2)] + xlimits = [[0.0, 4.0], ["blue", "red"]] + xspecs = XSpecs(xtypes=xtypes, xlimits=xlimits) + + sampling = MixedIntegerSamplingMethod(LHS, xspecs, criterion="ese") + + num = 40 + x = sampling(num) + + cmap = colors.ListedColormap(xlimits[1]) + plt.scatter(x[:, 0], np.zeros(num), c=x[:, 1], cmap=cmap) + plt.show() + + def run_mixed_integer_qp_example(self): + import numpy as np + import matplotlib.pyplot as plt + + from smt.surrogate_models import QP, XType, XSpecs + from smt.applications.mixed_integer import MixedIntegerSurrogateModel + + xt = np.array([0.0, 1.0, 2.0, 3.0, 4.0]) + yt = np.array([0.0, 1.0, 1.5, 0.5, 1.0]) + + # xtypes = [XType.FLOAT, XType.ORD, (ENUM, 3), (ENUM, 2)] + # XType.FLOAT means x1 continuous + # XType.ORD means x2 ordered + # (ENUM, 3) means x3, x4 & x5 are 3 levels of the same categorical variable + # (ENUM, 2) means x6 & x7 are 2 levels of the same categorical variable + xspecs = XSpecs(xtypes=[XType.ORD], xlimits=[[0, 4]]) + sm = 
MixedIntegerSurrogateModel(xspecs=xspecs, surrogate=QP()) + sm.set_training_values(xt, yt) + sm.train() + + num = 100 + x = np.linspace(0.0, 4.0, num) + y = sm.predict_values(x) + + plt.plot(xt, yt, "o") + plt.plot(x, y) + plt.xlabel("x") + plt.ylabel("y") + plt.legend(["Training data", "Prediction"]) + plt.show() + + def run_mixed_integer_context_example(self): + import numpy as np + import matplotlib.pyplot as plt + from matplotlib import colors + from mpl_toolkits.mplot3d import Axes3D + + from smt.sampling_methods import LHS, Random + from smt.surrogate_models import KRG, XType, XSpecs + from smt.applications.mixed_integer import MixedIntegerContext + + xtypes = [XType.ORD, XType.FLOAT, (XType.ENUM, 4)] + xlimits = [[0, 5], [0.0, 4.0], ["blue", "red", "green", "yellow"]] + xspecs = XSpecs(xtypes=xtypes, xlimits=xlimits) + + def ftest(x): + return (x[:, 0] * x[:, 0] + x[:, 1] * x[:, 1]) * (x[:, 2] + 1) + + # context to create consistent DOEs and surrogate + mixint = MixedIntegerContext(xspecs=xspecs) + + # DOE for training + lhs = mixint.build_sampling_method(LHS, criterion="ese") + + num = mixint.get_unfolded_dimension() * 5 + print("DOE point nb = {}".format(num)) + xt = lhs(num) + yt = ftest(xt) + + # Surrogate + sm = mixint.build_kriging_model(KRG()) + sm.set_training_values(xt, yt) + sm.train() + + # DOE for validation + rand = mixint.build_sampling_method(Random) + xv = rand(50) + yv = ftest(xv) + yp = sm.predict_values(xv) + + plt.plot(yv, yv) + plt.plot(yv, yp, "o") + plt.xlabel("actual") + plt.ylabel("prediction") + + plt.show() + + def test_hierarchical_variables_Goldstein(self): + def H(x1, x2, x3, x4, z3, z4, x5, cos_term): + h = ( + 53.3108 + + 0.184901 * x1 + - 5.02914 * x1**3 * 10 ** (-6) + + 7.72522 * x1**z3 * 10 ** (-8) + - 0.0870775 * x2 + - 0.106959 * x3 + + 7.98772 * x3**z4 * 10 ** (-6) + + 0.00242482 * x4 + + 1.32851 * x4**3 * 10 ** (-6) + - 0.00146393 * x1 * x2 + - 0.00301588 * x1 * x3 + - 0.00272291 * x1 * x4 + + 0.0017004 * x2 * x3 + + 0.0038428 * x2 * x4 + - 0.000198969 * x3 * x4 + + 1.86025 * x1 * x2 * x3 * 10 ** (-5) + - 1.88719 * x1 * x2 * x4 * 10 ** (-6) + + 2.50923 * x1 * x3 * x4 * 10 ** (-5) + - 5.62199 * x2 * x3 * x4 * 10 ** (-5) + ) + if cos_term: + h += 5.0 * np.cos(2.0 * np.pi * (x5 / 100.0)) - 2.0 + return h + + def f1(x1, x2, z1, z2, z3, z4, x5, cos_term): + c1 = z2 == 0 + c2 = z2 == 1 + c3 = z2 == 2 + + c4 = z3 == 0 + c5 = z3 == 1 + c6 = z3 == 2 + + y = ( + c4 + * ( + c1 * H(x1, x2, 20, 20, z3, z4, x5, cos_term) + + c2 * H(x1, x2, 50, 20, z3, z4, x5, cos_term) + + c3 * H(x1, x2, 80, 20, z3, z4, x5, cos_term) + ) + + c5 + * ( + c1 * H(x1, x2, 20, 50, z3, z4, x5, cos_term) + + c2 * H(x1, x2, 50, 50, z3, z4, x5, cos_term) + + c3 * H(x1, x2, 80, 50, z3, z4, x5, cos_term) + ) + + c6 + * ( + c1 * H(x1, x2, 20, 80, z3, z4, x5, cos_term) + + c2 * H(x1, x2, 50, 80, z3, z4, x5, cos_term) + + c3 * H(x1, x2, 80, 80, z3, z4, x5, cos_term) + ) + ) + return y + + def f2(x1, x2, x3, z2, z3, z4, x5, cos_term): + c1 = z2 == 0 + c2 = z2 == 1 + c3 = z2 == 2 + + y = ( + c1 * H(x1, x2, x3, 20, z3, z4, x5, cos_term) + + c2 * H(x1, x2, x3, 50, z3, z4, x5, cos_term) + + c3 * H(x1, x2, x3, 80, z3, z4, x5, cos_term) + ) + return y + + def f3(x1, x2, x4, z1, z3, z4, x5, cos_term): + c1 = z1 == 0 + c2 = z1 == 1 + c3 = z1 == 2 + + y = ( + c1 * H(x1, x2, 20, x4, z3, z4, x5, cos_term) + + c2 * H(x1, x2, 50, x4, z3, z4, x5, cos_term) + + c3 * H(x1, x2, 80, x4, z3, z4, x5, cos_term) + ) + return y + + def f_hv(X): + y = [] + for x in X: + if x[0] == 0: + y.append( + f1(x[2], x[3], 
x[7], x[8], x[9], x[10], x[6], cos_term=x[1]) + ) + elif x[0] == 1: + y.append( + f2(x[2], x[3], x[4], x[8], x[9], x[10], x[6], cos_term=x[1]) + ) + elif x[0] == 2: + y.append( + f3(x[2], x[3], x[5], x[7], x[9], x[10], x[6], cos_term=x[1]) + ) + elif x[0] == 3: + y.append( + H(x[2], x[3], x[4], x[5], x[9], x[10], x[6], cos_term=x[1]) + ) + return np.array(y) + + xlimits = [ + ["6,7", "3,7", "4,6", "3,4"], # meta1 ord + [0, 1], # 0 + [0, 100], # 1 + [0, 100], # 2 + [0, 100], # 3 + [0, 100], # 4 + [0, 100], # 5 + [0, 2], # 6 + [0, 2], # 7 + [0, 2], # 8 + [0, 2], # 9 + ] + xroles = [ + XRole.META, + XRole.NEUTRAL, + XRole.NEUTRAL, + XRole.NEUTRAL, + XRole.DECREED, + XRole.DECREED, + XRole.NEUTRAL, + XRole.DECREED, + XRole.DECREED, + XRole.NEUTRAL, + XRole.NEUTRAL, + ] + # z or x, cos?; x1,x2, x3, x4, x5:cos, z1,z2; exp1,exp2 + + xtypes = [ + (XType.ENUM, 4), + XType.ORD, + XType.FLOAT, + XType.FLOAT, + XType.FLOAT, + XType.FLOAT, + XType.FLOAT, + XType.ORD, + XType.ORD, + XType.ORD, + XType.ORD, + ] + xspecs = XSpecs(xtypes=xtypes, xlimits=xlimits, xroles=xroles) + n_doe = 15 + sampling = MixedIntegerSamplingMethod( + LHS, xspecs, criterion="ese", random_state=42 + ) + Xt = sampling(n_doe) + Yt = f_hv(Xt) + + sm = MixedIntegerKrigingModel( + surrogate=KRG( + xspecs=xspecs, + categorical_kernel=MixIntKernelType.HOMO_HSPHERE, + theta0=[1e-2], + corr="abs_exp", + n_start=5, + ), + ) + sm.set_training_values(Xt, Yt) + sm.train() + y_s = sm.predict_values(Xt)[:, 0] + pred_RMSE = np.linalg.norm(y_s - Yt) / len(Yt) + + y_sv = sm.predict_variances(Xt)[:, 0] + var_RMSE = np.linalg.norm(y_sv) / len(Yt) + self.assertTrue(pred_RMSE < 1e-7) + print("Pred_RMSE", pred_RMSE) + self.assertTrue(var_RMSE < 1e-7) + self.assertTrue( + np.linalg.norm( + sm.predict_values( + np.array( + [ + [0.0, 1.0, 64.0, 4.0, 56.0, 37.0, 35.0, 1.0, 2.0, 1.0, 1.0], + [1.0, 0.0, 31.0, 92.0, 24.0, 3.0, 17.0, 1.0, 2.0, 1.0, 1.0], + [2.0, 1.0, 28.0, 60.0, 77.0, 66.0, 9.0, 0.0, 1.0, 1.0, 1.0], + [ + 3.0, + 1.0, + 50.0, + 40.0, + 99.0, + 35.0, + 51.0, + 2.0, + 1.0, + 1.0, + 2.0, + ], + ] + ) + )[:, 0] + - sm.predict_values( + np.array( + [ + [0.0, 1.0, 64.0, 4.0, 6.0, 7.0, 35.0, 1.0, 2.0, 1.0, 1.0], + [ + 1.0, + 0.0, + 31.0, + 92.0, + 24.0, + 30.0, + 17.0, + 0.0, + 2.0, + 1.0, + 1.0, + ], + [2.0, 1.0, 28.0, 60.0, 7.0, 66.0, 9.0, 0.0, 2.0, 1.0, 1.0], + [ + 3.0, + 1.0, + 50.0, + 40.0, + 99.0, + 35.0, + 51.0, + 0.0, + 0.0, + 1.0, + 2.0, + ], + ] + ) + )[:, 0] + ) + < 1e-8 + ) + self.assertTrue( + np.linalg.norm( + sm.predict_values( + np.array( + [[1.0, 0.0, 31.0, 92.0, 24.0, 3.0, 17.0, 1.0, 2.0, 1.0, 1.0]] + ) + ) + - sm.predict_values( + np.array( + [[1.0, 0.0, 31.0, 92.0, 24.0, 3.0, 17.0, 1.0, 1.0, 1.0, 1.0]] + ) + ) + ) + > 1e-8 + ) + + def run_hierarchical_variables_Goldstein(self): + import numpy as np + from smt.utils.kriging import XSpecs + from smt.applications.mixed_integer import ( + MixedIntegerContext, + MixedIntegerSamplingMethod, + MixedIntegerKrigingModel, + ) + from smt.sampling_methods import LHS + from smt.surrogate_models import ( + KRG, + KPLS, + QP, + XType, + XRole, + MixIntKernelType, + ) + + def f_hv(X): + import numpy as np + + def H(x1, x2, x3, x4, z3, z4, x5, cos_term): + import numpy as np + + h = ( + 53.3108 + + 0.184901 * x1 + - 5.02914 * x1**3 * 10 ** (-6) + + 7.72522 * x1**z3 * 10 ** (-8) + - 0.0870775 * x2 + - 0.106959 * x3 + + 7.98772 * x3**z4 * 10 ** (-6) + + 0.00242482 * x4 + + 1.32851 * x4**3 * 10 ** (-6) + - 0.00146393 * x1 * x2 + - 0.00301588 * x1 * x3 + - 0.00272291 * x1 * x4 + + 0.0017004 * x2 * 
x3 + + 0.0038428 * x2 * x4 + - 0.000198969 * x3 * x4 + + 1.86025 * x1 * x2 * x3 * 10 ** (-5) + - 1.88719 * x1 * x2 * x4 * 10 ** (-6) + + 2.50923 * x1 * x3 * x4 * 10 ** (-5) + - 5.62199 * x2 * x3 * x4 * 10 ** (-5) + ) + if cos_term: + h += 5.0 * np.cos(2.0 * np.pi * (x5 / 100.0)) - 2.0 + return h + + def f1(x1, x2, z1, z2, z3, z4, x5, cos_term): + c1 = z2 == 0 + c2 = z2 == 1 + c3 = z2 == 2 + + c4 = z3 == 0 + c5 = z3 == 1 + c6 = z3 == 2 + + y = ( + c4 + * ( + c1 * H(x1, x2, 20, 20, z3, z4, x5, cos_term) + + c2 * H(x1, x2, 50, 20, z3, z4, x5, cos_term) + + c3 * H(x1, x2, 80, 20, z3, z4, x5, cos_term) + ) + + c5 + * ( + c1 * H(x1, x2, 20, 50, z3, z4, x5, cos_term) + + c2 * H(x1, x2, 50, 50, z3, z4, x5, cos_term) + + c3 * H(x1, x2, 80, 50, z3, z4, x5, cos_term) + ) + + c6 + * ( + c1 * H(x1, x2, 20, 80, z3, z4, x5, cos_term) + + c2 * H(x1, x2, 50, 80, z3, z4, x5, cos_term) + + c3 * H(x1, x2, 80, 80, z3, z4, x5, cos_term) + ) + ) + return y + + def f2(x1, x2, x3, z2, z3, z4, x5, cos_term): + c1 = z2 == 0 + c2 = z2 == 1 + c3 = z2 == 2 + + y = ( + c1 * H(x1, x2, x3, 20, z3, z4, x5, cos_term) + + c2 * H(x1, x2, x3, 50, z3, z4, x5, cos_term) + + c3 * H(x1, x2, x3, 80, z3, z4, x5, cos_term) + ) + return y + + def f3(x1, x2, x4, z1, z3, z4, x5, cos_term): + c1 = z1 == 0 + c2 = z1 == 1 + c3 = z1 == 2 + + y = ( + c1 * H(x1, x2, 20, x4, z3, z4, x5, cos_term) + + c2 * H(x1, x2, 50, x4, z3, z4, x5, cos_term) + + c3 * H(x1, x2, 80, x4, z3, z4, x5, cos_term) + ) + return y + + y = [] + for x in X: + if x[0] == 0: + y.append( + f1(x[2], x[3], x[7], x[8], x[9], x[10], x[6], cos_term=x[1]) + ) + elif x[0] == 1: + y.append( + f2(x[2], x[3], x[4], x[8], x[9], x[10], x[6], cos_term=x[1]) + ) + elif x[0] == 2: + y.append( + f3(x[2], x[3], x[5], x[7], x[9], x[10], x[6], cos_term=x[1]) + ) + elif x[0] == 3: + y.append( + H(x[2], x[3], x[4], x[5], x[9], x[10], x[6], cos_term=x[1]) + ) + return np.array(y) + + xlimits = [ + ["6,7", "3,7", "4,6", "3,4"], # meta1 ord + [0, 1], # 0 + [0, 100], # 1 + [0, 100], # 2 + [0, 100], # 3 + [0, 100], # 4 + [0, 100], # 5 + [0, 2], # 6 + [0, 2], # 7 + [0, 2], # 8 + [0, 2], # 9 + ] + xroles = [ + XRole.META, + XRole.NEUTRAL, + XRole.NEUTRAL, + XRole.NEUTRAL, + XRole.DECREED, + XRole.DECREED, + XRole.NEUTRAL, + XRole.DECREED, + XRole.DECREED, + XRole.NEUTRAL, + XRole.NEUTRAL, + ] + # z or x, cos?; x1,x2, x3, x4, x5:cos, z1,z2; exp1,exp2 + + xtypes = [ + (XType.ENUM, 4), + XType.ORD, + XType.FLOAT, + XType.FLOAT, + XType.FLOAT, + XType.FLOAT, + XType.FLOAT, + XType.ORD, + XType.ORD, + XType.ORD, + XType.ORD, + ] + xspecs = XSpecs(xtypes=xtypes, xlimits=xlimits, xroles=xroles) + n_doe = 15 + sampling = MixedIntegerSamplingMethod( + LHS, xspecs, criterion="ese", random_state=42 + ) + Xt = sampling(n_doe) + Yt = f_hv(Xt) + + sm = MixedIntegerKrigingModel( + surrogate=KRG( + xspecs=xspecs, + categorical_kernel=MixIntKernelType.HOMO_HSPHERE, + theta0=[1e-2], + corr="abs_exp", + n_start=5, + ), + ) + sm.set_training_values(Xt, Yt) + sm.train() + y_s = sm.predict_values(Xt)[:, 0] + pred_RMSE = np.linalg.norm(y_s - Yt) / len(Yt) + + y_sv = sm.predict_variances(Xt)[:, 0] + var_RMSE = np.linalg.norm(y_sv) / len(Yt) + + def test_hierarchical_variables_NN(self): + def f_neu(x1, x2, x3, x4): + if x4 == 0: + return 2 * x1 + x2 - 0.5 * x3 + if x4 == 1: + return -x1 + 2 * x2 - 0.5 * x3 + if x4 == 2: + return -x1 + x2 + 0.5 * x3 + + def f1(x1, x2, x3, x4, x5): + return f_neu(x1, x2, x3, x4) + x5**2 + + def f2(x1, x2, x3, x4, x5, x6): + return f_neu(x1, x2, x3, x4) + (x5**2) + 0.3 * x6 + + def f3(x1, x2, 
x3, x4, x5, x6, x7): + return f_neu(x1, x2, x3, x4) + (x5**2) + 0.3 * x6 - 0.1 * x7**3 + + def f(X): + y = [] + for x in X: + if x[0] == 1: + y.append(f1(x[1], x[2], x[3], x[4], x[5])) + elif x[0] == 2: + y.append(f2(x[1], x[2], x[3], x[4], x[5], x[6])) + elif x[0] == 3: + y.append(f3(x[1], x[2], x[3], x[4], x[5], x[6], x[7])) + return np.array(y) + + xlimits = [ + [1, 3], # meta ord + [-5, -2], + [-5, -1], + ["8", "16", "32", "64", "128", "256"], + ["ReLU", "SELU", "ISRLU"], + [0.0, 5.0], # decreed m=1 + [0.0, 5.0], # decreed m=2 + [0.0, 5.0], # decreed m=3 + ] + xtypes = [ + XType.ORD, + XType.FLOAT, + XType.FLOAT, + XType.ORD, + (XType.ENUM, 3), + XType.ORD, + XType.ORD, + XType.ORD, + ] + xroles = [ + XRole.META, + XRole.NEUTRAL, + XRole.NEUTRAL, + XRole.NEUTRAL, + XRole.NEUTRAL, + XRole.DECREED, + XRole.DECREED, + XRole.DECREED, + ] + xspecs = XSpecs(xtypes=xtypes, xlimits=xlimits, xroles=xroles) + n_doe = 100 + + xspecs_samp = XSpecs(xtypes=xtypes[1:], xlimits=xlimits[1:]) + + sampling = MixedIntegerSamplingMethod( + LHS, xspecs_samp, criterion="ese", random_state=42 + ) + x_cont = sampling(3 * n_doe) + + xdoe1 = np.zeros((n_doe, 6)) + x_cont2 = x_cont[:n_doe, :5] + xdoe1[:, 0] = np.ones(n_doe) + xdoe1[:, 1:] = x_cont2 + ydoe1 = f(xdoe1) + + xdoe1 = np.zeros((n_doe, 8)) + xdoe1[:, 0] = np.ones(n_doe) + xdoe1[:, 1:6] = x_cont2 + + xdoe2 = np.zeros((n_doe, 7)) + x_cont2 = x_cont[n_doe : 2 * n_doe, :6] + xdoe2[:, 0] = 2 * np.ones(n_doe) + xdoe2[:, 1:7] = x_cont2 + ydoe2 = f(xdoe2) + + xdoe2 = np.zeros((n_doe, 8)) + xdoe2[:, 0] = 2 * np.ones(n_doe) + xdoe2[:, 1:7] = x_cont2 + + xdoe3 = np.zeros((n_doe, 8)) + xdoe3[:, 0] = 3 * np.ones(n_doe) + xdoe3[:, 1:] = x_cont[2 * n_doe :, :] + ydoe3 = f(xdoe3) + + Xt = np.concatenate((xdoe1, xdoe2, xdoe3), axis=0) + Yt = np.concatenate((ydoe1, ydoe2, ydoe3), axis=0) + sm = MixedIntegerKrigingModel( + surrogate=KRG( + xspecs=xspecs, + categorical_kernel=MixIntKernelType.HOMO_HSPHERE, + theta0=[1e-2], + corr="abs_exp", + n_start=5, + ), + ) + sm.set_training_values(Xt, Yt) + sm.train() + y_s = sm.predict_values(Xt)[:, 0] + pred_RMSE = np.linalg.norm(y_s - Yt) / len(Yt) + + y_sv = sm.predict_variances(Xt)[:, 0] + var_RMSE = np.linalg.norm(y_sv) / len(Yt) + self.assertTrue(pred_RMSE < 1e-7) + print("Pred_RMSE", pred_RMSE) + self.assertTrue(var_RMSE < 1e-7) + self.assertTrue( + np.linalg.norm( + sm.predict_values( + np.array( + [ + [1, -1, -2, 8, 0, 2, 0, 0], + [2, -1, -2, 16, 1, 2, 1, 0], + [3, -1, -2, 32, 2, 2, 1, -2], + ] + ) + )[:, 0] + - sm.predict_values( + np.array( + [ + [1, -1, -2, 8, 0, 2, 10, 10], + [2, -1, -2, 16, 1, 2, 1, 10], + [3, -1, -2, 32, 2, 2, 1, -2], + ] + ) + )[:, 0] + ) + < 1e-8 + ) + self.assertTrue( + np.linalg.norm( + sm.predict_values(np.array([[1, -1, -2, 8, 0, 2, 0, 0]])) + - sm.predict_values(np.array([[1, -1, -2, 8, 0, 12, 10, 10]])) + ) + > 1e-8 + ) + + def test_mixed_gower_2D(self): + xt = np.array([[0, 5], [2, -1], [4, 0.5]]) + yt = np.array([[0.0], [1.0], [1.5]]) + xlimits = [["0.0", "1.0", " 2.0", "3.0", "4.0"], [-5, 5]] + xtypes = [(XType.ENUM, 5), XType.FLOAT] + xspecs = XSpecs(xtypes=xtypes, xlimits=xlimits) + + # Surrogate + sm = MixedIntegerKrigingModel( + surrogate=KRG( + xspecs=xspecs, + theta0=[1e-2], + corr="abs_exp", + categorical_kernel=MixIntKernelType.GOWER, + ), + ) + sm.set_training_values(xt, yt) + sm.train() + + # DOE for validation + x = np.linspace(0, 4, 5) + x2 = np.linspace(-5, 5, 21) + x1 = [] + for element in itertools.product(x, x2): + x1.append(np.array(element)) + x_pred = np.array(x1) + + y = 
sm.predict_values(x_pred)
+        yvar = sm.predict_variances(x_pred)
+
+        # predictions are correct on known points
+        self.assertTrue(np.abs(np.sum(np.array([y[20], y[50], y[95]]) - yt)) < 1e-6)
+        self.assertTrue(np.abs(np.sum(np.array([yvar[20], yvar[50], yvar[95]]))) < 1e-6)
+
+        self.assertEqual(np.shape(y), (105, 1))
+
+    def test_mixed_homo_gaussian_2D(self):
+        xt = np.array([[0, 5], [2, -1], [4, 0.5]])
+        yt = np.array([[0.0], [1.0], [1.5]])
+        xlimits = [["0.0", "1.0", " 2.0", "3.0", "4.0"], [-5, 5]]
+        xtypes = [(XType.ENUM, 5), XType.FLOAT]
+        xspecs = XSpecs(xtypes=xtypes, xlimits=xlimits)
+        # Surrogate
+        sm = MixedIntegerKrigingModel(
+            surrogate=KRG(
+                xspecs=xspecs,
+                theta0=[1e-2],
+                corr="abs_exp",
+                categorical_kernel=MixIntKernelType.EXP_HOMO_HSPHERE,
+            ),
+        )
+        sm.set_training_values(xt, yt)
+        sm.train()
+
+        # DOE for validation
+        x = np.linspace(0, 4, 5)
+        x2 = np.linspace(-5, 5, 21)
+        x1 = []
+        for element in itertools.product(x, x2):
+            x1.append(np.array(element))
+        x_pred = np.array(x1)
+
+        y = sm.predict_values(x_pred)
+        yvar = sm.predict_variances(x_pred)
+
+        # predictions are correct on known points
+        self.assertTrue(np.abs(np.sum(np.array([y[20], y[50], y[95]]) - yt)) < 1e-6)
+        self.assertTrue(np.abs(np.sum(np.array([yvar[20], yvar[50], yvar[95]]))) < 1e-6)
+
+        self.assertEqual(np.shape(y), (105, 1))
+
+    def test_mixed_homo_hyp_2D(self):
+        xt = np.array([[0, 5], [2, -1], [4, 0.5]])
+        yt = np.array([[0.0], [1.0], [1.5]])
+        xlimits = [["0.0", "1.0", " 2.0", "3.0", "4.0"], [-5, 5]]
+        xtypes = [(XType.ENUM, 5), XType.FLOAT]
+        xspecs = XSpecs(xtypes=xtypes, xlimits=xlimits)
+        # Surrogate
+        sm = MixedIntegerKrigingModel(
+            surrogate=KRG(
+                xspecs=xspecs,
+                theta0=[1e-2],
+                categorical_kernel=MixIntKernelType.HOMO_HSPHERE,
+                corr="abs_exp",
+            ),
+        )
+        sm.set_training_values(xt, yt)
+        sm.train()
+
+        # DOE for validation
+        x = np.linspace(0, 4, 5)
+        x2 = np.linspace(-5, 5, 21)
+        x1 = []
+        for element in itertools.product(x, x2):
+            x1.append(np.array(element))
+        x_pred = np.array(x1)
+
+        y = sm.predict_values(x_pred)
+        yvar = sm.predict_variances(x_pred)
+
+        # predictions are correct on known points
+        self.assertTrue(np.abs(np.sum(np.array([y[20], y[50], y[95]]) - yt)) < 1e-6)
+        self.assertTrue(np.abs(np.sum(np.array([yvar[20], yvar[50], yvar[95]]))) < 1e-6)
+
+        self.assertEqual(np.shape(y), (105, 1))
+
+    def test_mixed_homo_gaussian_3D_PLS(self):
+        xt = np.array([[0.5, 0, 5], [2, 3, 4], [5, 2, -1], [-2, 4, 0.5]])
+        yt = np.array([[0.0], [3], [1.0], [1.5]])
+        xlimits = [[-5, 5], ["0.0", "1.0", " 2.0", "3.0", "4.0"], [-5, 5]]
+        xtypes = [XType.FLOAT, (XType.ENUM, 5), XType.FLOAT]
+        xspecs = XSpecs(xtypes=xtypes, xlimits=xlimits)
+        # Surrogate
+        sm = KPLS(
+            xspecs=xspecs,
+            theta0=[1e-2],
+            n_comp=1,
+            categorical_kernel=MixIntKernelType.EXP_HOMO_HSPHERE,
+            cat_kernel_comps=[3],
+            corr="squar_exp",
+        )
+        sm.set_training_values(xt, yt)
+        sm.train()
+
+        # DOE for validation
+        x = np.linspace(0, 4, 5)
+        x2 = np.linspace(-5, 5, 21)
+        x1 = []
+        for element in itertools.product(x2, x, x2):
+            x1.append(np.array(element))
+        x_pred = np.array(x1)
+
+        y = sm.predict_values(x_pred)
+        yvar = sm.predict_variances(x_pred)
+
+        self.assertTrue(np.abs(np.sum(np.array(sm.predict_values(xt) - yt))) < 1e-6)
+        self.assertTrue(np.abs(np.sum(np.array(sm.predict_variances(xt) - 0))) < 1e-6)
+
+    def test_mixed_homo_gaussian_3D_PLS_cate(self):
+        xt = np.array([[0.5, 0, 5], [2, 3, 4], [5, 2, -1], [-2, 4, 0.5]])
+        yt = np.array([[0.0], [3], [1.0], [1.5]])
+        xlimits = [[-5, 5], ["0.0", "1.0", " 2.0", "3.0", "4.0"], [-5, 5]]
+        xtypes = [XType.FLOAT, (XType.ENUM, 5), XType.FLOAT]
+        xspecs = XSpecs(xtypes=xtypes, xlimits=xlimits)
+        # Surrogate
+        sm = KPLS(
+            xspecs=xspecs,
+            theta0=[1e-2],
+            n_comp=2,
+            corr="abs_exp",
+            cat_kernel_comps=[3],
+            categorical_kernel=MixIntKernelType.EXP_HOMO_HSPHERE,
+        )
+
+        sm.set_training_values(xt, yt)
+        sm.train()
+
+        # DOE for validation
+        x = np.linspace(0, 4, 5)
+        x2 = np.linspace(-5, 5, 21)
+        x1 = []
+        for element in itertools.product(x2, x, x2):
+            x1.append(np.array(element))
+        x_pred = np.array(x1)
+
+        y = sm.predict_values(x_pred)
+        yvar = sm.predict_variances(x_pred)
+
+        self.assertTrue(np.abs(np.sum(np.array(sm.predict_values(xt) - yt))) < 1e-6)
+        self.assertTrue(np.abs(np.sum(np.array(sm.predict_variances(xt) - 0))) < 1e-6)
+
+    def test_mixed_homo_hyp_3D_PLS_cate(self):
+        xt = np.array([[0.5, 0, 5], [2, 3, 4], [5, 2, -1], [-2, 4, 0.5]])
+        yt = np.array([[0.0], [3], [1.0], [1.5]])
+        xlimits = [[-5, 5], ["0.0", "1.0", " 2.0", "3.0", "4.0"], [-5, 5]]
+        xtypes = [XType.FLOAT, (XType.ENUM, 5), XType.FLOAT]
+        xspecs = XSpecs(xtypes=xtypes, xlimits=xlimits)
+        # Surrogate
+        sm = MixedIntegerKrigingModel(
+            surrogate=KPLS(
+                xspecs=xspecs,
+                theta0=[1e-2],
+                n_comp=1,
+                categorical_kernel=MixIntKernelType.HOMO_HSPHERE,
+                cat_kernel_comps=[3],
+                corr="squar_exp",
+            ),
+        )
+        sm.set_training_values(xt, yt)
+        sm.train()
+
+        # DOE for validation
+        x = np.linspace(0, 4, 5)
+        x2 = np.linspace(-5, 5, 21)
+        x1 = []
+        for element in itertools.product(x2, x, x2):
+            x1.append(np.array(element))
+        x_pred = np.array(x1)
+
+        y = sm.predict_values(x_pred)
+        yvar = sm.predict_variances(x_pred)
+
+        self.assertTrue(np.abs(np.sum(np.array(sm.predict_values(xt) - yt))) < 1e-6)
+        self.assertTrue(np.abs(np.sum(np.array(sm.predict_variances(xt) - 0))) < 1e-6)
+
+    def test_mixed_homo_gaussian_3D_ord_cate(self):
+        xt = np.array([[0.5, 0, 5], [2, 3, 4], [5, 2, -1], [-2, 4, 0.5]])
+        yt = np.array([[0.0], [3], [1.0], [1.5]])
+        xlimits = [
+            ["0.0", "1.0", " 2.0", "3.0", "4.0"],
+            [-5, 5],
+            ["0.0", "1.0", " 2.0", "3.0"],
+        ]
+        xtypes = [(XType.ENUM, 5), XType.ORD, (XType.ENUM, 4)]
+        xspecs = XSpecs(xtypes=xtypes, xlimits=xlimits)
+        # Surrogate
+        sm = MixedIntegerKrigingModel(
+            surrogate=KPLS(
+                xspecs=xspecs,
+                theta0=[1e-2],
+                n_comp=1,
+                categorical_kernel=MixIntKernelType.EXP_HOMO_HSPHERE,
+                cat_kernel_comps=[3, 2],
+                corr="squar_exp",
+            ),
+        )
+        sm.set_training_values(xt, yt)
+        sm.train()
+
+        # DOE for validation
+        x = np.linspace(0, 4, 5)
+        x2 = np.linspace(-5, 5, 21)
+        x3 = np.linspace(0, 3, 4)
+        x1 = []
+        for element in itertools.product(x, x2, x3):
+            x1.append(np.array(element))
+        x_pred = np.array(x1)
+
+        y = sm.predict_values(x_pred)
+        yvar = sm.predict_variances(x_pred)
+
+        # predictions are correct on known points
+        self.assertTrue(np.abs(np.sum(np.array(sm.predict_values(xt) - yt))) < 1e-6)
+        self.assertTrue(np.abs(np.sum(np.array(sm.predict_variances(xt) - 0))) < 1e-6)
+
+    def test_mixed_gower_3D(self):
+        xtypes = [XType.FLOAT, XType.ORD, XType.ORD]
+        xlimits = [[-10, 10], [-10, 10], [-10, 10]]
+        xspecs = XSpecs(xtypes=xtypes, xlimits=xlimits)
+        mixint = MixedIntegerContext(xspecs=xspecs)
+
+        sm = mixint.build_kriging_model(
+            KRG(categorical_kernel=MixIntKernelType.GOWER, print_prediction=False)
+        )
+        sampling = mixint.build_sampling_method(LHS, criterion="m")
+
+        fun = Sphere(ndim=3)
+        xt = sampling(10)
+        yt = fun(xt)
+        sm.set_training_values(xt, yt)
+        sm.train()
+        eq_check = True
+        for i in 
range(xt.shape[0]): + if abs(float(xt[i, :][1]) - int(float(xt[i, :][1]))) > 10e-8: + eq_check = False + self.assertTrue(eq_check) + + def test_examples(self): + self.run_mixed_gower_example() + self.run_mixed_homo_gaussian_example() + self.run_mixed_homo_hyp_example() + self.run_hierarchical_variables_Goldstein() + + def run_mixed_gower_example(self): + import numpy as np + import matplotlib.pyplot as plt + + from smt.surrogate_models import KRG, XType, XSpecs, MixIntKernelType + from smt.applications.mixed_integer import MixedIntegerKrigingModel + + xt1 = np.array([[0, 0.0], [0, 2.0], [0, 4.0]]) + xt2 = np.array([[1, 0.0], [1, 2.0], [1, 3.0]]) + xt3 = np.array([[2, 1.0], [2, 2.0], [2, 4.0]]) + + xt = np.concatenate((xt1, xt2, xt3), axis=0) + xt[:, 1] = xt[:, 1].astype(np.float64) + yt1 = np.array([0.0, 9.0, 16.0]) + yt2 = np.array([0.0, -4, -13.0]) + yt3 = np.array([-10, 3, 11.0]) + + yt = np.concatenate((yt1, yt2, yt3), axis=0) + xlimits = [["Blue", "Red", "Green"], [0.0, 4.0]] + xtypes = [(XType.ENUM, 3), XType.FLOAT] + xspecs = XSpecs(xtypes=xtypes, xlimits=xlimits) + # Surrogate + sm = MixedIntegerKrigingModel( + surrogate=KRG( + xspecs=xspecs, + categorical_kernel=MixIntKernelType.GOWER, + theta0=[1e-1], + corr="squar_exp", + n_start=20, + ), + ) + sm.set_training_values(xt, yt) + sm.train() + + # DOE for validation + n = 100 + x_cat1 = [] + x_cat2 = [] + x_cat3 = [] + + for i in range(n): + x_cat1.append(0) + x_cat2.append(1) + x_cat3.append(2) + + x_cont = np.linspace(0.0, 4.0, n) + x1 = np.concatenate( + (np.asarray(x_cat1).reshape(-1, 1), x_cont.reshape(-1, 1)), axis=1 + ) + x2 = np.concatenate( + (np.asarray(x_cat2).reshape(-1, 1), x_cont.reshape(-1, 1)), axis=1 + ) + x3 = np.concatenate( + (np.asarray(x_cat3).reshape(-1, 1), x_cont.reshape(-1, 1)), axis=1 + ) + + y1 = sm.predict_values(x1) + y2 = sm.predict_values(x2) + y3 = sm.predict_values(x3) + + # estimated variance + s2_1 = sm.predict_variances(x1) + s2_2 = sm.predict_variances(x2) + s2_3 = sm.predict_variances(x3) + + fig, axs = plt.subplots(3, figsize=(8, 6)) + + axs[0].plot(xt1[:, 1].astype(np.float64), yt1, "o", linestyle="None") + axs[0].plot(x_cont, y1, color="Blue") + axs[0].fill_between( + np.ravel(x_cont), + np.ravel(y1 - 3 * np.sqrt(s2_1)), + np.ravel(y1 + 3 * np.sqrt(s2_1)), + color="lightgrey", + ) + axs[0].set_xlabel("x") + axs[0].set_ylabel("y") + axs[0].legend( + ["Training data", "Prediction", "Confidence Interval 99%"], + loc="upper left", + bbox_to_anchor=[0, 1], + ) + axs[1].plot( + xt2[:, 1].astype(np.float64), yt2, marker="o", color="r", linestyle="None" + ) + axs[1].plot(x_cont, y2, color="Red") + axs[1].fill_between( + np.ravel(x_cont), + np.ravel(y2 - 3 * np.sqrt(s2_2)), + np.ravel(y2 + 3 * np.sqrt(s2_2)), + color="lightgrey", + ) + axs[1].set_xlabel("x") + axs[1].set_ylabel("y") + axs[1].legend( + ["Training data", "Prediction", "Confidence Interval 99%"], + loc="upper left", + bbox_to_anchor=[0, 1], + ) + axs[2].plot( + xt3[:, 1].astype(np.float64), yt3, marker="o", color="r", linestyle="None" + ) + axs[2].plot(x_cont, y3, color="Green") + axs[2].fill_between( + np.ravel(x_cont), + np.ravel(y3 - 3 * np.sqrt(s2_3)), + np.ravel(y3 + 3 * np.sqrt(s2_3)), + color="lightgrey", + ) + axs[2].set_xlabel("x") + axs[2].set_ylabel("y") + axs[2].legend( + ["Training data", "Prediction", "Confidence Interval 99%"], + loc="upper left", + bbox_to_anchor=[0, 1], + ) + plt.tight_layout() + plt.show() + + def run_mixed_homo_gaussian_example(self): + import numpy as np + import matplotlib.pyplot as plt + + from 
smt.surrogate_models import KRG, XType, XSpecs, MixIntKernelType + from smt.applications.mixed_integer import MixedIntegerKrigingModel + + xt1 = np.array([[0, 0.0], [0, 2.0], [0, 4.0]]) + xt2 = np.array([[1, 0.0], [1, 2.0], [1, 3.0]]) + xt3 = np.array([[2, 1.0], [2, 2.0], [2, 4.0]]) + + xt = np.concatenate((xt1, xt2, xt3), axis=0) + xt[:, 1] = xt[:, 1].astype(np.float64) + yt1 = np.array([0.0, 9.0, 16.0]) + yt2 = np.array([0.0, -4, -13.0]) + yt3 = np.array([-10, 3, 11.0]) + + yt = np.concatenate((yt1, yt2, yt3), axis=0) + xlimits = [["Blue", "Red", "Green"], [0.0, 4.0]] + xtypes = [(XType.ENUM, 3), XType.FLOAT] + xspecs = XSpecs(xtypes=xtypes, xlimits=xlimits) + # Surrogate + sm = MixedIntegerKrigingModel( + surrogate=KRG( + xspecs=xspecs, + theta0=[1e-1], + corr="squar_exp", + n_start=20, + categorical_kernel=MixIntKernelType.EXP_HOMO_HSPHERE, + ), + ) + sm.set_training_values(xt, yt) + sm.train() + + # DOE for validation + n = 100 + x_cat1 = [] + x_cat2 = [] + x_cat3 = [] + + for i in range(n): + x_cat1.append(0) + x_cat2.append(1) + x_cat3.append(2) + + x_cont = np.linspace(0.0, 4.0, n) + x1 = np.concatenate( + (np.asarray(x_cat1).reshape(-1, 1), x_cont.reshape(-1, 1)), axis=1 + ) + x2 = np.concatenate( + (np.asarray(x_cat2).reshape(-1, 1), x_cont.reshape(-1, 1)), axis=1 + ) + x3 = np.concatenate( + (np.asarray(x_cat3).reshape(-1, 1), x_cont.reshape(-1, 1)), axis=1 + ) + + y1 = sm.predict_values(x1) + y2 = sm.predict_values(x2) + y3 = sm.predict_values(x3) + + # estimated variance + s2_1 = sm.predict_variances(x1) + s2_2 = sm.predict_variances(x2) + s2_3 = sm.predict_variances(x3) + + fig, axs = plt.subplots(3, figsize=(8, 6)) + + axs[0].plot(xt1[:, 1].astype(np.float64), yt1, "o", linestyle="None") + axs[0].plot(x_cont, y1, color="Blue") + axs[0].fill_between( + np.ravel(x_cont), + np.ravel(y1 - 3 * np.sqrt(s2_1)), + np.ravel(y1 + 3 * np.sqrt(s2_1)), + color="lightgrey", + ) + axs[0].set_xlabel("x") + axs[0].set_ylabel("y") + axs[0].legend( + ["Training data", "Prediction", "Confidence Interval 99%"], + loc="upper left", + bbox_to_anchor=[0, 1], + ) + axs[1].plot( + xt2[:, 1].astype(np.float64), yt2, marker="o", color="r", linestyle="None" + ) + axs[1].plot(x_cont, y2, color="Red") + axs[1].fill_between( + np.ravel(x_cont), + np.ravel(y2 - 3 * np.sqrt(s2_2)), + np.ravel(y2 + 3 * np.sqrt(s2_2)), + color="lightgrey", + ) + axs[1].set_xlabel("x") + axs[1].set_ylabel("y") + axs[1].legend( + ["Training data", "Prediction", "Confidence Interval 99%"], + loc="upper left", + bbox_to_anchor=[0, 1], + ) + axs[2].plot( + xt3[:, 1].astype(np.float64), yt3, marker="o", color="r", linestyle="None" + ) + axs[2].plot(x_cont, y3, color="Green") + axs[2].fill_between( + np.ravel(x_cont), + np.ravel(y3 - 3 * np.sqrt(s2_3)), + np.ravel(y3 + 3 * np.sqrt(s2_3)), + color="lightgrey", + ) + axs[2].set_xlabel("x") + axs[2].set_ylabel("y") + axs[2].legend( + ["Training data", "Prediction", "Confidence Interval 99%"], + loc="upper left", + bbox_to_anchor=[0, 1], + ) + plt.tight_layout() + plt.show() + + def run_mixed_homo_hyp_example(self): + import numpy as np + import matplotlib.pyplot as plt + + from smt.surrogate_models import KRG, XType, XSpecs, MixIntKernelType + from smt.applications.mixed_integer import MixedIntegerKrigingModel + + xt1 = np.array([[0, 0.0], [0, 2.0], [0, 4.0]]) + xt2 = np.array([[1, 0.0], [1, 2.0], [1, 3.0]]) + xt3 = np.array([[2, 1.0], [2, 2.0], [2, 4.0]]) + + xt = np.concatenate((xt1, xt2, xt3), axis=0) + xt[:, 1] = xt[:, 1].astype(np.float64) + yt1 = np.array([0.0, 9.0, 16.0]) + yt2 = 
np.array([0.0, -4, -13.0]) + yt3 = np.array([-10, 3, 11.0]) + + yt = np.concatenate((yt1, yt2, yt3), axis=0) + xlimits = [["Blue", "Red", "Green"], [0.0, 4.0]] + xtypes = [(XType.ENUM, 3), XType.FLOAT] + xspecs = XSpecs(xtypes=xtypes, xlimits=xlimits) + # Surrogate + sm = MixedIntegerKrigingModel( + surrogate=KRG( + xspecs=xspecs, + categorical_kernel=MixIntKernelType.HOMO_HSPHERE, + theta0=[1e-1], + corr="squar_exp", + n_start=20, + ), + ) + sm.set_training_values(xt, yt) + sm.train() + + # DOE for validation + n = 100 + x_cat1 = [] + x_cat2 = [] + x_cat3 = [] + + for i in range(n): + x_cat1.append(0) + x_cat2.append(1) + x_cat3.append(2) + + x_cont = np.linspace(0.0, 4.0, n) + x1 = np.concatenate( + (np.asarray(x_cat1).reshape(-1, 1), x_cont.reshape(-1, 1)), axis=1 + ) + x2 = np.concatenate( + (np.asarray(x_cat2).reshape(-1, 1), x_cont.reshape(-1, 1)), axis=1 + ) + x3 = np.concatenate( + (np.asarray(x_cat3).reshape(-1, 1), x_cont.reshape(-1, 1)), axis=1 + ) + + y1 = sm.predict_values(x1) + y2 = sm.predict_values(x2) + y3 = sm.predict_values(x3) + + # estimated variance + s2_1 = sm.predict_variances(x1) + s2_2 = sm.predict_variances(x2) + s2_3 = sm.predict_variances(x3) + + fig, axs = plt.subplots(3, figsize=(8, 6)) + + axs[0].plot(xt1[:, 1].astype(np.float64), yt1, "o", linestyle="None") + axs[0].plot(x_cont, y1, color="Blue") + axs[0].fill_between( + np.ravel(x_cont), + np.ravel(y1 - 3 * np.sqrt(s2_1)), + np.ravel(y1 + 3 * np.sqrt(s2_1)), + color="lightgrey", + ) + axs[0].set_xlabel("x") + axs[0].set_ylabel("y") + axs[0].legend( + ["Training data", "Prediction", "Confidence Interval 99%"], + loc="upper left", + bbox_to_anchor=[0, 1], + ) + axs[1].plot( + xt2[:, 1].astype(np.float64), yt2, marker="o", color="r", linestyle="None" + ) + axs[1].plot(x_cont, y2, color="Red") + axs[1].fill_between( + np.ravel(x_cont), + np.ravel(y2 - 3 * np.sqrt(s2_2)), + np.ravel(y2 + 3 * np.sqrt(s2_2)), + color="lightgrey", + ) + axs[1].set_xlabel("x") + axs[1].set_ylabel("y") + axs[1].legend( + ["Training data", "Prediction", "Confidence Interval 99%"], + loc="upper left", + bbox_to_anchor=[0, 1], + ) + axs[2].plot( + xt3[:, 1].astype(np.float64), yt3, marker="o", color="r", linestyle="None" + ) + axs[2].plot(x_cont, y3, color="Green") + axs[2].fill_between( + np.ravel(x_cont), + np.ravel(y3 - 3 * np.sqrt(s2_3)), + np.ravel(y3 + 3 * np.sqrt(s2_3)), + color="lightgrey", + ) + axs[2].set_xlabel("x") + axs[2].set_ylabel("y") + axs[2].legend( + ["Training data", "Prediction", "Confidence Interval 99%"], + loc="upper left", + bbox_to_anchor=[0, 1], + ) + plt.tight_layout() + plt.show() + + +if __name__ == "__main__": + TestMixedInteger().run_mixed_integer_context_example() + unittest.main() diff --git a/smt/utils/kriging.py b/smt/utils/kriging.py index 653fcc92d..3e215b4b0 100644 --- a/smt/utils/kriging.py +++ b/smt/utils/kriging.py @@ -30,7 +30,7 @@ class XSpecs: _xlimits : np.array list of corresponding domain which depends on variables types - _xroles: np.array + _xroles : np.array list of mixed integer variables roles """
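
All of the new tests exercise one and the same workflow: declare the typed design space with ``XSpecs``, pick a categorical kernel from ``MixIntKernelType``, wrap the continuous surrogate in ``MixedIntegerKrigingModel`` (or build it through ``MixedIntegerContext``), then train and predict as usual. As a reading aid, the sketch below condenses that workflow from ``test_mixed_gower_2D``; it only reuses names introduced in this patch and is not itself part of the patch:

.. code-block:: python

    import itertools
    import numpy as np

    from smt.utils.kriging import XSpecs
    from smt.surrogate_models import KRG, XType, MixIntKernelType
    from smt.applications.mixed_integer import MixedIntegerKrigingModel

    # Typed design space: one 5-level categorical variable, one float in [-5, 5].
    xspecs = XSpecs(
        xtypes=[(XType.ENUM, 5), XType.FLOAT],
        xlimits=[["0.0", "1.0", "2.0", "3.0", "4.0"], [-5, 5]],
    )

    # Training points in folded form: (level index, continuous value).
    xt = np.array([[0, 5], [2, -1], [4, 0.5]])
    yt = np.array([[0.0], [1.0], [1.5]])

    # Wrap Kriging so the categorical dimension uses the Gower-distance kernel.
    sm = MixedIntegerKrigingModel(
        surrogate=KRG(
            xspecs=xspecs,
            theta0=[1e-2],
            corr="abs_exp",
            categorical_kernel=MixIntKernelType.GOWER,
        ),
    )
    sm.set_training_values(xt, yt)
    sm.train()

    # Predict on the full (level, continuous) grid: 5 * 21 = 105 points.
    x_pred = np.array(
        list(itertools.product(np.linspace(0, 4, 5), np.linspace(-5, 5, 21)))
    )
    y = sm.predict_values(x_pred)        # means, shape (105, 1)
    yvar = sm.predict_variances(x_pred)  # variances, ~0 at the training points

The hierarchical tests follow the same pattern, additionally passing ``xroles`` (``XRole.META``, ``XRole.DECREED``, ``XRole.NEUTRAL``) to ``XSpecs`` so that decreed variables are only active for the corresponding meta-variable value.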