From ea039d364e47865090043049c5b9c505f30c1326 Mon Sep 17 00:00:00 2001
From: Tiki Gonzalez
Date: Tue, 30 Dec 2025 13:34:20 -0700
Subject: [PATCH 1/3] completed local warning fixes.

---
 Numerics/Data/Interpolation/CubicSpline.cs | 5 +-
 .../Data/Paired Data/OrderedPairedData.cs | 26 ++--
 Numerics/Data/Paired Data/Ordinate.cs | 8 +-
 .../Data/Paired Data/ProbabilityOrdinate.cs | 4 +-
 .../Paired Data/UncertainOrderedPairedData.cs | 81 +++++++----
 .../Data/Paired Data/UncertainOrdinate.cs | 65 ++++++---
 Numerics/Data/Regression/LinearRegression.cs | 12 +-
 Numerics/Data/Statistics/Autocorrelation.cs | 24 ++--
 Numerics/Data/Statistics/Histogram.cs | 6 +-
 Numerics/Data/Statistics/HypothesisTests.cs | 1 +
 Numerics/Data/Statistics/PlottingPositions.cs | 2 +-
 Numerics/Data/Statistics/Probability.cs | 8 +-
 Numerics/Data/Statistics/Statistics.cs | 2 +-
 Numerics/Data/Time Series/Support/Series.cs | 27 ++--
 .../Time Series/Support/SeriesOrdinate.cs | 16 ++-
 .../Time Series/Support/TimeSeriesDownload.cs | 11 +-
 Numerics/Data/Time Series/TimeSeries.cs | 44 ++++--
 .../Bivariate Copulas/AMHCopula.cs | 2 +-
 .../Base/ArchimedeanCopula.cs | 2 +-
 .../Bivariate Copulas/Base/BivariateCopula.cs | 4 +-
 .../Bivariate Copulas/FrankCopula.cs | 2 +-
 .../Bivariate Copulas/NormalCopula.cs | 2 +-
 .../Multivariate/BivariateEmpirical.cs | 13 +-
 .../Multivariate/MultivariateNormal.cs | 48 +++++--
 .../Base/UnivariateDistributionBase.cs | 8 +-
 .../Base/UnivariateDistributionFactory.cs | 23 +++-
 .../Distributions/Univariate/Bernoulli.cs | 2 +-
 .../Univariate/BetaDistribution.cs | 2 +-
 Numerics/Distributions/Univariate/Binomial.cs | 2 +-
 Numerics/Distributions/Univariate/Cauchy.cs | 2 +-
 .../Distributions/Univariate/ChiSquared.cs | 2 +-
 .../Univariate/CompetingRisks.cs | 65 +++++----
 .../Distributions/Univariate/Deterministic.cs | 2 +-
 .../Univariate/EmpiricalDistribution.cs | 17 ++-
 .../Distributions/Univariate/Exponential.cs | 2 +-
 .../Univariate/GammaDistribution.cs | 2 +-
 .../Univariate/GeneralizedBeta.cs | 2 +-
 .../Univariate/GeneralizedExtremeValue.cs | 2 +-
 .../Univariate/GeneralizedLogistic.cs | 2 +-
 .../Univariate/GeneralizedNormal.cs | 2 +-
 .../Univariate/GeneralizedPareto.cs | 2 +-
 .../Distributions/Univariate/Geometric.cs | 2 +-
 Numerics/Distributions/Univariate/Gumbel.cs | 2 +-
 .../Univariate/InverseChiSquared.cs | 2 +-
 .../Distributions/Univariate/InverseGamma.cs | 2 +-
 .../Distributions/Univariate/KappaFour.cs | 2 +-
 .../Distributions/Univariate/KernelDensity.cs | 14 +-
 Numerics/Distributions/Univariate/LnNormal.cs | 2 +-
 .../Distributions/Univariate/LogNormal.cs | 2 +-
 .../Univariate/LogPearsonTypeIII.cs | 2 +-
 Numerics/Distributions/Univariate/Logistic.cs | 2 +-
 Numerics/Distributions/Univariate/Mixture.cs | 51 ++++---
 .../Distributions/Univariate/NoncentralT.cs | 2 +-
 Numerics/Distributions/Univariate/Normal.cs | 2 +-
 Numerics/Distributions/Univariate/Pareto.cs | 2 +-
 .../Univariate/PearsonTypeIII.cs | 4 +-
 Numerics/Distributions/Univariate/Pert.cs | 2 +-
 .../Univariate/PertPercentile.cs | 29 ++--
 .../Univariate/PertPercentileZ.cs | 29 ++--
 Numerics/Distributions/Univariate/Poisson.cs | 2 +-
 Numerics/Distributions/Univariate/Rayleigh.cs | 2 +-
 Numerics/Distributions/Univariate/StudentT.cs | 2 +-
 .../Distributions/Univariate/Triangular.cs | 2 +-
 .../Univariate/TruncatedDistribution.cs | 4 +-
 .../Univariate/TruncatedNormal.cs | 2 +-
 .../Uncertainty Analysis/BootstrapAnalysis.cs | 22 +--
 .../UncertaintyAnalysisResults.cs | 83 ++++++-----
 Numerics/Distributions/Univariate/Uniform.cs | 2 +-
.../Univariate/UniformDiscrete.cs | 2 +- Numerics/Distributions/Univariate/Weibull.cs | 2 +- Numerics/Functions/LinearFunction.cs | 2 +- Numerics/Functions/PowerFunction.cs | 2 +- Numerics/Functions/TabularFunction.cs | 2 +- .../Supervised/DecisionTree.cs | 2 +- .../Supervised/GeneralizedLinearModel.cs | 14 +- .../Supervised/KNearestNeighbors.cs | 4 +- .../Machine Learning/Supervised/NaiveBayes.cs | 8 +- .../Supervised/RandomForest.cs | 4 +- .../Machine Learning/Support/DecisionNode.cs | 4 +- .../Unsupervised/GaussianMixtureModel.cs | 4 +- .../Unsupervised/JenksNaturalBreaks.cs | 4 +- .../Differentiation/NumericalDerivative.cs | 12 +- .../Mathematics/Fourier Methods/Fourier.cs | 2 +- .../Integration/AdaptiveGuassKronrod.cs | 8 +- .../Integration/AdaptiveSimpsonsRule2D.cs | 4 +- Numerics/Mathematics/Integration/Miser.cs | 2 +- .../Integration/Support/Integrator.cs | 2 +- Numerics/Mathematics/Integration/Vegas.cs | 22 +-- .../Linear Algebra/Support/Matrix.cs | 10 +- .../Linear Algebra/Support/Vector.cs | 8 +- .../Optimization/Dynamic/Dijkstra.cs | 4 +- .../Optimization/Dynamic/Network.cs | 6 +- .../Mathematics/Optimization/Global/MLSL.cs | 8 +- .../Optimization/Global/MultiStart.cs | 4 +- .../Optimization/Global/ParticleSwarm.cs | 2 +- .../Mathematics/Optimization/Local/ADAM.cs | 2 +- .../Mathematics/Optimization/Local/BFGS.cs | 2 +- .../Optimization/Local/GradientDescent.cs | 2 +- .../Optimization/Support/Optimizer.cs | 10 +- .../Optimization/Support/ParameterSet.cs | 13 +- Numerics/Numerics.csproj | 12 +- Numerics/Sampling/Bootstrap/Bootstrap.cs | 10 +- Numerics/Sampling/MCMC/ARWMH.cs | 4 +- Numerics/Sampling/MCMC/Base/MCMCSampler.cs | 24 ++-- Numerics/Sampling/MCMC/HMC.cs | 2 +- Numerics/Sampling/MCMC/RWMH.cs | 2 +- Numerics/Sampling/MCMC/SNIS.cs | 4 +- Numerics/Sampling/MCMC/Support/MCMCResults.cs | 13 +- .../Sampling/MCMC/Support/ParameterResults.cs | 2 +- Numerics/Sampling/SobolSequence.cs | 4 +- Numerics/Sampling/StratificationBin.cs | 22 +-- Numerics/Sampling/StratificationOptions.cs | 22 +-- Numerics/Sampling/Stratify.cs | 2 +- Numerics/Utilities/ExtensionMethods.cs | 2 +- Numerics/Utilities/JsonConverters.cs | 28 ++-- Numerics/Utilities/SafeProgressReporter.cs | 26 ++-- Numerics/Utilities/Tools.cs | 4 +- .../Data/Interpolation/Test_Bilinear.cs | 46 +++---- .../Data/Interpolation/Test_CubicSpline.cs | 14 +- .../Data/Interpolation/Test_Linear.cs | 40 +++--- .../Data/Interpolation/Test_Polynomial.cs | 14 +- .../Data/Paired Data/Test_Ordinate.cs | 14 +- .../Test_PairedDataInterpolation.cs | 54 ++++---- Test_Numerics/Data/Statistics/Test_BoxCox.cs | 2 +- .../Data/Statistics/Test_HypothesisTests.cs | 4 +- .../Multivariate/Test_MultivariateNormal.cs | 28 ++-- .../Univariate/Test_ChiSquared.cs | 14 +- .../Univariate/Test_EmpiricalDistribution.cs | 18 +-- .../Univariate/Test_Exponential.cs | 52 +++---- .../Univariate/Test_GammaDistribution.cs | 106 +++++++------- .../Univariate/Test_GeneralizedBeta.cs | 28 ++-- .../Test_GeneralizedExtremeValue.cs | 128 ++++++++--------- .../Univariate/Test_GeneralizedLogistic.cs | 118 ++++++++-------- .../Univariate/Test_GeneralizedNormal.cs | 32 ++--- .../Univariate/Test_GeneralizedPareto.cs | 130 +++++++++--------- .../Univariate/Test_Geometric.cs | 74 +++++----- .../Distributions/Univariate/Test_Gumbel.cs | 90 ++++++------ .../Univariate/Test_InverseChiSquared.cs | 58 ++++---- .../Univariate/Test_InverseGamma.cs | 68 ++++----- .../Univariate/Test_KappaFour.cs | 40 +++--- .../Distributions/Univariate/Test_LnNormal.cs | 68 ++++----- 
.../Univariate/Test_LogNormal.cs | 58 ++++---- .../Univariate/Test_LogPearsonTypeIII.cs | 100 +++++++------- .../Distributions/Univariate/Test_Logistic.cs | 82 +++++------ .../Univariate/Test_NoncentralT.cs | 56 ++++---- .../Distributions/Univariate/Test_Normal.cs | 78 +++++------ .../Distributions/Univariate/Test_Pareto.cs | 64 ++++----- .../Univariate/Test_PearsonTypeIII.cs | 104 +++++++------- .../Distributions/Univariate/Test_Pert.cs | 48 +++---- .../Univariate/Test_PertPercentileDists.cs | 60 ++++---- .../Distributions/Univariate/Test_Poisson.cs | 58 ++++---- .../Distributions/Univariate/Test_Rayleigh.cs | 52 +++---- .../Distributions/Univariate/Test_StudentT.cs | 56 ++++---- .../Univariate/Test_Triangular.cs | 82 +++++------ .../Univariate/Test_TruncatedDistribution.cs | 66 ++++----- .../Univariate/Test_TruncatedNormal.cs | 66 ++++----- .../Distributions/Univariate/Test_Uniform.cs | 66 ++++----- .../Univariate/Test_UniformDiscrete.cs | 66 ++++----- .../Distributions/Univariate/Test_Weibull.cs | 76 +++++----- Test_Numerics/Functions/Test_Functions.cs | 4 +- .../Supervised/Test_DecisionTree.cs | 4 +- .../Supervised/Test_RandomForest.cs | 4 +- .../Machine Learning/Supervised/Test_kNN.cs | 2 +- .../Differentiation/Test_Differentiation.cs | 48 +++---- .../Mathematics/Integration/Test_Vegas.cs | 22 +-- .../Test_EigenValueDecomposition.cs | 6 +- .../Test_GaussJordanElimination.cs | 2 +- .../Special Functions/Test_Gamma.cs | 12 +- .../Test_SpecialFunctions.cs | 2 +- Test_Numerics/Sampling/Test_Stratification.cs | 20 +-- .../Serialization/JsonConverterDemo.cs | 4 +- .../Serialization/Test_JsonSerialization.cs | 52 ++++--- .../Utilities/Test_ExtensionMethods.cs | 10 +- Test_Numerics/Utilities/Test_Tools.cs | 32 ++--- 174 files changed, 2056 insertions(+), 1834 deletions(-) diff --git a/Numerics/Data/Interpolation/CubicSpline.cs b/Numerics/Data/Interpolation/CubicSpline.cs index 804feb3a..6a32a3b1 100644 --- a/Numerics/Data/Interpolation/CubicSpline.cs +++ b/Numerics/Data/Interpolation/CubicSpline.cs @@ -28,9 +28,6 @@ * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. */ -using System; -using System.Collections.Generic; - namespace Numerics.Data { /// @@ -78,7 +75,7 @@ public CubicSpline(IList xValues, IList yValues, SortOrder sortO /// /// Stores the array of second derivatives. /// - private double[] y2; + private double[] y2 = Array.Empty(); /// /// Auxiliary routine to set the second derivatives. If you make changes to the x- or y-values, then you need to call this routine afterwards. diff --git a/Numerics/Data/Paired Data/OrderedPairedData.cs b/Numerics/Data/Paired Data/OrderedPairedData.cs index e225795f..8632cda5 100644 --- a/Numerics/Data/Paired Data/OrderedPairedData.cs +++ b/Numerics/Data/Paired Data/OrderedPairedData.cs @@ -35,6 +35,7 @@ using System.Data; using System.Linq; using System.Xml.Linq; +using System.Xml.Serialization; using Numerics.Distributions; namespace Numerics.Data @@ -102,10 +103,10 @@ public class OrderedPairedData : IList, INotifyCollectionChanged private bool _strictY; private SortOrder _orderX; private SortOrder _orderY; - private List _ordinates; + private readonly List _ordinates; /// - public event NotifyCollectionChangedEventHandler CollectionChanged; + public event NotifyCollectionChangedEventHandler? CollectionChanged; /// /// Represents if the paired dataset has valid ordinates and order. 
@@ -265,20 +266,24 @@ public OrderedPairedData(XElement el) { // Get Strictness bool strict = false; - if (el.Attribute(nameof(StrictX)) != null) { bool.TryParse(el.Attribute(nameof(StrictX)).Value, out strict); } + var strictXAttr = el.Attribute(nameof(StrictX)); + if (strictXAttr != null) { bool.TryParse(strictXAttr.Value, out strict); } StrictX = strict; strict = false; - if (el.Attribute(nameof(StrictY)) != null) { bool.TryParse(el.Attribute(nameof(StrictY)).Value, out strict); } + var strictYAttr = el.Attribute(nameof(StrictY)); + if (strictYAttr != null) { bool.TryParse(strictYAttr.Value, out strict); } StrictY = strict; // Get Order SortOrder order = SortOrder.None; - if (el.Attribute(nameof(OrderX)) != null) { Enum.TryParse(el.Attribute(nameof(OrderX)).Value, out order); } + var orderXAttr = el.Attribute(nameof(OrderX)); + if (orderXAttr != null) { Enum.TryParse(orderXAttr.Value, out order); } OrderX = order; order = SortOrder.None; - if (el.Attribute(nameof(OrderY)) != null) { Enum.TryParse(el.Attribute(nameof(OrderY)).Value, out order); } + var orderYAttr = el.Attribute(nameof(OrderY)); + if (orderYAttr != null) { Enum.TryParse(orderYAttr.Value, out order); } OrderY = order; // Ordinates @@ -612,7 +617,7 @@ IEnumerator IEnumerable.GetEnumerator() /// /// The object to compare with the current object. /// True if the specified object is equal to the current object; otherwise, False. - public override bool Equals(object obj) + public override bool Equals(object? obj) { if (obj is OrderedPairedData other) { @@ -1432,15 +1437,14 @@ private double TriangleArea(Ordinate point1, Ordinate point2, Ordinate point3) /// and number of points in the search region. public OrderedPairedData LangSimplify(double tolerance, int lookAhead) { - if (_ordinates == null | lookAhead <= 1 | tolerance <= 0) - return this; + if (lookAhead <= 1 | tolerance <= 0) { return this; } List ordinates = new List(); int count = _ordinates.Count; int offset; - if (lookAhead > count - 1) - lookAhead = count - 1; + if (lookAhead > count - 1) { lookAhead = count - 1; } + ordinates.Add(_ordinates[0]); for (int i = 0; i < count; i++) diff --git a/Numerics/Data/Paired Data/Ordinate.cs b/Numerics/Data/Paired Data/Ordinate.cs index fa8440aa..153787ae 100644 --- a/Numerics/Data/Paired Data/Ordinate.cs +++ b/Numerics/Data/Paired Data/Ordinate.cs @@ -79,8 +79,10 @@ public Ordinate(double xValue, double yValue) public Ordinate(XElement xElement) { double x = 0, y = 0; - if (xElement.Attribute(nameof(X)) != null) double.TryParse(xElement.Attribute(nameof(X)).Value, NumberStyles.Any, CultureInfo.InvariantCulture, out x); - if (xElement.Attribute(nameof(Y)) != null) double.TryParse(xElement.Attribute(nameof(Y)).Value, NumberStyles.Any, CultureInfo.InvariantCulture, out y); + var xAttribute = xElement.Attribute(nameof(X)); + var yAttribute = xElement.Attribute(nameof(Y)); + if (xAttribute != null) double.TryParse(xAttribute.Value, NumberStyles.Any, CultureInfo.InvariantCulture, out x); + if (yAttribute != null) double.TryParse(yAttribute.Value, NumberStyles.Any, CultureInfo.InvariantCulture, out y); X = x; Y = y; IsValid = true; @@ -367,7 +369,7 @@ public Ordinate Transform(Transform xTransform, Transform yTransform) /// /// The object to compare with the current object. /// True if the specified object is equal to the current object; otherwise, False. - public override bool Equals(object obj) + public override bool Equals(object? 
obj) { if (obj is Ordinate other) { diff --git a/Numerics/Data/Paired Data/ProbabilityOrdinate.cs b/Numerics/Data/Paired Data/ProbabilityOrdinate.cs index fc0e59ef..7d7dee7b 100644 --- a/Numerics/Data/Paired Data/ProbabilityOrdinate.cs +++ b/Numerics/Data/Paired Data/ProbabilityOrdinate.cs @@ -75,13 +75,13 @@ public class ProbabilityOrdinates : List, INotifyCollectionChanged, INot /// /// Occurs when the collection changes, for example when items are added or removed. /// - public event NotifyCollectionChangedEventHandler CollectionChanged; + public event NotifyCollectionChangedEventHandler? CollectionChanged; /// /// Occurs when a property value changes/> /// or the indexer Item[]. /// - public event PropertyChangedEventHandler PropertyChanged; + public event PropertyChangedEventHandler? PropertyChanged; /// /// Initializes a new instance of the class diff --git a/Numerics/Data/Paired Data/UncertainOrderedPairedData.cs b/Numerics/Data/Paired Data/UncertainOrderedPairedData.cs index ed442c8d..4d9ab6a2 100644 --- a/Numerics/Data/Paired Data/UncertainOrderedPairedData.cs +++ b/Numerics/Data/Paired Data/UncertainOrderedPairedData.cs @@ -32,6 +32,7 @@ using System.Collections; using System.Collections.Generic; using System.Collections.Specialized; +using System.Data; using System.Globalization; using System.Linq; using System.Xml.Linq; @@ -171,7 +172,7 @@ public SortOrder OrderY /// /// Handles the event of CollectionChanged /// - public event NotifyCollectionChangedEventHandler CollectionChanged; + public event NotifyCollectionChangedEventHandler? CollectionChanged; #endregion @@ -243,7 +244,12 @@ public UncertainOrderedPairedData(IList data, bool strictOnX, _orderY = yOrder; _uncertainOrdinates = new List(data.Count); for (int i = 0; i < data.Count; i++) - _uncertainOrdinates.Add(new UncertainOrdinate(data[i].X, data[i].Y.Clone())); + { + var o = data[i]; + UnivariateDistributionBase? yValue = o.Y?.Clone(); + if (yValue is not null) { _uncertainOrdinates.Add(new UncertainOrdinate(o.X, yValue)); } + } + Validate(); } @@ -266,8 +272,12 @@ private UncertainOrderedPairedData(IList data, bool strictOnX _orderY = yOrder; _uncertainOrdinates = new List(data.Count); for (int i = 0; i < data.Count; i++) - _uncertainOrdinates.Add(new UncertainOrdinate(data[i].X, data[i].Y.Clone())); - + { + var o = data[i]; + UnivariateDistributionBase? yValue = o.Y?.Clone(); + if (yValue is not null) { _uncertainOrdinates.Add(new UncertainOrdinate(o.X, yValue)); } + } + _isValid = dataValid; } @@ -277,34 +287,44 @@ private UncertainOrderedPairedData(IList data, bool strictOnX /// The XElement the UncertainOrderPairedData object is being created from. 
public UncertainOrderedPairedData(XElement el) { + var strictX = el.Attribute("X_Strict"); // Get Order - if (el.Attribute("X_Strict") != null) - bool.TryParse(el.Attribute("X_Strict").Value, out _strictX); - if (el.Attribute("Y_Strict") != null) - bool.TryParse(el.Attribute("Y_Strict").Value, out _strictY); + if (strictX != null) { bool.TryParse(strictX.Value, out _strictX); } + + var strictY = el.Attribute("Y_Strict"); + if (strictY != null) { bool.TryParse(strictY.Value, out _strictY); } + // Get Strictness - if (el.Attribute("X_Order") != null) - Enum.TryParse(el.Attribute("X_Order").Value, out _orderX); - if (el.Attribute("Y_Order") != null) - Enum.TryParse(el.Attribute("Y_Order").Value, out _orderY); + var orderX = el.Attribute("X_Order"); + if (orderX != null) { Enum.TryParse(orderX.Value, out _orderX); } + + var orderY = el.Attribute("Y_Order"); + if (orderY != null) { Enum.TryParse(orderY.Value, out _orderY); } + // Distribution type Distribution = UnivariateDistributionType.Deterministic; - if (el.Attribute("Distribution") != null) + var distributionAttr = el.Attribute("Distribution"); + if (distributionAttr != null) { var argresult = Distribution; - Enum.TryParse(el.Attribute("Distribution").Value, out argresult); + Enum.TryParse(distributionAttr.Value, out argresult); Distribution = argresult; } // new prop - - if (el.Attribute(nameof(AllowDifferentDistributionTypes)) != null) + var allowDiffAtr = el.Attribute(nameof(AllowDifferentDistributionTypes)); + if (allowDiffAtr != null) { - bool.TryParse(el.Attribute(nameof(AllowDifferentDistributionTypes)).Value, out _allowDifferentDistributionTypes); + bool.TryParse(allowDiffAtr.Value, out _allowDifferentDistributionTypes); // Get Ordinates var curveEl = el.Element("Ordinates"); _uncertainOrdinates = new List(); - foreach (XElement ord in curveEl.Elements(nameof(UncertainOrdinate))) - _uncertainOrdinates.Add(new UncertainOrdinate(ord)); + + if (curveEl != null) + { + foreach (XElement ord in curveEl.Elements(nameof(UncertainOrdinate))) + _uncertainOrdinates.Add(new UncertainOrdinate(ord)); + } + } else { @@ -315,15 +335,19 @@ public UncertainOrderedPairedData(XElement el) { foreach (XElement o in curveEl.Elements("Ordinate")) { - double.TryParse(o.Attribute("X").Value, NumberStyles.Any, CultureInfo.InvariantCulture, out var xout); - xData.Add(xout); + var xAttr = o.Attribute("X"); + if ( xAttr != null && double.TryParse(xAttr.Value, NumberStyles.Any, CultureInfo.InvariantCulture, out var xout)) { xData.Add(xout); } + else { xData.Add(0.0); } + var dist = UnivariateDistributionFactory.CreateDistribution(Distribution); var props = dist.GetParameterPropertyNames; var paramVals = new double[(props.Count())]; + for (int i = 0; i < props.Count(); i++) { - double.TryParse(o.Attribute(props[i]).Value, NumberStyles.Any, CultureInfo.InvariantCulture, out var result); - paramVals[i] = result; + var pAttr = o.Attribute(props[i]); + if ( pAttr != null && double.TryParse(pAttr.Value, NumberStyles.Any, CultureInfo.InvariantCulture, out var result)) { paramVals[i] = result; } + else { paramVals[i] = 0.0; } } dist.SetParameters(paramVals); @@ -488,7 +512,14 @@ public List GetErrors() { if (left._uncertainOrdinates[i].X != right._uncertainOrdinates[i].X) return false; - if (left._uncertainOrdinates[i].Y == right._uncertainOrdinates[i].Y == false) + + var leftY = left._uncertainOrdinates[i].Y; + var rightY = right._uncertainOrdinates[i].Y; + if (leftY is null && rightY is null) + continue; + if (leftY is null || rightY is null) + return false; + if 
(!leftY.Equals(rightY)) return false; } return true; @@ -510,7 +541,7 @@ public List GetErrors() /// /// The object to compare with the current object. /// True if the specified object is equal to the current object; otherwise, False. - public override bool Equals(object obj) + public override bool Equals(object? obj) { if (obj is UncertainOrderedPairedData other) { diff --git a/Numerics/Data/Paired Data/UncertainOrdinate.cs b/Numerics/Data/Paired Data/UncertainOrdinate.cs index 1395f697..d3a81f7a 100644 --- a/Numerics/Data/Paired Data/UncertainOrdinate.cs +++ b/Numerics/Data/Paired Data/UncertainOrdinate.cs @@ -68,9 +68,7 @@ public UncertainOrdinate(double xValue, UnivariateDistributionBase yValue) { X = xValue; Y = yValue; - IsValid = true; - if (double.IsInfinity(X) || double.IsNaN(X) || Y == null || Y.ParametersValid == false) - IsValid = false; + IsValid = !(double.IsInfinity(X) || double.IsNaN(X) || Y is null || !Y.ParametersValid); } /// @@ -79,16 +77,18 @@ public UncertainOrdinate(double xValue, UnivariateDistributionBase yValue) /// The XElement to deserialize. public UncertainOrdinate(XElement xElement) { + var xAttr = xElement.Attribute(nameof(X)); double x = 0; - UnivariateDistributionBase dist = null; - if (xElement.Attribute(nameof(X)) != null) double.TryParse(xElement.Attribute(nameof(X)).Value, NumberStyles.Any, CultureInfo.InvariantCulture, out x); - if (xElement.Element("Distribution") != null) { dist = UnivariateDistributionFactory.CreateDistribution(xElement.Element("Distribution")); } + UnivariateDistributionBase? dist = null; + if (xAttr != null) { double.TryParse(xAttr.Value, NumberStyles.Any, CultureInfo.InvariantCulture, out x); } + + var distEl = xElement.Element("Distribution"); + if (distEl != null) { dist = UnivariateDistributionFactory.CreateDistribution(distEl); } // X = x; Y = dist; - IsValid = true; - if (double.IsInfinity(X) || double.IsNaN(X) || Y == null || Y.ParametersValid == false) - IsValid = false; + + IsValid = !(double.IsInfinity(X) || double.IsNaN(X) || Y is null || !Y.ParametersValid); } /// @@ -99,7 +99,8 @@ public UncertainOrdinate(XElement xElement) public UncertainOrdinate(XElement xElement, UnivariateDistributionType distributionType) { double x = 0; - if (xElement.Attribute(nameof(X)) != null) double.TryParse(xElement.Attribute(nameof(X)).Value, NumberStyles.Any, CultureInfo.InvariantCulture, out x); + var xElAttr = xElement.Attribute(nameof(X)); + if (xElAttr != null) double.TryParse(xElAttr.Value, NumberStyles.Any, CultureInfo.InvariantCulture, out x); // backwards compatibility var dist = UnivariateDistributionFactory.CreateDistribution(distributionType); var props = dist.GetParameterPropertyNames; @@ -107,7 +108,8 @@ public UncertainOrdinate(XElement xElement, UnivariateDistributionType distribut for (int i = 0; i < props.Count(); i++) { double p = 0; - if (xElement.Attribute(props[i]) != null) double.TryParse(xElement.Attribute(props[i]).Value, NumberStyles.Any, CultureInfo.InvariantCulture, out p); + var xElPropsAttr = xElement.Attribute(props[i]); + if (xElPropsAttr != null) { double.TryParse(xElPropsAttr.Value, NumberStyles.Any, CultureInfo.InvariantCulture, out p); } paramVals[i] = p; } dist.SetParameters(paramVals); @@ -115,7 +117,7 @@ public UncertainOrdinate(XElement xElement, UnivariateDistributionType distribut X = x; Y = dist; IsValid = true; - if (double.IsInfinity(X) || double.IsNaN(X) || Y == null || Y.ParametersValid == false) + if (double.IsInfinity(X) || double.IsNaN(X) || Y is null || Y.ParametersValid == false) 
IsValid = false; } @@ -132,7 +134,7 @@ public UncertainOrdinate(XElement xElement, UnivariateDistributionType distribut /// /// Y distribution. /// - public UnivariateDistributionBase Y; + public UnivariateDistributionBase? Y; /// /// Boolean indicating if the ordinate has valid numeric values or not. @@ -151,6 +153,10 @@ public UncertainOrdinate(XElement xElement, UnivariateDistributionType distribut /// A 'sampled' ordinate value. public Ordinate GetOrdinate(double probability) { + if (Y is null) + { + throw new InvalidOperationException("Y distribution is not defined."); + } return new Ordinate(X, Y.InverseCDF(probability)); } @@ -160,6 +166,10 @@ public Ordinate GetOrdinate(double probability) /// A mean ordinate value. public Ordinate GetOrdinate() { + if (Y is null) + { + throw new InvalidOperationException("Y distribution is not defined."); + } return new Ordinate(X, Y.Mean); } @@ -182,9 +192,11 @@ public bool OrdinateValid(UncertainOrdinate ordinateToCompare, bool strictX, boo if (ordinateToCompare.IsValid == false) return false; // Check for equivalent distribution types - if (allowDifferentTypes == false && ordinateToCompare.Y.Type != Y.Type) + if (allowDifferentTypes == false && (ordinateToCompare.Y is null || Y is null || ordinateToCompare.Y.Type != Y.Type)) return false; + if (Y is null || ordinateToCompare.Y is null) + return false; double minPercentile = Y.Type == UnivariateDistributionType.PertPercentile || Y.Type == UnivariateDistributionType.PertPercentileZ ? 0.05 : 1E-5; // Test reasonable lower bound @@ -223,7 +235,7 @@ public List OrdinateErrors(UncertainOrdinate ordinateToCompare, bool str result.Add("Ordinate X value can not be infinity."); if (double.IsNaN(ordinateToCompare.X)) result.Add("Ordinate X value must be a valid number."); - if (ordinateToCompare.Y == null) + if (ordinateToCompare.Y is null) { result.Add("Ordinate Y value must be defined."); } @@ -234,6 +246,12 @@ public List OrdinateErrors(UncertainOrdinate ordinateToCompare, bool str } } // Check for equivalent distribution types + if(ordinateToCompare.Y is null || Y is null) + { + result.Add("Ordinate Y value must be defined."); + return result; + } + if (allowDifferentTypes == false && ordinateToCompare.Y.Type != Y.Type) result.Add("Can't compare two ordinates with different distribution types."); // Return False // @@ -266,7 +284,7 @@ public List OrdinateErrors() result.Add("Ordinate X value can not be infinity."); if (double.IsNaN(X)) result.Add("Ordinate X value must be a valid number."); - if (Y == null) + if (Y is null) result.Add("Ordinate Y value must be defined."); else if (Y.ParametersValid == false) { @@ -286,9 +304,13 @@ public List OrdinateErrors() /// True if two objects are numerically equal; otherwise, False. public static bool operator ==(UncertainOrdinate left, UncertainOrdinate right) { - //if (left == null || right == null) return false; + if (left.X != right.X) return false; + if (left.Y is null && right.Y is null) + return true; + if (left.Y is null || right.Y is null) + return false; if (left.Y != right.Y) return false; return true; @@ -310,7 +332,7 @@ public List OrdinateErrors() /// /// The object to compare with the current object. /// True if the specified object is equal to the current object; otherwise, False. - public override bool Equals(object obj) + public override bool Equals(object? 
obj) { if (obj is UncertainOrdinate other) { @@ -329,7 +351,7 @@ public override int GetHashCode() { int hash = 17; hash = hash * 23 + X.GetHashCode(); - hash = hash * 23 + Y.GetHashCode(); + hash = hash * 23 + (Y is not null ? Y.GetHashCode() : 0); return hash; } } @@ -341,7 +363,10 @@ public XElement ToXElement() { var result = new XElement(nameof(UncertainOrdinate)); result.SetAttributeValue(nameof(X), X.ToString("G17", CultureInfo.InvariantCulture)); - result.Add(Y.ToXElement()); + if (Y is not null) + { + result.Add(Y.ToXElement()); + } return result; } diff --git a/Numerics/Data/Regression/LinearRegression.cs b/Numerics/Data/Regression/LinearRegression.cs index d33a0442..50ec19cd 100644 --- a/Numerics/Data/Regression/LinearRegression.cs +++ b/Numerics/Data/Regression/LinearRegression.cs @@ -117,32 +117,32 @@ public LinearRegression(Matrix x, Vector y, bool hasIntercept = true) /// /// The list of estimated parameter values. /// - public List Parameters { get; private set; } + public List Parameters { get; private set; } = Array.Empty().ToList(); /// /// The list of the estimated parameter names. /// - public List ParameterNames { get; private set; } + public List ParameterNames { get; private set; } /// /// The list of the estimated parameter standard errors. /// - public List ParameterStandardErrors { get; private set; } + public List ParameterStandardErrors { get; private set; } = Array.Empty().ToList(); /// /// The list of the estimated parameter t-statistics. /// - public List ParameterTStats { get; private set; } + public List ParameterTStats { get; private set; } = Array.Empty().ToList(); /// /// The estimate parameter covariance matrix. /// - public Matrix Covariance { get; private set; } + public Matrix Covariance { get; private set; } = new Matrix(0, 0); /// /// The residuals of the fitted linear model. /// - public double[] Residuals { get; private set; } + public double[] Residuals { get; private set; } = Array.Empty(); /// /// The model standard error. diff --git a/Numerics/Data/Statistics/Autocorrelation.cs b/Numerics/Data/Statistics/Autocorrelation.cs index c2f08cd2..c08daec1 100644 --- a/Numerics/Data/Statistics/Autocorrelation.cs +++ b/Numerics/Data/Statistics/Autocorrelation.cs @@ -94,7 +94,7 @@ public enum Type /// A n x 2 matrix, with being the number of given input data points. The first column contains the lag and the /// second column contains the function evaluated at the given values. /// - public static double[,] Function(IList data, int lagMax = -1, Type type = Type.Correlation) + public static double[,]? Function(IList data, int lagMax = -1, Type type = Type.Correlation) { if (type == Type.Correlation) { @@ -123,7 +123,7 @@ public enum Type /// A n x 2 matrix, with being the number of given input data points. The first column contains the lag and the /// second column contains the function evaluated at the given values. /// - public static double[,] Function(TimeSeries timeSeries, int lagMax = -1, Type type = Type.Correlation) + public static double[,]? Function(TimeSeries timeSeries, int lagMax = -1, Type type = Type.Correlation) { if (type == Type.Correlation) { @@ -150,7 +150,7 @@ public enum Type /// A n x 2 matrix, with being the number of given input data points. The first column contains the lag and the /// second column contains the covariance of the given values. /// - private static double[,] Covariance(IList data, int lagMax = -1) + private static double[,]? 
Covariance(IList data, int lagMax = -1) { int n = data.Count; if (lagMax < 0) lagMax = (int)Math.Floor(Math.Min(10d * Math.Log10(n), n - 1)); @@ -177,7 +177,7 @@ public enum Type /// A n x 2 matrix, with being the number of given input data points. The first column contains the lag and the /// second column contains the covariance of the given values. /// - private static double[,] Covariance(TimeSeries timeSeries, int lagMax = -1) + private static double[,]? Covariance(TimeSeries timeSeries, int lagMax = -1) { int n = timeSeries.Count; if (lagMax < 0) lagMax = (int)Math.Floor(Math.Min(10d * Math.Log10(n), n - 1)); @@ -205,12 +205,15 @@ public enum Type /// A n x 2 matrix, with being the number of given input data points. The first column contains the lag and the /// second column contains the autocorrelation of the given values. /// - private static double[,] Correlation(IList data, int lagMax = -1) + private static double[,]? Correlation(IList data, int lagMax = -1) { int n = data.Count; if (lagMax < 0) lagMax = (int)Math.Floor(Math.Min(10d * Math.Log10(n), n - 1)); if (lagMax < 1 || n < 2) return null; var acf = Covariance(data, lagMax); + + if (acf == null) return null; + double den = acf[0, 1]; for (int i = 0; i < acf.GetLength(0); i++) acf[i, 1] /= den; @@ -226,12 +229,15 @@ public enum Type /// A n x 2 matrix, with being the number of given input data points. The first column contains the lag and the /// second column contains the autocorrelation of the given values. /// - private static double[,] Correlation(TimeSeries timeSeries, int lagMax = -1) + private static double[,]? Correlation(TimeSeries timeSeries, int lagMax = -1) { int n = timeSeries.Count; if (lagMax < 0) lagMax = (int)Math.Floor(Math.Min(10d * Math.Log10(n), n - 1)); if (lagMax < 1 || n < 2) return null; + var acf = Covariance(timeSeries, lagMax); + if (acf == null) return null; + double den = acf[0, 1]; for (int i = 0; i < acf.GetLength(0); i++) acf[i, 1] /= den; @@ -247,13 +253,14 @@ public enum Type /// A n x 2 matrix, with being the number of given input data points. The first column contains the lag and the /// second column contains the partial autocorrelation of the given values. /// - private static double[,] Partial(IList data, int lagMax = -1) + private static double[,]? Partial(IList data, int lagMax = -1) { int n = data.Count; if (lagMax < 0) lagMax = (int)Math.Floor(Math.Min(10d * Math.Log10(n), n - 1)); if (lagMax < 1 || n < 2) return null; // First compute the ACVF var acvf = Covariance(data, lagMax); + if (acvf == null) return null; // Then compute PACF using the Durbin-Levinson algorithm int i, j; var phis = new double[lagMax + 1]; @@ -293,13 +300,14 @@ public enum Type /// A n x 2 matrix, with being the number of given input data points. The first column contains the lag and the /// second column contains the partial autocorrelation of the given values. /// - private static double[,] Partial(TimeSeries timeSeries, int lagMax = -1) + private static double[,]? 
Partial(TimeSeries timeSeries, int lagMax = -1) { int n = timeSeries.Count; if (lagMax < 0) lagMax = (int)Math.Floor(Math.Min(10d * Math.Log10(n), n - 1)); if (lagMax < 1 || n < 2) return null; // First compute the ACVF var acvf = Covariance(timeSeries, lagMax); + if (acvf == null) return null; // Then compute PACF using the Durbin-Levinson algorithm int i, j; var phis = new double[lagMax + 1]; diff --git a/Numerics/Data/Statistics/Histogram.cs b/Numerics/Data/Statistics/Histogram.cs index 1ff14e83..201ff926 100644 --- a/Numerics/Data/Statistics/Histogram.cs +++ b/Numerics/Data/Statistics/Histogram.cs @@ -119,8 +119,10 @@ public double Midpoint /// +1 if this bin is lower than the compared bin. /// -1 otherwise. /// - public int CompareTo(Bin other) + public int CompareTo(Bin? other) { + if (other is null) { return 1; } + if (UpperBound > other.LowerBound && LowerBound < other.LowerBound) { throw new ArgumentException(nameof(other), "The bins cannot be overlapping."); @@ -149,7 +151,7 @@ public object Clone() /// Checks whether two histogram bins are equal. /// /// True if the bins are equal and false otherwise. - public override bool Equals(object obj) + public override bool Equals(object? obj) { if (!(obj is Bin)) { diff --git a/Numerics/Data/Statistics/HypothesisTests.cs b/Numerics/Data/Statistics/HypothesisTests.cs index 97d00084..6c50254b 100644 --- a/Numerics/Data/Statistics/HypothesisTests.cs +++ b/Numerics/Data/Statistics/HypothesisTests.cs @@ -245,6 +245,7 @@ public static double LjungBoxTest(IList sample, int lagMax = -1) int n = sample.Count; if (lagMax < 0) lagMax = (int)Math.Floor(Math.Min(10d * Math.Log10(n), n - 1)); var acf = Autocorrelation.Function(sample, lagMax, Autocorrelation.Type.Correlation); + if (acf == null) throw new Exception("Autocorrelation function could not be calculated."); double Q = 0; for (int k = 1; k <= lagMax; k++) Q += Tools.Sqr(acf[k, 1]) / (n - k); diff --git a/Numerics/Data/Statistics/PlottingPositions.cs b/Numerics/Data/Statistics/PlottingPositions.cs index fac4557c..aa74f137 100644 --- a/Numerics/Data/Statistics/PlottingPositions.cs +++ b/Numerics/Data/Statistics/PlottingPositions.cs @@ -82,7 +82,7 @@ public static double[] Function(int N, double alpha) /// The sample size. /// The plotting position formula type. /// An array of plotting positions of size N. - public static double[] Function(int N, PlottingPositions.PlottingPostionType plottingPostionType) + public static double[]? Function(int N, PlottingPositions.PlottingPostionType plottingPostionType) { if (plottingPostionType == PlottingPostionType.Blom) { diff --git a/Numerics/Data/Statistics/Probability.cs b/Numerics/Data/Statistics/Probability.cs index 9f33dbb3..77e9c4c6 100644 --- a/Numerics/Data/Statistics/Probability.cs +++ b/Numerics/Data/Statistics/Probability.cs @@ -184,7 +184,7 @@ public static double JointProbability(IList probabilities, DependencyTyp /// The correlation matrix defining the dependency. Default = null. /// The dependency type. Default = Correlation matrix. /// The joint probability. - public static double JointProbability(IList probabilities, int[] indicators, double[,] correlationMatrix = null, DependencyType dependency = DependencyType.CorrelationMatrix) + public static double JointProbability(IList probabilities, int[] indicators, double[,]? 
correlationMatrix = null, DependencyType dependency = DependencyType.CorrelationMatrix) { if (dependency == DependencyType.CorrelationMatrix && correlationMatrix != null) { @@ -312,7 +312,7 @@ public static double NegativeJointProbability(IList probabilities, int[] /// /// This method utilizes a modified version of Pandey's PCM method. /// - public static double JointProbabilityHPCM(IList probabilities, int[] indicators, double[,] correlationMatrix, double[] conditionalProbabilities = null) + public static double JointProbabilityHPCM(IList probabilities, int[] indicators, double[,] correlationMatrix, double[]? conditionalProbabilities = null) { // Validation Checks if (probabilities == null || probabilities.Count == 0) @@ -431,7 +431,7 @@ public static double JointProbabilityHPCM(IList probabilities, int[] ind /// /// The joint probability of the events, adjusted for dependencies as defined by the correlation matrix. The return value is between 0 and 1. /// - public static double JointProbabilityPCM(IList probabilities, int[] indicators, double[,] correlationMatrix, double[] conditionalProbabilities = null) + public static double JointProbabilityPCM(IList probabilities, int[] indicators, double[,] correlationMatrix, double[]? conditionalProbabilities = null) { // Validation Checks if (probabilities == null || probabilities.Count == 0) @@ -1899,7 +1899,7 @@ public static double CommonCauseAdjustment(IList probabilities) /// The correlation matrix defining the dependency. /// The dependency type. Default = Correlation matrix. /// The common cause adjustment factor. - public static double CommonCauseAdjustment(IList probabilities, double[,] correlationMatrix = null, DependencyType dependency = DependencyType.CorrelationMatrix) + public static double CommonCauseAdjustment(IList probabilities, double[,]? correlationMatrix = null, DependencyType dependency = DependencyType.CorrelationMatrix) { // Validation Checks if (probabilities == null || probabilities.Count == 0) diff --git a/Numerics/Data/Statistics/Statistics.cs b/Numerics/Data/Statistics/Statistics.cs index a45dc06d..1ab3e9af 100644 --- a/Numerics/Data/Statistics/Statistics.cs +++ b/Numerics/Data/Statistics/Statistics.cs @@ -368,7 +368,7 @@ public static double JackKnifeStandardError(IList data, Func /// Sample of data, no sorting is assumed. /// The statistic for estimating a sample. - public static double[] JackKnifeSample(IList data, Func, double> statistic) + public static double[]? JackKnifeSample(IList data, Func, double> statistic) { if (data == null) throw new ArgumentNullException(nameof(data)); if (data.Count == 0) return null; diff --git a/Numerics/Data/Time Series/Support/Series.cs b/Numerics/Data/Time Series/Support/Series.cs index 05685a06..a78cd46f 100644 --- a/Numerics/Data/Time Series/Support/Series.cs +++ b/Numerics/Data/Time Series/Support/Series.cs @@ -56,7 +56,7 @@ public abstract class Series : IList> _seriesOrdinates = new List>(); /// - public event NotifyCollectionChangedEventHandler CollectionChanged; + public event NotifyCollectionChangedEventHandler? CollectionChanged; /// public SeriesOrdinate this[int index] @@ -75,11 +75,13 @@ public SeriesOrdinate this[int index] } /// - object IList.this[int index] + object? 
IList.this[int index] { get { return _seriesOrdinates[index]; } set { + if (value is null) { throw new ArgumentNullException(nameof(value)); } + if (value.GetType() != typeof(SeriesOrdinate)) { if (_seriesOrdinates[index] != (SeriesOrdinate)value) @@ -109,7 +111,7 @@ object IList.this[int index] public bool IsFixedSize => false; /// - public object SyncRoot => _seriesOrdinates.Count > 0 ? _seriesOrdinates[0] : null; + public object SyncRoot => _seriesOrdinates.Count > 0 ? _seriesOrdinates[0]! : new object(); /// public bool IsSynchronized => false; @@ -117,14 +119,15 @@ object IList.this[int index] /// public virtual void Add(SeriesOrdinate item) { - if (item == null) throw new ArgumentNullException(nameof(item)); + if (item is null) throw new ArgumentNullException(nameof(item)); _seriesOrdinates.Add(item); RaiseCollectionChanged(new NotifyCollectionChangedEventArgs(NotifyCollectionChangedAction.Add, item, _seriesOrdinates.Count - 1)); } /// - public int Add(object item) + public int Add(object? item) { + if (item is null) throw new ArgumentNullException(nameof(item)); if (item.GetType() != typeof(SeriesOrdinate)) { return -1; } Add((SeriesOrdinate)item); return _seriesOrdinates.Count - 1; @@ -133,14 +136,15 @@ public int Add(object item) /// public virtual void Insert(int index, SeriesOrdinate item) { - if (item == null) throw new ArgumentNullException(nameof(item)); + if (item is null) throw new ArgumentNullException(nameof(item)); _seriesOrdinates.Insert(index, item); RaiseCollectionChanged(new NotifyCollectionChangedEventArgs(NotifyCollectionChangedAction.Add, item, index)); } /// - public void Insert(int index, object item) + public void Insert(int index, object? item) { + if (item is null) throw new ArgumentNullException(nameof(item)); if (item.GetType() == typeof(SeriesOrdinate)) { Insert(index, (SeriesOrdinate)item); @@ -160,8 +164,9 @@ public virtual bool Remove(SeriesOrdinate item) } /// - public void Remove(object item) + public void Remove(object? item) { + if (item is null) throw new ArgumentNullException(nameof(item)); if (item.GetType() == typeof(SeriesOrdinate)) { Remove((SeriesOrdinate)item); @@ -195,8 +200,9 @@ public bool Contains(SeriesOrdinate item) } /// - public bool Contains(object item) + public bool Contains(object? item) { + if (item is null) throw new ArgumentNullException(nameof(item)); if (item.GetType() == typeof(SeriesOrdinate)) { return Contains((SeriesOrdinate)item); @@ -226,8 +232,9 @@ public int IndexOf(SeriesOrdinate item) } /// - public int IndexOf(object item) + public int IndexOf(object? item) { + if (item is null) throw new ArgumentNullException(nameof(item)); if (item.GetType() == typeof(SeriesOrdinate)) { return _seriesOrdinates.IndexOf((SeriesOrdinate)item); diff --git a/Numerics/Data/Time Series/Support/SeriesOrdinate.cs b/Numerics/Data/Time Series/Support/SeriesOrdinate.cs index 9dc8c2d8..8cbde3f1 100644 --- a/Numerics/Data/Time Series/Support/SeriesOrdinate.cs +++ b/Numerics/Data/Time Series/Support/SeriesOrdinate.cs @@ -50,7 +50,11 @@ public class SeriesOrdinate : INotifyPropertyChanged, IEquatable /// /// Constructs a new series ordinate. /// - public SeriesOrdinate() { } + public SeriesOrdinate() + { + _index = default!; + _value = default!; + } /// /// Constructs a new series ordinate. @@ -74,7 +78,7 @@ public SeriesOrdinate(TIndex index, TValue value) protected TValue _value; /// - public event PropertyChangedEventHandler PropertyChanged; + public event PropertyChangedEventHandler? 
PropertyChanged; /// /// The index of the series ordinate. @@ -109,7 +113,7 @@ public virtual TValue Value } /// - public bool Equals(SeriesOrdinate other) + public bool Equals(SeriesOrdinate? other) { if (ReferenceEquals(other, null)) return false; if (ReferenceEquals(this, other)) return true; @@ -118,7 +122,7 @@ public bool Equals(SeriesOrdinate other) } /// - public override bool Equals(object obj) => Equals(obj as SeriesOrdinate); + public override bool Equals(object? obj) => Equals((SeriesOrdinate?)obj); /// /// Equality operator overload. @@ -143,8 +147,8 @@ public override int GetHashCode() unchecked { int hash = 17; - hash = hash * 23 + EqualityComparer.Default.GetHashCode(_index); - hash = hash * 23 + EqualityComparer.Default.GetHashCode(_value); + hash = hash * 23 + (_index is null ? 0 : EqualityComparer.Default.GetHashCode(_index)); + hash = hash * 23 + (_value is null ? 0 : EqualityComparer.Default.GetHashCode(_value)); return hash; } } diff --git a/Numerics/Data/Time Series/Support/TimeSeriesDownload.cs b/Numerics/Data/Time Series/Support/TimeSeriesDownload.cs index 41f3ea1b..223b60cf 100644 --- a/Numerics/Data/Time Series/Support/TimeSeriesDownload.cs +++ b/Numerics/Data/Time Series/Support/TimeSeriesDownload.cs @@ -403,7 +403,7 @@ private static string CreateURLForUSGSDownload(string siteNumber, TimeSeriesType using (GZipStream decompressionStream = new GZipStream(compressedStream, CompressionMode.Decompress)) using (StreamReader reader = new StreamReader(decompressionStream)) { - string line; + string? line; bool isHeader = true; while ((line = await reader.ReadLineAsync()) != null) @@ -770,7 +770,7 @@ public static async Task FromABOM( $"&station_no={Uri.EscapeDataString(stationNumber)}" + $"¶metertype_name={Uri.EscapeDataString(parameterType)}"; - string tsId = null; + string? tsId = null; // Create HttpClientHandler with automatic decompression var handler = new HttpClientHandler @@ -840,7 +840,7 @@ public static async Task FromABOM( for (int i = 0; i < headers.GetArrayLength(); i++) { - string header = headers[i].GetString(); + string? header = headers[i].GetString(); if (header == "ts_id") tsIdIndex = i; if (header == "ts_name") tsNameIndex = i; } @@ -852,8 +852,9 @@ public static async Task FromABOM( for (int i = 1; i < root.GetArrayLength(); i++) { var row = root[i]; - string tsName = tsNameIndex >= 0 ? row[tsNameIndex].GetString() : ""; + string? tsName = tsNameIndex >= 0 ? row[tsNameIndex].GetString() : ""; + if (tsName == null) continue; // Prioritize: DMQaQc.Merged.DailyMean.24HR or similar daily mean series if (tsName.Contains("DailyMean") || tsName.Contains("Daily Mean")) { @@ -940,7 +941,7 @@ public static async Task FromABOM( if (point.GetArrayLength() < 2) continue; // Parse timestamp - string timestampStr = point[0].GetString(); + string? 
timestampStr = point[0].GetString(); if (!DateTime.TryParse(timestampStr, out DateTime date)) continue; diff --git a/Numerics/Data/Time Series/TimeSeries.cs b/Numerics/Data/Time Series/TimeSeries.cs index ec18039a..53f1bb4d 100644 --- a/Numerics/Data/Time Series/TimeSeries.cs +++ b/Numerics/Data/Time Series/TimeSeries.cs @@ -141,20 +141,32 @@ public TimeSeries(TimeInterval timeInterval, DateTime startDate, IList d public TimeSeries(XElement xElement) { // Get time interval - if (xElement.Attribute(nameof(TimeInterval)) != null) - Enum.TryParse(xElement.Attribute(nameof(TimeInterval)).Value, out _timeInterval); + var timeIntervalAttr = xElement.Attribute(nameof(TimeInterval)); + if (timeIntervalAttr != null) + Enum.TryParse(timeIntervalAttr.Value, out _timeInterval); // Get Ordinates foreach (XElement ordinate in xElement.Elements("SeriesOrdinate")) { // Try to parse the invariant date string using TryParseExact // If it fails, do a regular try parse. - DateTime index; - if (!DateTime.TryParseExact(ordinate.Attribute("Index").Value, "o", CultureInfo.InvariantCulture, DateTimeStyles.RoundtripKind, out index)) + DateTime index = default; + var ordAttr = ordinate.Attribute("Index"); + if (ordAttr != null) { - DateTime.TryParse(ordinate.Attribute("Index").Value, out index); + if (!DateTime.TryParseExact(ordAttr.Value, "o", CultureInfo.InvariantCulture, DateTimeStyles.RoundtripKind, out index)) + { + DateTime.TryParse(ordAttr.Value, out index); + } + } + + double value = 0.0; + var ordVal = ordinate.Attribute("Value"); + if (ordVal != null) + { + double.TryParse(ordVal.Value, NumberStyles.Any, CultureInfo.InvariantCulture, out value); } - double.TryParse(ordinate.Attribute("Value").Value, NumberStyles.Any, CultureInfo.InvariantCulture, out var value); + Add(new SeriesOrdinate(index, value)); } } @@ -1115,7 +1127,7 @@ public TimeSeries ClipTimeSeries(DateTime startDate, DateTime endDate) /// The new time interval. /// Optional. Determines if values should be averaged (true) or cumulated (false) for larger time steps. Default = true. /// A new TimeSeries object with the new interval. - public TimeSeries ConvertTimeInterval(TimeInterval timeInterval, bool average = true) + public TimeSeries? ConvertTimeInterval(TimeInterval timeInterval, bool average = true) { var TS = TimeSeries.TimeIntervalInHours(TimeInterval); // The time step in hours var newTS = TimeSeries.TimeIntervalInHours(timeInterval); // The new time step in hours @@ -1467,7 +1479,7 @@ public TimeSeries CalendarYearSeries(BlockFunctionType blockFunction = BlockFunc var result = new TimeSeries(TimeInterval.Irregular); // First, perform smoothing function - TimeSeries smoothedSeries = null; + TimeSeries? smoothedSeries = null; if (smoothingFunction == SmoothingFunctionType.None) { smoothedSeries = Clone(); @@ -1486,6 +1498,7 @@ public TimeSeries CalendarYearSeries(BlockFunctionType blockFunction = BlockFunc } // Then, perform block function + if (smoothedSeries == null) return result; for (int i = smoothedSeries.StartDate.Year; i <= smoothedSeries.EndDate.Year; i++) { var blockData = smoothedSeries.Where(x => x.Index.Year == i).ToList(); @@ -1561,7 +1574,7 @@ public TimeSeries CustomYearSeries(int startMonth = 10, BlockFunctionType blockF var result = new TimeSeries(TimeInterval.Irregular); // First, perform smoothing function - TimeSeries smoothedSeries = null; + TimeSeries? 
smoothedSeries = null; if (smoothingFunction == SmoothingFunctionType.None) { smoothedSeries = Clone(); @@ -1580,6 +1593,7 @@ public TimeSeries CustomYearSeries(int startMonth = 10, BlockFunctionType blockF } // Then, shift the dates + if( smoothedSeries == null) return result; int shift = startMonth != 1 ? 12 - startMonth + 1 : 0; smoothedSeries = startMonth != 1 ? smoothedSeries.ShiftDatesByMonth(shift) : smoothedSeries; @@ -1667,7 +1681,7 @@ public TimeSeries CustomYearSeries(int startMonth, int endMonth, BlockFunctionTy var result = new TimeSeries(TimeInterval.Irregular); // First, perform smoothing function - TimeSeries smoothedSeries = null; + TimeSeries? smoothedSeries = null; if (smoothingFunction == SmoothingFunctionType.None) { smoothedSeries = Clone(); @@ -1686,6 +1700,7 @@ public TimeSeries CustomYearSeries(int startMonth, int endMonth, BlockFunctionTy } // Then, perform block function + if(smoothedSeries == null) return result; for (int i = smoothedSeries.StartDate.Year; i <= smoothedSeries.EndDate.Year; i++) { @@ -1779,7 +1794,7 @@ public TimeSeries MonthlySeries(BlockFunctionType blockFunction = BlockFunctionT var result = new TimeSeries(TimeInterval.Irregular); // Create smoothed series - TimeSeries smoothedSeries = null; + TimeSeries? smoothedSeries = null; if (smoothingFunction == SmoothingFunctionType.None) { smoothedSeries = Clone(); @@ -1797,6 +1812,7 @@ public TimeSeries MonthlySeries(BlockFunctionType blockFunction = BlockFunctionT smoothedSeries = Difference(period); } + if(smoothedSeries == null) return result; for (int i = smoothedSeries.StartDate.Year; i <= smoothedSeries.EndDate.Year; i++) { @@ -1875,7 +1891,7 @@ public TimeSeries QuarterlySeries(BlockFunctionType blockFunction = BlockFunctio var result = new TimeSeries(TimeInterval.Irregular); // Create smoothed series - TimeSeries smoothedSeries = null; + TimeSeries? smoothedSeries = null; if (smoothingFunction == SmoothingFunctionType.None) { smoothedSeries = Clone(); @@ -1893,6 +1909,7 @@ public TimeSeries QuarterlySeries(BlockFunctionType blockFunction = BlockFunctio smoothedSeries = Difference(period); } + if (smoothedSeries == null) return result; for (int i = smoothedSeries.StartDate.Year; i <= smoothedSeries.EndDate.Year; i++) { @@ -1987,7 +2004,7 @@ public TimeSeries QuarterlySeries(BlockFunctionType blockFunction = BlockFunctio public TimeSeries PeaksOverThresholdSeries(double threshold, int minStepsBetweenEvents = 1, SmoothingFunctionType smoothingFunction = SmoothingFunctionType.None, int period = 1) { // Create smoothed time series - TimeSeries smoothedSeries = null; + TimeSeries? 
smoothedSeries = null; if (smoothingFunction == SmoothingFunctionType.None) { smoothedSeries = Clone(); @@ -2009,6 +2026,7 @@ public TimeSeries PeaksOverThresholdSeries(double threshold, int minStepsBetween int i = 0, idx, idxMax; var clusters = new List(); + if(smoothedSeries == null) return new TimeSeries(TimeInterval.Irregular); while (i < smoothedSeries.Count) { if (!double.IsNaN(smoothedSeries[i].Value) && smoothedSeries[i].Value > threshold) diff --git a/Numerics/Distributions/Bivariate Copulas/AMHCopula.cs b/Numerics/Distributions/Bivariate Copulas/AMHCopula.cs index e1239d57..3b6ab3da 100644 --- a/Numerics/Distributions/Bivariate Copulas/AMHCopula.cs +++ b/Numerics/Distributions/Bivariate Copulas/AMHCopula.cs @@ -122,7 +122,7 @@ public override ArgumentOutOfRangeException ValidateParameter(double parameter, if (throwException) throw new ArgumentOutOfRangeException(nameof(Theta), "The dependency parameter θ (theta) must be less than or equal to " + ThetaMaximum.ToString() + "."); return new ArgumentOutOfRangeException(nameof(Theta), "The dependency parameter θ (theta) must be less than or equal to " + ThetaMaximum.ToString() + "."); } - return null; + return new ArgumentOutOfRangeException(nameof(Theta), "Parameter is valid."); } /// diff --git a/Numerics/Distributions/Bivariate Copulas/Base/ArchimedeanCopula.cs b/Numerics/Distributions/Bivariate Copulas/Base/ArchimedeanCopula.cs index 6bd55b8e..5b23539a 100644 --- a/Numerics/Distributions/Bivariate Copulas/Base/ArchimedeanCopula.cs +++ b/Numerics/Distributions/Bivariate Copulas/Base/ArchimedeanCopula.cs @@ -78,7 +78,7 @@ public override ArgumentOutOfRangeException ValidateParameter(double parameter, if (throwException) throw new ArgumentOutOfRangeException(nameof(Theta), "The dependency parameter θ (theta) must be less than or equal to " + ThetaMaximum.ToString() + "."); return new ArgumentOutOfRangeException(nameof(Theta), "The dependency parameter θ (theta) must be less than or equal to " + ThetaMaximum.ToString() + "."); } - return null; + return new ArgumentOutOfRangeException(nameof(Theta),"Parameter is valid"); } /// diff --git a/Numerics/Distributions/Bivariate Copulas/Base/BivariateCopula.cs b/Numerics/Distributions/Bivariate Copulas/Base/BivariateCopula.cs index 4015be24..60944869 100644 --- a/Numerics/Distributions/Bivariate Copulas/Base/BivariateCopula.cs +++ b/Numerics/Distributions/Bivariate Copulas/Base/BivariateCopula.cs @@ -108,10 +108,10 @@ public bool ParametersValid } /// - public virtual IUnivariateDistribution MarginalDistributionX { get; set; } + public virtual IUnivariateDistribution MarginalDistributionX { get; set; } = null!; /// - public virtual IUnivariateDistribution MarginalDistributionY { get; set; } + public virtual IUnivariateDistribution MarginalDistributionY { get; set; } = null!; /// public abstract string DisplayName { get; } diff --git a/Numerics/Distributions/Bivariate Copulas/FrankCopula.cs b/Numerics/Distributions/Bivariate Copulas/FrankCopula.cs index 6e0185fa..0b6a06a6 100644 --- a/Numerics/Distributions/Bivariate Copulas/FrankCopula.cs +++ b/Numerics/Distributions/Bivariate Copulas/FrankCopula.cs @@ -121,7 +121,7 @@ public override ArgumentOutOfRangeException ValidateParameter(double parameter, if (throwException) throw new ArgumentOutOfRangeException(nameof(Theta), "The dependency parameter θ (theta) must be less than or equal to " + ThetaMaximum.ToString() + "."); return new ArgumentOutOfRangeException(nameof(Theta), "The dependency parameter θ (theta) must be less than or equal to " + 
ThetaMaximum.ToString() + "."); } - return null; + return new ArgumentOutOfRangeException(nameof(Theta),"Parameter is valid."); } /// diff --git a/Numerics/Distributions/Bivariate Copulas/NormalCopula.cs b/Numerics/Distributions/Bivariate Copulas/NormalCopula.cs index 4eaaf0c0..7215ba75 100644 --- a/Numerics/Distributions/Bivariate Copulas/NormalCopula.cs +++ b/Numerics/Distributions/Bivariate Copulas/NormalCopula.cs @@ -137,7 +137,7 @@ public override ArgumentOutOfRangeException ValidateParameter(double parameter, if (throwException) throw new ArgumentOutOfRangeException(nameof(Theta), "The correlation parameter ρ (rho) must be less than " + ThetaMaximum.ToString() + "."); return new ArgumentOutOfRangeException(nameof(Theta), "The correlation parameter ρ (rho) must be less than " + ThetaMaximum.ToString() + "."); } - return null; + return new ArgumentOutOfRangeException(nameof(Theta),"The parameter is valid."); } /// diff --git a/Numerics/Distributions/Multivariate/BivariateEmpirical.cs b/Numerics/Distributions/Multivariate/BivariateEmpirical.cs index 8b334842..7a6e1d63 100644 --- a/Numerics/Distributions/Multivariate/BivariateEmpirical.cs +++ b/Numerics/Distributions/Multivariate/BivariateEmpirical.cs @@ -60,6 +60,9 @@ public BivariateEmpirical(Transform x1Transform = Transform.None, Transform x2Tr X1Transform = x1Transform; X2Transform = x2Transform; ProbabilityTransform = probabilityTransform; + X1Values = Array.Empty(); + X2Values = Array.Empty(); + ProbabilityValues = new double[0, 0]; } /// @@ -84,26 +87,26 @@ public BivariateEmpirical(IList x1Values, IList x2Values, double // ... // X1n P(n,1) P(n,n) - private Bilinear bilinear = null; + private Bilinear? bilinear = null; private bool _parametersValid = true; /// /// Return the array of X1 values (distribution 1). Points On the cumulative curve are specified /// with increasing value and increasing probability. /// - public double[] X1Values { get; private set; } + public double[] X1Values { get; private set; } = Array.Empty(); /// /// Return the array of X2 values (distribution 2). Points on the cumulative curve are specified /// with increasing value and increasing probability. /// - public double[] X2Values { get; private set; } + public double[] X2Values { get; private set; } = Array.Empty(); /// /// Return the array of probability values. Points on the cumulative curve are specified /// with increasing value and increasing probability. /// - public double[,] ProbabilityValues { get; private set; } + public double[,] ProbabilityValues { get; private set; } = new double[0, 0]; /// /// Determines the interpolation transform for the X1-values. @@ -183,7 +186,7 @@ public void SetParameters(IList x1Values, IList x2Values, double /// Array of X2 values. The X2-values represent the secondary values. There are columns in the table of probability values. /// Array of probability values. Range 0 ≤ p ≤ 1. /// Determines whether to throw an exception or not. - public ArgumentOutOfRangeException ValidateParameters(IList x1Values, IList x2Values, double[,] pValues, bool throwException) + public ArgumentOutOfRangeException? 
ValidateParameters(IList x1Values, IList x2Values, double[,] pValues, bool throwException) { if (x1Values.Count < 2) diff --git a/Numerics/Distributions/Multivariate/MultivariateNormal.cs b/Numerics/Distributions/Multivariate/MultivariateNormal.cs index 841a95d0..dbdcad75 100644 --- a/Numerics/Distributions/Multivariate/MultivariateNormal.cs +++ b/Numerics/Distributions/Multivariate/MultivariateNormal.cs @@ -89,24 +89,24 @@ public MultivariateNormal(double[] mean, double[,] covariance) private bool _parametersValid = true; private int _dimension = 0; - private double[] _mean; - private Matrix _covariance; + private double[] _mean = Array.Empty(); + private Matrix _covariance = Matrix.Identity(0); - private CholeskyDecomposition _cholesky; + private CholeskyDecomposition? _cholesky; private double _lnconstant; - private double[] _variance; - private double[] _standardDeviation; + private double[] _variance = Array.Empty(); + private double[] _standardDeviation = Array.Empty(); // variables required for the multivariate CDF - private Matrix _correlation; - private double[] _correl; + private Matrix _correlation = Matrix.Identity(0); + private double[] _correl = Array.Empty(); private Random _MVNUNI = new MersenneTwister(); private int _maxEvaluations = 100000; private double _absoluteError = 1E-4; private double _relativeError = 1E-4; - private double[] _lower; - private double[] _upper; - private int[] _infin; + private double[] _lower = Array.Empty(); + private double[] _upper = Array.Empty(); + private int[] _infin = Array.Empty(); private bool _correlationMatrixCreated = false; private bool _covSRTed = false; @@ -250,7 +250,9 @@ public double[] StandardDeviation /// /// Determines if the covariance matrix is positive definite. /// - public bool IsPositiveDefinite => _cholesky.IsPositiveDefinite; + // Returns false when the parameters have not been set and no Cholesky decomposition is available. + + public bool IsPositiveDefinite => _cholesky != null && _cholesky.IsPositiveDefinite; /// /// Set the distribution parameters.
@@ -265,6 +267,16 @@ public void SetParameters(double[] mean, double[,] covariance) _dimension = mean.Length; _mean = mean; _covariance = new Matrix(covariance); + + _variance = new double[_dimension]; + _standardDeviation = new double[_dimension]; + for (int i = 0; i < _dimension; i++) + { + // assuming Matrix supports indexer [row,col] + _variance[i] = _covariance[i, i]; + _standardDeviation[i] = Math.Sqrt(_variance[i]); + } + _cholesky = new CholeskyDecomposition(_covariance); double lndet = _cholesky.LogDeterminant(); _lnconstant = -(Math.Log(2d * Math.PI) * _mean.Length + lndet) * 0.5d; @@ -349,7 +361,7 @@ public ArgumentOutOfRangeException ValidateParameters(double[] mean, double[,] c var ex = new ArgumentOutOfRangeException(nameof(Covariance), "Covariance matrix is not positive-definite."); if (throwException) throw ex; else return ex; } - return null; + return null!; } /// @@ -384,6 +396,8 @@ public double Mahalanobis(double[] x) var z = new double[_mean.Length]; for (int i = 0; i < x.Length; i++) z[i] = x[i] - _mean[i]; + if(_cholesky == null) + throw new InvalidOperationException("Parameters not set."); var a = _cholesky.Solve(new Vector(z)); double b = 0d; for (int i = 0; i < z.Length; i++) @@ -475,7 +489,9 @@ public double[] InverseCDF(double[] probabilities) var z = new double[Dimension]; for (int j = 0; j < Dimension; j++) z[j] = Normal.StandardZ(probabilities[j]); - // x = A*z + mu + + if (_cholesky == null) + throw new InvalidOperationException("Parameters not set."); var Az = _cholesky.L * z; for (int j = 0; j < Dimension; j++) sample[j] = Az[j] + _mean[j]; @@ -538,6 +554,8 @@ public static MultivariateNormal Bivariate(double mu1, double mu2, double sigma1 for (int j = 0; j < Dimension; j++) z[j] = Normal.StandardZ(rnd.NextDouble()); // x = A*z + mu + if (_cholesky == null) + throw new InvalidOperationException("Parameters not set."); var Az = _cholesky.L * z; for (int j = 0; j < Dimension; j++) sample[i, j] = Az[j] + _mean[j]; @@ -566,6 +584,8 @@ public static MultivariateNormal Bivariate(double mu1, double mu2, double sigma1 for (int j = 0; j < Dimension; j++) z[j] = Normal.StandardZ(r[i, j]); // x = A*z + mu + if(_cholesky == null) + throw new InvalidOperationException("Parameters not set."); var Az = _cholesky.L * z; for (int j = 0; j < Dimension; j++) sample[i, j] = Az[j] + _mean[j]; @@ -601,6 +621,8 @@ public static MultivariateNormal Bivariate(double mu1, double mu2, double sigma1 } } // x = A*z + mu + if(_cholesky == null) + throw new InvalidOperationException("Parameters not set."); var Az = _cholesky.L * z; for (int j = 0; j < Dimension; j++) sample[i, j] = Az[j] + _mean[j]; diff --git a/Numerics/Distributions/Univariate/Base/UnivariateDistributionBase.cs b/Numerics/Distributions/Univariate/Base/UnivariateDistributionBase.cs index 1a5cc312..08b96017 100644 --- a/Numerics/Distributions/Univariate/Base/UnivariateDistributionBase.cs +++ b/Numerics/Distributions/Univariate/Base/UnivariateDistributionBase.cs @@ -816,7 +816,7 @@ public virtual XElement ToXElement() /// /// The object to compare with the current object. /// True if the specified object is equal to the current object; otherwise, False. - public override bool Equals(object obj) + public override bool Equals(object? 
obj) { if (obj is UnivariateDistributionBase other) { @@ -839,7 +839,7 @@ public override int GetHashCode() if (Type == UnivariateDistributionType.Empirical) { var empirical = this as EmpiricalDistribution; - if (empirical != null) + if (empirical is not null) { foreach (var x in empirical.XValues) { @@ -871,9 +871,9 @@ public override int GetHashCode() /// /// A negative number if this instance's mean is less than the other; zero if equal; a positive number if greater. /// - public int CompareTo(UnivariateDistributionBase other) + public int CompareTo(UnivariateDistributionBase? other) { - if (other == null) + if (other is null) return 1; // non-null instance is considered greater than null return this.Mean.CompareTo(other.Mean); diff --git a/Numerics/Distributions/Univariate/Base/UnivariateDistributionFactory.cs b/Numerics/Distributions/Univariate/Base/UnivariateDistributionFactory.cs index 91a79eab..84570380 100644 --- a/Numerics/Distributions/Univariate/Base/UnivariateDistributionFactory.cs +++ b/Numerics/Distributions/Univariate/Base/UnivariateDistributionFactory.cs @@ -204,7 +204,10 @@ public static UnivariateDistributionBase CreateDistribution(UnivariateDistributi { distribution = new Weibull(); } - + if (distribution is null) + { + throw new ArgumentException("Distribution is not found."); + } return distribution; } @@ -215,13 +218,14 @@ public static UnivariateDistributionBase CreateDistribution(UnivariateDistributi /// /// A univariate distribution. /// - public static UnivariateDistributionBase CreateDistribution(XElement xElement) + public static UnivariateDistributionBase? CreateDistribution(XElement xElement) { UnivariateDistributionType type; - UnivariateDistributionBase dist = null; - if (xElement.Attribute(nameof(UnivariateDistributionBase.Type)) != null) + UnivariateDistributionBase? 
dist = null; + var xAttr = xElement.Attribute(nameof(UnivariateDistributionBase.Type)); + if (xAttr != null) { - Enum.TryParse(xElement.Attribute(nameof(UnivariateDistributionBase.Type)).Value, out type); + Enum.TryParse(xAttr.Value, out type); if (type == UnivariateDistributionType.Mixture) { @@ -249,14 +253,19 @@ public static UnivariateDistributionBase CreateDistribution(XElement xElement) } } + if (dist is null) + { + throw new ArgumentException("Distribution is not found."); + } var names = dist.GetParameterPropertyNames; var parms = dist.GetParameters; var vals = new double[dist.NumberOfParameters]; for (int i = 0; i < dist.NumberOfParameters; i++) { - if (xElement.Attribute(names[i]) != null) + var xAttrParm = xElement.Attribute(names[i]); + if (xAttrParm != null) { - double.TryParse(xElement.Attribute(names[i]).Value, System.Globalization.NumberStyles.Any, System.Globalization.CultureInfo.InvariantCulture, out vals[i]); + double.TryParse(xAttrParm.Value, System.Globalization.NumberStyles.Any, System.Globalization.CultureInfo.InvariantCulture, out vals[i]); } } dist.SetParameters(vals); diff --git a/Numerics/Distributions/Univariate/Bernoulli.cs b/Numerics/Distributions/Univariate/Bernoulli.cs index 68ed1473..72d9268e 100644 --- a/Numerics/Distributions/Univariate/Bernoulli.cs +++ b/Numerics/Distributions/Univariate/Bernoulli.cs @@ -225,7 +225,7 @@ public override ArgumentOutOfRangeException ValidateParameters(IList par throw new ArgumentOutOfRangeException(nameof(Probability), "Probability must be between 0 and 1."); return new ArgumentOutOfRangeException(nameof(Probability), "Probability must be between 0 and 1."); } - return null; + return null!; } /// diff --git a/Numerics/Distributions/Univariate/BetaDistribution.cs b/Numerics/Distributions/Univariate/BetaDistribution.cs index f09d1ba3..6cb56ba6 100644 --- a/Numerics/Distributions/Univariate/BetaDistribution.cs +++ b/Numerics/Distributions/Univariate/BetaDistribution.cs @@ -255,7 +255,7 @@ public override ArgumentOutOfRangeException ValidateParameters(IList par return new ArgumentOutOfRangeException(nameof(Beta), "The shape parameter β (beta) must be positive."); } // - return null; + return null!; } /// diff --git a/Numerics/Distributions/Univariate/Binomial.cs b/Numerics/Distributions/Univariate/Binomial.cs index baa238d7..9ffc70cd 100644 --- a/Numerics/Distributions/Univariate/Binomial.cs +++ b/Numerics/Distributions/Univariate/Binomial.cs @@ -256,7 +256,7 @@ public override ArgumentOutOfRangeException ValidateParameters(IList par throw new ArgumentOutOfRangeException(nameof(ProbabilityOfSuccess), "The number of trials (n) must be positive."); return new ArgumentOutOfRangeException(nameof(ProbabilityOfSuccess), "The number of trials (n) must be positive."); } - return null; + return null!; } /// diff --git a/Numerics/Distributions/Univariate/Cauchy.cs b/Numerics/Distributions/Univariate/Cauchy.cs index 836784b1..639c725a 100644 --- a/Numerics/Distributions/Univariate/Cauchy.cs +++ b/Numerics/Distributions/Univariate/Cauchy.cs @@ -230,7 +230,7 @@ public override ArgumentOutOfRangeException ValidateParameters(IList par throw new ArgumentOutOfRangeException(nameof(Gamma), "The scale parameter γ (gamma) must be positive."); return new ArgumentOutOfRangeException(nameof(Gamma), "The scale parameter γ (gamma) must be positive."); } - return null; + return null!; } /// diff --git a/Numerics/Distributions/Univariate/ChiSquared.cs b/Numerics/Distributions/Univariate/ChiSquared.cs index 6a080b89..1f6cdaa7 100644 --- 
a/Numerics/Distributions/Univariate/ChiSquared.cs +++ b/Numerics/Distributions/Univariate/ChiSquared.cs @@ -242,7 +242,7 @@ public ArgumentOutOfRangeException ValidateParameters(int degreesOfFreedom, bool return new ArgumentOutOfRangeException(nameof(degreesOfFreedom), "The degrees of freedom ν (nu) must greater than or equal to one."); } - return null; + return null!; } /// diff --git a/Numerics/Distributions/Univariate/CompetingRisks.cs b/Numerics/Distributions/Univariate/CompetingRisks.cs index 8a713821..e0ec9b98 100644 --- a/Numerics/Distributions/Univariate/CompetingRisks.cs +++ b/Numerics/Distributions/Univariate/CompetingRisks.cs @@ -79,14 +79,14 @@ public CompetingRisks(IUnivariateDistribution[] distributions) SetParameters(distributions); } - private UnivariateDistributionBase[] _distributions; - private EmpiricalDistribution _empiricalCDF; + private UnivariateDistributionBase[] _distributions = Array.Empty(); + private EmpiricalDistribution _empiricalCDF = null!; private bool _momentsComputed = false; private double u1, u2, u3, u4; private bool _empiricalCDFCreated = false; - private double[,] _correlationMatrix; + private double[,] _correlationMatrix = null!; private bool _mvnCreated = false; - private MultivariateNormal _mvn; + private MultivariateNormal _mvn = null!; /// /// Returns the array of univariate probability distributions. @@ -422,7 +422,7 @@ public override ArgumentOutOfRangeException ValidateParameters(IList par return new ArgumentOutOfRangeException(nameof(Distributions), "One of the distributions have invalid parameters."); } } - return null; + return null!; } /// @@ -620,7 +620,7 @@ public override double InverseCDF(double probability) /// Returns a list of cumulative incidence functions. /// /// Optional. The stratification bins to integrate over. Default is 200 bins. - public List CumulativeIncidenceFunctions(List bins = null) + public List CumulativeIncidenceFunctions(List? bins = null) { // Get stratification bins if (bins == null) @@ -928,7 +928,7 @@ public override UnivariateDistributionBase Clone() ProbabilityTransform = ProbabilityTransform }; if (CorrelationMatrix != null) - cr.CorrelationMatrix = CorrelationMatrix.Clone() as double[,]; + cr.CorrelationMatrix = (double[,]) CorrelationMatrix.Clone(); return cr; } @@ -989,20 +989,22 @@ public override XElement ToXElement() /// /// The XElement to deserialize. /// A new competing risks distribution. - public static CompetingRisks FromXElement(XElement xElement) + public static CompetingRisks? 
FromXElement(XElement xElement) { UnivariateDistributionType type = UnivariateDistributionType.Deterministic; - if (xElement.Attribute(nameof(UnivariateDistributionBase.Type)) != null) + var xElAttr = xElement.Attribute(nameof(UnivariateDistributionBase.Type)); + if (xElAttr != null) { - Enum.TryParse(xElement.Attribute(nameof(UnivariateDistributionBase.Type)).Value, out type); + Enum.TryParse(xElAttr.Value, out type); } if (type == UnivariateDistributionType.CompetingRisks) { var distributions = new List(); - if (xElement.Attribute(nameof(Distributions)) != null) + var xDistAttr = xElement.Attribute(nameof(Distributions)); + if (xDistAttr != null) { - var types = xElement.Attribute(nameof(Distributions)).Value.Split('|'); + var types = xDistAttr.Value.Split('|'); for (int i = 0; i < types.Length; i++) { Enum.TryParse(types[i], out UnivariateDistributionType distType); @@ -1013,29 +1015,46 @@ public static CompetingRisks FromXElement(XElement xElement) if (xElement.Attribute(nameof(XTransform)) != null) { - Enum.TryParse(xElement.Attribute(nameof(XTransform)).Value, out Transform xTransform); - competingRisks.XTransform = xTransform; + var xTransformAttr = xElement.Attribute(nameof(XTransform)); + if (xTransformAttr != null) + { + Enum.TryParse(xTransformAttr.Value, out Transform xTransform); + competingRisks.XTransform = xTransform; + } } if (xElement.Attribute(nameof(ProbabilityTransform)) != null) { - Enum.TryParse(xElement.Attribute(nameof(ProbabilityTransform)).Value, out Transform probabilityTransform); - competingRisks.ProbabilityTransform = probabilityTransform; + var xProbabilityAttr = xElement.Attribute(nameof(ProbabilityTransform)); + if (xProbabilityAttr != null) + { + Enum.TryParse(xProbabilityAttr.Value, out Transform probabilityTransform); + competingRisks.ProbabilityTransform = probabilityTransform; + } } if (xElement.Attribute(nameof(MinimumOfRandomVariables)) != null) { - bool.TryParse(xElement.Attribute(nameof(MinimumOfRandomVariables)).Value, out bool minOfValues); - competingRisks.MinimumOfRandomVariables = minOfValues; + var xMinOfAttr = xElement.Attribute(nameof(MinimumOfRandomVariables)); + if (xMinOfAttr != null) + { + bool.TryParse(xMinOfAttr.Value, out bool minOfValues); + competingRisks.MinimumOfRandomVariables = minOfValues; + } } if (xElement.Attribute(nameof(Dependency)) != null) { - Enum.TryParse(xElement.Attribute(nameof(Dependency)).Value, out Probability.DependencyType dependency); - competingRisks.Dependency = dependency; + var xDependencyAttr = xElement.Attribute(nameof(Dependency)); + if (xDependencyAttr != null) + { + Enum.TryParse(xDependencyAttr.Value, out Probability.DependencyType dependency); + competingRisks.Dependency = dependency; + } } // Parameters - if (xElement.Attribute("Parameters") != null) - { - var vals = xElement.Attribute("Parameters").Value.Split('|'); + var xParametersAttr = xElement.Attribute("Parameters"); + if (xParametersAttr != null) + { + var vals = xParametersAttr.Value.Split('|'); var parameters = new List(); for (int i = 0; i < vals.Length; i++) { diff --git a/Numerics/Distributions/Univariate/Deterministic.cs b/Numerics/Distributions/Univariate/Deterministic.cs index 77bd7da7..99cdad1e 100644 --- a/Numerics/Distributions/Univariate/Deterministic.cs +++ b/Numerics/Distributions/Univariate/Deterministic.cs @@ -238,7 +238,7 @@ public override ArgumentOutOfRangeException ValidateParameters(IList par throw new ArgumentOutOfRangeException(nameof(Probability), "The point value must be a number."); return new 
ArgumentOutOfRangeException(nameof(Probability), "The point value must be a number."); } - return null; + return null!; } /// diff --git a/Numerics/Distributions/Univariate/EmpiricalDistribution.cs b/Numerics/Distributions/Univariate/EmpiricalDistribution.cs index 4627d6dc..af05b7e9 100644 --- a/Numerics/Distributions/Univariate/EmpiricalDistribution.cs +++ b/Numerics/Distributions/Univariate/EmpiricalDistribution.cs @@ -155,14 +155,19 @@ public EmpiricalDistribution(IList sample, PlottingPositions.PlottingPos { _xValues = sample.ToArray(); Array.Sort(_xValues); - _pValues = PlottingPositions.Function(_xValues.Count(), plottingPostionType); + + var pValues = PlottingPositions.Function(_xValues.Count(), plottingPostionType); + + if (pValues is null) { throw new InvalidOperationException("PlottingPositions.Function returned null."); } + _pValues = pValues; + opd = new OrderedPairedData(_xValues, _pValues, true, SortOrder.Ascending, true, SortOrder.Ascending); _momentsComputed = false; } - private double[] _xValues; - private double[] _pValues; - private OrderedPairedData opd; + private double[] _xValues = Array.Empty(); + private double[] _pValues = Array.Empty(); + private OrderedPairedData opd = default!; private bool _momentsComputed = false; private double u1, u2, u3, u4; @@ -407,7 +412,7 @@ public override void SetParameters(IList parameters) /// public override ArgumentOutOfRangeException ValidateParameters(IList parameters, bool throwException) { - return null; + return null!; } /// @@ -699,7 +704,7 @@ public static EmpiricalDistribution Convolve(IList distri throw new ArgumentException("Distribution list cannot be null or empty.", nameof(distributions)); if (distributions.Count == 1) - return distributions[0].Clone() as EmpiricalDistribution; + return (EmpiricalDistribution) distributions[0].Clone(); if (numberOfPoints < 2) throw new ArgumentException("Number of points must be at least 2.", nameof(numberOfPoints)); diff --git a/Numerics/Distributions/Univariate/Exponential.cs b/Numerics/Distributions/Univariate/Exponential.cs index 5dbc109d..7bf019d1 100644 --- a/Numerics/Distributions/Univariate/Exponential.cs +++ b/Numerics/Distributions/Univariate/Exponential.cs @@ -285,7 +285,7 @@ public override ArgumentOutOfRangeException ValidateParameters(IList par throw new ArgumentOutOfRangeException(nameof(Alpha), "The scale parameter α (alpha) must be positive."); return new ArgumentOutOfRangeException(nameof(Alpha), "The scale parameter α (alpha) must be positive."); } - return null; + return null!; } /// diff --git a/Numerics/Distributions/Univariate/GammaDistribution.cs b/Numerics/Distributions/Univariate/GammaDistribution.cs index 6e23df39..5b8419a0 100644 --- a/Numerics/Distributions/Univariate/GammaDistribution.cs +++ b/Numerics/Distributions/Univariate/GammaDistribution.cs @@ -389,7 +389,7 @@ public ArgumentOutOfRangeException ValidateParameters(double scale, double shape throw new ArgumentOutOfRangeException(nameof(Kappa), "The shape parameter κ (kappa) must be positive."); return new ArgumentOutOfRangeException(nameof(Kappa), "The shape parameter κ (kappa) must be positive."); } - return null; + return null!; } /// diff --git a/Numerics/Distributions/Univariate/GeneralizedBeta.cs b/Numerics/Distributions/Univariate/GeneralizedBeta.cs index 2c569aa6..24dd2472 100644 --- a/Numerics/Distributions/Univariate/GeneralizedBeta.cs +++ b/Numerics/Distributions/Univariate/GeneralizedBeta.cs @@ -399,7 +399,7 @@ public
ArgumentOutOfRangeException ValidateParameters(double alpha, double beta, throw new ArgumentOutOfRangeException(nameof(Min), "The min cannot be greater than or equal to the max."); return new ArgumentOutOfRangeException(nameof(Min), "The min cannot be greater than or equal to the max."); } - return null; + return null!; } /// diff --git a/Numerics/Distributions/Univariate/GeneralizedExtremeValue.cs b/Numerics/Distributions/Univariate/GeneralizedExtremeValue.cs index 18dd7c54..3d89127e 100644 --- a/Numerics/Distributions/Univariate/GeneralizedExtremeValue.cs +++ b/Numerics/Distributions/Univariate/GeneralizedExtremeValue.cs @@ -420,7 +420,7 @@ public ArgumentOutOfRangeException ValidateParameters(double location, double sc throw new ArgumentOutOfRangeException(nameof(Kappa), "The shape parameter κ (kappa) must be a number."); return new ArgumentOutOfRangeException(nameof(Kappa), "The shape parameter κ (kappa) must be a number."); } - return null; + return null!; } /// diff --git a/Numerics/Distributions/Univariate/GeneralizedLogistic.cs b/Numerics/Distributions/Univariate/GeneralizedLogistic.cs index 0b4c9b4c..81093b4a 100644 --- a/Numerics/Distributions/Univariate/GeneralizedLogistic.cs +++ b/Numerics/Distributions/Univariate/GeneralizedLogistic.cs @@ -412,7 +412,7 @@ public ArgumentOutOfRangeException ValidateParameters(double location, double sc throw new ArgumentOutOfRangeException(nameof(Kappa), "The shape parameter κ (kappa) must be a number."); return new ArgumentOutOfRangeException(nameof(Kappa), "The shape parameter κ (kappa) must be a number."); } - return null; + return null!; } /// diff --git a/Numerics/Distributions/Univariate/GeneralizedNormal.cs b/Numerics/Distributions/Univariate/GeneralizedNormal.cs index 2ac643d8..b6888930 100644 --- a/Numerics/Distributions/Univariate/GeneralizedNormal.cs +++ b/Numerics/Distributions/Univariate/GeneralizedNormal.cs @@ -368,7 +368,7 @@ public ArgumentOutOfRangeException ValidateParameters(double location, double sc throw new ArgumentOutOfRangeException(nameof(Kappa), "The shape parameter κ (kappa) must be a number."); return new ArgumentOutOfRangeException(nameof(Kappa), "The shape parameter κ (kappa) must be a number."); } - return null; + return null!; } /// diff --git a/Numerics/Distributions/Univariate/GeneralizedPareto.cs b/Numerics/Distributions/Univariate/GeneralizedPareto.cs index 220a3c0a..bcdb1fac 100644 --- a/Numerics/Distributions/Univariate/GeneralizedPareto.cs +++ b/Numerics/Distributions/Univariate/GeneralizedPareto.cs @@ -412,7 +412,7 @@ public ArgumentOutOfRangeException ValidateParameters(double location, double sc throw new ArgumentOutOfRangeException(nameof(Kappa), "The shape parameter κ (kappa) must be a number."); return new ArgumentOutOfRangeException(nameof(Kappa), "The shape parameter κ (kappa) must be a number."); } - return null; + return null!; } /// diff --git a/Numerics/Distributions/Univariate/Geometric.cs b/Numerics/Distributions/Univariate/Geometric.cs index 1ee2b39e..7dbe5b60 100644 --- a/Numerics/Distributions/Univariate/Geometric.cs +++ b/Numerics/Distributions/Univariate/Geometric.cs @@ -219,7 +219,7 @@ public override ArgumentOutOfRangeException ValidateParameters(IList par throw new ArgumentOutOfRangeException(nameof(ProbabilityOfSuccess), "Probability must be between 0 and 1."); return new ArgumentOutOfRangeException(nameof(ProbabilityOfSuccess), "Probability must be between 0 and 1."); } - return null; + return null!; } /// diff --git a/Numerics/Distributions/Univariate/Gumbel.cs 
b/Numerics/Distributions/Univariate/Gumbel.cs index 2c08a36f..97b1b428 100644 --- a/Numerics/Distributions/Univariate/Gumbel.cs +++ b/Numerics/Distributions/Univariate/Gumbel.cs @@ -290,7 +290,7 @@ public ArgumentOutOfRangeException ValidateParameters(double location, double sc throw new ArgumentOutOfRangeException(nameof(Alpha), "The scale parameter α (alpha) must be positive."); return new ArgumentOutOfRangeException(nameof(Alpha), "The scale parameter α (alpha) must be positive."); } - return null; + return null!; } /// diff --git a/Numerics/Distributions/Univariate/InverseChiSquared.cs b/Numerics/Distributions/Univariate/InverseChiSquared.cs index 859afef1..433e2b41 100644 --- a/Numerics/Distributions/Univariate/InverseChiSquared.cs +++ b/Numerics/Distributions/Univariate/InverseChiSquared.cs @@ -269,7 +269,7 @@ public override ArgumentOutOfRangeException ValidateParameters(IList par throw new ArgumentOutOfRangeException(nameof(Sigma), "The scale parameter σ (sigma) must be positive."); return new ArgumentOutOfRangeException(nameof(Sigma), "The scale parameter σ (sigma) must be positive."); } - return null; + return null!; } /// diff --git a/Numerics/Distributions/Univariate/InverseGamma.cs b/Numerics/Distributions/Univariate/InverseGamma.cs index f2de885c..778b5a65 100644 --- a/Numerics/Distributions/Univariate/InverseGamma.cs +++ b/Numerics/Distributions/Univariate/InverseGamma.cs @@ -255,7 +255,7 @@ public override ArgumentOutOfRangeException ValidateParameters(IList par throw new ArgumentOutOfRangeException(nameof(Alpha), "The shape parameter α (alpha) must be positive."); return new ArgumentOutOfRangeException(nameof(Alpha), "The shape parameter α (alpha) must be positive."); } - return null; + return null!; } /// diff --git a/Numerics/Distributions/Univariate/KappaFour.cs b/Numerics/Distributions/Univariate/KappaFour.cs index a49a1350..390b6306 100644 --- a/Numerics/Distributions/Univariate/KappaFour.cs +++ b/Numerics/Distributions/Univariate/KappaFour.cs @@ -405,7 +405,7 @@ public override ArgumentOutOfRangeException ValidateParameters(IList par throw new ArgumentOutOfRangeException(nameof(Hondo), "The shape parameter h (hondo) must be a number."); return new ArgumentOutOfRangeException(nameof(Hondo), "The shape parameter h (hondo) must be a number."); } - return null; + return null!; } /// diff --git a/Numerics/Distributions/Univariate/KernelDensity.cs b/Numerics/Distributions/Univariate/KernelDensity.cs index b837afab..7cc8c808 100644 --- a/Numerics/Distributions/Univariate/KernelDensity.cs +++ b/Numerics/Distributions/Univariate/KernelDensity.cs @@ -152,14 +152,14 @@ public enum KernelType Uniform } - private double[] _sampleData; + private double[] _sampleData = Array.Empty(); private double _bandwidth; private KernelType _kernelDistribution; - private IKernel _kernel; + private IKernel _kernel = null!; private bool _cdfCreated = false; - private OrderedPairedData opd; + private OrderedPairedData opd = null!; private double u1, u2, u3, u4; - private double[] _weights; // one weight per sample (unnormalised) + private double[] _weights = null!; // one weight per sample (unnormalised) private double _sumW = 1.0; // Σ wᵢ (defaults to 1 for un‑weighted case) @@ -549,7 +549,7 @@ public double BandwidthRule(IList sampleData) /// /// Sample of data, no sorting is assumed. /// A list of weights. - public double BandwidthRule(IList sample, IList w = null) + public double BandwidthRule(IList sample, IList w = null!) 
{ w ??= Enumerable.Repeat(1.0, sample.Count).ToArray(); double m = w.Zip(sample, (wi, xi) => wi * xi).Sum() / w.Sum(); @@ -574,7 +574,7 @@ public override void SetParameters(IList parameters) /// public override ArgumentOutOfRangeException ValidateParameters(IList parameters, bool throwException) { - return null; + return null!; } /// @@ -588,7 +588,7 @@ private ArgumentOutOfRangeException ValidateParameters(double value, bool throwE throw new ArgumentOutOfRangeException(nameof(Bandwidth), "The bandwidth must be a positive number!"); return new ArgumentOutOfRangeException(nameof(Bandwidth), "The bandwidth must be a positive number!"); } - return null; + return null!; } /// diff --git a/Numerics/Distributions/Univariate/LnNormal.cs b/Numerics/Distributions/Univariate/LnNormal.cs index 8956f4d9..9bd42ce4 100644 --- a/Numerics/Distributions/Univariate/LnNormal.cs +++ b/Numerics/Distributions/Univariate/LnNormal.cs @@ -311,7 +311,7 @@ public ArgumentOutOfRangeException ValidateParameters(double mean, double standa throw new ArgumentOutOfRangeException(nameof(Sigma), "Sigma must be positive."); return new ArgumentOutOfRangeException(nameof(Sigma), "Sigma must be positive."); } - return null; + return null!; } /// diff --git a/Numerics/Distributions/Univariate/LogNormal.cs b/Numerics/Distributions/Univariate/LogNormal.cs index d341a8a4..f2829d76 100644 --- a/Numerics/Distributions/Univariate/LogNormal.cs +++ b/Numerics/Distributions/Univariate/LogNormal.cs @@ -365,7 +365,7 @@ public ArgumentOutOfRangeException ValidateParameters(double mu, double sigma, b throw new ArgumentOutOfRangeException(nameof(Sigma), "Sigma must be positive."); return new ArgumentOutOfRangeException(nameof(Sigma), "Sigma must be positive."); } - return null; + return null!; } /// diff --git a/Numerics/Distributions/Univariate/LogPearsonTypeIII.cs b/Numerics/Distributions/Univariate/LogPearsonTypeIII.cs index afea3dc3..bd2ffdd1 100644 --- a/Numerics/Distributions/Univariate/LogPearsonTypeIII.cs +++ b/Numerics/Distributions/Univariate/LogPearsonTypeIII.cs @@ -529,7 +529,7 @@ public ArgumentOutOfRangeException ValidateParameters(double mu, double sigma, d throw new ArgumentOutOfRangeException(nameof(Gamma), "Gamma = " + gamma + ". Gamma must be greater than -5."); return new ArgumentOutOfRangeException(nameof(Gamma), "Gamma = " + gamma + ". 
Gamma must be greater than -5."); } - return null; + return null!; } /// diff --git a/Numerics/Distributions/Univariate/Logistic.cs b/Numerics/Distributions/Univariate/Logistic.cs index a06db19d..9b934d01 100644 --- a/Numerics/Distributions/Univariate/Logistic.cs +++ b/Numerics/Distributions/Univariate/Logistic.cs @@ -281,7 +281,7 @@ public override ArgumentOutOfRangeException ValidateParameters(IList par throw new ArgumentOutOfRangeException(nameof(Alpha), "The scale parameter α (alpha) must be positive."); return new ArgumentOutOfRangeException(nameof(Alpha), "The scale parameter α (alpha) must be positive."); } - return null; + return null!; } /// diff --git a/Numerics/Distributions/Univariate/Mixture.cs b/Numerics/Distributions/Univariate/Mixture.cs index 5472f70b..a782d856 100644 --- a/Numerics/Distributions/Univariate/Mixture.cs +++ b/Numerics/Distributions/Univariate/Mixture.cs @@ -79,9 +79,9 @@ public Mixture(double[] weights, IUnivariateDistribution[] distributions) SetParameters(weights, distributions); } - private double[] _weights; - private UnivariateDistributionBase[] _distributions; - private EmpiricalDistribution _empiricalCDF; + private double[] _weights = Array.Empty(); + private UnivariateDistributionBase[] _distributions = null!; + private EmpiricalDistribution _empiricalCDF = null!; private bool _momentsComputed = false; private double u1, u2, u3, u4; private bool _empiricalCDFCreated = false; @@ -618,7 +618,7 @@ public override ArgumentOutOfRangeException ValidateParameters(IList par return new ArgumentOutOfRangeException(nameof(Distributions), "Distribution " + (i + 1).ToString() + " has invalid parameters."); } } - return null; + return null!; } /// @@ -1086,27 +1086,30 @@ public override XElement ToXElement() public static Mixture FromXElement(XElement xElement) { UnivariateDistributionType type = UnivariateDistributionType.Deterministic; - if (xElement.Attribute(nameof(UnivariateDistributionBase.Type)) != null) + var univBaseAttr = xElement.Attribute(nameof(UnivariateDistributionBase.Type)); + if (univBaseAttr != null) { - Enum.TryParse(xElement.Attribute(nameof(UnivariateDistributionBase.Type)).Value, out type); + Enum.TryParse(univBaseAttr.Value, out type); } if (type == UnivariateDistributionType.Mixture) { var weights = new List(); var distributions = new List(); - if (xElement.Attribute(nameof(Weights)) != null) + var weightAttr = xElement.Attribute(nameof(Weights)); + if (weightAttr != null) { - var w = xElement.Attribute(nameof(Weights)).Value.Split('|'); + var w = weightAttr.Value.Split('|'); for (int i = 0; i < w.Length; i++) { double.TryParse(w[i], NumberStyles.Any, CultureInfo.InvariantCulture, out var weight); weights.Add(weight); } } - if (xElement.Attribute(nameof(Distributions)) != null) + var distAttr = xElement.Attribute(nameof(Distributions)); + if (distAttr != null) { - var types = xElement.Attribute(nameof(Distributions)).Value.Split('|'); + var types = distAttr.Value.Split('|'); for (int i = 0; i < types.Length; i++) { Enum.TryParse(types[i], out UnivariateDistributionType distType); @@ -1114,30 +1117,34 @@ public static Mixture FromXElement(XElement xElement) } } var mixture = new Mixture(weights.ToArray(), distributions.ToArray()); - - if (xElement.Attribute(nameof(IsZeroInflated)) != null) + var zeroInflatedAttr = xElement.Attribute(nameof(IsZeroInflated)); + if (zeroInflatedAttr != null) { - bool.TryParse(xElement.Attribute(nameof(IsZeroInflated)).Value, out var isZeroInflated); + bool.TryParse(zeroInflatedAttr.Value, out var 
isZeroInflated); mixture.IsZeroInflated = isZeroInflated; } - if (xElement.Attribute(nameof(ZeroWeight)) != null) + var zeroWeightAttr = xElement.Attribute(nameof(ZeroWeight)); + if (zeroWeightAttr != null) { - double.TryParse(xElement.Attribute(nameof(ZeroWeight)).Value, NumberStyles.Any, CultureInfo.InvariantCulture, out var zeroWeight); + double.TryParse(zeroWeightAttr.Value, NumberStyles.Any, CultureInfo.InvariantCulture, out var zeroWeight); mixture.ZeroWeight = zeroWeight; } - if (xElement.Attribute(nameof(XTransform)) != null) + var xTransformAttr = xElement.Attribute(nameof(XTransform)); + if (xTransformAttr != null) { - Enum.TryParse(xElement.Attribute(nameof(XTransform)).Value, out Transform xTransform); + Enum.TryParse(xTransformAttr.Value, out Transform xTransform); mixture.XTransform = xTransform; } - if (xElement.Attribute(nameof(ProbabilityTransform)) != null) + var xProbabilityTransformAttr = xElement.Attribute(nameof(ProbabilityTransform)); + if (xProbabilityTransformAttr != null) { - Enum.TryParse(xElement.Attribute(nameof(ProbabilityTransform)).Value, out Transform probabilityTransform); + Enum.TryParse(xProbabilityTransformAttr.Value, out Transform probabilityTransform); mixture.ProbabilityTransform = probabilityTransform; } - if (xElement.Attribute("Parameters") != null) + var xParametersAttr = xElement.Attribute("Parameters"); + if (xParametersAttr != null) { - var vals = xElement.Attribute("Parameters").Value.Split('|'); + var vals = xParametersAttr.Value.Split('|'); var parameters = new List(); for (int i = 0; i < vals.Length; i++) { @@ -1151,7 +1158,7 @@ public static Mixture FromXElement(XElement xElement) } else { - return null; + return null!; } } diff --git a/Numerics/Distributions/Univariate/NoncentralT.cs b/Numerics/Distributions/Univariate/NoncentralT.cs index e8b50077..e2b473fa 100644 --- a/Numerics/Distributions/Univariate/NoncentralT.cs +++ b/Numerics/Distributions/Univariate/NoncentralT.cs @@ -289,7 +289,7 @@ public ArgumentOutOfRangeException ValidateParameters(double v, double mu, bool throw new ArgumentOutOfRangeException(nameof(Noncentrality), "The noncentrality parameter μ (mu) must be a number."); return new ArgumentOutOfRangeException(nameof(Noncentrality), "The noncentrality parameter μ (mu) must be a number."); } - return null; + return null!; } /// diff --git a/Numerics/Distributions/Univariate/Normal.cs b/Numerics/Distributions/Univariate/Normal.cs index f62911c2..5a993176 100644 --- a/Numerics/Distributions/Univariate/Normal.cs +++ b/Numerics/Distributions/Univariate/Normal.cs @@ -349,7 +349,7 @@ public ArgumentOutOfRangeException ValidateParameters(double location, double sc throw new ArgumentOutOfRangeException(nameof(Sigma), "Standard deviation must be positive."); return new ArgumentOutOfRangeException(nameof(Sigma), "Standard deviation must be positive."); } - return null; + return null!; } /// diff --git a/Numerics/Distributions/Univariate/Pareto.cs b/Numerics/Distributions/Univariate/Pareto.cs index 5caed988..e1e0a85e 100644 --- a/Numerics/Distributions/Univariate/Pareto.cs +++ b/Numerics/Distributions/Univariate/Pareto.cs @@ -261,7 +261,7 @@ public override ArgumentOutOfRangeException ValidateParameters(IList par throw new ArgumentOutOfRangeException(nameof(Alpha), "The shape parameter α (alpha) must be positive."); return new ArgumentOutOfRangeException(nameof(Alpha), "The shape parameter α (alpha) must be positive."); } - return null; + return null!; } /// diff --git a/Numerics/Distributions/Univariate/PearsonTypeIII.cs 
b/Numerics/Distributions/Univariate/PearsonTypeIII.cs index 598bb216..74a67c67 100644 --- a/Numerics/Distributions/Univariate/PearsonTypeIII.cs +++ b/Numerics/Distributions/Univariate/PearsonTypeIII.cs @@ -406,7 +406,7 @@ public ArgumentOutOfRangeException ValidateParameters(double mu, double sigma, d throw new ArgumentOutOfRangeException(nameof(Gamma), "Gamma = " + gamma + ". Gamma must be greater than -5."); return new ArgumentOutOfRangeException(nameof(Gamma), "Gamma = " + gamma + ". Gamma must be greater than -5."); } - return null; + return null!; } /// @@ -814,7 +814,7 @@ public override double[] ConditionalMoments(double a, double b) private (bool stable, bool divisable) TryPearsonConditionalMoments( double a, double b, out double[] moments, double pMin) { - moments = null; + moments = null!; // ---- small math helpers ------------------------------------------------- static double Phi(double x) => 0.5 * (1.0 + Mathematics.SpecialFunctions.Erf.Function(x / Math.Sqrt(2.0))); diff --git a/Numerics/Distributions/Univariate/Pert.cs b/Numerics/Distributions/Univariate/Pert.cs index e7c0e81c..62235836 100644 --- a/Numerics/Distributions/Univariate/Pert.cs +++ b/Numerics/Distributions/Univariate/Pert.cs @@ -340,7 +340,7 @@ private ArgumentOutOfRangeException ValidateParameters(double min, double mode, if (throwException) throw new ArgumentOutOfRangeException(nameof(MostLikely), "The mode (most likely) must be between the min and max."); return new ArgumentOutOfRangeException(nameof(MostLikely), "The mode (most likely) must be between the min and max."); } - return null; + return null!; } /// diff --git a/Numerics/Distributions/Univariate/PertPercentile.cs b/Numerics/Distributions/Univariate/PertPercentile.cs index 24b4e4f4..08d2c83a 100644 --- a/Numerics/Distributions/Univariate/PertPercentile.cs +++ b/Numerics/Distributions/Univariate/PertPercentile.cs @@ -326,7 +326,7 @@ private ArgumentOutOfRangeException ValidateParameters(double fifth, double fift if (throwException) throw new ArgumentOutOfRangeException(nameof(Percentile50th), "The 50% must be between the 5% and 95%."); return new ArgumentOutOfRangeException(nameof(Percentile50th), "The 50% must be between the 5% and 95%."); } - return null; + return null!; } /// @@ -507,17 +507,19 @@ public override XElement ToXElement() public static PertPercentile FromXElement(XElement xElement) { UnivariateDistributionType type = UnivariateDistributionType.Deterministic; - if (xElement.Attribute(nameof(UnivariateDistributionBase.Type)) != null) + var xUnivAttr = xElement.Attribute(nameof(UnivariateDistributionBase.Type)); + if ( xUnivAttr != null) { - Enum.TryParse(xElement.Attribute(nameof(UnivariateDistributionBase.Type)).Value, out type); + Enum.TryParse(xUnivAttr.Value, out type); } if (type == UnivariateDistributionType.PertPercentile) { bool parametersSolved = false; - if (xElement.Attribute("ParametersSolved") != null) + var xParamSolvedAttr = xElement.Attribute("ParametersSolved"); + if (xParamSolvedAttr != null) { - bool.TryParse(xElement.Attribute("ParametersSolved").Value, out parametersSolved); + bool.TryParse(xParamSolvedAttr.Value, out parametersSolved); } else { @@ -528,9 +530,10 @@ public static PertPercentile FromXElement(XElement xElement) var vals = new double[dist.NumberOfParameters]; for (int i = 0; i < dist.NumberOfParameters; i++) { - if (xElement.Attribute(names[i]) != null) + var xAttr = xElement.Attribute(names[i]); + if (xAttr != null) { - double.TryParse(xElement.Attribute(names[i]).Value, NumberStyles.Any, 
CultureInfo.InvariantCulture, out vals[i]); + double.TryParse(xAttr.Value, NumberStyles.Any, CultureInfo.InvariantCulture, out vals[i]); } } dist.SetParameters(vals); @@ -539,9 +542,10 @@ public static PertPercentile FromXElement(XElement xElement) } var beta = new GeneralizedBeta(); - if (xElement.Attribute("BetaParameters") != null) + var xBetaAttr = xElement.Attribute("BetaParameters"); + if (xBetaAttr != null) { - var vals = xElement.Attribute("BetaParameters").Value.Split('|'); + var vals = xBetaAttr.Value.Split('|'); var parameters = new List(); for (int i = 0; i < vals.Length; i++) { @@ -551,9 +555,10 @@ public static PertPercentile FromXElement(XElement xElement) beta.SetParameters(parameters); } double _5th = 0, _50th = 0, _95th = 0; - if (xElement.Attribute("Parameters") != null) + var xParamAttr = xElement.Attribute("Parameters"); + if (xParamAttr != null) { - var vals = xElement.Attribute("Parameters").Value.Split('|'); + var vals = xParamAttr.Value.Split('|'); double.TryParse(vals[0], NumberStyles.Any, CultureInfo.InvariantCulture, out _5th); double.TryParse(vals[1], NumberStyles.Any, CultureInfo.InvariantCulture, out _50th); double.TryParse(vals[2], NumberStyles.Any, CultureInfo.InvariantCulture, out _95th); @@ -571,7 +576,7 @@ public static PertPercentile FromXElement(XElement xElement) } else { - return null; + return null!; } } diff --git a/Numerics/Distributions/Univariate/PertPercentileZ.cs b/Numerics/Distributions/Univariate/PertPercentileZ.cs index 554c3aae..b32bd60f 100644 --- a/Numerics/Distributions/Univariate/PertPercentileZ.cs +++ b/Numerics/Distributions/Univariate/PertPercentileZ.cs @@ -330,7 +330,7 @@ private ArgumentOutOfRangeException ValidateParameters(double fifth, double fift if (throwException) throw new ArgumentOutOfRangeException(nameof(Percentile95th), "The percentiles must be between 0 and 1."); return new ArgumentOutOfRangeException(nameof(Percentile95th), "The percentiles must be between 0 and 1."); } - return null; + return null!; } /// @@ -504,17 +504,19 @@ public override XElement ToXElement() public static PertPercentileZ FromXElement(XElement xElement) { UnivariateDistributionType type = UnivariateDistributionType.Deterministic; - if (xElement.Attribute(nameof(UnivariateDistributionBase.Type)) != null) + var xUnivAttr = xElement.Attribute(nameof(UnivariateDistributionBase.Type)); + if ( xUnivAttr != null) { - Enum.TryParse(xElement.Attribute(nameof(UnivariateDistributionBase.Type)).Value, out type); + Enum.TryParse(xUnivAttr.Value, out type); } if (type == UnivariateDistributionType.PertPercentileZ) { bool parametersSolved = false; - if (xElement.Attribute("ParametersSolved") != null) + var xParamSolvedAttr = xElement.Attribute("ParametersSolved"); + if (xParamSolvedAttr != null) { - bool.TryParse(xElement.Attribute("ParametersSolved").Value, out parametersSolved); + bool.TryParse(xParamSolvedAttr.Value, out parametersSolved); } else { @@ -525,9 +527,10 @@ public static PertPercentileZ FromXElement(XElement xElement) var vals = new double[dist.NumberOfParameters]; for (int i = 0; i < dist.NumberOfParameters; i++) { - if (xElement.Attribute(names[i]) != null) + var xAttr = xElement.Attribute(names[i]); + if (xAttr != null) { - double.TryParse(xElement.Attribute(names[i]).Value, NumberStyles.Any, CultureInfo.InvariantCulture, out vals[i]); + double.TryParse(xAttr.Value, NumberStyles.Any, CultureInfo.InvariantCulture, out vals[i]); } } dist.SetParameters(vals); @@ -536,9 +539,10 @@ public static PertPercentileZ FromXElement(XElement xElement) } 
var beta = new GeneralizedBeta(); - if (xElement.Attribute("BetaParameters") != null) + var xBetaAttr = xElement.Attribute("BetaParameters"); + if (xBetaAttr != null) { - var vals = xElement.Attribute("BetaParameters").Value.Split('|'); + var vals = xBetaAttr.Value.Split('|'); var parameters = new List(); for (int i = 0; i < vals.Length; i++) { @@ -548,9 +552,10 @@ public static PertPercentileZ FromXElement(XElement xElement) beta.SetParameters(parameters); } double _5th = 0, _50th = 0, _95th = 0; - if (xElement.Attribute("Parameters") != null) + var xParamAttr = xElement.Attribute("Parameters"); + if (xParamAttr != null) { - var vals = xElement.Attribute("Parameters").Value.Split('|'); + var vals = xParamAttr.Value.Split('|'); double.TryParse(vals[0], NumberStyles.Any, CultureInfo.InvariantCulture, out _5th); double.TryParse(vals[1], NumberStyles.Any, CultureInfo.InvariantCulture, out _50th); double.TryParse(vals[2], NumberStyles.Any, CultureInfo.InvariantCulture, out _95th); @@ -569,7 +574,7 @@ public static PertPercentileZ FromXElement(XElement xElement) } else { - return null; + return null!; } } diff --git a/Numerics/Distributions/Univariate/Poisson.cs b/Numerics/Distributions/Univariate/Poisson.cs index 6295ddce..9e8d1821 100644 --- a/Numerics/Distributions/Univariate/Poisson.cs +++ b/Numerics/Distributions/Univariate/Poisson.cs @@ -217,7 +217,7 @@ public override ArgumentOutOfRangeException ValidateParameters(IList par throw new ArgumentOutOfRangeException(nameof(Lambda), "The rate (λ) must be positive."); return new ArgumentOutOfRangeException(nameof(Lambda), "The rate (λ) must be positive."); } - return null; + return null!; } /// diff --git a/Numerics/Distributions/Univariate/Rayleigh.cs b/Numerics/Distributions/Univariate/Rayleigh.cs index c5bca12c..8c74e25e 100644 --- a/Numerics/Distributions/Univariate/Rayleigh.cs +++ b/Numerics/Distributions/Univariate/Rayleigh.cs @@ -267,7 +267,7 @@ public ArgumentOutOfRangeException ValidateParameters(double scale, bool throwEx throw new ArgumentOutOfRangeException(nameof(Sigma), "Standard deviation must be greater than zero."); return new ArgumentOutOfRangeException(nameof(Sigma), "Standard deviation must be greater than zero."); } - return null; + return null!; } /// diff --git a/Numerics/Distributions/Univariate/StudentT.cs b/Numerics/Distributions/Univariate/StudentT.cs index a6f821cc..599babb5 100644 --- a/Numerics/Distributions/Univariate/StudentT.cs +++ b/Numerics/Distributions/Univariate/StudentT.cs @@ -361,7 +361,7 @@ public ArgumentOutOfRangeException ValidateParameters(double location, double sc throw new ArgumentOutOfRangeException(nameof(DegreesOfFreedom), "The degrees of freedom ν (nu) must greater than or equal to one."); return new ArgumentOutOfRangeException(nameof(DegreesOfFreedom), "The degrees of freedom ν (nu) must greater than or equal to one."); } - return null; + return null!; } /// diff --git a/Numerics/Distributions/Univariate/Triangular.cs b/Numerics/Distributions/Univariate/Triangular.cs index 3a65a94f..e16e098b 100644 --- a/Numerics/Distributions/Univariate/Triangular.cs +++ b/Numerics/Distributions/Univariate/Triangular.cs @@ -334,7 +334,7 @@ private ArgumentOutOfRangeException ValidateParameters(double min, double mode, throw new ArgumentOutOfRangeException(nameof(MostLikely), "The mode (most likely) must be between the min and max."); return new ArgumentOutOfRangeException(nameof(MostLikely), "The mode (most likely) must be between the min and max."); } - return null; + return null!; } /// diff --git 
a/Numerics/Distributions/Univariate/TruncatedDistribution.cs b/Numerics/Distributions/Univariate/TruncatedDistribution.cs index 04275f56..6fb26359 100644 --- a/Numerics/Distributions/Univariate/TruncatedDistribution.cs +++ b/Numerics/Distributions/Univariate/TruncatedDistribution.cs @@ -273,7 +273,7 @@ public override void SetParameters(IList parameters) /// public override ArgumentOutOfRangeException ValidateParameters(IList parameters, bool throwException) { - if (_baseDist != null) _baseDist.ValidateParameters(parameters.ToArray().Subset(0, parameters.Count - 2), throwException); + if (_baseDist != null!) _baseDist.ValidateParameters(parameters.ToArray().Subset(0, parameters.Count - 2), throwException); if (double.IsNaN(Min) || double.IsNaN(Max) || double.IsInfinity(Min) || double.IsInfinity(Max) || Min >= Max) { if (throwException) @@ -286,7 +286,7 @@ public override ArgumentOutOfRangeException ValidateParameters(IList par throw new ArgumentOutOfRangeException(nameof(Min), "Truncation interval has zero probability mass."); return new ArgumentOutOfRangeException(nameof(Min), "Truncation interval has zero probability mass."); } - return null; + return null!; } /// diff --git a/Numerics/Distributions/Univariate/TruncatedNormal.cs b/Numerics/Distributions/Univariate/TruncatedNormal.cs index f1850f01..465352bd 100644 --- a/Numerics/Distributions/Univariate/TruncatedNormal.cs +++ b/Numerics/Distributions/Univariate/TruncatedNormal.cs @@ -386,7 +386,7 @@ public ArgumentOutOfRangeException ValidateParameters(double mean, double standa throw new ArgumentOutOfRangeException(nameof(Min), "The min cannot be greater than the max."); return new ArgumentOutOfRangeException(nameof(Min), "The min cannot be greater than the max."); } - return null; + return null!; } /// diff --git a/Numerics/Distributions/Univariate/Uncertainty Analysis/BootstrapAnalysis.cs b/Numerics/Distributions/Univariate/Uncertainty Analysis/BootstrapAnalysis.cs index 93940d00..28c2b6b5 100644 --- a/Numerics/Distributions/Univariate/Uncertainty Analysis/BootstrapAnalysis.cs +++ b/Numerics/Distributions/Univariate/Uncertainty Analysis/BootstrapAnalysis.cs @@ -141,7 +141,7 @@ public IUnivariateDistribution[] Distributions() // MLE and certain L-moments methods can fail to find a solution // On fail, set to null - if (failed == true) bootDistributions[idx] = null; + if (failed == true) bootDistributions[idx] = null!; }); return bootDistributions; @@ -171,7 +171,7 @@ public IUnivariateDistribution[] Distributions(ParameterSet[] parameterSets) }; // On fail, set to null - if (failed == true) bootDistributions[idx] = null; + if (failed == true) bootDistributions[idx] = null!; }); return bootDistributions; @@ -181,7 +181,7 @@ public IUnivariateDistribution[] Distributions(ParameterSet[] parameterSets) /// /// Bootstrap an array of distribution parameters. /// - public double[,] Parameters(IUnivariateDistribution[] distributions = null) + public double[,] Parameters(IUnivariateDistribution[] distributions = null!) { var bootDistributions = distributions != null ? distributions : Distributions(); var bootParameters = new double[bootDistributions.Count(), Distribution.NumberOfParameters]; @@ -206,7 +206,7 @@ public IUnivariateDistribution[] Distributions(ParameterSet[] parameterSets) /// /// Bootstrap an array of distribution parameter sets. /// - public ParameterSet[] ParameterSets(IUnivariateDistribution[] distributions = null) + public ParameterSet[] ParameterSets(IUnivariateDistribution[] distributions = null!) 
{ var bootDistributions = distributions != null ? distributions : Distributions(); var bootParameters = new ParameterSet[bootDistributions.Count()]; @@ -293,7 +293,7 @@ public ParameterSet[] ParameterSets(IUnivariateDistribution[] distributions = nu /// The confidence level; Default = 0.1, which will result in the 90% confidence intervals. /// Optional. Pass in an array of bootstrapped distributions. Default = null. /// Optional. Determines whether to record parameter sets. Default = true. - public UncertaintyAnalysisResults Estimate(IList probabilities, double alpha = 0.1, IUnivariateDistribution[] distributions = null, bool recordParameterSets = true) + public UncertaintyAnalysisResults Estimate(IList probabilities, double alpha = 0.1, IUnivariateDistribution[] distributions = null!, bool recordParameterSets = true) { var results = new UncertaintyAnalysisResults(); results.ParentDistribution = (UnivariateDistributionBase)Distribution; @@ -344,7 +344,7 @@ public UncertaintyAnalysisResults Estimate(IList probabilities, double a /// List quantile values. /// List of non-exceedance probabilities. /// Optional. Pass in an array of bootstrapped distributions. Default = null. - public double[] ExpectedProbabilities(IList quantiles, IList probabilities, IUnivariateDistribution[] distributions = null) + public double[] ExpectedProbabilities(IList quantiles, IList probabilities, IUnivariateDistribution[] distributions = null!) { var quants = quantiles.ToArray(); var probs = probabilities.ToArray(); @@ -396,7 +396,7 @@ public double[] ExpectedProbabilities(IList quantiles, IList pro /// /// List quantile values. /// Optional. Pass in an array of bootstrapped distributions. Default = null. - public double[] ExpectedProbabilities(IList quantiles, IUnivariateDistribution[] distributions = null) + public double[] ExpectedProbabilities(IList quantiles, IUnivariateDistribution[] distributions = null!) { var quants = quantiles.ToArray(); Array.Sort(quants); @@ -447,7 +447,7 @@ public double[] ComputeMinMaxQuantiles(double minProbability, double maxProbabil /// List of non-exceedance probabilities. /// The confidence level; Default = 0.1, which will result in the 90% confidence intervals. /// Optional. Pass in an array of bootstrapped distributions. Default = null. - public double[,] PercentileQuantileCI(IList probabilities, double alpha = 0.1, IUnivariateDistribution[] distributions = null) + public double[,] PercentileQuantileCI(IList probabilities, double alpha = 0.1, IUnivariateDistribution[] distributions = null!) { var CIs = new double[] { alpha / 2d, 1d - alpha / 2d }; var Output = new double[probabilities.Count, 2]; @@ -474,7 +474,7 @@ public double[] ComputeMinMaxQuantiles(double minProbability, double maxProbabil /// List of non-exceedance probabilities. /// The confidence level; Default = 0.1, which will result in the 90% confidence intervals. /// Optional. Pass in an array of bootstrapped distributions. Default = null. - public double[,] BiasCorrectedQuantileCI(IList probabilities, double alpha = 0.1, IUnivariateDistribution[] distributions = null) + public double[,] BiasCorrectedQuantileCI(IList probabilities, double alpha = 0.1, IUnivariateDistribution[] distributions = null!) { // Create list of original X values given probability values var populationXValues = new double[probabilities.Count]; @@ -520,7 +520,7 @@ public double[] ComputeMinMaxQuantiles(double minProbability, double maxProbabil /// List of non-exceedance probabilities. 
/// The confidence level; Default = 0.1, which will result in the 90% confidence intervals. /// Optional. Pass in an array of bootstrapped distributions. Default = null. - public double[,] NormalQuantileCI(IList probabilities, double alpha = 0.1, IUnivariateDistribution[] distributions = null) + public double[,] NormalQuantileCI(IList probabilities, double alpha = 0.1, IUnivariateDistribution[] distributions = null!) { // Create list of original X values given probability values @@ -713,7 +713,7 @@ private double[] AccelerationConstants(IList sampleData, IList p { // MLE and certain L-moments methods can fail to find a solution // On fail, set to null - bootDistributions[i] = null; + bootDistributions[i] = null!; for (int j = 0; j < probabilities.Count; j++) { xValues[i, j] = double.NaN; diff --git a/Numerics/Distributions/Univariate/Uncertainty Analysis/UncertaintyAnalysisResults.cs b/Numerics/Distributions/Univariate/Uncertainty Analysis/UncertaintyAnalysisResults.cs index c463c299..29ff0a46 100644 --- a/Numerics/Distributions/Univariate/Uncertainty Analysis/UncertaintyAnalysisResults.cs +++ b/Numerics/Distributions/Univariate/Uncertainty Analysis/UncertaintyAnalysisResults.cs @@ -81,7 +81,7 @@ public UncertaintyAnalysisResults(UnivariateDistributionBase parentDistribution, double maxProbability = 1 - 1e-9, bool recordParameterSets = false) { - if (parentDistribution == null) + if (parentDistribution == null!) throw new ArgumentNullException(nameof(parentDistribution)); if (sampledDistributions == null || sampledDistributions.Length == 0) throw new ArgumentException("Sampled distributions cannot be null or empty.", nameof(sampledDistributions)); @@ -106,27 +106,27 @@ public UncertaintyAnalysisResults(UnivariateDistributionBase parentDistribution, /// /// The parent probability distribution. /// - public UnivariateDistributionBase ParentDistribution { get; set; } + public UnivariateDistributionBase ParentDistribution { get; set; } = null!; /// /// The array of parameter sets. /// - public ParameterSet[] ParameterSets { get; set; } + public ParameterSet[] ParameterSets { get; set; } = null!; /// /// The confidence intervals. /// - public double[,] ConfidenceIntervals { get; set; } + public double[,] ConfidenceIntervals { get; set; } = null!; /// /// The mode (or computed) curve from the parent distribution. /// - public double[] ModeCurve { get; set; } + public double[] ModeCurve { get; set; } = null!; /// /// The mean (or predictive) curve. /// - public double[] MeanCurve { get; set; } + public double[] MeanCurve { get; set; } = null!; /// /// Gets or sets the Akaike information criteria (AIC) of the fit. @@ -189,7 +189,8 @@ public static UncertaintyAnalysisResults FromByteArray(byte[] bytes) options.Converters.Add(new Double2DArrayConverter()); options.Converters.Add(new String2DArrayConverter()); options.Converters.Add(new UnivariateDistributionConverter()); - return JsonSerializer.Deserialize(bytes, options); + var result = JsonSerializer.Deserialize(bytes, options); + return result ?? FromByteArrayLegacy(bytes); } catch (Exception) { @@ -226,7 +227,7 @@ private static UncertaintyAnalysisResults FromByteArrayLegacy(byte[] bytes) // If there is an error, just catch it and force the user to rerun the // uncertainty analysis. 
} - return null; + return null!; } /// @@ -235,7 +236,7 @@ private static UncertaintyAnalysisResults FromByteArrayLegacy(byte[] bytes) public XElement ToXElement() { var result = new XElement(nameof(UncertaintyAnalysisResults)); - if (ParentDistribution != null) result.Add(ParentDistribution.ToXElement()); + if (ParentDistribution != null!) result.Add(ParentDistribution.ToXElement()); result.SetAttributeValue(nameof(AIC), AIC.ToString("G17", CultureInfo.InvariantCulture)); result.SetAttributeValue(nameof(BIC), BIC.ToString("G17", CultureInfo.InvariantCulture)); result.SetAttributeValue(nameof(DIC), DIC.ToString("G17", CultureInfo.InvariantCulture)); @@ -268,44 +269,62 @@ public static UncertaintyAnalysisResults FromXElement(XElement xElement) { var ua = new UncertaintyAnalysisResults(); // Parent distribution - if (xElement.Element("Distribution") != null) - ua.ParentDistribution = UnivariateDistributionFactory.CreateDistribution(xElement.Element("Distribution")); + var distElement = xElement.Element("Distribution"); + if (distElement != null) + { + var parentDist = UnivariateDistributionFactory.CreateDistribution(distElement); + if (parentDist is not null) + { + ua.ParentDistribution = parentDist; + } + else + { + throw new InvalidDataException("Unable to deserialize parent distribution from XElement."); + } + } + // AIC - if (xElement.Attribute(nameof(AIC)) != null) + var aicElement = xElement.Attribute(nameof(AIC)); + if (aicElement != null) { - double.TryParse(xElement.Attribute(nameof(AIC)).Value, NumberStyles.Any, CultureInfo.InvariantCulture, out var aic); + double.TryParse(aicElement.Value, NumberStyles.Any, CultureInfo.InvariantCulture, out var aic); ua.AIC = aic; } // BIC - if (xElement.Attribute(nameof(BIC)) != null) + var bicElement = xElement.Attribute(nameof(BIC)); + if (bicElement != null) { - double.TryParse(xElement.Attribute(nameof(BIC)).Value, NumberStyles.Any, CultureInfo.InvariantCulture, out var bic); + double.TryParse(bicElement.Value, NumberStyles.Any, CultureInfo.InvariantCulture, out var bic); ua.BIC = bic; } // DIC - if (xElement.Attribute(nameof(DIC)) != null) + var dicElement = xElement.Attribute(nameof(DIC)); + if (dicElement != null) { - double.TryParse(xElement.Attribute(nameof(DIC)).Value, NumberStyles.Any, CultureInfo.InvariantCulture, out var dic); + double.TryParse(dicElement.Value, NumberStyles.Any, CultureInfo.InvariantCulture, out var dic); ua.DIC = dic; } // RMSE - if (xElement.Attribute(nameof(RMSE)) != null) + var rmseElement = xElement.Attribute(nameof(RMSE)); + if (rmseElement != null) { - double.TryParse(xElement.Attribute(nameof(RMSE)).Value, NumberStyles.Any, CultureInfo.InvariantCulture, out var rmse); + double.TryParse(rmseElement.Value, NumberStyles.Any, CultureInfo.InvariantCulture, out var rmse); ua.RMSE = rmse; } // ERL - if (xElement.Attribute(nameof(ERL)) != null) + var erlElement = xElement.Attribute(nameof(ERL)); + if (erlElement != null) { - double.TryParse(xElement.Attribute(nameof(ERL)).Value, NumberStyles.Any, CultureInfo.InvariantCulture, out var erl); + double.TryParse(erlElement.Value, NumberStyles.Any, CultureInfo.InvariantCulture, out var erl); ua.ERL = erl; } // Mode Curve - if (xElement.Attribute(nameof(ua.ModeCurve)) != null) + var modeAttr = xElement.Attribute(nameof(ua.ModeCurve)); + if (modeAttr != null) { - var vals = xElement.Attribute(nameof(ua.ModeCurve)).Value.Split('|'); + var vals = modeAttr.Value.Split('|'); if (vals.Length > 0) { ua.ModeCurve = new double[vals.Length]; @@ -316,9 +335,10 @@ public static 
UncertaintyAnalysisResults FromXElement(XElement xElement) } } // Mean Curve - if (xElement.Attribute(nameof(ua.MeanCurve)) != null) + var meanAttr = xElement.Attribute(nameof(ua.MeanCurve)); + if (meanAttr != null) { - var vals = xElement.Attribute(nameof(ua.MeanCurve)).Value.Split('|'); + var vals = meanAttr.Value.Split('|'); if (vals.Length > 0) { ua.MeanCurve = new double[vals.Length]; @@ -329,9 +349,10 @@ public static UncertaintyAnalysisResults FromXElement(XElement xElement) } } // Confidence Intervals - if (xElement.Attribute(nameof(ua.ConfidenceIntervals)) != null) + var ciAttr = xElement.Attribute(nameof(ua.ConfidenceIntervals)); + if (ciAttr != null) { - var vals = xElement.Attribute(nameof(ua.ConfidenceIntervals)).Value.Split('|'); + var vals = ciAttr.Value.Split('|'); if (vals.Length > 0) { ua.ConfidenceIntervals = new double[vals.Length, vals[0].Split(',').Length]; @@ -355,7 +376,7 @@ public static UncertaintyAnalysisResults FromXElement(XElement xElement) /// Array of non-exceedance probabilities. public void ProcessModeCurve(UnivariateDistributionBase parentDistribution, double[] probabilities) { - if (parentDistribution == null) + if (parentDistribution == null!) throw new ArgumentNullException(nameof(parentDistribution)); if (probabilities == null || probabilities.Length == 0) throw new ArgumentException("Probabilities cannot be null or empty.", nameof(probabilities)); @@ -442,7 +463,7 @@ public void ProcessMeanCurve(UnivariateDistributionBase[] sampledDistributions, Parallel.For(0, B, j => { - if (sampledDistributions[j] != null) + if (sampledDistributions[j] != null!) { var innerMin = sampledDistributions[j].InverseCDF(minProbability); var innerMax = sampledDistributions[j].InverseCDF(maxProbability); @@ -478,7 +499,7 @@ public void ProcessMeanCurve(UnivariateDistributionBase[] sampledDistributions, double total = 0d; Parallel.For(0, B, () => 0d, (j, loop, sum) => { - if (sampledDistributions[j] != null) + if (sampledDistributions[j] != null!) { sum += sampledDistributions[j].CDF(quantiles[i]); } @@ -530,7 +551,7 @@ public void ProcessParameterSets(UnivariateDistributionBase[] sampledDistributio Parallel.For(0, B, idx => { - if (sampledDistributions[idx] != null) + if (sampledDistributions[idx] != null!) 
{ ParameterSets[idx] = new ParameterSet(sampledDistributions[idx].GetParameters, double.NaN); } diff --git a/Numerics/Distributions/Univariate/Uniform.cs b/Numerics/Distributions/Univariate/Uniform.cs index cea983c7..9497e43f 100644 --- a/Numerics/Distributions/Univariate/Uniform.cs +++ b/Numerics/Distributions/Univariate/Uniform.cs @@ -251,7 +251,7 @@ public ArgumentOutOfRangeException ValidateParameters(double min, double max, bo throw new ArgumentOutOfRangeException(nameof(Min), "The min cannot be greater than the max."); return new ArgumentOutOfRangeException(nameof(Min), "The min cannot be greater than the max."); } - return null; + return null!; } /// diff --git a/Numerics/Distributions/Univariate/UniformDiscrete.cs b/Numerics/Distributions/Univariate/UniformDiscrete.cs index f5f6500d..ab603bec 100644 --- a/Numerics/Distributions/Univariate/UniformDiscrete.cs +++ b/Numerics/Distributions/Univariate/UniformDiscrete.cs @@ -259,7 +259,7 @@ public ArgumentOutOfRangeException ValidateParameters(double min, double max, bo throw new ArgumentOutOfRangeException(nameof(Min), "The min cannot be greater than the max."); return new ArgumentOutOfRangeException(nameof(Min), "The min cannot be greater than the max."); } - return null; + return null!; } /// diff --git a/Numerics/Distributions/Univariate/Weibull.cs b/Numerics/Distributions/Univariate/Weibull.cs index f3683016..1f8a5bc4 100644 --- a/Numerics/Distributions/Univariate/Weibull.cs +++ b/Numerics/Distributions/Univariate/Weibull.cs @@ -304,7 +304,7 @@ public ArgumentOutOfRangeException ValidateParameters(double scale, double shape throw new ArgumentOutOfRangeException(nameof(Kappa), "The shape parameter κ (kappa) must be positive."); return new ArgumentOutOfRangeException(nameof(Kappa), "The shape parameter κ (kappa) must be positive."); } - return null; + return null!; } /// diff --git a/Numerics/Functions/LinearFunction.cs b/Numerics/Functions/LinearFunction.cs index 2902b269..9d4dee0f 100644 --- a/Numerics/Functions/LinearFunction.cs +++ b/Numerics/Functions/LinearFunction.cs @@ -173,7 +173,7 @@ public ArgumentOutOfRangeException ValidateParameters(IList parameters, throw new ArgumentOutOfRangeException(nameof(Sigma), "Standard error must be greater than zero."); return new ArgumentOutOfRangeException(nameof(Sigma), "Standard error must be greater than zero."); } - return null; + return null!; } /// diff --git a/Numerics/Functions/PowerFunction.cs b/Numerics/Functions/PowerFunction.cs index 08250343..900ac1e7 100644 --- a/Numerics/Functions/PowerFunction.cs +++ b/Numerics/Functions/PowerFunction.cs @@ -200,7 +200,7 @@ public ArgumentOutOfRangeException ValidateParameters(IList parameters, throw new ArgumentOutOfRangeException(nameof(Sigma), "Standard error must be greater than zero."); return new ArgumentOutOfRangeException(nameof(Sigma), "Standard error must be greater than zero."); } - return null; + return null!; } /// diff --git a/Numerics/Functions/TabularFunction.cs b/Numerics/Functions/TabularFunction.cs index ab741110..8e374875 100644 --- a/Numerics/Functions/TabularFunction.cs +++ b/Numerics/Functions/TabularFunction.cs @@ -159,7 +159,7 @@ public ArgumentOutOfRangeException ValidateParameters(IList parameters, throw new ArgumentOutOfRangeException(nameof(PairedData), "The uncertain ordered paired data has errors."); return new ArgumentOutOfRangeException(nameof(PairedData), "The uncertain ordered paired data has errors."); } - return null; + return null!; } /// diff --git a/Numerics/Machine Learning/Supervised/DecisionTree.cs 
b/Numerics/Machine Learning/Supervised/DecisionTree.cs index e3a9d35f..bab82c7d 100644 --- a/Numerics/Machine Learning/Supervised/DecisionTree.cs +++ b/Numerics/Machine Learning/Supervised/DecisionTree.cs @@ -456,7 +456,7 @@ public double[] Predict(double[,] X) /// The matrix of predictors. public double[] Predict(Matrix X) { - if (!IsTrained || X.NumberOfColumns != Dimensions) return null; + if (!IsTrained || X.NumberOfColumns != Dimensions) return null!; var result = new double[X.NumberOfRows]; for (int i = 0; i < X.NumberOfRows; i++) { diff --git a/Numerics/Machine Learning/Supervised/GeneralizedLinearModel.cs b/Numerics/Machine Learning/Supervised/GeneralizedLinearModel.cs index 19aed4db..e36ff85c 100644 --- a/Numerics/Machine Learning/Supervised/GeneralizedLinearModel.cs +++ b/Numerics/Machine Learning/Supervised/GeneralizedLinearModel.cs @@ -118,7 +118,7 @@ public GeneralizedLinearModel(Matrix x, Vector y, bool hasIntercept = true, Link /// /// The list of estimated parameter values. /// - public double[] Parameters { get; private set; } + public double[] Parameters { get; private set; } = Array.Empty(); /// /// The list of the estimated parameter names. @@ -128,27 +128,27 @@ public GeneralizedLinearModel(Matrix x, Vector y, bool hasIntercept = true, Link /// /// The list of the estimated parameter standard errors. /// - public double[] ParameterStandardErrors { get; private set; } + public double[] ParameterStandardErrors { get; private set; } = Array.Empty(); /// /// The list of the estimated parameter z-scores. /// - public double[] ParameterZScores { get; private set; } + public double[] ParameterZScores { get; private set; } = Array.Empty(); /// /// The list of the estimated parameter p-values. /// - public double[] ParameterPValues { get; private set; } + public double[] ParameterPValues { get; private set; } = Array.Empty(); /// /// The estimate parameter covariance matrix. /// - public Matrix Covariance { get; private set; } + public Matrix Covariance { get; private set; } = new Matrix(1, 1); /// /// The residuals of the fitted linear model. /// - public double[] Residuals { get; private set; } + public double[] Residuals { get; private set; } = Array.Empty(); /// /// The model standard error. @@ -188,7 +188,7 @@ public GeneralizedLinearModel(Matrix x, Vector y, bool hasIntercept = true, Link /// /// Gets the optimizer used to train the model. Default = Nelder-Mead. /// - public Optimizer Optimizer { get; private set; } + public Optimizer Optimizer { get; private set; } = null!; /// /// Gets the link function type. 
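Note on the property initializers above: every non-nullable auto-property gets a definite value at construction, either a cheap empty array, a small placeholder Matrix, or the null! sentinel when no meaningful default exists. A minimal sketch of the pattern with an invented FitResultSketch class:

    using System;

    public sealed class FitResultSketch
    {
        // Allocation-free sentinel: the cached empty array.
        public double[] Residuals { get; private set; } = Array.Empty<double>();

        // No sensible empty value exists, so the property stays at the
        // null-forgiving sentinel until Train() assigns it.
        public string Summary { get; private set; } = null!;

        public bool IsTrained { get; private set; }

        public void Train(double[] data)
        {
            Residuals = (double[])data.Clone();
            Summary = $"n = {data.Length}";
            IsTrained = true;
        }
    }

The IsTrained flag mirrors how consumers are expected to check readiness before touching the deferred members.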
diff --git a/Numerics/Machine Learning/Supervised/KNearestNeighbors.cs b/Numerics/Machine Learning/Supervised/KNearestNeighbors.cs index 24b4e42f..f8c7bdfd 100644 --- a/Numerics/Machine Learning/Supervised/KNearestNeighbors.cs +++ b/Numerics/Machine Learning/Supervised/KNearestNeighbors.cs @@ -271,7 +271,7 @@ public double[] BootstrapPredict(Matrix X, int seed = -1) /// The test matrix of predictors private int[] kNN(Matrix xTrain, Vector yTrain, Matrix xTest) { - if (NumberOfFeatures != xTrain.NumberOfColumns) return null; + if (NumberOfFeatures != xTrain.NumberOfColumns) return null!; int R = xTest.NumberOfRows; var result = new int[K]; for (int i = 0; i < R; i++) @@ -306,7 +306,7 @@ private int[] kNN(Matrix xTrain, Vector yTrain, Matrix xTest) /// The test matrix of predictors private double[] kNNPredict(Matrix xTrain, Vector yTrain, Matrix xTest) { - if (xTest.NumberOfColumns != xTrain.NumberOfColumns) return null; + if (xTest.NumberOfColumns != xTrain.NumberOfColumns) return null!; int R = xTest.NumberOfRows; var result = new double[R]; for (int i = 0; i < R; i++) diff --git a/Numerics/Machine Learning/Supervised/NaiveBayes.cs b/Numerics/Machine Learning/Supervised/NaiveBayes.cs index 3a16ef9d..7a47721b 100644 --- a/Numerics/Machine Learning/Supervised/NaiveBayes.cs +++ b/Numerics/Machine Learning/Supervised/NaiveBayes.cs @@ -143,17 +143,17 @@ public NaiveBayes(Matrix x, Vector y) /// /// The means of each feature given each class. /// - public double[,] Means { get; private set; } + public double[,] Means { get; private set; } = null!; /// /// The standard deviations of each feature given each class. /// - public double[,] StandardDeviations { get; private set; } + public double[,] StandardDeviations { get; private set; } = null!; /// /// The prior probability for each class. /// - public double[] Priors { get; private set; } + public double[] Priors { get; private set; } = null!; /// /// Determines if the classifier has been trained. @@ -245,7 +245,7 @@ public double[] Predict(double[,] X) /// The matrix of predictors. public double[] Predict(Matrix X) { - if (!IsTrained || X.NumberOfColumns != this.X.NumberOfColumns) return null; + if (!IsTrained || X.NumberOfColumns != this.X.NumberOfColumns) return null!; var result = new double[X.NumberOfRows]; for (int i = 0; i < X.NumberOfRows; i++) { diff --git a/Numerics/Machine Learning/Supervised/RandomForest.cs b/Numerics/Machine Learning/Supervised/RandomForest.cs index 5d8e6b13..94da4efe 100644 --- a/Numerics/Machine Learning/Supervised/RandomForest.cs +++ b/Numerics/Machine Learning/Supervised/RandomForest.cs @@ -166,7 +166,7 @@ public RandomForest(Matrix x, Vector y, int seed = -1) /// /// The array of decision trees. /// - public DecisionTree[] DecisionTrees { get; private set; } + public DecisionTree[] DecisionTrees { get; private set; } = null!; /// /// Determines whether this is for regression or classification. Default = regression. @@ -250,7 +250,7 @@ private DecisionTree BootstrapDecisionTree(int seed = -1) /// The confidence level; Default = 0.1, which will result in the 90% confidence intervals. 
public double[,] Predict(Matrix X, double alpha = 0.1) { - if (!IsTrained) return null; + if (!IsTrained) return null!; var percentiles = new double[] { alpha / 2d, 0.5, 1d - alpha / 2d }; var output = new double[X.NumberOfRows, 4]; // lower, median, upper, mean diff --git a/Numerics/Machine Learning/Support/DecisionNode.cs b/Numerics/Machine Learning/Support/DecisionNode.cs index 292e3c43..4a7d0014 100644 --- a/Numerics/Machine Learning/Support/DecisionNode.cs +++ b/Numerics/Machine Learning/Support/DecisionNode.cs @@ -54,12 +54,12 @@ public class DecisionNode /// /// Nodes to the left of the threshold. /// - public DecisionNode Left { get; set; } = null; + public DecisionNode Left { get; set; } = null!; /// /// Nodes to the right of the threshold. /// - public DecisionNode Right { get; set; } = null; + public DecisionNode Right { get; set; } = null!; /// /// The leaf node value. diff --git a/Numerics/Machine Learning/Unsupervised/GaussianMixtureModel.cs b/Numerics/Machine Learning/Unsupervised/GaussianMixtureModel.cs index 3429cd32..25ccdeb1 100644 --- a/Numerics/Machine Learning/Unsupervised/GaussianMixtureModel.cs +++ b/Numerics/Machine Learning/Unsupervised/GaussianMixtureModel.cs @@ -152,12 +152,12 @@ public GaussianMixtureModel(Matrix X, int k) /// /// The mixing weights /// - public double[] Weights { get; private set; } + public double[] Weights { get; private set; } = null!; /// /// The likelihood of each data point (row) and for each cluster (column). /// - public double[,] LikelihoodMatrix { get; private set; } + public double[,] LikelihoodMatrix { get; private set; } = null!; /// /// The total log-likelihood of the fit. diff --git a/Numerics/Machine Learning/Unsupervised/JenksNaturalBreaks.cs b/Numerics/Machine Learning/Unsupervised/JenksNaturalBreaks.cs index 81ab280f..c2df6e79 100644 --- a/Numerics/Machine Learning/Unsupervised/JenksNaturalBreaks.cs +++ b/Numerics/Machine Learning/Unsupervised/JenksNaturalBreaks.cs @@ -128,12 +128,12 @@ public JenksNaturalBreaks(IList data, int numberOfClusters, bool isDataSo /// /// Gets the array of estimated clusters. /// - public JenksCluster[] Clusters { get; private set; } + public JenksCluster[] Clusters { get; private set; } = null!; /// /// The array of break points. /// - public double[] Breaks { get; private set; } + public double[] Breaks { get; private set; } = null!; /// /// The goodness of fit measure. The closer to 1, the better the fit. 
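Note on the DecisionNode change above: Left and Right stay non-nullable but are initialized with null!, so traversal code must still guard at runtime before recursing. A short sketch of what that looks like, using a hypothetical IntNode type rather than the library's node class:

    public sealed class IntNode
    {
        public int Value { get; set; }

        // Leaf nodes leave these at the null! sentinel.
        public IntNode Left { get; set; } = null!;
        public IntNode Right { get; set; } = null!;

        public static int Sum(IntNode node)
        {
            // The null-forgiving initializer only affects compile-time analysis;
            // an explicit runtime check is still required here.
            if (node is null) return 0;
            return node.Value + Sum(node.Left) + Sum(node.Right);
        }
    }

Declaring the links as IntNode? is the stricter alternative; the null! form avoids touching every existing traversal.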
diff --git a/Numerics/Mathematics/Differentiation/NumericalDerivative.cs b/Numerics/Mathematics/Differentiation/NumericalDerivative.cs index 1f2e2264..7cd04bff 100644 --- a/Numerics/Mathematics/Differentiation/NumericalDerivative.cs +++ b/Numerics/Mathematics/Differentiation/NumericalDerivative.cs @@ -427,8 +427,8 @@ public static double SecondDerivativeBackward(Func f, double poi public static double[,] Jacobian( Func g, double[] theta, - double[] lowerBounds = null, - double[] upperBounds = null, + double[] lowerBounds = null!, + double[] upperBounds = null!, double relStep = 1e-5, double absStep = 1e-7, int maxBacktrack = 5) @@ -619,8 +619,8 @@ public static double SecondDerivativeBackward(Func f, double poi public static double[] Gradient( Func f, double[] theta, - double[] lowerBounds = null, - double[] upperBounds = null, + double[] lowerBounds = null!, + double[] upperBounds = null!, double relStep = 1e-5, double absStep = 1e-7, int maxBacktrack = 5) @@ -798,8 +798,8 @@ public static double[] Gradient( public static double[,] Hessian( Func f, double[] theta, - double[] lowerBounds = null, - double[] upperBounds = null, + double[] lowerBounds = null!, + double[] upperBounds = null!, double relStep = 1e-4, double absStep = 1e-6, int maxBacktrack = 6) diff --git a/Numerics/Mathematics/Fourier Methods/Fourier.cs b/Numerics/Mathematics/Fourier Methods/Fourier.cs index 21d50ec3..922a56ce 100644 --- a/Numerics/Mathematics/Fourier Methods/Fourier.cs +++ b/Numerics/Mathematics/Fourier Methods/Fourier.cs @@ -283,7 +283,7 @@ public static double[] Correlation(double[] data1, double[] data2) if (lagMax < 0) lagMax = (int)Math.Floor(Math.Min(10d * Math.Log10(n), n - 1)); if (lagMax < 1 || n < 2) - return null; + return null!; // Pad the length to be the power of 2 to facilitate FFT speed. 
int newLength = Convert.ToInt32(Math.Pow(2d, Math.Ceiling(Math.Log(series.Count, 2d)))); // Normalize the data series diff --git a/Numerics/Mathematics/Integration/AdaptiveGuassKronrod.cs b/Numerics/Mathematics/Integration/AdaptiveGuassKronrod.cs index ec1fb7e5..f942fd0b 100644 --- a/Numerics/Mathematics/Integration/AdaptiveGuassKronrod.cs +++ b/Numerics/Mathematics/Integration/AdaptiveGuassKronrod.cs @@ -186,10 +186,10 @@ public override void Integrate() Status = IntegrationStatus.Success; } } - catch (Exception ex) + catch (Exception) { Status = IntegrationStatus.Failure; - if (ReportFailure) throw ex; + if (ReportFailure) throw; } } @@ -231,10 +231,10 @@ public void Integrate(List bins) Status = IntegrationStatus.Success; } } - catch (Exception ex) + catch (Exception) { Status = IntegrationStatus.Failure; - if (ReportFailure) throw ex; + if (ReportFailure) throw; } } diff --git a/Numerics/Mathematics/Integration/AdaptiveSimpsonsRule2D.cs b/Numerics/Mathematics/Integration/AdaptiveSimpsonsRule2D.cs index 81dbda07..5539218e 100644 --- a/Numerics/Mathematics/Integration/AdaptiveSimpsonsRule2D.cs +++ b/Numerics/Mathematics/Integration/AdaptiveSimpsonsRule2D.cs @@ -170,10 +170,10 @@ public override void Integrate() Status = IntegrationStatus.Success; } } - catch (Exception ex) + catch (Exception) { Status = IntegrationStatus.Failure; - if (ReportFailure) throw ex; + if (ReportFailure) throw; } } diff --git a/Numerics/Mathematics/Integration/Miser.cs b/Numerics/Mathematics/Integration/Miser.cs index 15dde370..215feddf 100644 --- a/Numerics/Mathematics/Integration/Miser.cs +++ b/Numerics/Mathematics/Integration/Miser.cs @@ -340,7 +340,7 @@ private void miser(Func function, double[] regn, int npts, dou private void ranpt(double[] pt, double[] regn) { int j, n = pt.Length; - double[] rnd = null; + double[] rnd = null!; if (UseSobolSequence) rnd = _sobol.NextDouble(); diff --git a/Numerics/Mathematics/Integration/Support/Integrator.cs b/Numerics/Mathematics/Integration/Support/Integrator.cs index a0a9a3a5..e9baeceb 100644 --- a/Numerics/Mathematics/Integration/Support/Integrator.cs +++ b/Numerics/Mathematics/Integration/Support/Integrator.cs @@ -150,7 +150,7 @@ protected virtual void Validate() /// /// Optimization status. /// Inner exception. - protected virtual void UpdateStatus(IntegrationStatus status, Exception exception = null) + protected virtual void UpdateStatus(IntegrationStatus status, Exception exception = null!) 
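Note on the exception handling in the integration hunks above: catch (Exception ex) { ... throw ex; } becomes catch (Exception) { ... throw; }, which records the failure status and then rethrows without resetting the stack trace. A small self-contained sketch of the difference, using an invented RunStep helper:

    using System;

    public static class RethrowSketch
    {
        public static void RunStep(Action step, Action onFailure)
        {
            try
            {
                step();
            }
            catch (Exception)
            {
                onFailure();
                // `throw;` rethrows the same exception object and preserves the
                // original stack trace; `throw ex;` would restart the trace at
                // this frame and hide where the failure actually occurred.
                throw;
            }
        }
    }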
{ Status = status; if (status == IntegrationStatus.Failure) diff --git a/Numerics/Mathematics/Integration/Vegas.cs b/Numerics/Mathematics/Integration/Vegas.cs index fc3a93bb..39205954 100644 --- a/Numerics/Mathematics/Integration/Vegas.cs +++ b/Numerics/Mathematics/Integration/Vegas.cs @@ -112,7 +112,7 @@ public Vegas(Func function, int dimensions, IList function, int dimensions, IList diff --git a/Numerics/Mathematics/Linear Algebra/Support/Matrix.cs b/Numerics/Mathematics/Linear Algebra/Support/Matrix.cs index a2f5c333..3688d63a 100644 --- a/Numerics/Mathematics/Linear Algebra/Support/Matrix.cs +++ b/Numerics/Mathematics/Linear Algebra/Support/Matrix.cs @@ -186,8 +186,10 @@ public Matrix(XElement xElement) int nrow = 0, ncol = 0; - if (xElement.Attribute(nameof(NumberOfRows)) != null) int.TryParse(xElement.Attribute(nameof(NumberOfRows)).Value, NumberStyles.Any, CultureInfo.InvariantCulture, out nrow); - if (xElement.Attribute(nameof(NumberOfColumns)) != null) int.TryParse(xElement.Attribute(nameof(NumberOfColumns)).Value, NumberStyles.Any, CultureInfo.InvariantCulture, out ncol); + var rowsAttr = xElement.Attribute(nameof(NumberOfRows)); + var colsAttr = xElement.Attribute(nameof(NumberOfColumns)); + if (rowsAttr != null) int.TryParse(rowsAttr.Value, NumberStyles.Any, CultureInfo.InvariantCulture, out nrow); + if (colsAttr != null) int.TryParse(colsAttr.Value, NumberStyles.Any, CultureInfo.InvariantCulture, out ncol); _matrix = new double[nrow, ncol]; int rowCount = 0; @@ -219,7 +221,7 @@ public Matrix(XElement xElement) #region Members - private double[,] _matrix; + private double[,] _matrix = null!; /// /// Gets the number of rows. @@ -251,7 +253,7 @@ public int NumberOfColumns /// /// The matrix column header text. /// - public string[] Header { get; set; } + public string[] Header { get; set; } = null!; /// /// Evaluates whether this matrix is symmetric. diff --git a/Numerics/Mathematics/Linear Algebra/Support/Vector.cs b/Numerics/Mathematics/Linear Algebra/Support/Vector.cs index eb687253..b72e1416 100644 --- a/Numerics/Mathematics/Linear Algebra/Support/Vector.cs +++ b/Numerics/Mathematics/Linear Algebra/Support/Vector.cs @@ -108,10 +108,10 @@ public double this[int index] set { _vector[index] = value; } } - /// - /// The vector header text. - /// - public string Header { get; set; } + /// + /// The vector header text. + /// + public string Header { get; set; } = null!; /// diff --git a/Numerics/Mathematics/Optimization/Dynamic/Dijkstra.cs b/Numerics/Mathematics/Optimization/Dynamic/Dijkstra.cs index 1f0bcdf7..05197518 100644 --- a/Numerics/Mathematics/Optimization/Dynamic/Dijkstra.cs +++ b/Numerics/Mathematics/Optimization/Dynamic/Dijkstra.cs @@ -94,7 +94,7 @@ public static bool PathExists(float[,] resultTable, int nodeIndex) /// Optional number of nodes in the network. If not provided it will be calculated internally. /// Optional list of incoming edges from each node in the network. If not provided or mismatched with edges it will be calculated internally. /// Lookup table of shortest paths from any given node. - public static float[,] Solve(IList edges, int[] destinationIndices, int nodeCount = -1, List[] edgesFromNodes = null) + public static float[,] Solve(IList edges, int[] destinationIndices, int nodeCount = -1, List[] edgesFromNodes = null!) { // Set optional parameters if required. int nNodes = (nodeCount == -1) ? 
(edges.Max(o => Math.Max(o.FromIndex,o.ToIndex)) + 1) : nodeCount; @@ -147,7 +147,7 @@ public static bool PathExists(float[,] resultTable, int nodeIndex) /// Optional number of nodes in the network. If not provided it will be calculated internally. /// Optional list of incoming edges from each node in the network. If not provided or mismatched with edges it will be calculated internally. /// Lookup table of shortest paths from any given node. - public static float[,] Solve(IList edges, int destinationIndex, int nodeCount = -1, List[] edgesToNodes = null) + public static float[,] Solve(IList edges, int destinationIndex, int nodeCount = -1, List[] edgesToNodes = null!) { // Set optional parameters if required. int nNodes = (nodeCount == -1) ? (edges.Max(o => Math.Max(o.FromIndex, o.ToIndex)) + 1) : nodeCount; diff --git a/Numerics/Mathematics/Optimization/Dynamic/Network.cs b/Numerics/Mathematics/Optimization/Dynamic/Network.cs index ac5bdb50..6c4968b6 100644 --- a/Numerics/Mathematics/Optimization/Dynamic/Network.cs +++ b/Numerics/Mathematics/Optimization/Dynamic/Network.cs @@ -166,7 +166,7 @@ public List GetPath(int[] edgesToRemove, int startNodeIndex) } } // if n = 0, then no roads to escape to - if (heap.Count == 0) return null; + if (heap.Count == 0) return null!; float tempWeight; int tempIndex; @@ -345,7 +345,7 @@ public List GetPath(int[] edgesToRemove, int startNodeIndex) return UpdatedPath; } - else return null; + else return null!; } public List GetPath(int[] edgesToRemove, int startNodeIndex, float[,] existingResultsTable) @@ -407,7 +407,7 @@ public List GetPath(int[] edgesToRemove, int startNodeIndex, float[,] exist } //if n = 0 then no roads to escape to - if (heap.Count == 0) return null; + if (heap.Count == 0) return null!; float tempWeight; int tempIndex; diff --git a/Numerics/Mathematics/Optimization/Global/MLSL.cs b/Numerics/Mathematics/Optimization/Global/MLSL.cs index 4525f536..01c066bf 100644 --- a/Numerics/Mathematics/Optimization/Global/MLSL.cs +++ b/Numerics/Mathematics/Optimization/Global/MLSL.cs @@ -175,12 +175,12 @@ public MLSL(Func objectiveFunction, int numberOfParameters, IL /// /// The list of all sampled points. /// - public List SampledPoints { get; private set; } + public List SampledPoints { get; private set; } = null!; /// /// The list of all local optimums. /// - public List LocalMinimums { get; private set; } + public List LocalMinimums { get; private set; } = null!; /// /// The minimum number of iterations to carry out with no improvement. Default = 5. @@ -207,7 +207,7 @@ protected override void Optimize() double oldFit = double.MaxValue; int noImprovement = 0; bool cancel = false; - Optimizer solver = null; + Optimizer solver = null!; var prng = new MersenneTwister(PRNGSeed); // Set lower and upper bounds and @@ -378,7 +378,7 @@ protected override void Optimize() private Optimizer GetLocalOptimizer(IList initialValues, double relativeTolerance, double absoluteTolerance, ref bool cancel) { bool localCancel = false; - Optimizer solver = null; + Optimizer solver = null!; // Make sure the parameters are within the bounds. 
for (int i = 0; i < NumberOfParameters; i++) diff --git a/Numerics/Mathematics/Optimization/Global/MultiStart.cs b/Numerics/Mathematics/Optimization/Global/MultiStart.cs index 56b5d71b..05ac9006 100644 --- a/Numerics/Mathematics/Optimization/Global/MultiStart.cs +++ b/Numerics/Mathematics/Optimization/Global/MultiStart.cs @@ -163,7 +163,7 @@ protected override void Optimize() { int i, j, D = NumberOfParameters; bool cancel = false; - Optimizer solver = null; + Optimizer solver = null!; // Set lower and upper bounds and // create uniform distributions for each parameter @@ -219,7 +219,7 @@ protected override void Optimize() private Optimizer GetLocalOptimizer(IList initialValues, double relativeTolerance, double absoluteTolerance, ref bool cancel) { bool localCancel = false; - Optimizer solver = null; + Optimizer solver = null!; // Make sure the parameters are within the bounds. for (int i = 0; i < NumberOfParameters; i++) diff --git a/Numerics/Mathematics/Optimization/Global/ParticleSwarm.cs b/Numerics/Mathematics/Optimization/Global/ParticleSwarm.cs index 24d6c28a..566664f4 100644 --- a/Numerics/Mathematics/Optimization/Global/ParticleSwarm.cs +++ b/Numerics/Mathematics/Optimization/Global/ParticleSwarm.cs @@ -235,7 +235,7 @@ private class Particle /// The velocity determines how the particle moves through the search space. /// It is updated based on the particle's personal best and the global best. /// - public double[] Velocity { get; set; } + public double[] Velocity { get; set; } = null!; } } } diff --git a/Numerics/Mathematics/Optimization/Local/ADAM.cs b/Numerics/Mathematics/Optimization/Local/ADAM.cs index 6ba26947..0d8466ec 100644 --- a/Numerics/Mathematics/Optimization/Local/ADAM.cs +++ b/Numerics/Mathematics/Optimization/Local/ADAM.cs @@ -76,7 +76,7 @@ public class ADAM : Optimizer /// Optional. Function to evaluate the gradient. Default uses finite difference. public ADAM(Func objectiveFunction, int numberOfParameters, IList initialValues, IList lowerBounds, IList upperBounds, double alpha = 0.001, - Func gradient = null) : base(objectiveFunction, numberOfParameters) + Func gradient = null!) : base(objectiveFunction, numberOfParameters) { // Check if the length of the initial, lower and upper bounds equal the number of parameters if (initialValues.Count != numberOfParameters || lowerBounds.Count != numberOfParameters || upperBounds.Count != numberOfParameters) diff --git a/Numerics/Mathematics/Optimization/Local/BFGS.cs b/Numerics/Mathematics/Optimization/Local/BFGS.cs index 8f76256e..90f949a0 100644 --- a/Numerics/Mathematics/Optimization/Local/BFGS.cs +++ b/Numerics/Mathematics/Optimization/Local/BFGS.cs @@ -78,7 +78,7 @@ public class BFGS : Optimizer /// Optional. Function to evaluate the gradient. Default uses finite difference. public BFGS(Func objectiveFunction, int numberOfParameters, IList initialValues, IList lowerBounds, IList upperBounds, - Func gradient = null) : base(objectiveFunction, numberOfParameters) + Func gradient = null!) 
: base(objectiveFunction, numberOfParameters) { // Check if the length of the initial, lower and upper bounds equal the number of parameters if (initialValues.Count != numberOfParameters || lowerBounds.Count != numberOfParameters || upperBounds.Count != numberOfParameters) diff --git a/Numerics/Mathematics/Optimization/Local/GradientDescent.cs b/Numerics/Mathematics/Optimization/Local/GradientDescent.cs index fd4e64d5..d19320af 100644 --- a/Numerics/Mathematics/Optimization/Local/GradientDescent.cs +++ b/Numerics/Mathematics/Optimization/Local/GradientDescent.cs @@ -77,7 +77,7 @@ public class GradientDescent : Optimizer /// Optional. Function to evaluate the gradient. Default uses finite difference. public GradientDescent(Func objectiveFunction, int numberOfParameters, IList initialValues, IList lowerBounds, IList upperBounds, double alpha = 0.001, - Func gradient = null) : base(objectiveFunction, numberOfParameters) + Func gradient = null!) : base(objectiveFunction, numberOfParameters) { // Check if the length of the initial, lower and upper bounds equal the number of parameters if (initialValues.Count != numberOfParameters || lowerBounds.Count != numberOfParameters || upperBounds.Count != numberOfParameters) diff --git a/Numerics/Mathematics/Optimization/Support/Optimizer.cs b/Numerics/Mathematics/Optimization/Support/Optimizer.cs index fc22a0ea..f5605d56 100644 --- a/Numerics/Mathematics/Optimization/Support/Optimizer.cs +++ b/Numerics/Mathematics/Optimization/Support/Optimizer.cs @@ -60,7 +60,7 @@ protected Optimizer(Func objectiveFunction, int numberOfParame #region Inputs - private Func _objectiveFunction; + private Func _objectiveFunction = null!; /// /// The maximum number of optimization iterations allowed. Default = 10,000. @@ -142,7 +142,7 @@ public Func ObjectiveFunction /// /// A trace of the parameter set and fitness evaluated until convergence. /// - public List ParameterSetTrace { get; protected set; } + public List ParameterSetTrace { get; protected set; } = null!; /// /// Determines the optimization method status. @@ -152,7 +152,7 @@ public Func ObjectiveFunction /// /// The numerically differentiated Hessian matrix. This is only computed when the optimization is successful. /// - public Matrix Hessian { get; protected set; } + public Matrix Hessian { get; protected set; } = null!; #endregion @@ -166,7 +166,7 @@ public virtual void ClearResults() BestParameterSet = new ParameterSet(); ParameterSetTrace = new List(); Status = OptimizationStatus.None; - Hessian = null; + Hessian = null!; } /// @@ -287,7 +287,7 @@ protected virtual double RepairParameter(double value, double lowerBound, double /// /// Optimization status. /// Inner exception. - protected virtual void UpdateStatus(OptimizationStatus status, Exception exception = null) + protected virtual void UpdateStatus(OptimizationStatus status, Exception exception = null!) 
{ Status = status; if (status == OptimizationStatus.MaximumIterationsReached) diff --git a/Numerics/Mathematics/Optimization/Support/ParameterSet.cs b/Numerics/Mathematics/Optimization/Support/ParameterSet.cs index e154df41..86a61141 100644 --- a/Numerics/Mathematics/Optimization/Support/ParameterSet.cs +++ b/Numerics/Mathematics/Optimization/Support/ParameterSet.cs @@ -94,14 +94,17 @@ public ParameterSet(XElement xElement) Values[i] = outVal; } } - if (xElement.Attribute(nameof(Fitness)) != null) + + var fitAttr = xElement.Attribute(nameof(Fitness)); + if (fitAttr != null) { - double.TryParse(xElement.Attribute(nameof(Fitness)).Value, NumberStyles.Any, CultureInfo.InvariantCulture, out var fitness); + double.TryParse(fitAttr.Value, NumberStyles.Any, CultureInfo.InvariantCulture, out var fitness); Fitness = fitness; } - if (xElement.Attribute(nameof(Weight)) != null) + var weightAttr = xElement.Attribute(nameof(Weight)); + if (weightAttr != null) { - double.TryParse(xElement.Attribute(nameof(Weight)).Value, NumberStyles.Any, CultureInfo.InvariantCulture, out var weight); + double.TryParse(weightAttr.Value, NumberStyles.Any, CultureInfo.InvariantCulture, out var weight); Weight = weight; } } @@ -109,7 +112,7 @@ public ParameterSet(XElement xElement) /// /// The trial parameter set values. /// - public double[] Values; + public double[] Values = null!; /// /// The objective function result (or fitness) given the trial parameter set. diff --git a/Numerics/Numerics.csproj b/Numerics/Numerics.csproj index 4c2823b6..380eca32 100644 --- a/Numerics/Numerics.csproj +++ b/Numerics/Numerics.csproj @@ -41,7 +41,7 @@ - + all runtime; build; native; contentfiles; analyzers @@ -64,11 +64,11 @@ - - - - - + + + + + diff --git a/Numerics/Sampling/Bootstrap/Bootstrap.cs b/Numerics/Sampling/Bootstrap/Bootstrap.cs index b8d694ca..c64db1c7 100644 --- a/Numerics/Sampling/Bootstrap/Bootstrap.cs +++ b/Numerics/Sampling/Bootstrap/Bootstrap.cs @@ -44,17 +44,17 @@ public class Bootstrap /// /// Delegate function for resampling the original data and model fit. /// - public Func ResampleFunction { get; set; } + public Func ResampleFunction { get; set; } = null!; /// /// Delegate function for fitting a model. /// - public Func FitFunction { get; set; } + public Func FitFunction { get; set; } = null!; /// /// Delegate function for extracting a statistic from the fit result. /// - public Func StatisticFunction { get; set; } + public Func StatisticFunction { get; set; } = null!; /// /// Number of bootstrap replicates. @@ -79,8 +79,8 @@ public Bootstrap(TData originalData, ParameterSet originalParameters) private TData _originalData; private ParameterSet _originalParameters; - private ParameterSet[] _bootstrapParameterSets; - private double[][] _bootstrapStatistics; + private ParameterSet[] _bootstrapParameterSets = null!; + private double[][] _bootstrapStatistics = null!; /// /// Gets the bootstrapped model parameter sets. diff --git a/Numerics/Sampling/MCMC/ARWMH.cs b/Numerics/Sampling/MCMC/ARWMH.cs index 845b4688..59bd92ab 100644 --- a/Numerics/Sampling/MCMC/ARWMH.cs +++ b/Numerics/Sampling/MCMC/ARWMH.cs @@ -76,8 +76,8 @@ public ARWMH(List priorDistributions, LogLikelihood log private Matrix sigmaIdentity; - private RunningCovarianceMatrix[] sigma; - private MultivariateNormal[] mvn; + private RunningCovarianceMatrix[] sigma = null!; + private MultivariateNormal[] mvn = null!; /// /// The scaling parameter used to scale the adaptive covariance matrix. 
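Note on the XElement parsing above (ParameterSet here, and the same shape in the earlier UncertaintyAnalysisResults and Matrix hunks): each attribute is read into a local once, so the later .Value access is provably non-null to the compiler. A hedged sketch of the read pattern with an invented "Score" attribute name:

    using System.Globalization;
    using System.Xml.Linq;

    public static class XmlAttributeSketch
    {
        public static double ReadScore(XElement element)
        {
            // Cache the attribute in a nullable local; inside the condition the
            // compiler knows scoreAttr is non-null, so .Value raises no warning.
            var scoreAttr = element.Attribute("Score");
            if (scoreAttr != null &&
                double.TryParse(scoreAttr.Value, NumberStyles.Any,
                                CultureInfo.InvariantCulture, out var parsed))
            {
                return parsed;
            }
            return double.NaN; // missing or unparseable
        }
    }

This avoids calling element.Attribute(...) twice and removes the possible-null dereference that the original "check then re-read" form produced.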
diff --git a/Numerics/Sampling/MCMC/Base/MCMCSampler.cs b/Numerics/Sampling/MCMC/Base/MCMCSampler.cs index 5135ed16..0c92873a 100644 --- a/Numerics/Sampling/MCMC/Base/MCMCSampler.cs +++ b/Numerics/Sampling/MCMC/Base/MCMCSampler.cs @@ -176,17 +176,17 @@ public int ThinningInterval /// /// The master pseudo random number generator (PRNG). /// - protected Random _masterPRNG; + protected Random _masterPRNG = null!; /// /// The PRNG for each Markov Chain. /// - protected Random[] _chainPRNGs; + protected Random[] _chainPRNGs = null!; /// /// The current states of each chain. /// - protected ParameterSet[] _chainStates; + protected ParameterSet[] _chainStates = null!; /// /// The Log-Likelihood function to evaluate. @@ -251,12 +251,12 @@ public enum InitializationType /// /// The Multivariate Normal proposal distribution set from the MAP estimate. /// - protected MultivariateNormal _MVN; + protected MultivariateNormal _MVN = null!; /// /// Event is raised when the simulation progress changes. /// - public event ProgressChangedEventHandler ProgressChanged; + public event ProgressChangedEventHandler ProgressChanged = null!; /// /// Event is raised when the simulation progress changes. @@ -273,7 +273,7 @@ public enum InitializationType /// /// Cancellation token source. /// - public CancellationTokenSource CancellationTokenSource { get; set; } + public CancellationTokenSource CancellationTokenSource { get; set; } = null!; #endregion @@ -282,22 +282,22 @@ public enum InitializationType /// /// Gets the population matrix used for population-based samplers. /// - public List PopulationMatrix { get; protected set; } + public List PopulationMatrix { get; protected set; } = null!; /// /// Gets the list of sampled Markov Chains. /// - public List[] MarkovChains { get; protected set; } + public List[] MarkovChains { get; protected set; } = null!; /// /// Keeps track of the number of accepted samples per chain. /// - public int[] AcceptCount { get; protected set; } + public int[] AcceptCount { get; protected set; } = null!; /// /// Keeps track of the number of calls to the proposal sampler per chain. /// - public int[] SampleCount { get; protected set; } + public int[] SampleCount { get; protected set; } = null!; /// /// The acceptance rate per chain. @@ -316,7 +316,7 @@ public double[] AcceptanceRates /// /// The average log-likelihood across each chain for each iteration. /// - public List MeanLogLikelihood { get; protected set; } + public List MeanLogLikelihood { get; protected set; } = null!; /// /// Gets and sets the number of posterior parameter sets to output. @@ -326,7 +326,7 @@ public double[] AcceptanceRates /// /// Output posterior parameter sets. These are recorded after the iterations have been completed. /// - public List[] Output { get; protected set; } + public List[] Output { get; protected set; } = null!; /// /// The output parameter set that produced the maximum likelihood. diff --git a/Numerics/Sampling/MCMC/HMC.cs b/Numerics/Sampling/MCMC/HMC.cs index 60ebc801..36ee7181 100644 --- a/Numerics/Sampling/MCMC/HMC.cs +++ b/Numerics/Sampling/MCMC/HMC.cs @@ -80,7 +80,7 @@ public class HMC : MCMCSampler /// Optional. The leapfrog step size. Default = 0.1. /// Optional. The number of leapfrog steps. Default = 50. /// Optional. The function for evaluating the gradient of the log-likelihood. Numerical finite difference will be used by default. 
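Note on the MCMCSampler fields and events above: events and late-bound state are declared with = null! so the class compiles cleanly under nullable analysis even though subscribers may never attach. A minimal sketch, with a hypothetical ProgressChanged event, of declaring such an event and raising it defensively:

    public sealed class ProgressSourceSketch
    {
        public delegate void ProgressHandler(double fraction);

        // The null! initializer satisfies the compiler; at runtime the backing
        // field is genuinely null until a handler subscribes.
        public event ProgressHandler ProgressChanged = null!;

        public void Report(double fraction)
        {
            // Null-conditional invocation guards the no-subscriber case.
            ProgressChanged?.Invoke(fraction);
        }
    }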
- public HMC(List priorDistributions, LogLikelihood logLikelihoodFunction, Vector mass = null, double stepSize = 0.1, int steps = 50, Gradient gradientFunction = null) : base(priorDistributions, logLikelihoodFunction) + public HMC(List priorDistributions, LogLikelihood logLikelihoodFunction, Vector mass = null!, double stepSize = 0.1, int steps = 50, Gradient gradientFunction = null!) : base(priorDistributions, logLikelihoodFunction) { InitialIterations = 100 * NumberOfParameters; diff --git a/Numerics/Sampling/MCMC/RWMH.cs b/Numerics/Sampling/MCMC/RWMH.cs index a516ced8..d3801c8c 100644 --- a/Numerics/Sampling/MCMC/RWMH.cs +++ b/Numerics/Sampling/MCMC/RWMH.cs @@ -69,7 +69,7 @@ public RWMH(List priorDistributions, LogLikelihood logL ProposalSigma = proposalSigma; } - private MultivariateNormal[] mvn; + private MultivariateNormal[] mvn = null!; /// /// The covariance matrix Σ (sigma) for the proposal distribution. diff --git a/Numerics/Sampling/MCMC/SNIS.cs b/Numerics/Sampling/MCMC/SNIS.cs index ddc09371..5a0561a5 100644 --- a/Numerics/Sampling/MCMC/SNIS.cs +++ b/Numerics/Sampling/MCMC/SNIS.cs @@ -59,7 +59,7 @@ public class SNIS : MCMCSampler /// The list of prior distributions for the model parameters. /// The Log-Likelihood function to evaluate. /// Optional. The multivariate Normal distribution is used for importance sampling. If null, naive Monte Carlo is performed. - public SNIS(List priorDistributions, LogLikelihood logLikelihoodFunction, MultivariateNormal multivariateNormal = null) : base(priorDistributions, logLikelihoodFunction) + public SNIS(List priorDistributions, LogLikelihood logLikelihoodFunction, MultivariateNormal multivariateNormal = null!) : base(priorDistributions, logLikelihoodFunction) { mvn = multivariateNormal; useImportanceSampling = multivariateNormal != null ? true : false; @@ -75,7 +75,7 @@ public SNIS(List priorDistributions, LogLikelihood logL } private bool useImportanceSampling = false; - private MultivariateNormal mvn = null; + private MultivariateNormal mvn = null!; /// protected override void InitializeCustomSettings() diff --git a/Numerics/Sampling/MCMC/Support/MCMCResults.cs b/Numerics/Sampling/MCMC/Support/MCMCResults.cs index 9aae090f..7ba6bb2a 100644 --- a/Numerics/Sampling/MCMC/Support/MCMCResults.cs +++ b/Numerics/Sampling/MCMC/Support/MCMCResults.cs @@ -94,31 +94,31 @@ public MCMCResults(ParameterSet map, IList parameterSets, double a /// The list of sampled Markov Chains. /// [JsonInclude] - public List[] MarkovChains { get; private set; } + public List[] MarkovChains { get; private set; } = null!; /// /// Output posterior parameter sets. /// [JsonInclude] - public List Output { get; private set; } + public List Output { get; private set; } = null!; /// /// The average log-likelihood across each chain for each iteration. /// [JsonInclude] - public List MeanLogLikelihood { get; private set; } + public List MeanLogLikelihood { get; private set; } = null!; /// /// The acceptance rate for each chain. /// [JsonInclude] - public double[] AcceptanceRates { get; private set; } + public double[] AcceptanceRates { get; private set; } = null!; /// /// Parameter results using the output posterior parameter sets. /// [JsonInclude] - public ParameterResults[] ParameterResults { get; private set; } + public ParameterResults[] ParameterResults { get; private set; } = null!; /// /// The output parameter set that produced the maximum likelihood. 
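Note on the MCMCResults properties above: [JsonInclude] is paired with private setters so System.Text.Json can populate the values while the public API stays read-only, and each property carries a null! default to satisfy nullable analysis. A hedged sketch with an invented ResultsSketch type:

    using System.Text.Json;
    using System.Text.Json.Serialization;

    public sealed class ResultsSketch
    {
        // JsonInclude lets the serializer write through the private setter.
        [JsonInclude]
        public double[] Samples { get; private set; } = null!;

        [JsonInclude]
        public double AcceptanceRate { get; private set; }

        public static ResultsSketch FromJson(string json)
        {
            // Guard the nullable deserialization result explicitly.
            return JsonSerializer.Deserialize<ResultsSketch>(json)
                   ?? throw new JsonException("Deserialized ResultsSketch was null.");
        }
    }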
@@ -210,7 +210,8 @@ public static MCMCResults FromByteArray(byte[] bytes) }; try { - return JsonSerializer.Deserialize(bytes, options); + return JsonSerializer.Deserialize(bytes, options) + ?? throw new JsonException("Deserialized MCMCResults was null"); } catch { diff --git a/Numerics/Sampling/MCMC/Support/ParameterResults.cs b/Numerics/Sampling/MCMC/Support/ParameterResults.cs index c96c7fa8..7ac720a0 100644 --- a/Numerics/Sampling/MCMC/Support/ParameterResults.cs +++ b/Numerics/Sampling/MCMC/Support/ParameterResults.cs @@ -102,7 +102,7 @@ public ParameterResults(double[] values, double alpha = 0.1, bool sorted = false /// /// The autocorrelation function for each parameter. This is averaged across each chain. /// - public double[,] Autocorrelation { get; set; } + public double[,] Autocorrelation { get; set; } = null!; } } diff --git a/Numerics/Sampling/SobolSequence.cs b/Numerics/Sampling/SobolSequence.cs index c8ea8c39..4f6b753e 100644 --- a/Numerics/Sampling/SobolSequence.cs +++ b/Numerics/Sampling/SobolSequence.cs @@ -136,8 +136,8 @@ private void initialize() reader.ReadLine(); int index = 1; - string line = null; - while ((line = reader.ReadLine()) != null) + string? line; + while ((line = reader.ReadLine()) is not null) { var st = line.Split(' '); diff --git a/Numerics/Sampling/StratificationBin.cs b/Numerics/Sampling/StratificationBin.cs index 0cfc3c64..db403ae7 100644 --- a/Numerics/Sampling/StratificationBin.cs +++ b/Numerics/Sampling/StratificationBin.cs @@ -83,21 +83,24 @@ public StratificationBin(double lowerBound, double upperBound, double weight = - public StratificationBin(XElement element) { // Get required data - if (element.Attribute(nameof(LowerBound)) != null) + var lowerBoundAttr = element.Attribute(nameof(LowerBound)); + if (lowerBoundAttr != null) { - double.TryParse(element.Attribute(nameof(LowerBound)).Value, NumberStyles.Any, CultureInfo.InvariantCulture, out var lower); + double.TryParse(lowerBoundAttr.Value, NumberStyles.Any, CultureInfo.InvariantCulture, out var lower); LowerBound = lower; } - if (element.Attribute(nameof(UpperBound)) != null) + var upperBoundAttr = element.Attribute(nameof(UpperBound)); + if (upperBoundAttr != null) { - double.TryParse(element.Attribute(nameof(UpperBound)).Value, NumberStyles.Any, CultureInfo.InvariantCulture, out var upper); + double.TryParse(upperBoundAttr.Value, NumberStyles.Any, CultureInfo.InvariantCulture, out var upper); UpperBound = upper; } - if (element.Attribute(nameof(Weight)) != null) + var weightAttr = element.Attribute(nameof(Weight)); + if (weightAttr != null) { - double.TryParse(element.Attribute(nameof(Weight)).Value, NumberStyles.Any, CultureInfo.InvariantCulture, out var weight); + double.TryParse(weightAttr.Value, NumberStyles.Any, CultureInfo.InvariantCulture, out var weight); Weight = weight; } } @@ -142,8 +145,11 @@ public bool Contains(double x) /// +1 if this bin is lower than the compared bin. /// -1 otherwise. /// - public int CompareTo(StratificationBin other) + public int CompareTo(StratificationBin? other) { + if (other == null) + throw new ArgumentNullException(nameof(other), "The stratification bin to compare to cannot be null."); + if (UpperBound > other.LowerBound && LowerBound < other.UpperBound) throw new ArgumentException("The bins cannot be overlapping.", nameof(other)); @@ -164,7 +170,7 @@ public object Clone() /// /// Checks whether two stratification bins are equal. /// - public override bool Equals(object obj) + public override bool Equals(object? 
obj) { if (!(obj is StratificationBin)) return false; diff --git a/Numerics/Sampling/StratificationOptions.cs b/Numerics/Sampling/StratificationOptions.cs index 94c1a45b..daf1032d 100644 --- a/Numerics/Sampling/StratificationOptions.cs +++ b/Numerics/Sampling/StratificationOptions.cs @@ -98,27 +98,31 @@ public StratificationOptions(double lowerBound, double upperBound, int numberOfB public StratificationOptions(XElement element) { // Get required data - if (element.Attribute(nameof(LowerBound)) != null) + var lowerBoundAttr = element.Attribute(nameof(LowerBound)); + if (lowerBoundAttr != null) { - double.TryParse(element.Attribute(nameof(LowerBound)).Value, NumberStyles.Any, CultureInfo.InvariantCulture, out var lower); + double.TryParse(lowerBoundAttr.Value, NumberStyles.Any, CultureInfo.InvariantCulture, out var lower); LowerBound = lower; } - if (element.Attribute(nameof(UpperBound)) != null) + var upperBoundAttr = element.Attribute(nameof(UpperBound)); + if (upperBoundAttr != null) { - double.TryParse(element.Attribute(nameof(UpperBound)).Value, NumberStyles.Any, CultureInfo.InvariantCulture, out var upper); + double.TryParse(upperBoundAttr.Value, NumberStyles.Any, CultureInfo.InvariantCulture, out var upper); UpperBound = upper; } - if (element.Attribute(nameof(NumberOfBins)) != null) + var numberOfBinsAttr = element.Attribute(nameof(NumberOfBins)); + if (numberOfBinsAttr != null) { - int.TryParse(element.Attribute(nameof(NumberOfBins)).Value, NumberStyles.Any, CultureInfo.InvariantCulture, out var nBins); + int.TryParse(numberOfBinsAttr.Value, NumberStyles.Any, CultureInfo.InvariantCulture, out var nBins); NumberOfBins = nBins; } - if (element.Attribute(nameof(IsProbability)) != null) + var isProbabilityAttr = element.Attribute(nameof(IsProbability)); + if (isProbabilityAttr != null) { - bool.TryParse(element.Attribute(nameof(IsProbability)).Value, out var isProbability); + bool.TryParse(isProbabilityAttr.Value, out var isProbability); IsProbability = isProbability; } @@ -228,7 +232,7 @@ private void Validate() /// /// Compares objects for equality. /// - public override bool Equals(object obj) + public override bool Equals(object? obj) { if (obj is not StratificationOptions other) return false; diff --git a/Numerics/Sampling/Stratify.cs b/Numerics/Sampling/Stratify.cs index 676b540a..2519d910 100644 --- a/Numerics/Sampling/Stratify.cs +++ b/Numerics/Sampling/Stratify.cs @@ -302,7 +302,7 @@ public static List Probabilities(StratificationOptions option /// The number of dimensions to stratify. /// Seed for random number generator. /// The correlation matrix. If null, independence is assumed. - public static List> MultivariateProbabilities(StratificationOptions options, ImportanceDistribution distributionType = ImportanceDistribution.Uniform, bool isExhaustive = true, int dimension = 1, int seed = -1, double[,] correlation = null) + public static List> MultivariateProbabilities(StratificationOptions options, ImportanceDistribution distributionType = ImportanceDistribution.Uniform, bool isExhaustive = true, int dimension = 1, int seed = -1, double[,] correlation = null!) 
{ // Validate inputs var output = new List>(); diff --git a/Numerics/Utilities/ExtensionMethods.cs b/Numerics/Utilities/ExtensionMethods.cs index c83f06bc..e9f66ad9 100644 --- a/Numerics/Utilities/ExtensionMethods.cs +++ b/Numerics/Utilities/ExtensionMethods.cs @@ -63,7 +63,7 @@ public static T GetAttributeOfType(this Enum enumValue) where T : Attribute var type = enumValue.GetType(); var memInfo = type.GetMember(enumValue.ToString()); var attributes = memInfo[0].GetCustomAttributes(typeof(T), false); - return (attributes.Length > 0) ? (T)attributes[0] : null; + return (attributes.Length > 0) ? (T)attributes[0] : null!; } #endregion diff --git a/Numerics/Utilities/JsonConverters.cs b/Numerics/Utilities/JsonConverters.cs index 79877aaa..cc079278 100644 --- a/Numerics/Utilities/JsonConverters.cs +++ b/Numerics/Utilities/JsonConverters.cs @@ -78,14 +78,14 @@ public class Double2DArrayConverter : JsonConverter public override double[,] Read(ref Utf8JsonReader reader, Type typeToConvert, JsonSerializerOptions options) { if (reader.TokenType == JsonTokenType.Null) - return null; + return null!; if (reader.TokenType != JsonTokenType.StartObject) throw new JsonException("Expected StartObject token"); int rows = 0; int cols = 0; - double[] data = null; + double[]? data = null!; while (reader.Read()) { @@ -94,7 +94,7 @@ public class Double2DArrayConverter : JsonConverter if (reader.TokenType == JsonTokenType.PropertyName) { - string propertyName = reader.GetString(); + string? propertyName = reader.GetString(); reader.Read(); switch (propertyName) @@ -217,14 +217,14 @@ public class String2DArrayConverter : JsonConverter public override string[,] Read(ref Utf8JsonReader reader, Type typeToConvert, JsonSerializerOptions options) { if (reader.TokenType == JsonTokenType.Null) - return null; + return null!; if (reader.TokenType != JsonTokenType.StartObject) throw new JsonException("Expected StartObject token"); int rows = 0; int cols = 0; - string[] data = null; + string[]? data = null!; while (reader.Read()) { @@ -233,7 +233,7 @@ public class String2DArrayConverter : JsonConverter if (reader.TokenType == JsonTokenType.PropertyName) { - string propertyName = reader.GetString(); + string? propertyName = reader.GetString(); reader.Read(); switch (propertyName) @@ -371,13 +371,13 @@ public class UnivariateDistributionConverter : JsonConverter 0) + if (distribution != null! && parameters != null && parameters.Length > 0) { distribution.SetParameters(parameters); } - return distribution; + return distribution!; } catch { // If we can't recreate it, return null - return null; + return null!; } } @@ -443,7 +443,7 @@ public override UnivariateDistributionBase Read(ref Utf8JsonReader reader, Type /// public override void Write(Utf8JsonWriter writer, UnivariateDistributionBase value, JsonSerializerOptions options) { - if (value == null) + if (value == null!) 
{ writer.WriteNullValue(); return; diff --git a/Numerics/Utilities/SafeProgressReporter.cs b/Numerics/Utilities/SafeProgressReporter.cs index 2e1c4973..b2817074 100644 --- a/Numerics/Utilities/SafeProgressReporter.cs +++ b/Numerics/Utilities/SafeProgressReporter.cs @@ -79,12 +79,12 @@ public SafeProgressReporter(string taskName) private double _previousProgress = -0.0000000000001d; private string _previousMessage = ""; private MessageType _previousMessageType = MessageType.Status; - private Process _externalProcess; + private Process _externalProcess = null!; private List _subProgReporterCollection = new List(); private CancellationTokenSource _cancellationTokenSource = new CancellationTokenSource(); protected readonly SendOrPostCallback _invokeProgressHandlers; protected readonly SendOrPostCallback _invokeMessageHandlers; - protected SynchronizationContext _synchronizationContext; + protected SynchronizationContext? _synchronizationContext; /// /// Returns the most recent progress. @@ -104,7 +104,7 @@ public SafeProgressReporter(string taskName) /// /// Returns the task name. /// - public string TaskName { get; private set; } + public string TaskName { get; private set; } = null!; /// /// Returns the most recent message type. @@ -132,7 +132,7 @@ public ReadOnlyCollection ChildReporters /// /// Event is raised when the progress is reported. /// - public event ProgressReportedEventHandler ProgressReported; + public event ProgressReportedEventHandler ProgressReported = null!; /// /// Delegate for handling progress reported events. @@ -145,7 +145,7 @@ public ReadOnlyCollection ChildReporters /// /// Event is raised when a message is reported. /// - public event MessageReportedEventHandler MessageReported; + public event MessageReportedEventHandler MessageReported = null!; /// /// Delegate for handling message reported events. @@ -156,7 +156,7 @@ public ReadOnlyCollection ChildReporters /// /// Event is raised when the task starts. /// - public event TaskStartedEventHandler TaskStarted; + public event TaskStartedEventHandler TaskStarted = null!; /// /// Delegate for handling task started events. @@ -166,7 +166,7 @@ public ReadOnlyCollection ChildReporters /// /// Event is raised when the task ended. /// - public event TaskEndedEventHandler TaskEnded; + public event TaskEndedEventHandler TaskEnded = null!; /// /// Delegate for handling task ended events. @@ -176,7 +176,7 @@ public ReadOnlyCollection ChildReporters /// /// Event is raised when a child reporter is created. /// - public event ChildReporterCreatedEventHandler ChildReporterCreated; + public event ChildReporterCreatedEventHandler ChildReporterCreated = null!; /// /// Delegate for handling child reporter created events. @@ -287,9 +287,9 @@ public void ReportError(string message) /// Invokes the progress handlers. /// /// The object. - private void InvokeProgressHandlers(object state) + private void InvokeProgressHandlers(object? state) { - double prog = ((double[])state)[0]; + double prog = ((double[])state!)[0]; double prevProg = ((double[])state)[1]; if (prevProg < 0d) prevProg = 0d; @@ -301,9 +301,9 @@ private void InvokeProgressHandlers(object state) /// Invokes the message handlers. /// /// The object. - private void InvokeMessageHandlers(object state) + private void InvokeMessageHandlers(object? 
state) { - MessageContentStruct prog = (MessageContentStruct)state; + MessageContentStruct prog = (MessageContentStruct)state!; OnMessageReported(prog); MessageReported?.Invoke(prog); } @@ -353,7 +353,7 @@ public SafeProgressReporter CreateProgressModifier(float fractionOfTotal, string if (string.IsNullOrEmpty(subTaskName)) subTaskName = TaskName; var child = new SafeProgressReporter(subTaskName); - child.SetContext(_synchronizationContext); + child.SetContext(_synchronizationContext!); child._previousProgress = 0d; child.ProgressReported += (reporter, prog, progDelta) => ReportProgress(_previousProgress + progDelta * fractionOfTotal); child.MessageReported += msg => ReportMessage(msg); diff --git a/Numerics/Utilities/Tools.cs b/Numerics/Utilities/Tools.cs index 9479887e..5cad3c99 100644 --- a/Numerics/Utilities/Tools.cs +++ b/Numerics/Utilities/Tools.cs @@ -783,7 +783,7 @@ public static double[] Sequence(double start, double end, double step = 1) /// An array of bytes. public static byte[] Compress(byte[] data) { - if (data is null) return null; + if (data is null) return null!; var output = new MemoryStream(); using (var dstream = new DeflateStream(output, CompressionLevel.Optimal)) { @@ -798,7 +798,7 @@ public static byte[] Compress(byte[] data) /// An array of bytes. public static byte[] Decompress(byte[] data) { - if (data is null) return null; + if (data is null) return null!; var input = new MemoryStream(data); var output = new MemoryStream(); using (var dstream = new DeflateStream(input, CompressionMode.Decompress)) diff --git a/Test_Numerics/Data/Interpolation/Test_Bilinear.cs b/Test_Numerics/Data/Interpolation/Test_Bilinear.cs index 0c710915..540ff239 100644 --- a/Test_Numerics/Data/Interpolation/Test_Bilinear.cs +++ b/Test_Numerics/Data/Interpolation/Test_Bilinear.cs @@ -93,7 +93,7 @@ public void Test_BiLinear() double x1 = 350d; double x2 = 75d; double y = bilinear.Interpolate(x1, x2); - Assert.AreEqual(y, 874.84d, 1E-6); + Assert.AreEqual(874.84d, y, 1E-6); } /// @@ -136,31 +136,31 @@ public void Test_Log() var LogLinLin = new Bilinear(x1Array, x2Array, yArray) { X1Transform = Transform.Logarithmic }; double y1 = LogLinLin.Interpolate(x1, x2); - Assert.AreEqual(y1, 874.909523653025d, 1E-6); + Assert.AreEqual(874.909523653025d, y1, 1E-6); var LinLogLin = new Bilinear(x1Array, x2Array, yArray) { X2Transform = Transform.Logarithmic }; double y2 = LinLogLin.Interpolate(x1, x2); - Assert.AreEqual(y2, 875.919023759159d, 1E-6); + Assert.AreEqual(875.919023759159d, y2, 1E-6); var LinLinLog = new Bilinear(x1Array, x2Array, yArray) { YTransform = Transform.Logarithmic }; double y3 = LinLinLog.Interpolate(x1, x2); - Assert.AreEqual(y3, 874.8164, 1E-4); + Assert.AreEqual(874.8164, y3, 1E-4); var LinLogLog = new Bilinear(x1Array, x2Array, yArray) { X2Transform = Transform.Logarithmic, YTransform = Transform.Logarithmic }; double y4 = LinLogLog.Interpolate(x1, x2); - Assert.AreEqual(y4, 875.896104342695d, 1E-6); + Assert.AreEqual(875.896104342695d, y4, 1E-6); var LogLogLin = new Bilinear(x1Array, x2Array, yArray) { X1Transform = Transform.Logarithmic, X2Transform = Transform.Logarithmic }; double y5 = LogLogLin.Interpolate(x1, x2); - Assert.AreEqual(y5, 875.9855, 1E-4); + Assert.AreEqual(875.9855, y5, 1E-4); var LogLinLog = new Bilinear(x1Array, x2Array, yArray) { X1Transform = Transform.Logarithmic, YTransform = Transform.Logarithmic }; double y6 = LogLinLog.Interpolate(x1, x2); - Assert.AreEqual(y6, 874.886, 1E-4); + Assert.AreEqual(874.886, y6, 1E-4); var LogLogLog = new 
Bilinear(x1Array, x2Array, yArray) { X1Transform = Transform.Logarithmic, X2Transform = Transform.Logarithmic, YTransform = Transform.Logarithmic }; double y7 = LogLogLog.Interpolate(x1, x2); - Assert.AreEqual(y7, 875.962713889793d, 1E-6); + Assert.AreEqual(875.962713889793d, y7, 1E-6); } @@ -204,15 +204,15 @@ public void Test_Z_X() var ZLinLin = new Bilinear(x1Array, x2Array, yArray) { X1Transform = Transform.NormalZ }; double y1 = ZLinLin.Interpolate(x1, x2); - Assert.AreEqual(y1, 890.8358, 1E-4); + Assert.AreEqual(890.8358, y1, 1E-4); var LinZLin = new Bilinear(x1Array, x2Array, yArray) { X2Transform = Transform.NormalZ }; double y2 = LinZLin.Interpolate(x1, x2); - Assert.AreEqual(y2, 890.7267, 1E-4); + Assert.AreEqual(890.7267, y2, 1E-4); var ZZLin = new Bilinear(x1Array, x2Array, yArray) { X1Transform = Transform.NormalZ, X2Transform = Transform.NormalZ }; double y3 = ZZLin.Interpolate(x1, x2); - Assert.AreEqual(y3, 890.6835, 1E-4); + Assert.AreEqual(890.6835, y3, 1E-4); } /// @@ -255,19 +255,19 @@ public void Test_Z_Y() var LinLinZ = new Bilinear(x1Array, x2Array, yArray) { YTransform = Transform.NormalZ }; double y1 = LinLinZ.Interpolate(x1, x2); - Assert.AreEqual(y1, 0.9596228, 1E-6); + Assert.AreEqual(0.9596228, y1, 1E-6); var LinZZ = new Bilinear(x1Array, x2Array, yArray) { X2Transform = Transform.NormalZ, YTransform = Transform.NormalZ }; double y2 = LinZZ.Interpolate(x1, x2); - Assert.AreEqual(y2, 0.95946, 1E-6); + Assert.AreEqual(0.95946, y2, 1E-6); var ZLinZ = new Bilinear(x1Array, x2Array, yArray) { X1Transform = Transform.NormalZ, YTransform = Transform.NormalZ }; double y3 = ZLinZ.Interpolate(x1, x2); - Assert.AreEqual(y3, 0.9595799, 1E-6); + Assert.AreEqual(0.9595799, y3, 1E-6); var ZZZ = new Bilinear(x1Array, x2Array, yArray) { X1Transform = Transform.NormalZ, X2Transform = Transform.NormalZ, YTransform = Transform.NormalZ }; double y4 = ZZZ.Interpolate(x1, x2); - Assert.AreEqual(y4, 0.9594168, 1E-6); + Assert.AreEqual(0.9594168, y4, 1E-6); } /// @@ -311,46 +311,46 @@ public void Test_BilinearEdgeCases() double x1 = 50; double x2 = 28; double y = bilinear.Interpolate(x1, x2); - Assert.AreEqual(y, 850.36, 1E-6); + Assert.AreEqual(850.36, y, 1E-6); // Top Right x1 = 50; x2 = 300; y = bilinear.Interpolate(x1, x2); - Assert.AreEqual(y, 928.87, 1E-6); + Assert.AreEqual(928.87, y, 1E-6); // Bottom Left x1 = 600; x2 = 25; y = bilinear.Interpolate(x1, x2); - Assert.AreEqual(y, 871.84, 1E-6); + Assert.AreEqual(871.84, y, 1E-6); // Bottom Right x1 = 600; x2 = 300; y = bilinear.Interpolate(x1, x2); - Assert.AreEqual(y, 929.68, 1E-6); + Assert.AreEqual(929.68, y, 1E-6); // Ascending - x1 out // Top x1 = 50; x2 = 75; y = bilinear.Interpolate(x1, x2); - Assert.AreEqual(y, 859.405, 1E-6); + Assert.AreEqual(859.405, y, 1E-6); // Bottom x1 = 600; x2 = 225; y = bilinear.Interpolate(x1, x2); - Assert.AreEqual(y, 924.93, 1E-6); + Assert.AreEqual(924.93, y, 1E-6); // Ascending - x2 out // Top x1 = 125; x2 = 25; y = bilinear.Interpolate(x1, x2); - Assert.AreEqual(y, 854.11750, 1E-6); + Assert.AreEqual(854.11750, y, 1E-6); // Bottom x1 = 450; x2 = 300; y = bilinear.Interpolate(x1, x2); - Assert.AreEqual(y, 929.65000, 1E-6); + Assert.AreEqual(929.65000, y, 1E-6); } } diff --git a/Test_Numerics/Data/Interpolation/Test_CubicSpline.cs b/Test_Numerics/Data/Interpolation/Test_CubicSpline.cs index 18f1e3b1..51faa09a 100644 --- a/Test_Numerics/Data/Interpolation/Test_CubicSpline.cs +++ b/Test_Numerics/Data/Interpolation/Test_CubicSpline.cs @@ -66,12 +66,12 @@ public void Test_Sequential() values[i - 
1] = i; var spline = new CubicSpline(values, values); var lo = spline.SequentialSearch(872.5d); - Assert.AreEqual(lo, 871); + Assert.AreEqual(871,lo); Array.Reverse(values); var spline2 = new CubicSpline(values, values, SortOrder.Descending); lo = spline2.SequentialSearch(872.5); - Assert.AreEqual(lo, 127); + Assert.AreEqual(127, lo); } /// @@ -85,12 +85,12 @@ public void Test_Bisection() values[i - 1] = i; var spline = new CubicSpline(values, values); var lo = spline.BisectionSearch(872.5d); - Assert.AreEqual(lo, 871); + Assert.AreEqual(871, lo); Array.Reverse(values); var spline2 = new CubicSpline(values, values, SortOrder.Descending); lo = spline2.BisectionSearch(872.5); - Assert.AreEqual(lo, 127); + Assert.AreEqual(127,lo); } /// @@ -104,12 +104,12 @@ public void Test_Hunt() values[i - 1] = i; var spline = new CubicSpline(values, values); var lo = spline.HuntSearch(872.5d); - Assert.AreEqual(lo, 871); + Assert.AreEqual(871,lo); Array.Reverse(values); var spline2 = new CubicSpline(values, values, SortOrder.Descending); lo = spline2.HuntSearch(872.5); - Assert.AreEqual(lo, 127); + Assert.AreEqual(127, lo); } /// @@ -123,7 +123,7 @@ public void Test_Interpolate() var spline = new CubicSpline(XArray, YArray); double X = 8d; double Y = spline.Interpolate(X); - Assert.AreEqual(Y, 11.4049889205445d, 1E-6); + Assert.AreEqual(11.4049889205445d, Y, 1E-6); } /// diff --git a/Test_Numerics/Data/Interpolation/Test_Linear.cs b/Test_Numerics/Data/Interpolation/Test_Linear.cs index f126a074..646c28c3 100644 --- a/Test_Numerics/Data/Interpolation/Test_Linear.cs +++ b/Test_Numerics/Data/Interpolation/Test_Linear.cs @@ -65,12 +65,12 @@ public void Test_Sequential() values[i - 1] = i; var LI = new Linear(values, values); var lo = LI.SequentialSearch(872.5d); - Assert.AreEqual(lo, 871); + Assert.AreEqual(871,lo); Array.Reverse(values); LI = new Linear(values, values, SortOrder.Descending); lo = LI.SequentialSearch(872.5); - Assert.AreEqual(lo, 127); + Assert.AreEqual(127,lo); } /// @@ -84,12 +84,12 @@ public void Test_Bisection() values[i - 1] = i; var LI = new Linear(values, values); var lo = LI.BisectionSearch(872.5d); - Assert.AreEqual(lo, 871); + Assert.AreEqual(871, lo); Array.Reverse(values); LI = new Linear(values, values, SortOrder.Descending); lo = LI.BisectionSearch(872.5); - Assert.AreEqual(lo, 127); + Assert.AreEqual(127, lo); } /// @@ -103,12 +103,12 @@ public void Test_Hunt() values[i - 1] = i; var LI = new Linear(values, values); var lo = LI.HuntSearch(872.5d); - Assert.AreEqual(lo, 871); + Assert.AreEqual(871, lo); Array.Reverse(values); LI = new Linear(values, values, SortOrder.Descending); lo = LI.HuntSearch(872.5); - Assert.AreEqual(lo, 127); + Assert.AreEqual(127, lo); } /// @@ -122,7 +122,7 @@ public void Test_Lin() var LI = new Linear(XArray, YArray); double X = 75d; double Y = LI.Interpolate(X); - Assert.AreEqual(Y, 150.0d, 1E-6); + Assert.AreEqual(150.0d, Y, 1E-6); } /// @@ -155,15 +155,15 @@ public void Test_Log() var LinLog = new Linear(XArray, YArray) { YTransform = Transform.Logarithmic }; double Y1 = LinLog.Interpolate(X); - Assert.AreEqual(Y1, 141.42135623731d, 1E-6); + Assert.AreEqual(141.42135623731d, Y1, 1E-6); var LogLin = new Linear(XArray, YArray) { XTransform = Transform.Logarithmic }; double Y2 = LogLin.Interpolate(X); - Assert.AreEqual(Y2, 158.496250072116d, 1E-6); + Assert.AreEqual(158.496250072116d, Y2, 1E-6); ; var LogLog = new Linear(XArray, YArray) { XTransform = Transform.Logarithmic, YTransform = Transform.Logarithmic }; double Y3 = LogLog.Interpolate(X); - 
Assert.AreEqual(Y3, 150.0d, 1E-6); + Assert.AreEqual(150.0d, Y3, 1E-6); } /// @@ -214,15 +214,15 @@ public void Test_Z() var LinZ = new Linear(XArray, YArray) { YTransform = Transform.NormalZ }; double Y1 = LinZ.Interpolate(X); - Assert.AreEqual(Y1, 0.358762529d, 1E-6); + Assert.AreEqual(0.358762529d, Y1, 1E-6); var ZLin = new Linear(XArray, YArray) { XTransform = Transform.NormalZ }; double Y2 = ZLin.Interpolate(X); - Assert.AreEqual(Y2, 0.362146174d, 1E-6); + Assert.AreEqual(0.362146174d, Y2, 1E-6); var ZZ = new Linear(XArray, YArray) { XTransform = Transform.NormalZ, YTransform = Transform.NormalZ }; double Y3 = ZZ.Interpolate(X); - Assert.AreEqual(Y3, 0.36093855992815d, 1E-6); + Assert.AreEqual(0.36093855992815d, Y3, 1E-6); } /// @@ -274,7 +274,7 @@ public void Test_RevLin() var LI = new Linear(XArray, YArray, SortOrder.Descending); double X = 75d; double Y = LI.Interpolate(X); - Assert.AreEqual(Y, 150.0d, 1E-6); + Assert.AreEqual(150.0d, Y, 1E-6); } /// @@ -292,15 +292,15 @@ public void Test_Rev_Log() var LinLog = new Linear(XArray, YArray, SortOrder.Descending) { YTransform = Transform.Logarithmic }; double Y1 = LinLog.Interpolate(X); - Assert.AreEqual(Y1, 141.42135623731d, 1E-6); + Assert.AreEqual(141.42135623731d, Y1, 1E-6); var LogLin = new Linear(XArray, YArray, SortOrder.Descending) { XTransform = Transform.Logarithmic }; double Y2 = LogLin.Interpolate(X); - Assert.AreEqual(Y2, 158.496250072116d, 1E-6); + Assert.AreEqual(158.496250072116d, Y2, 1E-6); var LogLog = new Linear(XArray, YArray, SortOrder.Descending) { XTransform = Transform.Logarithmic, YTransform = Transform.Logarithmic }; double Y3 = LogLog.Interpolate(X); - Assert.AreEqual(Y3, 150.0d, 1E-6); + Assert.AreEqual(150.0d, Y3, 1E-6); } /// @@ -318,15 +318,15 @@ public void Test_Rev_Z() var LinZ = new Linear(XArray, YArray, SortOrder.Descending) { YTransform = Transform.NormalZ }; double Y1 = LinZ.Interpolate(X); - Assert.AreEqual(Y1, 0.358762529d, 1E-6); + Assert.AreEqual(0.358762529d, Y1, 1E-6); var ZLin = new Linear(XArray, YArray, SortOrder.Descending) { XTransform = Transform.NormalZ }; double Y2 = ZLin.Interpolate(X); - Assert.AreEqual(Y2, 0.362146174d, 1E-6); + Assert.AreEqual(0.362146174d, Y2, 1E-6); var ZZ = new Linear(XArray, YArray, SortOrder.Descending) { XTransform = Transform.NormalZ, YTransform = Transform.NormalZ }; double Y3 = ZZ.Interpolate(X); - Assert.AreEqual(Y3, 0.36093855992815d, 1E-6); + Assert.AreEqual(0.36093855992815d, Y3, 1E-6); } // ??? 
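Note on the Utilities hunks above (ExtensionMethods, JsonConverters, SafeProgressReporter, Tools): they all resolve nullable-reference warnings the same way, either annotating a member as nullable (T?) or initializing/returning it with the null-forgiving operator (null!). A minimal sketch of the two idioms follows; the type and member names are illustrative only, except where they mirror the fields shown in the hunks above.

using System.Threading;

public sealed class ReporterSketch
{
    // Non-nullable member assigned in the constructor; "= null!" tells the compiler
    // it will not be null at use sites, without changing runtime behavior.
    public string TaskName { get; private set; } = null!;

    // A member that may legitimately be unset is annotated as nullable instead,
    // and callers handle (or null-forgive) it explicitly.
    public SynchronizationContext? Context { get; set; }

    public ReporterSketch(string taskName) => TaskName = taskName;
}

The split between the two idioms in this patch is roughly: late-bound members and events get "= null!", while state that is genuinely optional gets the "?" annotation.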
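The test-project hunks (Test_Bilinear above through the distribution tests that follow) apply two mechanical conventions: Assert.AreEqual now passes the expected value first and the computed value second, and boolean comparisons are replaced with the specialized asserts already used in this patch (Assert.IsTrue/IsFalse, Assert.HasCount, Assert.IsLessThan, and friends, which require a recent MSTest.TestFramework release). A small self-contained sketch with hypothetical test names:

using System.Collections.Generic;
using Microsoft.VisualStudio.TestTools.UnitTesting;

[TestClass]
public class AssertConventionSketch
{
    [TestMethod]
    public void ExpectedFirstAndSpecializedAsserts()
    {
        var values = new List<double> { 1.0, 2.0, 3.0 };
        double actual = 2.0 * 75.0;
        double relativeError = 0.004;

        // Expected value first, actual second, then tolerance, so failure messages
        // ("Expected:<150> Actual:<...>") read correctly.
        Assert.AreEqual(150.0, actual, 1E-6);

        // Instead of Assert.AreEqual(values.Count, 3) or Assert.AreEqual(x < 0.01, true):
        Assert.HasCount(3, values);
        Assert.IsLessThan(0.01, relativeError);
        Assert.IsTrue(values.Contains(2.0));
    }
}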
diff --git a/Test_Numerics/Data/Interpolation/Test_Polynomial.cs b/Test_Numerics/Data/Interpolation/Test_Polynomial.cs index f9c291fc..be9e710e 100644 --- a/Test_Numerics/Data/Interpolation/Test_Polynomial.cs +++ b/Test_Numerics/Data/Interpolation/Test_Polynomial.cs @@ -66,12 +66,12 @@ public void Test_Sequential() values[i - 1] = i; var poly = new Polynomial(3, values, values); var lo = poly.SequentialSearch(872.5d); - Assert.AreEqual(lo, 871); + Assert.AreEqual(871, lo ); Array.Reverse(values); var poly2 = new Polynomial(3, values, values, SortOrder.Descending); lo = poly2.SequentialSearch(872.5); - Assert.AreEqual(lo, 127); + Assert.AreEqual(127,lo); } /// @@ -85,12 +85,12 @@ public void Test_Bisection() values[i - 1] = i; var poly = new Polynomial(3, values, values); var lo = poly.BisectionSearch(872.5d); - Assert.AreEqual(lo, 871); + Assert.AreEqual(871,lo); Array.Reverse(values); var poly2 = new Polynomial(3, values, values, SortOrder.Descending); lo = poly2.BisectionSearch(872.5); - Assert.AreEqual(lo, 127); + Assert.AreEqual(127,lo); } /// @@ -104,12 +104,12 @@ public void Test_Hunt() values[i - 1] = i; var poly = new Polynomial(3, values, values); var lo = poly.HuntSearch(872.5d); - Assert.AreEqual(lo, 871); + Assert.AreEqual(871,lo); Array.Reverse(values); var poly2 = new Polynomial(3, values, values, SortOrder.Descending); lo = poly2.HuntSearch(872.5); - Assert.AreEqual(lo, 127); + Assert.AreEqual(127,lo); } /// @@ -123,7 +123,7 @@ public void Test_Interpolate_Order3() var poly = new Polynomial(3, XArray, YArray); double X = 8d; double Y = poly.Interpolate(X); - Assert.AreEqual(Y, 11.5415808882467, 1E-6); + Assert.AreEqual(11.5415808882467, Y, 1E-6); } /// diff --git a/Test_Numerics/Data/Paired Data/Test_Ordinate.cs b/Test_Numerics/Data/Paired Data/Test_Ordinate.cs index 2bcb3f4d..a3437a3a 100644 --- a/Test_Numerics/Data/Paired Data/Test_Ordinate.cs +++ b/Test_Numerics/Data/Paired Data/Test_Ordinate.cs @@ -71,14 +71,14 @@ public void Test_Construction() var ordinate4 = new Ordinate(double.NaN, 4); Assert.AreEqual(ordinate1, ordinate2); - Assert.AreEqual(ordinate1.X, 2); - Assert.AreEqual(ordinate1.Y, 4); - Assert.AreEqual(ordinate2.X, 2); - Assert.AreEqual(ordinate2.Y, 4); + Assert.AreEqual(2,ordinate1.X); + Assert.AreEqual(4, ordinate1.Y); + Assert.AreEqual(2, ordinate2.X); + Assert.AreEqual(4, ordinate2.Y); - Assert.AreEqual(ordinate1.IsValid, true); - Assert.AreEqual(ordinate3.IsValid, false); - Assert.AreEqual(ordinate4.IsValid, false); + Assert.IsTrue(ordinate1.IsValid); + Assert.IsFalse(ordinate3.IsValid); + Assert.IsFalse(ordinate4.IsValid); Assert.AreNotEqual(ordinate1, ordinate3); diff --git a/Test_Numerics/Data/Paired Data/Test_PairedDataInterpolation.cs b/Test_Numerics/Data/Paired Data/Test_PairedDataInterpolation.cs index 8f198375..5ff08e81 100644 --- a/Test_Numerics/Data/Paired Data/Test_PairedDataInterpolation.cs +++ b/Test_Numerics/Data/Paired Data/Test_PairedDataInterpolation.cs @@ -56,10 +56,10 @@ public void Test_Sequential() opd.Add(new Ordinate(i, i)); // X var lo = opd.SequentialSearchX(872.5d); - Assert.AreEqual(lo, 871); + Assert.AreEqual(871,lo); // Y lo = opd.SequentialSearchY(872.5d); - Assert.AreEqual(lo, 871); + Assert.AreEqual(871, lo); // DSC opd = new OrderedPairedData(true, SortOrder.Descending, false, SortOrder.Descending); @@ -67,10 +67,10 @@ public void Test_Sequential() opd.Add(new Ordinate(i, i)); // X lo = opd.SequentialSearchX(872.5d); - Assert.AreEqual(lo, 127); + Assert.AreEqual(127, lo); // Y lo = opd.SequentialSearchY(872.5d); - 
Assert.AreEqual(lo, 127); + Assert.AreEqual(127, lo); } @@ -86,10 +86,10 @@ public void Test_Bisection() opd.Add(new Ordinate(i, i)); // X var lo = opd.BisectionSearchX(872.5d); - Assert.AreEqual(lo, 871); + Assert.AreEqual(871, lo); // Y lo = opd.BisectionSearchY(872.5d); - Assert.AreEqual(lo, 871); + Assert.AreEqual(871, lo); // DSC opd = new OrderedPairedData(true, SortOrder.Descending, false, SortOrder.Descending); @@ -97,10 +97,10 @@ public void Test_Bisection() opd.Add(new Ordinate(i, i)); // X lo = opd.BisectionSearchX(872.5d); - Assert.AreEqual(lo, 127); + Assert.AreEqual(127, lo); // Y lo = opd.BisectionSearchY(872.5d); - Assert.AreEqual(lo, 127); + Assert.AreEqual(127, lo); } @@ -116,10 +116,10 @@ public void Test_Hunt() opd.Add(new Ordinate(i, i)); // X var lo = opd.HuntSearchX(872.5d); - Assert.AreEqual(lo, 871); + Assert.AreEqual(871, lo); // Y lo = opd.HuntSearchY(872.5d); - Assert.AreEqual(lo, 871); + Assert.AreEqual(871, lo); // DSC opd = new OrderedPairedData(true, SortOrder.Descending, false, SortOrder.Descending); @@ -127,10 +127,10 @@ public void Test_Hunt() opd.Add(new Ordinate(i, i)); // X lo = opd.HuntSearchX(872.5d); - Assert.AreEqual(lo, 127); + Assert.AreEqual(127, lo); // Y lo = opd.HuntSearchY(872.5d); - Assert.AreEqual(lo, 127); + Assert.AreEqual(127, lo); } @@ -147,7 +147,7 @@ public void Test_Lin() var opd = new OrderedPairedData(XArray, YArray, true, SortOrder.Ascending, true, SortOrder.Ascending); double X = 75d; double Y = opd.GetYFromX(X); - Assert.AreEqual(Y, 150.0d, 1E-6); + Assert.AreEqual(150.0d, Y, 1E-6); // Given Y var xFromY = opd.GetXFromY(Y); @@ -168,7 +168,7 @@ public void Test_LinLog() var opd = new OrderedPairedData(XArray, YArray, true, SortOrder.Ascending, true, SortOrder.Ascending); double X = 75d; double Y = opd.GetYFromX(X, Transform.None, Transform.Logarithmic); - Assert.AreEqual(Y, 141.42135623731d, 1E-6); + Assert.AreEqual(141.42135623731d, Y, 1E-6); // Given Y var xFromY = opd.GetXFromY(Y, Transform.None, Transform.Logarithmic); @@ -188,7 +188,7 @@ public void Test_LogLin() var opd = new OrderedPairedData(XArray, YArray, true, SortOrder.Ascending, true, SortOrder.Ascending); double X = 75d; double Y = opd.GetYFromX(X, Transform.Logarithmic, Transform.None); - Assert.AreEqual(Y, 158.496250072116d, 1E-6); + Assert.AreEqual(158.496250072116d, Y, 1E-6); // Given Y var xFromY = opd.GetXFromY(Y, Transform.Logarithmic, Transform.None); @@ -208,7 +208,7 @@ public void Test_LogLog() var opd = new OrderedPairedData(XArray, YArray, true, SortOrder.Ascending, true, SortOrder.Ascending); double X = 75d; double Y = opd.GetYFromX(X, Transform.Logarithmic, Transform.Logarithmic); - Assert.AreEqual(Y, 150.0d, 1E-6); + Assert.AreEqual(150.0d, Y, 1E-6); // Given Y var xFromY = opd.GetXFromY(Y, Transform.Logarithmic, Transform.Logarithmic); @@ -228,7 +228,7 @@ public void Test_LinZ() var opd = new OrderedPairedData(XArray, YArray, true, SortOrder.Ascending, true, SortOrder.Ascending); double X = 0.18d; double Y = opd.GetYFromX(X, Transform.None, Transform.NormalZ); - Assert.AreEqual(Y, 0.358762529d, 1E-6); + Assert.AreEqual(0.358762529d, Y, 1E-6); // Given Y var xFromY = opd.GetXFromY(Y, Transform.None, Transform.NormalZ); @@ -248,7 +248,7 @@ public void Test_ZLin() var opd = new OrderedPairedData(XArray, YArray, true, SortOrder.Ascending, true, SortOrder.Ascending); double X = 0.18d; double Y = opd.GetYFromX(X, Transform.NormalZ, Transform.None); - Assert.AreEqual(Y, 0.362146174d, 1E-6); + Assert.AreEqual(0.362146174d, Y, 1E-6); // Given Y var xFromY = 
opd.GetXFromY(Y, Transform.NormalZ, Transform.None); @@ -268,7 +268,7 @@ public void Test_ZZ() var opd = new OrderedPairedData(XArray, YArray, true, SortOrder.Ascending, true, SortOrder.Ascending); double X = 0.18d; double Y = opd.GetYFromX(X, Transform.NormalZ, Transform.NormalZ); - Assert.AreEqual(Y, 0.36093855992815d, 1E-6); + Assert.AreEqual(0.36093855992815d, Y, 1E-6); // Given Y var xFromY = opd.GetXFromY(Y, Transform.NormalZ, Transform.NormalZ); @@ -290,7 +290,7 @@ public void Test_RevLinear() var opd = new OrderedPairedData(XArray, YArray, true, SortOrder.Descending, true, SortOrder.Descending); double X = 75d; double Y = opd.GetYFromX(X); - Assert.AreEqual(Y, 150.0d, 1E-6); + Assert.AreEqual(150.0d, Y, 1E-6); // Given Y var xFromY = opd.GetXFromY(Y); @@ -312,7 +312,7 @@ public void Test_RevLinLog() var opd = new OrderedPairedData(XArray, YArray, true, SortOrder.Descending, true, SortOrder.Descending); double X = 75d; double Y = opd.GetYFromX(X, Transform.None, Transform.Logarithmic); - Assert.AreEqual(Y, 141.42135623731d, 1E-6); + Assert.AreEqual(141.42135623731d, Y, 1E-6); // Given Y var xFromY = opd.GetXFromY(Y, Transform.None, Transform.Logarithmic); @@ -334,7 +334,7 @@ public void Test_RevLogLin() var opd = new OrderedPairedData(XArray, YArray, true, SortOrder.Descending, true, SortOrder.Descending); double X = 75d; double Y = opd.GetYFromX(X, Transform.Logarithmic, Transform.None); - Assert.AreEqual(Y, 158.496250072116d, 1E-6); + Assert.AreEqual(158.496250072116d, Y, 1E-6); // Given Y var xFromY = opd.GetXFromY(Y, Transform.Logarithmic, Transform.None); @@ -356,7 +356,7 @@ public void Test_RevLogLog() var opd = new OrderedPairedData(XArray, YArray, true, SortOrder.Descending, true, SortOrder.Descending); double X = 75d; double Y = opd.GetYFromX(X, Transform.Logarithmic, Transform.Logarithmic); - Assert.AreEqual(Y, 150.0d, 1E-6); + Assert.AreEqual(150.0d, Y, 1E-6); // Given Y var xFromY = opd.GetXFromY(Y, Transform.Logarithmic, Transform.Logarithmic); @@ -378,7 +378,7 @@ public void Test_RevLinZ() var opd = new OrderedPairedData(XArray, YArray, true, SortOrder.Descending, true, SortOrder.Descending); double X = 0.18d; double Y = opd.GetYFromX(X, Transform.None, Transform.NormalZ); - Assert.AreEqual(Y, 0.358762529d, 1E-6); + Assert.AreEqual(0.358762529d, Y, 1E-6); // Given Y var xFromY = opd.GetXFromY(Y, Transform.None, Transform.NormalZ); @@ -400,7 +400,7 @@ public void Test_RevZLin() var opd = new OrderedPairedData(XArray, YArray, true, SortOrder.Descending, true, SortOrder.Descending); double X = 0.18d; double Y = opd.GetYFromX(X, Transform.NormalZ, Transform.None); - Assert.AreEqual(Y, 0.362146174d, 1E-6); + Assert.AreEqual(0.362146174d, Y, 1E-6); // Given Y var xFromY = opd.GetXFromY(Y, Transform.NormalZ, Transform.None); @@ -422,7 +422,7 @@ public void Test_RevZZ() var opd = new OrderedPairedData(XArray, YArray, true, SortOrder.Descending, true, SortOrder.Descending); double X = 0.18d; double Y = opd.GetYFromX(X, Transform.NormalZ, Transform.NormalZ); - Assert.AreEqual(Y, 0.36093855992815d, 1E-6); + Assert.AreEqual(0.36093855992815d, Y, 1E-6); // Given Y var xFromY = opd.GetXFromY(Y, Transform.NormalZ, Transform.NormalZ); @@ -442,7 +442,7 @@ public void Test_Lin_List() var opd = new OrderedPairedData(XArray, YArray, true, SortOrder.Ascending, true, SortOrder.Ascending); double X = 75d; double Y = opd.GetYFromX(X); - Assert.AreEqual(Y, 150.0d, 1E-6); + Assert.AreEqual(150.0d, Y, 1E-6); var yVals = opd.GetYFromX(XArray); for (int i = 1; i < YArray.Length; i++) diff --git 
a/Test_Numerics/Data/Statistics/Test_BoxCox.cs b/Test_Numerics/Data/Statistics/Test_BoxCox.cs index 6c0ce5a6..2bcfbfa8 100644 --- a/Test_Numerics/Data/Statistics/Test_BoxCox.cs +++ b/Test_Numerics/Data/Statistics/Test_BoxCox.cs @@ -66,7 +66,7 @@ public void Test_Fit() var sample = new[] { 142.25d, 141.23d, 141.33d, 140.82d, 141.31d, 140.58d, 141.58d, 142.15d, 143.07d, 142.85d, 143.17d, 142.54d, 143.07d, 142.26d, 142.97d, 143.86d, 142.57d, 142.19d, 142.35d, 142.63d, 144.15d, 144.73d, 144.7d, 144.97d, 145.12d, 144.78d, 145.06d, 143.94d, 143.77d, 144.8d, 145.67d, 145.44d, 145.56d, 145.61d, 146.05d, 145.74d, 145.83d, 143.88d, 140.39d, 139.34d, 140.05d, 137.93d, 138.78d, 139.59d, 140.54d, 141.31d, 140.42d, 140.18d, 138.43d, 138.97d, 139.31d, 139.26d, 140.08d, 141.1d, 143.48d, 143.28d, 143.5d, 143.12d, 142.14d, 142.54d, 142.24d, 142.16d, 142.97d, 143.69d, 143.67d, 144.65d, 144.33d, 144.82d, 143.74d, 144.9d, 145.83d, 146.97d, 146.6d, 146.55d, 148.22d, 148.37d, 148.23d, 148.73d, 149.49d, 149.09d, 149.64d, 148.42d, 148.9d, 149.97d, 150.75d, 150.88d, 150.58d, 150.64d, 150.73d, 149.75d, 150.86d, 150.7d, 150.8d, 151.38d, 152.01d, 152.58d, 152.7d, 152.95d, 152.53d, 151.5d, 151.94d, 151.46d, 153.67d, 153.88d, 153.54d, 153.74d, 152.86d, 151.56d, 149.58d, 150.93d, 150.67d, 150.5d, 152.06d, 153.14d, 153.38d, 152.55d, 153.58d, 151.08d, 151.52d, 150.24d, 150.21d, 148.13d, 150.38d, 150.9d, 150.87d, 152.18d, 152.4d, 152.38d, 153.16d, 152.29d, 150.75d, 152.37d, 154.57d, 154.99d, 154.93d, 154.23d, 155.2d, 154.89d, 154.18d, 153.12d, 152.02d, 150.19d, 148.21d, 145.93d, 148.33d, 145.18d, 146.76d, 147.28d, 144.21d, 145.94d, 148.41d, 147.43d, 144.39d, 146.5d, 145.7d, 142.72d, 139.79d, 145.5d, 145.17d, 144.6d, 146.01d, 147.34d, 146.48d, 147.85d, 146.16d, 144.37d, 145.45d, 147.65d, 147.45d, 148.2d, 147.95d, 146.48d, 146.52d, 146.24d, 147.29d, 148.55d, 147.96d, 148.31d, 148.83d, 153.41d, 153.34d, 152.71d, 152.42d, 150.81d, 152.25d, 152.91d, 152.85d, 152.6d, 154.61d, 153.81d, 154.11d, 155.03d, 155.39d, 155.6d, 156.04d, 156.93d, 155.46d, 156.27d, 154.41d, 154.98d }; double l1 = 0d; BoxCox.FitLambda(sample, out l1); - Assert.AreEqual(l1, 1.670035d, 1E-4); + Assert.AreEqual(1.670035d, l1, 1E-4); } /// diff --git a/Test_Numerics/Data/Statistics/Test_HypothesisTests.cs b/Test_Numerics/Data/Statistics/Test_HypothesisTests.cs index c8e90605..bd37842b 100644 --- a/Test_Numerics/Data/Statistics/Test_HypothesisTests.cs +++ b/Test_Numerics/Data/Statistics/Test_HypothesisTests.cs @@ -80,7 +80,7 @@ public void Test_OneSampleTtest() Assert.AreEqual(p, true_p, 1E-4); var t = HypothesisTests.OneSampleTtest(new double[] { 23, 15, -5, 7, 1, -10, 12, -8, 20, 8, -2, -5 }); - Assert.AreEqual(t, 0.087585 * 2, 1E-6); + Assert.AreEqual(0.087585 * 2, t, 1E-6); } /// @@ -187,7 +187,7 @@ public void Test_JarqueBera() // known example var JB = HypothesisTests.JarqueBeraTest(new double[] { 4, 5, 5, 6, 9, 12, 13, 14, 14, 19, 22, 24, 25 }); - Assert.AreEqual(JB, 0.592128, 1E-6); + Assert.AreEqual(0.592128, JB, 1E-6); } diff --git a/Test_Numerics/Distributions/Multivariate/Test_MultivariateNormal.cs b/Test_Numerics/Distributions/Multivariate/Test_MultivariateNormal.cs index 98c1bb06..de4cced4 100644 --- a/Test_Numerics/Distributions/Multivariate/Test_MultivariateNormal.cs +++ b/Test_Numerics/Distributions/Multivariate/Test_MultivariateNormal.cs @@ -145,47 +145,47 @@ public void Test_MultivariateNormalCDF_R() // AB var p = mvn.CDF(new[] { Normal.StandardZ(0.25), Normal.StandardZ(0.35), double.PositiveInfinity, double.PositiveInfinity }); - 
Assert.AreEqual(p, 0.05011069, 1E-4); + Assert.AreEqual(0.05011069, p, 1E-4); // AC p = mvn.CDF(new[] { Normal.StandardZ(0.25), double.PositiveInfinity, Normal.StandardZ(0.5), double.PositiveInfinity }); - Assert.AreEqual(p, 0.0827451, 1E-4); + Assert.AreEqual(0.0827451, p, 1E-4); // AD p = mvn.CDF(new[] { Normal.StandardZ(0.25), double.PositiveInfinity, double.PositiveInfinity, Normal.StandardZ(0.5) }); - Assert.AreEqual(p, 0.0827451, 1E-4); + Assert.AreEqual(0.0827451, p, 1E-4); // BC p = mvn.CDF(new[] { double.PositiveInfinity, Normal.StandardZ(0.35), Normal.StandardZ(0.5), double.PositiveInfinity }); - Assert.AreEqual(p, 0.1254504, 1E-4); + Assert.AreEqual(0.1254504, p, 1E-4); // BD p = mvn.CDF(new[] { double.PositiveInfinity, Normal.StandardZ(0.35), double.PositiveInfinity, Normal.StandardZ(0.5) }); - Assert.AreEqual(p, 0.1254504, 1E-4); + Assert.AreEqual(0.1254504, p, 1E-4); // CD p = mvn.CDF(new[] { double.PositiveInfinity, double.PositiveInfinity, Normal.StandardZ(0.5), Normal.StandardZ(0.5) }); - Assert.AreEqual(p, 0.1964756, 1E-4); + Assert.AreEqual(0.1964756, p, 1E-4); // ABC p = mvn.CDF(new[] { Normal.StandardZ(0.25), Normal.StandardZ(0.35), Normal.StandardZ(0.5), double.PositiveInfinity }); - Assert.AreEqual(p, 0.005960125, 1E-4); + Assert.AreEqual(0.005960125, p, 1E-4); // ABD p = mvn.CDF(new[] { Normal.StandardZ(0.25), Normal.StandardZ(0.35), double.PositiveInfinity, Normal.StandardZ(0.5) }); - Assert.AreEqual(p, 0.005964513, 1E-4); + Assert.AreEqual(0.005964513, p, 1E-4); // ACD p = mvn.CDF(new[] { Normal.StandardZ(0.25), double.PositiveInfinity, Normal.StandardZ(0.5), Normal.StandardZ(0.5) }); - Assert.AreEqual(p, 0.0128066, 1E-4); + Assert.AreEqual(0.0128066, p, 1E-4); // BCD p = mvn.CDF(new[] { double.PositiveInfinity, Normal.StandardZ(0.35), Normal.StandardZ(0.5), Normal.StandardZ(0.5) }); - Assert.AreEqual(p, 0.02324389, 1E-4); + Assert.AreEqual(0.02324389, p, 1E-4); // ABCD p = mvn.CDF(new[] { Normal.StandardZ(0.25), Normal.StandardZ(0.35), Normal.StandardZ(0.5), Normal.StandardZ(0.5)}); - Assert.AreEqual(p, 3.593582e-13, 1E-4); + Assert.AreEqual(3.593582e-13, p, 1E-4); } /// @@ -206,7 +206,7 @@ public void Test_MultivariateNormalCDF_R_PerfectNegative() var mvn = new MultivariateNormal(mean, covar) { MVNUNI = new MersenneTwister(12345) }; var p = mvn.CDF(new[] { Normal.StandardZ(0.5), Normal.StandardZ(0.5), Normal.StandardZ(0.5) }); - Assert.AreEqual(p, 0.002740932, 1E-4); + Assert.AreEqual(0.002740932, p, 1E-4); } /// @@ -226,7 +226,7 @@ public void Test_MultivariateNormalCDF_R_PerfectPositive() var mvn = new MultivariateNormal(mean, covar) { MVNUNI = new MersenneTwister(12345) }; var p = mvn.CDF(new[] { Normal.StandardZ(0.5), Normal.StandardZ(0.5), Normal.StandardZ(0.5) }); - Assert.AreEqual(p, 0.4661416, 1E-4); + Assert.AreEqual(0.4661416, p, 1E-4); } @@ -246,7 +246,7 @@ public void Test_MultivariateNormalCDF_R_Independent() var mvn = new MultivariateNormal(mean, covar) { MVNUNI = new MersenneTwister(12345) }; var p = mvn.CDF(new[] { Normal.StandardZ(0.5), Normal.StandardZ(0.5), Normal.StandardZ(0.5) }); - Assert.AreEqual(p, 0.125, 1E-4); + Assert.AreEqual(0.125, p, 1E-4); } diff --git a/Test_Numerics/Distributions/Univariate/Test_ChiSquared.cs b/Test_Numerics/Distributions/Univariate/Test_ChiSquared.cs index 25c5f4d2..5f626d9f 100644 --- a/Test_Numerics/Distributions/Univariate/Test_ChiSquared.cs +++ b/Test_Numerics/Distributions/Univariate/Test_ChiSquared.cs @@ -275,15 +275,15 @@ public void Test_CDF() public void Test_InverseCDF() { var x = new ChiSquared(1); - 
Assert.AreEqual(x.InverseCDF(0.24817036595415071751), 0.09999,1e-04); - Assert.AreEqual(x.InverseCDF(0.68268949213708589717), 1, 1e-04); - Assert.AreEqual(x.InverseCDF(0.9809835), 5.5, 1e-04); + Assert.AreEqual(0.09999, x.InverseCDF(0.24817036595415071751), 1e-04); + Assert.AreEqual(1, x.InverseCDF(0.68268949213708589717), 1e-04); + Assert.AreEqual(5.5, x.InverseCDF(0.9809835), 1e-04); var x2 = new ChiSquared(2); - Assert.AreEqual(x2.InverseCDF(0), 0); - Assert.AreEqual(x2.InverseCDF(0.04877057), 0.1,1e-04); - Assert.AreEqual(x2.InverseCDF(0.3934693), 1,1e-04); - Assert.AreEqual(x2.InverseCDF(0.9360721), 5.5, 1e-04); + Assert.AreEqual(0,x2.InverseCDF(0)); + Assert.AreEqual(0.1, x2.InverseCDF(0.04877057),1e-04); + Assert.AreEqual(1, x2.InverseCDF(0.3934693),1e-04); + Assert.AreEqual(5.5, x2.InverseCDF(0.9360721), 1e-04); } } } diff --git a/Test_Numerics/Distributions/Univariate/Test_EmpiricalDistribution.cs b/Test_Numerics/Distributions/Univariate/Test_EmpiricalDistribution.cs index d7e508a8..9ca4a6a7 100644 --- a/Test_Numerics/Distributions/Univariate/Test_EmpiricalDistribution.cs +++ b/Test_Numerics/Distributions/Univariate/Test_EmpiricalDistribution.cs @@ -145,7 +145,7 @@ public void Test_ConvolveTwoUniformDistributions() var convolved = EmpiricalDistribution.Convolve(dist1, dist2, 1000); // Assert number of points - Assert.AreEqual(1000, convolved.XValues.Count, "Should have exactly 1000 points"); + Assert.HasCount(1000, convolved.XValues); // Expected: Min ≈ 0, Max ≈ 2, Mean ≈ 1 Assert.AreEqual(0.0, convolved.Minimum, 0.05, "Minimum should be approximately 0"); @@ -191,7 +191,7 @@ public void Test_ConvolveTwoNormalDistributions() var convolved = EmpiricalDistribution.Convolve(dist1, dist2, 2048); // Assert number of points - Assert.AreEqual(2048, convolved.XValues.Count, "Should have exactly 2048 points"); + Assert.HasCount(2048, convolved.XValues); // Expected: For N(0,1) + N(0,1) = N(0, sqrt(2)) // Mean ≈ 0, StdDev ≈ 1.414 @@ -227,7 +227,7 @@ public void Test_ConvolveDifferentRanges() var convolved = EmpiricalDistribution.Convolve(dist1, dist2, 500); // Assert number of points - Assert.AreEqual(500, convolved.XValues.Count, "Should have exactly 500 points"); + Assert.HasCount(500, convolved.XValues); // Expected: Range ≈ [5, 25], Mean ≈ 15 Assert.AreEqual(5.0, convolved.Minimum, 0.5, "Minimum should be approximately 5"); @@ -270,8 +270,8 @@ public void Test_ConvolveFiveDistributions() Assert.AreEqual(convolved.Mean, convolved.Median, 0.5, "Median should be close to mean for symmetric distribution"); // Verify CDF properties - Assert.IsTrue(convolved.CDF(convolved.Minimum) <= 0.01, "CDF at minimum should be close to 0"); - Assert.IsTrue(convolved.CDF(convolved.Maximum) >= 0.99, "CDF at maximum should be close to 1"); + Assert.IsLessThanOrEqualTo(0.01,convolved.CDF(convolved.Minimum), "CDF at minimum should be close to 0"); + Assert.IsGreaterThanOrEqualTo(0.99,convolved.CDF(convolved.Maximum), "CDF at maximum should be close to 1"); } /// @@ -311,14 +311,14 @@ public void Test_ConvolveFiveDifferentDistributions() var convolved = EmpiricalDistribution.Convolve(distributions, 1000); // Assert number of points - Assert.AreEqual(1000, convolved.XValues.Count, "Should have exactly 1000 points"); + Assert.HasCount(1000, convolved.XValues, "Should have exactly 1000 points"); // Compare with expected (allow for numerical error) double meanError = Math.Abs(convolved.Mean - expectedMean) / expectedMean; double stdDevError = Math.Abs(convolved.StandardDeviation - expectedStdDev) / expectedStdDev; 
- Assert.IsTrue(meanError < 0.05, $"Mean error {meanError:P2} should be less than 5%"); - Assert.IsTrue(stdDevError < 0.15, $"StdDev error {stdDevError:P2} should be less than 15%"); + Assert.IsLessThan(0.05,meanError, $"Mean error {meanError:P2} should be less than 5%"); + Assert.IsLessThan(0.15,stdDevError, $"StdDev error {stdDevError:P2} should be less than 15%"); // Verify range is reasonable double expectedMin = distributions.Sum(d => d.Minimum); @@ -347,7 +347,7 @@ public void Test_NonPowerOfTwoPoints() var convolved = EmpiricalDistribution.Convolve(dist1, dist2, size); // Assert correct number of points - Assert.AreEqual(size, convolved.XValues.Count, $"Should have exactly {size} points"); + Assert.HasCount(size, convolved.XValues, $"Should have exactly {size} points"); // Assert reasonable properties Assert.AreEqual(1.0, convolved.Mean, 0.1, $"Mean should be approximately 1 for size {size}"); diff --git a/Test_Numerics/Distributions/Univariate/Test_Exponential.cs b/Test_Numerics/Distributions/Univariate/Test_Exponential.cs index 391f62b7..4e2ca62f 100644 --- a/Test_Numerics/Distributions/Univariate/Test_Exponential.cs +++ b/Test_Numerics/Distributions/Univariate/Test_Exponential.cs @@ -147,10 +147,10 @@ public void Test_EXP_Quantile() var EXP = new Exponential(27421d, 25200d); double q100 = EXP.InverseCDF(0.99d); double true_100 = 143471d; - Assert.AreEqual((q100 - true_100) / true_100 < 0.01d, true); + Assert.IsLessThan(0.01d, (q100 - true_100) / true_100); double p = EXP.CDF(q100); double true_p = 0.99d; - Assert.AreEqual(p == true_p, true); + Assert.AreEqual(p, true_p); } /// @@ -172,13 +172,13 @@ public void Test_EXP_StandardError() var EXP = new Exponential(27421d, 25200d); double se100 = Math.Sqrt(EXP.QuantileVariance(0.99d, 85, ParameterEstimationMethod.MethodOfMoments)); double true_se100 = 15986d; - Assert.AreEqual((se100 - true_se100) / true_se100 < 0.01d, true); + Assert.IsLessThan(0.01d,(se100 - true_se100) / true_se100); // Maximum Likelihood EXP = new Exponential(12629d, 39991d); se100 = Math.Sqrt(EXP.QuantileVariance(0.99d, 85, ParameterEstimationMethod.MaximumLikelihood)); true_se100 = 20048d; - Assert.AreEqual((se100 - true_se100) / true_se100 < 0.01d, true); + Assert.IsLessThan(0.01d, (se100 - true_se100) / true_se100); } /// @@ -192,8 +192,8 @@ public void Test_EXP_Partials() double dQdScale = EXP.QuantileGradient(0.99d)[1]; double true_dLocation = 1.0d; double true_dScale = 4.60517d; - Assert.AreEqual((dQdLocation - true_dLocation) / true_dLocation < 0.01d, true); - Assert.AreEqual((dQdScale - true_dScale) / true_dScale < 0.01d, true); + Assert.IsLessThan(0.01d, (dQdLocation - true_dLocation) / true_dLocation); + Assert.IsLessThan(0.01d, (dQdScale - true_dScale) / true_dScale); } /// @@ -203,12 +203,12 @@ public void Test_EXP_Partials() public void Test_Construction() { var EXP = new Exponential(-5, 100); - Assert.AreEqual(EXP.Xi, -5); - Assert.AreEqual(EXP.Alpha, 100); + Assert.AreEqual(-5,EXP.Xi); + Assert.AreEqual(100,EXP.Alpha); var EXP2 = new Exponential(0, 1); - Assert.AreEqual(EXP2.Xi, 0); - Assert.AreEqual(EXP2.Alpha, 1); + Assert.AreEqual(0,EXP2.Xi); + Assert.AreEqual(1, EXP2.Alpha); } /// @@ -237,10 +237,10 @@ public void Test_InvalidParameters() public void Test_ParametersToString() { var EXP = new Exponential(1, 1); - Assert.AreEqual(EXP.ParametersToString[0, 0], "Location (ξ)"); - Assert.AreEqual(EXP.ParametersToString[1, 0], "Scale (α)"); - Assert.AreEqual(EXP.ParametersToString[0, 1], "1"); - Assert.AreEqual(EXP.ParametersToString[1, 1], "1"); + 
Assert.AreEqual("Location (ξ)",EXP.ParametersToString[0, 0] ); + Assert.AreEqual("Scale (α)", EXP.ParametersToString[1, 0]); + Assert.AreEqual("1", EXP.ParametersToString[0, 1]); + Assert.AreEqual("1", EXP.ParametersToString[1, 1]); } /// @@ -267,7 +267,7 @@ public void Test_Mean() Assert.AreEqual(2, EXP.Mean); var EXP2 = new Exponential(-100, 4); - Assert.AreEqual(EXP2.Mean, -96); + Assert.AreEqual(-96, EXP2.Mean); } /// @@ -277,10 +277,10 @@ public void Test_Mean() public void Test_Median() { var EXP = new Exponential(0,1); - Assert.AreEqual(EXP.Median, 0.693147, 1e-04); + Assert.AreEqual(0.693147, EXP.Median, 1e-04); var EXP2 = new Exponential(-100, 1); - Assert.AreEqual(EXP2.Median, -99.306852, 1e-04); + Assert.AreEqual(-99.306852, EXP2.Median, 1e-04); } /// @@ -290,10 +290,10 @@ public void Test_Median() public void Test_Mode() { var EXP = new Exponential(0,1); - Assert.AreEqual(EXP.Mode, 0); + Assert.AreEqual(0, EXP.Mode); var EXP2 = new Exponential(-100,1); - Assert.AreEqual(EXP2.Mode, -100); + Assert.AreEqual(-100, EXP2.Mode); } /// @@ -303,10 +303,10 @@ public void Test_Mode() public void Test_StandardDeviation() { var EXP = new Exponential(0,1); - Assert.AreEqual(EXP.StandardDeviation, 1); + Assert.AreEqual(1, EXP.StandardDeviation); var EXP2 = new Exponential(-100, 1); - Assert.AreEqual(EXP2.StandardDeviation, 1); + Assert.AreEqual(1, EXP2.StandardDeviation); } /// @@ -316,7 +316,7 @@ public void Test_StandardDeviation() public void Test_Skewness() { var EXP = new Exponential(0, 1); - Assert.AreEqual(EXP.Skewness, 2); + Assert.AreEqual(2, EXP.Skewness); } /// @@ -326,7 +326,7 @@ public void Test_Skewness() public void Test_Kurtosis() { var EXP = new Exponential(0, 1); - Assert.AreEqual(EXP.Kurtosis, 9); + Assert.AreEqual(9, EXP.Kurtosis); } /// @@ -336,10 +336,10 @@ public void Test_Kurtosis() public void Test_Minimum() { var EXP = new Exponential(0, 1); - Assert.AreEqual(EXP.Minimum, 0); + Assert.AreEqual(0, EXP.Minimum); var EXP2 = new Exponential(-100, 1); - Assert.AreEqual(EXP2.Minimum, -100); + Assert.AreEqual(-100, EXP2.Minimum); } /// @@ -349,7 +349,7 @@ public void Test_Minimum() public void Test_Maximum() { var EXP = new Exponential(0, 1); - Assert.AreEqual(EXP.Maximum, double.PositiveInfinity); + Assert.AreEqual(double.PositiveInfinity,EXP.Maximum); } /// diff --git a/Test_Numerics/Distributions/Univariate/Test_GammaDistribution.cs b/Test_Numerics/Distributions/Univariate/Test_GammaDistribution.cs index e795db82..eb90c7a7 100644 --- a/Test_Numerics/Distributions/Univariate/Test_GammaDistribution.cs +++ b/Test_Numerics/Distributions/Univariate/Test_GammaDistribution.cs @@ -75,8 +75,8 @@ public void Test_GammaDist_MOM() double lambda = G.Kappa; double trueA = 0.08317d; double trueL = 15.91188d; - Assert.AreEqual((alpha - trueA) / trueA < 0.01d, true); - Assert.AreEqual((lambda - trueL) / trueL < 0.01d, true); + Assert.IsLessThan(0.01d,(alpha - trueA) / trueA); + Assert.IsLessThan(0.01d,(lambda - trueL) / trueL); } [TestMethod()] @@ -93,10 +93,10 @@ public void Test_GammaDist_LMOM_Fit() Assert.AreEqual(scale, true_scale, 0.0001d); Assert.AreEqual(shape, true_shape, 0.0001d); var lmom = G.LinearMomentsFromParameters(G.GetParameters); - Assert.AreEqual(lmom[0], 9.9575163d, 0.0001d); - Assert.AreEqual(lmom[1], 1.9822363d, 0.0001d); - Assert.AreEqual(lmom[2], 0.1175059d, 0.0001d); - Assert.AreEqual(lmom[3], 0.1268391d, 0.0001d); + Assert.AreEqual(9.9575163d, lmom[0], 0.0001d); + Assert.AreEqual(1.9822363d, lmom[1], 0.0001d); + Assert.AreEqual(0.1175059d,lmom[2], 0.0001d); + 
Assert.AreEqual(0.1268391d, lmom[3], 0.0001d); } /// @@ -119,8 +119,8 @@ public void Test_GammaDist_MLE() double lambda = G.Kappa; double trueA = 0.08833d; double trueL = 16.89937d; - Assert.AreEqual((alpha - trueA) / trueA < 0.01d, true); - Assert.AreEqual((lambda - trueL) / trueL < 0.01d, true); + Assert.IsLessThan(0.01d, (alpha - trueA) / trueA); + Assert.IsLessThan(0.01d, (lambda - trueL) / trueL); } /// @@ -140,10 +140,10 @@ public void Test_GammaDist_Quantile() var G = new GammaDistribution(1d / 0.08833d, 16.89937d); double q1000 = G.InverseCDF(0.99d); double true_1000 = 315.87d; - Assert.AreEqual((q1000 - true_1000) / true_1000 < 0.01d, true); + Assert.IsLessThan(0.01d, (q1000 - true_1000) / true_1000); double p = G.CDF(q1000); double true_p = 0.99d; - Assert.AreEqual(p == true_p, true); + Assert.AreEqual(p, true_p); } /// @@ -165,13 +165,13 @@ public void Test_GammaDist_StandardError() var G = new GammaDistribution(1d / 0.08317d, 15.9118d); double se1000 = Math.Sqrt(G.QuantileVariance(0.99d, 69, ParameterEstimationMethod.MethodOfMoments)); double true_se1000 = 16.024d; - Assert.AreEqual((se1000 - true_se1000) / true_se1000 < 0.01d, true); + Assert.IsLessThan(0.01d, (se1000 - true_se1000) / true_se1000); // Maximum Likelihood G = new GammaDistribution(1d / 0.08833d, 16.89937d); se1000 = Math.Sqrt(G.QuantileVariance(0.99d, 69, ParameterEstimationMethod.MaximumLikelihood)); true_se1000 = 15.022d; - Assert.AreEqual((se1000 - true_se1000) / true_se1000 < 0.01d, true); + Assert.IsLessThan(0.01d, (se1000 - true_se1000) / true_se1000); } /// @@ -181,12 +181,12 @@ public void Test_GammaDist_StandardError() public void Test_Construction() { var G = new GammaDistribution(2, 10); - Assert.AreEqual(G.Theta, 2); - Assert.AreEqual(G.Kappa, 10); + Assert.AreEqual(2,G.Theta); + Assert.AreEqual(10,G.Kappa); var G2 = new GammaDistribution(-1, 4); - Assert.AreEqual(G2.Theta, -1); - Assert.AreEqual(G2.Kappa, 4); + Assert.AreEqual(-1,G2.Theta); + Assert.AreEqual(4,G2.Kappa); } @@ -197,10 +197,10 @@ public void Test_Construction() public void Test_Rate() { var G = new GammaDistribution(2, 2); - Assert.AreEqual(G.Rate, 0.5); + Assert.AreEqual(0.5, G.Rate); var G2 = new GammaDistribution(); - Assert.AreEqual(G2.Rate, 0.1); + Assert.AreEqual(0.1, G2.Rate); } /// @@ -210,10 +210,10 @@ public void Test_Rate() public void Test_ParametersToString() { var G = new GammaDistribution(); - Assert.AreEqual(G.ParametersToString[0, 0], "Scale (θ)"); - Assert.AreEqual(G.ParametersToString[1, 0], "Shape (κ)"); - Assert.AreEqual(G.ParametersToString[0, 1], "10"); - Assert.AreEqual(G.ParametersToString[1, 1], "2"); + Assert.AreEqual("Scale (θ)",G.ParametersToString[0, 0] ); + Assert.AreEqual("Shape (κ)", G.ParametersToString[1, 0]); + Assert.AreEqual("10", G.ParametersToString[0, 1]); + Assert.AreEqual("2", G.ParametersToString[1, 1]); } /// @@ -256,7 +256,7 @@ public void Test_Moments() public void Test_Mean() { var G = new GammaDistribution(); - Assert.AreEqual(G.Mean, 20); + Assert.AreEqual(20, G.Mean); } /// @@ -292,7 +292,7 @@ public void Test_StandardDeviation() Assert.AreEqual(14.142135, G.StandardDeviation,1e-04); var G2 = new GammaDistribution(1, 2); - Assert.AreEqual(G2.StandardDeviation, 1.4142135, 1e-04); + Assert.AreEqual(1.4142135, G2.StandardDeviation, 1e-04); } /// @@ -302,10 +302,10 @@ public void Test_StandardDeviation() public void Test_Skewness() { var G = new GammaDistribution(); - Assert.AreEqual(G.Skewness, 1.4142135, 1e-04); + Assert.AreEqual(1.4142135, G.Skewness, 1e-04); var G2 = new 
GammaDistribution(10, 100); - Assert.AreEqual(G2.Skewness, 0.2); + Assert.AreEqual(0.2, G2.Skewness); } /// @@ -315,13 +315,13 @@ public void Test_Skewness() public void Test_Kurtosis() { var G = new GammaDistribution(); - Assert.AreEqual(G.Kurtosis, 6); + Assert.AreEqual(6, G.Kurtosis); var G2 = new GammaDistribution(10, 6); - Assert.AreEqual(G2.Kurtosis, 4); + Assert.AreEqual(4, G2.Kurtosis); var G3 = new GammaDistribution(10, 2.5); - Assert.AreEqual(G3.Kurtosis, 5.4); + Assert.AreEqual(5.4, G3.Kurtosis); } /// @@ -331,7 +331,7 @@ public void Test_Kurtosis() public void Test_Minimum() { var G = new GammaDistribution(); - Assert.AreEqual(G.Minimum, 0); + Assert.AreEqual(0, G.Minimum); } /// @@ -341,7 +341,7 @@ public void Test_Minimum() public void Test_Maximum() { var G = new GammaDistribution(); - Assert.AreEqual(G.Maximum, double.PositiveInfinity); + Assert.AreEqual(double.PositiveInfinity,G.Maximum ); } /// @@ -364,8 +364,8 @@ public void ValidateMLE_NR() double lambda = G.Kappa; double trueA = 0.08833d; double trueL = 16.89937d; - Assert.AreEqual((alpha - trueA) / trueA < 0.2d, true); - Assert.AreEqual((lambda - trueL) / trueL < 0.01d, true); + Assert.IsLessThan(0.2d,(alpha - trueA) / trueA); + Assert.IsLessThan(0.01d,(lambda - trueL) / trueL); } /// @@ -388,8 +388,8 @@ public void ValidateMLE_Bobee() double lambda = G.Kappa; double trueA = 0.08833d; double trueL = 16.89937d; - Assert.AreEqual((alpha - trueA) / trueA < 0.2d, true); - Assert.AreEqual((lambda - trueL) / trueL < 0.01d, true); + Assert.IsLessThan(0.2d, (alpha - trueA) / trueA); + Assert.IsLessThan(0.01d, (lambda - trueL) / trueL); } /// @@ -399,12 +399,12 @@ public void ValidateMLE_Bobee() public void Test_PDF() { var G = new GammaDistribution(10,1); - Assert.AreEqual(G.PDF(1), 0.090483, 1e-04); - Assert.AreEqual(G.PDF(10), 0.036787, 1e-04); + Assert.AreEqual(0.090483, G.PDF(1), 1e-04); + Assert.AreEqual(0.036787, G.PDF(10), 1e-04); var G2 = new GammaDistribution(1,1); - Assert.AreEqual(G2.PDF(1), 0.367879, 1e-04); - Assert.AreEqual(G2.PDF(10), 0.0000453999, 1e-10); + Assert.AreEqual(0.367879, G2.PDF(1), 1e-04); + Assert.AreEqual(0.0000453999, G2.PDF(10), 1e-10); } /// @@ -414,15 +414,15 @@ public void Test_PDF() public void Test_CDF() { var G = new GammaDistribution(10, 1); - Assert.AreEqual(G.CDF(1), 0.09516258, 1e-04); - Assert.AreEqual(G.CDF(10), 0.63212, 1e-04); + Assert.AreEqual(0.09516258, G.CDF(1), 1e-04); + Assert.AreEqual(0.63212, G.CDF(10), 1e-04); var G2 = new GammaDistribution(1, 1); - Assert.AreEqual(G2.CDF(10), 0.999954, 1e-04); + Assert.AreEqual(0.999954, G2.CDF(10), 1e-04); var G3 = new GammaDistribution(0.1, 10); - Assert.AreEqual(G3.CDF(1), 0.54207028, 1e-04); - Assert.AreEqual(G3.CDF(10), 0.999999, 1e-04); + Assert.AreEqual(0.54207028, G3.CDF(1), 1e-04); + Assert.AreEqual(0.999999, G3.CDF(10), 1e-04); } /// @@ -432,13 +432,13 @@ public void Test_CDF() public void Test_InverseCDF() { var G = new GammaDistribution(10,1); - Assert.AreEqual(G.InverseCDF(0), 0); - Assert.AreEqual(G.InverseCDF(1), double.PositiveInfinity); + Assert.AreEqual(0,G.InverseCDF(0)); + Assert.AreEqual(double.PositiveInfinity,G.InverseCDF(1) ); var G2 = new GammaDistribution(9.30149316e-07,1082.2442991605726); - Assert.AreEqual(G2.InverseCDF(0.99), 1.0792e-03,1e-04); - Assert.AreEqual(G2.InverseCDF(0.9919), 1.0817e-03,1e-04); - Assert.AreEqual(G2.InverseCDF(0.993), 1.0834e-03,1e-04); + Assert.AreEqual(1.0792e-03, G2.InverseCDF(0.99),1e-04); + Assert.AreEqual(1.0817e-03, G2.InverseCDF(0.9919),1e-04); + Assert.AreEqual(1.0834e-03, 
G2.InverseCDF(0.993),1e-04); } /// @@ -449,12 +449,12 @@ public void Test_InverseCDF() public void ValidateWilsonHilfertyInverseCDF() { var G = new GammaDistribution(1, 1); - Assert.AreEqual(G.WilsonHilfertyInverseCDF(.99), 4.62111,1e-04); - Assert.AreEqual(G.WilsonHilfertyInverseCDF(0.999), 6.92202,1e-04); + Assert.AreEqual(4.62111, G.WilsonHilfertyInverseCDF(.99),1e-04); + Assert.AreEqual(6.92202, G.WilsonHilfertyInverseCDF(0.999),1e-04); var G2 = new GammaDistribution(1, 2); - Assert.AreEqual(G2.WilsonHilfertyInverseCDF(0.05), 0.3566877,1e-04); - Assert.AreEqual(G2.WilsonHilfertyInverseCDF(0.10), 0.5326, 1e-04); + Assert.AreEqual(0.3566877, G2.WilsonHilfertyInverseCDF(0.05),1e-04); + Assert.AreEqual(0.5326, G2.WilsonHilfertyInverseCDF(0.10), 1e-04); } } } diff --git a/Test_Numerics/Distributions/Univariate/Test_GeneralizedBeta.cs b/Test_Numerics/Distributions/Univariate/Test_GeneralizedBeta.cs index f48becf7..edb5db03 100644 --- a/Test_Numerics/Distributions/Univariate/Test_GeneralizedBeta.cs +++ b/Test_Numerics/Distributions/Univariate/Test_GeneralizedBeta.cs @@ -201,10 +201,10 @@ public void Test_Moments() public void Test_Mean() { var b = new GeneralizedBeta(2, 2, 0, 1); - Assert.AreEqual(b.Mean, 0.5); + Assert.AreEqual(0.5,b.Mean); var b2 = new GeneralizedBeta(2, 2, -10, 10); - Assert.AreEqual(b2.Mean, 0); + Assert.AreEqual(0,b2.Mean); } /// @@ -224,10 +224,10 @@ public void Test_Median() public void Test_Mode() { var b = new GeneralizedBeta(); - Assert.AreEqual(b.Mode, 0.5); + Assert.AreEqual(0.5,b.Mode); var b2 = new GeneralizedBeta(2, 2, -10, 10); - Assert.AreEqual(b2.Mode, 0); + Assert.AreEqual(0,b2.Mode); } /// @@ -237,10 +237,10 @@ public void Test_Mode() public void Test_StandardDeviation() { var b = new GeneralizedBeta(); - Assert.AreEqual(b.StandardDeviation, 0.223606, 1e-04); + Assert.AreEqual(0.223606, b.StandardDeviation, 1e-04); var b2 = new GeneralizedBeta(2, 2, -10, 10); - Assert.AreEqual(b2.StandardDeviation, 4.47213,1e-04); + Assert.AreEqual(4.47213, b2.StandardDeviation,1e-04); } /// @@ -250,10 +250,10 @@ public void Test_StandardDeviation() public void Test_Skewness() { var b = new GeneralizedBeta(); - Assert.AreEqual(b.Skewness, 0); + Assert.AreEqual(0,b.Skewness); var b2 = new GeneralizedBeta(2, 10); - Assert.AreEqual(b2.Skewness, 0.92140088,1e-04); + Assert.AreEqual(0.92140088, b2.Skewness, 1e-04); } /// @@ -279,12 +279,12 @@ public void Test_Kurtosis() public void Test_MinimumMaximum() { var b = new GeneralizedBeta(); - Assert.AreEqual(b.Minimum, 0); - Assert.AreEqual(b.Maximum, 1); + Assert.AreEqual(0,b.Minimum); + Assert.AreEqual(1,b.Maximum); var b2 = new GeneralizedBeta(2, 2, -10, 10); - Assert.AreEqual(b2.Minimum, -10); - Assert.AreEqual(b2.Maximum, 10); + Assert.AreEqual(-10,b2.Minimum); + Assert.AreEqual(10,b2.Maximum); } /// @@ -313,8 +313,8 @@ public void Test_PDF() public void Test_CDF() { var b = new GeneralizedBeta(2,2,-10,10); - Assert.AreEqual(b.CDF(-11), 0); - Assert.AreEqual(b.CDF(11),1); + Assert.AreEqual(0,b.CDF(-11)); + Assert.AreEqual(1,b.CDF(11)); var b2 = new GeneralizedBeta(9, 1); Assert.AreEqual(0, b2.CDF(0)); diff --git a/Test_Numerics/Distributions/Univariate/Test_GeneralizedExtremeValue.cs b/Test_Numerics/Distributions/Univariate/Test_GeneralizedExtremeValue.cs index c1542ba3..36d411d5 100644 --- a/Test_Numerics/Distributions/Univariate/Test_GeneralizedExtremeValue.cs +++ b/Test_Numerics/Distributions/Univariate/Test_GeneralizedExtremeValue.cs @@ -84,9 +84,9 @@ public void Test_GEV_MOM_Fit() double true_x = 11012d; double true_a = 
6209.4d; double true_k = 0.0736d; - Assert.AreEqual((x - true_x) / true_x < 0.01d, true); - Assert.AreEqual((a - true_a) / true_a < 0.01d, true); - Assert.AreEqual((k - true_k) / true_k < 0.01d, true); + Assert.IsLessThan(0.01d,(x - true_x) / true_x ); + Assert.IsLessThan(0.01d,(a - true_a) / true_a); + Assert.IsLessThan(0.01d, (k - true_k) / true_k); } /// @@ -116,10 +116,10 @@ public void Test_GEV_LMOM_Fit() Assert.AreEqual(a, true_a, 0.001d); Assert.AreEqual(k, true_k, 0.001d); var lmom = GEV.LinearMomentsFromParameters(GEV.GetParameters); - Assert.AreEqual(lmom[0], 1648.806d, 0.001d); - Assert.AreEqual(lmom[1], 138.2366d, 0.001d); - Assert.AreEqual(lmom[2], 0.1030703d, 0.001d); - Assert.AreEqual(lmom[3], 0.1277244d, 0.001d); + Assert.AreEqual(1648.806d, lmom[0], 0.001d); + Assert.AreEqual(138.2366d, lmom[1], 0.001d); + Assert.AreEqual(0.1030703d, lmom[2], 0.001d); + Assert.AreEqual(0.1277244d, lmom[3], 0.001d); } /// @@ -144,9 +144,9 @@ public void Test_GEV_MLE_Fit() double true_x = 10849d; double true_a = 5745.6d; double true_k = 0.005d; - Assert.AreEqual((x - true_x) / true_x < 0.01d, true); - Assert.AreEqual((a - true_a) / true_a < 0.01d, true); - Assert.AreEqual((k - true_k) / true_k < 0.01d, true); + Assert.IsLessThan(0.01d, (x - true_x) / true_x); + Assert.IsLessThan(0.01d, (a - true_a) / true_a); + Assert.IsLessThan(0.01d, (k - true_k) / true_k); } /// @@ -166,10 +166,10 @@ public void Test_GEV_Quantile() var GEV = new GeneralizedExtremeValue(10849d, 5745.6d, 0.005d); double q100 = GEV.InverseCDF(0.99d); double true_q100 = 36977d; - Assert.AreEqual((q100 - true_q100) / true_q100 < 0.01d, true); + Assert.IsLessThan(0.01d, (q100 - true_q100) / true_q100); double p = GEV.CDF(q100); double true_p = 0.99d; - Assert.AreEqual((p - true_p) / true_p < 0.01d, true); + Assert.IsLessThan(0.01d, (p - true_p) / true_p); } /// @@ -205,17 +205,17 @@ public void Test_GEV_StandardError() var covar = GEV.ParameterCovariance(sample.Length, ParameterEstimationMethod.MaximumLikelihood); double qVar = GEV.QuantileVariance(0.99d, sample.Length, ParameterEstimationMethod.MaximumLikelihood); double qSigma = Math.Sqrt(qVar); - Assert.AreEqual((partials[0] - true_dXdU) / true_dXdU < 0.01d, true); - Assert.AreEqual((partials[1] - true_dxdA) / true_dxdA < 0.01d, true); - Assert.AreEqual((partials[2] - true_dxdK) / true_dxdK < 0.01d, true); - Assert.AreEqual((covar[0, 0] - true_VarU) / true_VarU < 0.01d, true); - Assert.AreEqual((covar[1, 1] - true_VarA) / true_VarA < 0.01d, true); - Assert.AreEqual((covar[2, 2] - true_VarK) / true_VarK < 0.01d, true); - Assert.AreEqual((covar[0, 1] - true_CovarUA) / true_CovarUA < 0.01d, true); - Assert.AreEqual((covar[0, 2] - true_CovarUK) / true_CovarUK < 0.01d, true); - Assert.AreEqual((covar[1, 2] - true_CovarAK) / true_CovarAK < 0.01d, true); - Assert.AreEqual((qVar - true_QVar) / true_QVar < 0.01d, true); - Assert.AreEqual((qSigma - true_QSigma) / true_QSigma < 0.01d, true); + Assert.IsLessThan(0.01d,(partials[0] - true_dXdU) / true_dXdU); + Assert.IsLessThan(0.01d, (partials[1] - true_dxdA) / true_dxdA); + Assert.IsLessThan(0.01d, (partials[2] - true_dxdK) / true_dxdK); + Assert.IsLessThan(0.01d, (covar[0, 0] - true_VarU) / true_VarU); + Assert.IsLessThan(0.01d, (covar[1, 1] - true_VarA) / true_VarA); + Assert.IsLessThan(0.01d, (covar[2, 2] - true_VarK) / true_VarK); + Assert.IsLessThan(0.01d, (covar[0, 1] - true_CovarUA) / true_CovarUA); + Assert.IsLessThan(0.01d, (covar[0, 2] - true_CovarUK) / true_CovarUK); + Assert.IsLessThan(0.01d, (covar[1, 2] - true_CovarAK) / 
true_CovarAK); + Assert.IsLessThan(0.01d, (qVar - true_QVar) / true_QVar); + Assert.IsLessThan(0.01d, (qSigma - true_QSigma) / true_QSigma); } /// @@ -225,14 +225,14 @@ public void Test_GEV_StandardError() public void Test_Construction() { var GEV = new GeneralizedExtremeValue(); - Assert.AreEqual(GEV.Xi, 100); - Assert.AreEqual(GEV.Alpha, 10); - Assert.AreEqual(GEV.Kappa, 0); + Assert.AreEqual(100,GEV.Xi); + Assert.AreEqual(10,GEV.Alpha); + Assert.AreEqual(0, GEV.Kappa); var GEV2 = new GeneralizedExtremeValue(-100, 1, 1); - Assert.AreEqual(GEV2.Xi, -100); - Assert.AreEqual(GEV2.Alpha, 1); - Assert.AreEqual(GEV2.Kappa, 1); + Assert.AreEqual(-100,GEV2.Xi); + Assert.AreEqual(1, GEV2.Alpha); + Assert.AreEqual(1, GEV2.Kappa); } /// @@ -258,12 +258,12 @@ public void Test_InvalidParameters() public void Test_ParametersToString() { var GEV = new GeneralizedExtremeValue(); - Assert.AreEqual(GEV.ParametersToString[0, 0], "Location (ξ)"); - Assert.AreEqual(GEV.ParametersToString[1, 0], "Scale (α)"); - Assert.AreEqual(GEV.ParametersToString[2, 0], "Shape (κ)"); - Assert.AreEqual(GEV.ParametersToString[0, 1], "100"); - Assert.AreEqual(GEV.ParametersToString[1, 1], "10"); - Assert.AreEqual(GEV.ParametersToString[2, 1], "0"); + Assert.AreEqual("Location (ξ)",GEV.ParametersToString[0, 0] ); + Assert.AreEqual("Scale (α)", GEV.ParametersToString[1, 0]); + Assert.AreEqual("Shape (κ)", GEV.ParametersToString[2, 0]); + Assert.AreEqual("100", GEV.ParametersToString[0, 1]); + Assert.AreEqual("10", GEV.ParametersToString[1, 1]); + Assert.AreEqual("0", GEV.ParametersToString[2, 1]); } /// @@ -291,10 +291,10 @@ public void Test_Mean() Assert.AreEqual(GEV.Mean, true_val); var GEV2 = new GeneralizedExtremeValue(100, 10, 0.9); - Assert.AreEqual(GEV2.Mean, 100.42482,1e-04); + Assert.AreEqual(100.42482, GEV2.Mean, 1e-04); var GEV3 = new GeneralizedExtremeValue(100, 10, 10); - Assert.AreEqual(GEV3.Mean,double.NaN); + Assert.AreEqual(double.NaN,GEV3.Mean); } /// @@ -304,10 +304,10 @@ public void Test_Mean() public void Test_Median() { var GEV = new GeneralizedExtremeValue(); - Assert.AreEqual(GEV.Median, 103.66512, 1e-04); + Assert.AreEqual(103.66512, GEV.Median, 1e-04); var GEV2 = new GeneralizedExtremeValue(100, 10, 0.9); - Assert.AreEqual(GEV2.Median, 104.3419519, 1e-04); + Assert.AreEqual(104.3419519, GEV2.Median, 1e-04); } /// @@ -317,10 +317,10 @@ public void Test_Median() public void Test_Mode() { var GEV = new GeneralizedExtremeValue(); - Assert.AreEqual(GEV.Mode, 100); + Assert.AreEqual(100,GEV.Mode); var GEV2 = new GeneralizedExtremeValue(100, 10, 1); - Assert.AreEqual(GEV2.Mode, 95); + Assert.AreEqual(95,GEV2.Mode); } /// @@ -330,13 +330,13 @@ public void Test_Mode() public void Test_StandardDeviation() { var GEV = new GeneralizedExtremeValue(); - Assert.AreEqual(GEV.StandardDeviation, 12.825498, 1e-05); + Assert.AreEqual(12.825498, GEV.StandardDeviation, 1e-05); var GEV2 = new GeneralizedExtremeValue(100, 10, 0.49); - Assert.AreEqual(GEV2.StandardDeviation, 9.280898, 1e-04); + Assert.AreEqual(9.280898, GEV2.StandardDeviation, 1e-04); var GEV3 = new GeneralizedExtremeValue(100, 10, 1); - Assert.AreEqual(GEV3.StandardDeviation, double.NaN); + Assert.AreEqual(double.NaN,GEV3.StandardDeviation ); } /// @@ -346,13 +346,13 @@ public void Test_StandardDeviation() public void Test_Skewness() { var GEV = new GeneralizedExtremeValue(); - Assert.AreEqual(GEV.Skewness, 1.1396); + Assert.AreEqual(1.1396, GEV.Skewness); var GEV2 = new GeneralizedExtremeValue(100, 10, 0.3); - Assert.AreEqual(GEV2.Skewness, -0.0690175, 
1e-03); + Assert.AreEqual(-0.0690175, GEV2.Skewness, 1e-03); var GEV3 = new GeneralizedExtremeValue(100, 10, 1); - Assert.AreEqual(GEV3.Skewness, double.NaN); + Assert.AreEqual(double.NaN, GEV3.Skewness); } /// @@ -362,13 +362,13 @@ public void Test_Skewness() public void Test_Kurtosis() { var GEV = new GeneralizedExtremeValue(); - Assert.AreEqual(GEV.Kurtosis, 3 + 12d / 5d); + Assert.AreEqual(3 + 12d / 5d,GEV.Kurtosis ); var GEV2 = new GeneralizedExtremeValue(100, 10, 0.24); - Assert.AreEqual(GEV2.Kurtosis, 2.7659607, 1e-04); + Assert.AreEqual(2.7659607, GEV2.Kurtosis, 1e-04); var GEV3 = new GeneralizedExtremeValue(100, 10, 1); - Assert.AreEqual(GEV3.Kurtosis,double.NaN); + Assert.AreEqual(double.NaN,GEV3.Kurtosis); } /// @@ -378,10 +378,10 @@ public void Test_Kurtosis() public void Test_Minimum() { var GEV = new GeneralizedExtremeValue(); - Assert.AreEqual(GEV.Minimum,double.NegativeInfinity); + Assert.AreEqual(double.NegativeInfinity, GEV.Minimum); var GEV2 = new GeneralizedExtremeValue(100, 10, -5); - Assert.AreEqual(GEV2.Minimum, 98); + Assert.AreEqual(98, GEV2.Minimum); } /// @@ -391,10 +391,10 @@ public void Test_Minimum() public void Test_Maximum() { var GEV = new GeneralizedExtremeValue(); - Assert.AreEqual(GEV.Maximum,double.PositiveInfinity); + Assert.AreEqual(double.PositiveInfinity, GEV.Maximum); var GEV2 = new GeneralizedExtremeValue(100, 10, 1); - Assert.AreEqual(GEV2.Maximum, 110); + Assert.AreEqual(110, GEV2.Maximum); } /// @@ -404,11 +404,11 @@ public void Test_Maximum() public void Test_PDF() { var GEV = new GeneralizedExtremeValue(); - Assert.AreEqual(GEV.PDF(0), 0); - Assert.AreEqual(GEV.PDF(1), 0); + Assert.AreEqual(0,GEV.PDF(0)); + Assert.AreEqual(0,GEV.PDF(1)); var GEV2 = new GeneralizedExtremeValue(100, 10, 1); - Assert.AreEqual(GEV2.PDF(0), 1.67017007902456E-06,1e-10); + Assert.AreEqual(1.67017007902456E-06, GEV2.PDF(0), 1e-10); } /// @@ -418,12 +418,12 @@ public void Test_PDF() public void Test_CDF() { var GEV = new GeneralizedExtremeValue(); - Assert.AreEqual(GEV.CDF(100), 0.367879, 1e-04); - Assert.AreEqual(GEV.CDF(200), 0.9999546, 1e-07); + Assert.AreEqual(0.367879, GEV.CDF(100), 1e-04); + Assert.AreEqual(0.9999546, GEV.CDF(200), 1e-07); var GEV2 = new GeneralizedExtremeValue(100, 10, 1); - Assert.AreEqual(GEV2.CDF(100), 0.367879, 1e-05); - Assert.AreEqual(GEV2.CDF(200), 1); + Assert.AreEqual(0.367879, GEV2.CDF(100), 1e-05); + Assert.AreEqual(1,GEV2.CDF(200)); } /// @@ -433,9 +433,9 @@ public void Test_CDF() public void Test_InverseCDF() { var GEV = new GeneralizedExtremeValue(); - Assert.AreEqual(GEV.InverseCDF(0), double.NegativeInfinity); - Assert.AreEqual(GEV.InverseCDF(0.5), 103.66512, 1e-05); - Assert.AreEqual(GEV.InverseCDF(1), double.PositiveInfinity); + Assert.AreEqual(double.NegativeInfinity,GEV.InverseCDF(0) ); + Assert.AreEqual(103.66512, GEV.InverseCDF(0.5), 1e-05); + Assert.AreEqual(double.PositiveInfinity,GEV.InverseCDF(1) ); } } } diff --git a/Test_Numerics/Distributions/Univariate/Test_GeneralizedLogistic.cs b/Test_Numerics/Distributions/Univariate/Test_GeneralizedLogistic.cs index ad80f1a9..21859928 100644 --- a/Test_Numerics/Distributions/Univariate/Test_GeneralizedLogistic.cs +++ b/Test_Numerics/Distributions/Univariate/Test_GeneralizedLogistic.cs @@ -84,9 +84,9 @@ public void Test_GLO_MOM_Fit() double true_x = 31892d; double true_a = 9030d; double true_k = -0.05515d; - Assert.AreEqual((x - true_x) / true_x < 0.01d, true); - Assert.AreEqual((a - true_a) / true_a < 0.01d, true); - Assert.AreEqual((k - true_k) / true_k < 0.01d, true); + 
Assert.IsLessThan(0.01d, (x - true_x) / true_x);
+ Assert.IsLessThan(0.01d, (a - true_a) / true_a);
+ Assert.IsLessThan(0.01d, (k - true_k) / true_k);
}

///
@@ -116,10 +116,10 @@ public void Test_GLO_LMOM_Fit()
Assert.AreEqual(a, true_a, 0.001d);
Assert.AreEqual(k, true_k, 0.001d);
var lmom = GLO.LinearMomentsFromParameters(GLO.GetParameters);
- Assert.AreEqual(lmom[0], 1648.806d, 0.001d);
- Assert.AreEqual(lmom[1], 138.2366d, 0.001d);
- Assert.AreEqual(lmom[2], 0.1033903d, 0.001d);
- Assert.AreEqual(lmom[3], 0.1755746d, 0.001d);
+ Assert.AreEqual(1648.806d, lmom[0], 0.001d);
+ Assert.AreEqual(138.2366d, lmom[1], 0.001d);
+ Assert.AreEqual(0.1033903d, lmom[2], 0.001d);
+ Assert.AreEqual(0.1755746d, lmom[3], 0.001d);
}

///
@@ -144,9 +144,9 @@ public void Test_GLO_MLE_Fit()
double true_x = 30911.83d;
double true_a = 9305.0205d;
double true_k = -0.144152d;
- Assert.AreEqual((x - true_x) / true_x < 0.01d, true);
- Assert.AreEqual((a - true_a) / true_a < 0.01d, true);
- Assert.AreEqual((k - true_k) / true_k < 0.01d, true);
+ Assert.IsLessThan(0.01d, (x - true_x) / true_x);
+ Assert.IsLessThan(0.01d, (a - true_a) / true_a);
+ Assert.IsLessThan(0.01d, (k - true_k) / true_k);
}

///
@@ -166,10 +166,10 @@ public void Test_GLO_Quantile()
var GLO = new GeneralizedLogistic(31892d, 9030d, -0.05515d);
double q100 = GLO.InverseCDF(0.99d);
double true_100 = 79117d;
- Assert.AreEqual((q100 - true_100) / true_100 < 0.01d, true);
+ Assert.IsLessThan(0.01d, (q100 - true_100) / true_100);
double p = GLO.CDF(q100);
double true_p = 0.99d;
- Assert.AreEqual(p == true_p, true);
+ Assert.AreEqual(true_p, p);
}

///
@@ -185,9 +185,9 @@ public void Test_GLO_Partials()
double true_dLocation = 1.0d;
double true_dScale = 6.51695d;
double true_dShape = -154595.08d;
- Assert.AreEqual((dQdLocation - true_dLocation) / true_dLocation < 0.01d, true);
- Assert.AreEqual((dQdScale - true_dScale) / true_dScale < 0.01d, true);
- Assert.AreEqual((dQdShape - true_dShape) / true_dShape < 0.01d, true);
+ Assert.IsLessThan(0.01d, (dQdLocation - true_dLocation) / true_dLocation);
+ Assert.IsLessThan(0.01d, (dQdScale - true_dScale) / true_dScale);
+ Assert.IsLessThan(0.01d, (dQdShape - true_dShape) / true_dShape);
}

///
@@ -197,14 +197,14 @@ public void Test_GLO_Partials()
public void Test_Construction()
{
var l = new GeneralizedLogistic();
- Assert.AreEqual(l.Xi, 100);
- Assert.AreEqual(l.Alpha, 10);
- Assert.AreEqual(l.Kappa, 0);
+ Assert.AreEqual(100, l.Xi);
+ Assert.AreEqual(10, l.Alpha);
+ Assert.AreEqual(0, l.Kappa);

var l2 = new GeneralizedLogistic(-100, 10, 1);
- Assert.AreEqual(l2.Xi, -100);
- Assert.AreEqual(l2.Alpha, 10);
- Assert.AreEqual(l2.Kappa, 1);
+ Assert.AreEqual(-100, l2.Xi);
+ Assert.AreEqual(10, l2.Alpha);
+ Assert.AreEqual(1, l2.Kappa);
}

///
@@ -230,12 +230,12 @@ public void Test_InvalidParameters()
public void Test_ParametersToString()
{
var l = new GeneralizedLogistic();
- Assert.AreEqual(l.ParametersToString[0, 0], "Location (ξ)");
- Assert.AreEqual(l.ParametersToString[1, 0], "Scale (α)");
- Assert.AreEqual(l.ParametersToString[2, 0], "Shape (κ)");
- Assert.AreEqual(l.ParametersToString[0, 1], "100");
- Assert.AreEqual(l.ParametersToString[1, 1], "10");
- Assert.AreEqual(l.ParametersToString[2, 1], "0");
+ Assert.AreEqual("Location (ξ)", l.ParametersToString[0, 0]);
+ Assert.AreEqual("Scale (α)", l.ParametersToString[1, 0]);
+ Assert.AreEqual("Shape (κ)", l.ParametersToString[2, 0]);
+ Assert.AreEqual("100", l.ParametersToString[0, 1]);
+ Assert.AreEqual("10", l.ParametersToString[1, 1]);
+ Assert.AreEqual("0",
l.ParametersToString[2, 1]); } /// @@ -259,13 +259,13 @@ public void Test_Moments() public void Test_Mean() { var l = new GeneralizedLogistic(); - Assert.AreEqual(l.Mean, 100); + Assert.AreEqual(100, l.Mean); var l2 = new GeneralizedLogistic(100, 10, 0.9); - Assert.AreEqual(l2.Mean, 9.44703, 1e-04); + Assert.AreEqual(9.44703, l2.Mean, 1e-04); var l3 = new GeneralizedLogistic(100, 10, 1); - Assert.AreEqual(l3.Mean, double.NaN); + Assert.AreEqual(double.NaN,l3.Mean ); } /// @@ -275,10 +275,10 @@ public void Test_Mean() public void Test_Median() { var l = new GeneralizedLogistic(); - Assert.AreEqual(l.Median, 100); + Assert.AreEqual(100, l.Median); var l2 = new GeneralizedLogistic(10, 10, 1); - Assert.AreEqual(l2.Median, 10); + Assert.AreEqual(10, l2.Median); } /// @@ -288,10 +288,10 @@ public void Test_Median() public void Test_Mode() { var l = new GeneralizedLogistic(); - Assert.AreEqual(l.Mode, 100); + Assert.AreEqual(100, l.Mode); var l2 = new GeneralizedLogistic(100, 10, 1); - Assert.AreEqual(l2.Mode, 95); + Assert.AreEqual(95, l2.Mode); } /// @@ -301,13 +301,13 @@ public void Test_Mode() public void Test_StandardDeviation() { var l = new GeneralizedLogistic(); - Assert.AreEqual(l.StandardDeviation, 18.13799, 1e-04); + Assert.AreEqual(18.13799, l.StandardDeviation, 1e-04); var l2 = new GeneralizedLogistic(100, 10, 0.4); - Assert.AreEqual(l2.StandardDeviation, 39.76482, 1e-04); + Assert.AreEqual(39.76482, l2.StandardDeviation, 1e-04); var l3 = new GeneralizedLogistic(100, 10, 1); - Assert.AreEqual(l3.StandardDeviation, double.NaN); + Assert.AreEqual(double.NaN,l3.StandardDeviation ); } /// @@ -317,13 +317,13 @@ public void Test_StandardDeviation() public void Test_Skewness() { var l = new GeneralizedLogistic(); - Assert.AreEqual(l.Skewness, 0); + Assert.AreEqual(0, l.Skewness); var l2 = new GeneralizedLogistic(100, 10, 0.3); - Assert.AreEqual(l2.Skewness, -10.90354, 1e-04); + Assert.AreEqual(-10.90354, l2.Skewness, 1e-04); var l3 = new GeneralizedLogistic(100, 10, 1); - Assert.AreEqual(l3.Skewness, double.NaN); + Assert.AreEqual(double.NaN, l3.Skewness); } /// @@ -333,13 +333,13 @@ public void Test_Skewness() public void Test_Kurtosis() { var l = new GeneralizedLogistic(); - Assert.AreEqual(l.Kurtosis, 21d / 5d); + Assert.AreEqual(21d / 5d,l.Kurtosis); var l2 = new GeneralizedLogistic(100, 10, 0.24); - Assert.AreEqual(l2.Kurtosis, 199.733369,1e-04); + Assert.AreEqual(199.733369, l2.Kurtosis, 1e-04); var l3 = new GeneralizedLogistic(100, 10, 0.25); - Assert.AreEqual(l3.Kurtosis, double.NaN); + Assert.AreEqual(double.NaN,l3.Kurtosis ); } /// @@ -349,10 +349,10 @@ public void Test_Kurtosis() public void Test_Minimum() { var l = new GeneralizedLogistic(); - Assert.AreEqual(l.Minimum, double.NegativeInfinity); + Assert.AreEqual(double.NegativeInfinity, l.Minimum); var l2 = new GeneralizedLogistic(100, 10, -5); - Assert.AreEqual(l2.Minimum, 98); + Assert.AreEqual(98, l2.Minimum); } /// @@ -362,10 +362,10 @@ public void Test_Minimum() public void Test_Maximum() { var l = new GeneralizedLogistic(); - Assert.AreEqual(l.Maximum, double.PositiveInfinity); + Assert.AreEqual(double.PositiveInfinity, l.Maximum); var l2 = new GeneralizedLogistic(100, 10, 1); - Assert.AreEqual(l2.Maximum, 110); + Assert.AreEqual(110, l2.Maximum); } /// @@ -375,12 +375,12 @@ public void Test_Maximum() public void Test_PDF() { var l = new GeneralizedLogistic(); - Assert.AreEqual(l.PDF(100), 0.025); - Assert.AreEqual(l.PDF(0), 4.5395e-06,1e-10); + Assert.AreEqual(0.025,l.PDF(100) ); + Assert.AreEqual(4.5395e-06, l.PDF(0), 
1e-10); var l2 = new GeneralizedLogistic(100, 10, 1); - Assert.AreEqual(l2.PDF(100), 0.025); - Assert.AreEqual(l2.PDF(0),6.9444e-04,1e-08); + Assert.AreEqual(0.025, l2.PDF(100)); + Assert.AreEqual(6.9444e-04, l2.PDF(0),1e-08); } /// @@ -390,11 +390,11 @@ public void Test_PDF() public void Test_CDF() { var l = new GeneralizedLogistic(); - Assert.AreEqual(l.CDF(100), 0.5); - Assert.AreEqual(l.CDF(0), 4.5397e-05, 1e-8); + Assert.AreEqual(0.5, l.CDF(100)); + Assert.AreEqual(4.5397e-05, l.CDF(0), 1e-8); var l2 = new GeneralizedLogistic(100, 10, 1); - Assert.AreEqual(l2.CDF(0), 0.083333, 1e-04); + Assert.AreEqual(0.083333, l2.CDF(0), 1e-04); } /// @@ -404,13 +404,13 @@ public void Test_CDF() public void Test_InverseCDF() { var l = new GeneralizedLogistic(); - Assert.AreEqual(l.InverseCDF(0), double.NegativeInfinity); - Assert.AreEqual(l.InverseCDF(0.5), 100); - Assert.AreEqual(l.InverseCDF(1),double.PositiveInfinity); + Assert.AreEqual(double.NegativeInfinity,l.InverseCDF(0)); + Assert.AreEqual(100, l.InverseCDF(0.5)); + Assert.AreEqual(double.PositiveInfinity, l.InverseCDF(1)); var l2 = new GeneralizedLogistic(100, 10, 1); - Assert.AreEqual(l2.InverseCDF(0.5), 100); - Assert.AreEqual(l2.InverseCDF(0.7), 105.714285,1e-04); + Assert.AreEqual(100, l2.InverseCDF(0.5)); + Assert.AreEqual(105.714285, l2.InverseCDF(0.7), 1e-04); } } } diff --git a/Test_Numerics/Distributions/Univariate/Test_GeneralizedNormal.cs b/Test_Numerics/Distributions/Univariate/Test_GeneralizedNormal.cs index ff3d11a5..708dfe74 100644 --- a/Test_Numerics/Distributions/Univariate/Test_GeneralizedNormal.cs +++ b/Test_Numerics/Distributions/Univariate/Test_GeneralizedNormal.cs @@ -78,10 +78,10 @@ public void Test_GNO_LMOM() Assert.AreEqual(k, true_k, 0.0001d); var lmom = gno.LinearMomentsFromParameters(gno.GetParameters); - Assert.AreEqual(lmom[0], 9.95751634, 0.0001d); - Assert.AreEqual(lmom[1], 1.98224114, 0.0001d); - Assert.AreEqual(lmom[2], 0.06380804, 0.0001d); - Assert.AreEqual(lmom[3], 0.1258014, 0.0001d); + Assert.AreEqual(9.95751634, lmom[0], 0.0001d); + Assert.AreEqual(1.98224114, lmom[1], 0.0001d); + Assert.AreEqual(0.06380804, lmom[2], 0.0001d); + Assert.AreEqual(0.1258014, lmom[3], 0.0001d); } @@ -165,14 +165,14 @@ public void Test_GNO_PartialDerivatives() public void Test_Construction() { var n = new GeneralizedNormal(); - Assert.AreEqual(n.Xi, 100); - Assert.AreEqual(n.Alpha, 10); - Assert.AreEqual(n.Kappa, 0); + Assert.AreEqual(100,n.Xi); + Assert.AreEqual(10, n.Alpha); + Assert.AreEqual(0, n.Kappa); var n2 = new GeneralizedNormal(-100, 1, 1); - Assert.AreEqual(n2.Xi, -100); - Assert.AreEqual(n2.Alpha, 1); - Assert.AreEqual(n2.Kappa, 1); + Assert.AreEqual(-100, n2.Xi); + Assert.AreEqual(1, n2.Alpha); + Assert.AreEqual(1, n2.Kappa); } /// @@ -198,12 +198,12 @@ public void Test_InvalidParameters() public void Test_ParametersToString() { var n = new GeneralizedNormal(); - Assert.AreEqual(n.ParametersToString[0, 0], "Location (ξ)"); - Assert.AreEqual(n.ParametersToString[1, 0], "Scale (α)"); - Assert.AreEqual(n.ParametersToString[2, 0], "Shape (κ)"); - Assert.AreEqual(n.ParametersToString[0, 1], "100"); - Assert.AreEqual(n.ParametersToString[1, 1], "10"); - Assert.AreEqual(n.ParametersToString[2, 1], "0"); + Assert.AreEqual("Location (ξ)", n.ParametersToString[0, 0]); + Assert.AreEqual("Scale (α)", n.ParametersToString[1, 0]); + Assert.AreEqual("Shape (κ)", n.ParametersToString[2, 0]); + Assert.AreEqual("100", n.ParametersToString[0, 1]); + Assert.AreEqual("10",n.ParametersToString[1, 1]); + Assert.AreEqual("0", 
n.ParametersToString[2, 1]); } /// diff --git a/Test_Numerics/Distributions/Univariate/Test_GeneralizedPareto.cs b/Test_Numerics/Distributions/Univariate/Test_GeneralizedPareto.cs index 460ec679..c2b9d3be 100644 --- a/Test_Numerics/Distributions/Univariate/Test_GeneralizedPareto.cs +++ b/Test_Numerics/Distributions/Univariate/Test_GeneralizedPareto.cs @@ -84,9 +84,9 @@ public void Test_GPA_MOM_Fit() double true_x = 50169.23d; double true_a = 55443d; double true_k = 0.0956d; - Assert.AreEqual((x - true_x) / true_x < 0.01d, true); - Assert.AreEqual((a - true_a) / true_a < 0.01d, true); - Assert.AreEqual((k - true_k) / true_k < 0.01d, true); + Assert.IsLessThan(0.01d,(x - true_x) / true_x ); + Assert.IsLessThan(0.01d,(a - true_a) / true_a ); + Assert.IsLessThan(0.01d, (k - true_k) / true_k); } /// @@ -108,10 +108,10 @@ public void Test_GPA_LMOM_Fit() Assert.AreEqual(a, true_a, 0.001d); Assert.AreEqual(k, true_k, 0.001d); var lmom = GPA.LinearMomentsFromParameters(GPA.GetParameters); - Assert.AreEqual(lmom[0], 1648.806d, 0.001d); - Assert.AreEqual(lmom[1], 138.2366d, 0.001d); - Assert.AreEqual(lmom[2], 0.1033903d, 0.001d); - Assert.AreEqual(lmom[3], 0.03073215d, 0.001d); + Assert.AreEqual(1648.806d, lmom[0], 0.001d); + Assert.AreEqual(138.2366d, lmom[1], 0.001d); + Assert.AreEqual(0.1033903d, lmom[2], 0.001d); + Assert.AreEqual(0.03073215d, lmom[3], 0.001d); } /// @@ -136,9 +136,9 @@ public void Test_GPA_ModMOM_Fit() double true_x = 50203.04d; double true_a = 55365.72d; double true_k = 0.0948d; - Assert.AreEqual((x - true_x) / true_x < 0.01d, true); - Assert.AreEqual((a - true_a) / true_a < 0.01d, true); - Assert.AreEqual((k - true_k) / true_k < 0.01d, true); + Assert.IsLessThan(0.01d, (x - true_x) / true_x); + Assert.IsLessThan(0.01d, (a - true_a) / true_a); + Assert.IsLessThan(0.01d, (k - true_k) / true_k); } /// @@ -163,9 +163,9 @@ public void Test_GPA_MLE_Fit() double true_x = 50400d; double true_a = 55142.29d; double true_k = 0.0945d; - Assert.AreEqual((x - true_x) / true_x < 0.01d, true); - Assert.AreEqual((a - true_a) / true_a < 0.01d, true); - Assert.AreEqual((k - true_k) / true_k < 0.01d, true); + Assert.IsLessThan(0.01d, (x - true_x) / true_x); + Assert.IsLessThan(0.01d, (a - true_a) / true_a); + Assert.IsLessThan(0.01d, (k - true_k) / true_k); } /// @@ -185,10 +185,10 @@ public void Test_GPA_Quantile() var GPA = new GeneralizedPareto(50203.04d, 55365.72d, 0.0948d); double q100 = GPA.InverseCDF(0.99d); double true_q100 = 256803d; - Assert.AreEqual((q100 - true_q100) / true_q100 < 0.01d, true); + Assert.IsLessThan(0.01d, (q100 - true_q100) / true_q100); double p = GPA.CDF(q100); double true_p = 0.99d; - Assert.AreEqual((p - true_p) / true_p < 0.01d, true); + Assert.IsLessThan(0.01d, (p - true_p) / true_p); } /// @@ -204,9 +204,9 @@ public void Test_GPA_Partials() double true_dLocation = 1.0d; double true_dScale = 3.7315488d; double true_dShape = -441209.53d; - Assert.AreEqual((dQdLocation - true_dLocation) / true_dLocation < 0.01d, true); - Assert.AreEqual((dQdScale - true_dScale) / true_dScale < 0.01d, true); - Assert.AreEqual((dQdShape - true_dShape) / true_dShape < 0.01d, true); + Assert.IsLessThan(0.01d, (dQdLocation - true_dLocation) / true_dLocation); + Assert.IsLessThan(0.01d, (dQdScale - true_dScale) / true_dScale); + Assert.IsLessThan(0.01d, (dQdShape - true_dShape) / true_dShape); } /// @@ -228,13 +228,13 @@ public void Test_GPA_StandardError() var GPA = new GeneralizedPareto(50203.04d, 55365.72d, 0.0948d); double qVar99 = Math.Sqrt(GPA.QuantileVariance(0.99d, sample.Length, 
ParameterEstimationMethod.MethodOfMoments)); double true_qVar99 = 16657d; - Assert.AreEqual((qVar99 - true_qVar99) / true_qVar99 < 0.01d, true); + Assert.IsLessThan(0.01d, (qVar99 - true_qVar99) / true_qVar99); // Maximum Likelihood GPA = new GeneralizedPareto(50400d, 55142.29d, 0.0945d); qVar99 = Math.Sqrt(GPA.QuantileVariance(0.99d, sample.Length, ParameterEstimationMethod.MaximumLikelihood)); true_qVar99 = 15938d; - Assert.AreEqual((qVar99 - true_qVar99) / true_qVar99 < 0.01d, true); + Assert.IsLessThan(0.01d, (qVar99 - true_qVar99) / true_qVar99); } /// @@ -244,14 +244,14 @@ public void Test_GPA_StandardError() public void Test_Construction() { var GPA = new GeneralizedPareto(); - Assert.AreEqual(GPA.Xi, 100); - Assert.AreEqual(GPA.Alpha, 10); - Assert.AreEqual(GPA.Kappa, 0); + Assert.AreEqual(100,GPA.Xi); + Assert.AreEqual(10,GPA.Alpha); + Assert.AreEqual(0, GPA.Kappa); var GPA2 = new GeneralizedExtremeValue(-100, 1, 1); - Assert.AreEqual(GPA2.Xi, -100); - Assert.AreEqual(GPA2.Alpha, 1); - Assert.AreEqual(GPA2.Kappa, 1); + Assert.AreEqual(-100,GPA2.Xi); + Assert.AreEqual(1, GPA2.Alpha); + Assert.AreEqual(1, GPA2.Kappa); } /// @@ -277,12 +277,12 @@ public void Test_InvalidParameters() public void Test_ParametersToString() { var GPA = new GeneralizedPareto(); - Assert.AreEqual(GPA.ParametersToString[0, 0], "Location (ξ)"); - Assert.AreEqual(GPA.ParametersToString[1, 0], "Scale (α)"); - Assert.AreEqual(GPA.ParametersToString[2, 0], "Shape (κ)"); - Assert.AreEqual(GPA.ParametersToString[0, 1], "100"); - Assert.AreEqual(GPA.ParametersToString[1, 1], "10"); - Assert.AreEqual(GPA.ParametersToString[2, 1], "0"); + Assert.AreEqual("Location (ξ)",GPA.ParametersToString[0, 0] ); + Assert.AreEqual("Scale (α)", GPA.ParametersToString[1, 0]); + Assert.AreEqual("Shape (κ)", GPA.ParametersToString[2, 0]); + Assert.AreEqual("100", GPA.ParametersToString[0, 1]); + Assert.AreEqual("10", GPA.ParametersToString[1, 1]); + Assert.AreEqual("0", GPA.ParametersToString[2, 1]); } /// @@ -306,13 +306,13 @@ public void Test_Moments() public void Test_Mean() { var GPA = new GeneralizedPareto(); - Assert.AreEqual(GPA.Mean, 110); + Assert.AreEqual(110, GPA.Mean); var GPA2 = new GeneralizedPareto(100, 10, 0.9); - Assert.AreEqual(GPA2.Mean, 105.26315, 1e-04); + Assert.AreEqual(105.26315, GPA2.Mean, 1e-04); var GPA3 = new GeneralizedPareto(100, 10, 1); - Assert.AreEqual(GPA3.Mean, double.NaN); + Assert.AreEqual(double.NaN,GPA3.Mean); } /// @@ -322,10 +322,10 @@ public void Test_Mean() public void Test_Median() { var GPA = new GeneralizedPareto(); - Assert.AreEqual(GPA.Median, 106.93147, 1e-04); + Assert.AreEqual(106.93147, GPA.Median, 1e-04); var GPA2 = new GeneralizedPareto(100, 10, 1); - Assert.AreEqual(GPA2.Median, 95); + Assert.AreEqual(95, GPA2.Median); } /// @@ -335,10 +335,10 @@ public void Test_Median() public void Test_Mode() { var GPA = new GeneralizedPareto(); - Assert.AreEqual(GPA.Mode, 100); + Assert.AreEqual(100, GPA.Mode); var GPA2 = new GeneralizedPareto(100, 10, 1); - Assert.AreEqual(GPA2.Mode, 95); + Assert.AreEqual(95, GPA2.Mode); } /// @@ -348,13 +348,13 @@ public void Test_Mode() public void Test_StandardDeviation() { var GPA = new GeneralizedPareto(); - Assert.AreEqual(GPA.StandardDeviation, 10); + Assert.AreEqual(10, GPA.StandardDeviation); var GPA2 = new GeneralizedPareto(100, 10, 0.25); - Assert.AreEqual(GPA2.StandardDeviation, 6.531972, 1e-04); + Assert.AreEqual(6.531972, GPA2.StandardDeviation, 1e-04); var GPA3 = new GeneralizedPareto(100, 10, 1); - Assert.AreEqual(GPA3.StandardDeviation, 
double.NaN); + Assert.AreEqual(double.NaN, GPA3.StandardDeviation); } /// @@ -364,13 +364,13 @@ public void Test_StandardDeviation() public void Test_Skewness() { var GPA = new GeneralizedPareto(); - Assert.AreEqual(GPA.Skewness, 2); + Assert.AreEqual(2,GPA.Skewness); var GPA2 = new GeneralizedPareto(100, 10, 0.3); - Assert.AreEqual(GPA2.Skewness, 0.932039, 1e-04); + Assert.AreEqual(0.932039, GPA2.Skewness, 1e-04); var GPA3 = new GeneralizedPareto(100, 10, 1); - Assert.AreEqual(GPA3.Skewness, double.NaN); + Assert.AreEqual(double.NaN,GPA3.Skewness ); } /// @@ -380,13 +380,13 @@ public void Test_Skewness() public void Test_Kurtosis() { var GPA = new GeneralizedPareto(); - Assert.AreEqual(GPA.Kurtosis, 9); + Assert.AreEqual(9, GPA.Kurtosis); var GPA2 = new GeneralizedPareto(100, 10, 0.24); - Assert.AreEqual(GPA2.Kurtosis, 3.786748, 1e-04); + Assert.AreEqual(3.786748, GPA2.Kurtosis, 1e-04); var GPA3 = new GeneralizedPareto(100, 10, 1); - Assert.AreEqual(GPA3.Kurtosis, double.NaN); + Assert.AreEqual(double.NaN, GPA3.Kurtosis); } /// @@ -396,7 +396,7 @@ public void Test_Kurtosis() public void Test_Minimum() { var GPA = new GeneralizedPareto(); - Assert.AreEqual(GPA.Minimum, 100); + Assert.AreEqual(100, GPA.Minimum); } /// @@ -406,10 +406,10 @@ public void Test_Minimum() public void Test_Maximum() { var GPA = new GeneralizedPareto(); - Assert.AreEqual(GPA.Maximum, double.PositiveInfinity); + Assert.AreEqual(double.PositiveInfinity, GPA.Maximum); var GPA2 = new GeneralizedPareto(100, 10, 1); - Assert.AreEqual(GPA2.Maximum, 110); + Assert.AreEqual(110, GPA2.Maximum); } /// @@ -419,12 +419,12 @@ public void Test_Maximum() public void Test_PDF() { var GPA = new GeneralizedPareto(); - Assert.AreEqual(GPA.PDF(100), 0.1); - Assert.AreEqual(GPA.PDF(200), 4.53999e-06, 1e-10); + Assert.AreEqual(0.1,GPA.PDF(100)); + Assert.AreEqual(4.53999e-06, GPA.PDF(200), 1e-10); var GPA2 = new GeneralizedPareto(100, 10, 1); - Assert.AreEqual(GPA2.PDF(200), 0); - Assert.AreEqual(GPA2.PDF(50), 0); + Assert.AreEqual(0,GPA2.PDF(200)); + Assert.AreEqual(0,GPA2.PDF(50)); } /// @@ -434,14 +434,14 @@ public void Test_PDF() public void Test_CDF() { var GPA = new GeneralizedPareto(); - Assert.AreEqual(GPA.CDF(100), 0); - Assert.AreEqual(GPA.CDF(0), 0, 1e-04); - Assert.AreEqual(GPA.CDF(200), 0.999954, 1e-06); + Assert.AreEqual(0, GPA.CDF(100)); + Assert.AreEqual(0, GPA.CDF(0), 1e-04); + Assert.AreEqual(0.999954, GPA.CDF(200), 1e-06); var GPA2 = new GeneralizedPareto(100, 10, 1); - Assert.AreEqual(GPA2.CDF(50), 0); - Assert.AreEqual(GPA2.CDF(20), 0); - Assert.AreEqual(GPA2.CDF(200), 1); + Assert.AreEqual(0, GPA2.CDF(50)); + Assert.AreEqual(0, GPA2.CDF(20)); + Assert.AreEqual(1, GPA2.CDF(200)); } /// @@ -451,12 +451,12 @@ public void Test_CDF() public void Test_InverseCDF() { var GPA = new GeneralizedPareto(); - Assert.AreEqual(GPA.InverseCDF(0), 100); - Assert.AreEqual(GPA.InverseCDF(1), double.PositiveInfinity); - Assert.AreEqual(GPA.InverseCDF(0.5), 106.93147, 1e-04); + Assert.AreEqual(100, GPA.InverseCDF(0)); + Assert.AreEqual(double.PositiveInfinity,GPA.InverseCDF(1)); + Assert.AreEqual(106.93147, GPA.InverseCDF(0.5), 1e-04); var GPA2 = new GeneralizedPareto(100, 10, 1); - Assert.AreEqual(GPA2.InverseCDF(0.3), 103); + Assert.AreEqual(103,GPA2.InverseCDF(0.3)); } } } diff --git a/Test_Numerics/Distributions/Univariate/Test_Geometric.cs b/Test_Numerics/Distributions/Univariate/Test_Geometric.cs index 1ff80abc..279bf7c2 100644 --- a/Test_Numerics/Distributions/Univariate/Test_Geometric.cs +++ 
b/Test_Numerics/Distributions/Univariate/Test_Geometric.cs @@ -91,13 +91,13 @@ public void Test_GeometricDist() public void Test_Construction() { var G = new Geometric(); - Assert.AreEqual(G.ProbabilityOfSuccess, 0.5); + Assert.AreEqual(0.5, G.ProbabilityOfSuccess); var G2 = new Geometric(0); - Assert.AreEqual(G2.ProbabilityOfSuccess, 0); + Assert.AreEqual(0, G2.ProbabilityOfSuccess); var G3 = new Geometric(1); - Assert.AreEqual(G3.ProbabilityOfSuccess, 1); + Assert.AreEqual(1, G3.ProbabilityOfSuccess); } /// @@ -126,8 +126,8 @@ public void Test_InvalidParameters() public void Test_ParametersToString() { var G = new Geometric(); - Assert.AreEqual(G.ParametersToString[0, 0], "Probability (p)"); - Assert.AreEqual(G.ParametersToString[0, 1], "0.5"); + Assert.AreEqual("Probability (p)",G.ParametersToString[0, 0]); + Assert.AreEqual("0.5", G.ParametersToString[0, 1]); } /// @@ -137,10 +137,10 @@ public void Test_ParametersToString() public void Test_Mean() { var G = new Geometric(); - Assert.AreEqual(G.Mean, 1); + Assert.AreEqual(1, G.Mean); var G2 = new Geometric(0.3); - Assert.AreEqual(G2.Mean, 2.3333, 1e-04); + Assert.AreEqual(2.3333, G2.Mean, 1e-04); } /// @@ -150,13 +150,13 @@ public void Test_Mean() public void Test_Median() { var G = new Geometric(0.0001); - Assert.AreEqual(G.Median, 6931); + Assert.AreEqual(6931,G.Median); var G2 = new Geometric(0.1); - Assert.AreEqual(G2.Median, 6); + Assert.AreEqual(6, G2.Median); var G3 = new Geometric(0.9); - Assert.AreEqual(G3.Median, 0); + Assert.AreEqual(0, G3.Median); } /// @@ -166,10 +166,10 @@ public void Test_Median() public void Test_Mode() { var G = new Geometric(); - Assert.AreEqual(G.Mode,0); + Assert.AreEqual(0, G.Mode); var G2 = new Geometric(0); - Assert.AreEqual(G2.Mode,0); + Assert.AreEqual(0, G2.Mode); } /// @@ -179,10 +179,10 @@ public void Test_Mode() public void Test_StandardDeviation() { var G = new Geometric(); - Assert.AreEqual(G.StandardDeviation, 1.41421,1e-04); + Assert.AreEqual(1.41421, G.StandardDeviation, 1e-04); var G2 = new Geometric(0.3); - Assert.AreEqual(G2.StandardDeviation, 2.78886, 1e-04); + Assert.AreEqual(2.78886, G2.StandardDeviation, 1e-04); } /// @@ -192,10 +192,10 @@ public void Test_StandardDeviation() public void Test_Skewness() { var G = new Geometric(); - Assert.AreEqual(G.Skewness, 2.12132, 1e-04); + Assert.AreEqual(2.12132, G.Skewness, 1e-04); var G2 = new Geometric(0.3); - Assert.AreEqual(G2.Skewness, 2.03188, 1e-04); + Assert.AreEqual(2.03188, G2.Skewness, 1e-04); } /// @@ -205,10 +205,10 @@ public void Test_Skewness() public void Test_Kurtosis() { var G = new Geometric(); - Assert.AreEqual(G.Kurtosis, 9.5); + Assert.AreEqual(9.5,G.Kurtosis); var G2 = new Geometric(0.3); - Assert.AreEqual(G2.Kurtosis, 9.12857, 1e-04); + Assert.AreEqual(9.12857, G2.Kurtosis, 1e-04); } /// @@ -218,12 +218,12 @@ public void Test_Kurtosis() public void Test_MinMax() { var G = new Geometric(); - Assert.AreEqual(G.Minimum, 0); - Assert.AreEqual(G.Maximum,double.PositiveInfinity); + Assert.AreEqual(0,G.Minimum); + Assert.AreEqual(double.PositiveInfinity,G.Maximum); var G2 = new Geometric(0.3); - Assert.AreEqual(G2.Minimum, 0); - Assert.AreEqual(G2.Maximum, double.PositiveInfinity); + Assert.AreEqual(0, G2.Minimum); + Assert.AreEqual(double.PositiveInfinity, G2.Maximum); } /// @@ -233,13 +233,13 @@ public void Test_MinMax() public void Test_PDF() { var G = new Geometric(); - Assert.AreEqual(G.PDF(0), 0.5); - Assert.AreEqual(G.PDF(2), 0.125); - Assert.AreEqual(G.PDF(-1), 0); + Assert.AreEqual(0.5,G.PDF(0)); + 
Assert.AreEqual(0.125,G.PDF(2)); + Assert.AreEqual(0, G.PDF(-1)); var G2 = new Geometric(0.3); - Assert.AreEqual(G2.PDF(0), 0.3); - Assert.AreEqual(G2.PDF(2.5), 0.122989, 1e-05); + Assert.AreEqual(0.3, G2.PDF(0)); + Assert.AreEqual(0.122989, G2.PDF(2.5), 1e-05); } /// @@ -249,14 +249,14 @@ public void Test_PDF() public void Test_CDF() { var G = new Geometric(); - Assert.AreEqual(G.CDF(0), 0.5); - Assert.AreEqual(G.CDF(2), 0.875); - Assert.AreEqual(G.CDF(-1), 0); - Assert.AreEqual(G.CDF(double.PositiveInfinity), 1); + Assert.AreEqual(0.5, G.CDF(0)); + Assert.AreEqual(0.875, G.CDF(2)); + Assert.AreEqual(0, G.CDF(-1)); + Assert.AreEqual(1, G.CDF(double.PositiveInfinity)); var G2 = new Geometric(0.3); - Assert.AreEqual(G2.CDF(2), 0.657); - Assert.AreEqual(G2.CDF(100), 1,1e-04); + Assert.AreEqual(0.657, G2.CDF(2)); + Assert.AreEqual(1,G2.CDF(100), 1e-04); } /// @@ -266,12 +266,12 @@ public void Test_CDF() public void Test_InverseCDF() { var G = new Geometric(); - Assert.AreEqual(G.InverseCDF(0.3), 0); - Assert.AreEqual(G.InverseCDF(0.7), 1, 1e-04); + Assert.AreEqual(0,G.InverseCDF(0.3)); + Assert.AreEqual(1,G.InverseCDF(0.7), 1e-04); var G2 = new Geometric(0.3); - Assert.AreEqual(G2.InverseCDF(0.5), 1, 1e-04); - Assert.AreEqual(G2.InverseCDF(0.9), 6); + Assert.AreEqual(1,G2.InverseCDF(0.5), 1e-04); + Assert.AreEqual(6,G2.InverseCDF(0.9)); } } } diff --git a/Test_Numerics/Distributions/Univariate/Test_Gumbel.cs b/Test_Numerics/Distributions/Univariate/Test_Gumbel.cs index e41fbe42..f9bb555b 100644 --- a/Test_Numerics/Distributions/Univariate/Test_Gumbel.cs +++ b/Test_Numerics/Distributions/Univariate/Test_Gumbel.cs @@ -80,8 +80,8 @@ public void Test_GUM_MOM_Fit() double a = GUM.Alpha; double true_x = 8074.4d; double true_a = 4441.4d; - Assert.AreEqual((x - true_x) / true_x < 0.01d, true); - Assert.AreEqual((a - true_a) / true_a < 0.01d, true); + Assert.IsLessThan(0.01d,(x - true_x) / true_x); + Assert.IsLessThan(0.01d,(a - true_a) / true_a ); } /// @@ -100,10 +100,10 @@ public void Test_GUM_LMOM_Fit() Assert.AreEqual(x, true_x, 0.001d); Assert.AreEqual(a, true_a, 0.001d); var lmom = GUM.LinearMomentsFromParameters(GUM.GetParameters); - Assert.AreEqual(lmom[0], 1648.806d, 0.001d); - Assert.AreEqual(lmom[1], 138.2366d, 0.001d); - Assert.AreEqual(lmom[2], 0.169925d, 0.001d); - Assert.AreEqual(lmom[3], 0.150375d, 0.001d); + Assert.AreEqual(1648.806d, lmom[0], 0.001d); + Assert.AreEqual(138.2366d, lmom[1], 0.001d); + Assert.AreEqual(0.169925d, lmom[2], 0.001d); + Assert.AreEqual(0.150375d, lmom[3], 0.001d); } /// @@ -126,8 +126,8 @@ public void Test_GUM_MLE_Fit() double a = GUM.Alpha; double true_x = 8049.6d; double true_a = 4478.6d; - Assert.AreEqual((x - true_x) / true_x < 0.01d, true); - Assert.AreEqual((a - true_a) / true_a < 0.01d, true); + Assert.IsLessThan(0.01d, (x - true_x) / true_x); + Assert.IsLessThan(0.01d, (a - true_a) / true_a); } /// @@ -147,10 +147,10 @@ public void Test_Gumbel_Quantile() var GUM = new Gumbel(8049.6d, 4478.6d); double q100 = GUM.InverseCDF(0.99d); double true_q100 = 28652d; - Assert.AreEqual((q100 - true_q100) / true_q100 < 0.01d, true); + Assert.IsLessThan(0.01d, (q100 - true_q100) / true_q100); double p = GUM.CDF(q100); double true_p = 0.99d; - Assert.AreEqual((p - true_p) / true_p < 0.01d, true); + Assert.IsLessThan(0.01d, (p - true_p) / true_p); } @@ -173,7 +173,7 @@ public void Test_Gumbel_StandardError() var GUM = new Gumbel(8049.6d, 4478.6d); double qVar99 = Math.Sqrt(GUM.QuantileVariance(0.99d, 53, ParameterEstimationMethod.MaximumLikelihood)); double 
true_qVar99 = 2486.5d; - Assert.AreEqual((qVar99 - true_qVar99) / true_qVar99 < 0.01d, true); + Assert.IsLessThan(0.01d, (qVar99 - true_qVar99) / true_qVar99); } /// @@ -183,12 +183,12 @@ public void Test_Gumbel_StandardError() public void Test_Construction() { var GUM = new Gumbel(); - Assert.AreEqual(GUM.Xi, 100); - Assert.AreEqual(GUM.Alpha, 10); + Assert.AreEqual(100,GUM.Xi); + Assert.AreEqual(10,GUM.Alpha); var GUM2 = new Gumbel(-100, 1); - Assert.AreEqual(GUM2.Xi, -100); - Assert.AreEqual(GUM2.Alpha, 1); + Assert.AreEqual(-100,GUM2.Xi); + Assert.AreEqual(1, GUM2.Alpha); } /// @@ -214,10 +214,10 @@ public void Test_InvalidParameters() public void Test_ParametersToString() { var GUM = new Gumbel(); - Assert.AreEqual(GUM.ParametersToString[0, 0], "Location (ξ)"); - Assert.AreEqual(GUM.ParametersToString[1, 0], "Scale (α)"); - Assert.AreEqual(GUM.ParametersToString[0, 1], "100"); - Assert.AreEqual(GUM.ParametersToString[1, 1], "10"); + Assert.AreEqual("Location (ξ)",GUM.ParametersToString[0, 0] ); + Assert.AreEqual("Scale (α)", GUM.ParametersToString[1, 0]); + Assert.AreEqual("100", GUM.ParametersToString[0, 1]); + Assert.AreEqual("10", GUM.ParametersToString[1, 1]); } /// @@ -241,10 +241,10 @@ public void Test_Moments() public void Test_Mean() { var GUM = new Gumbel(); - Assert.AreEqual(GUM.Mean, 105.77215, 1e-04); + Assert.AreEqual(105.77215, GUM.Mean, 1e-04); var GUM2 = new Gumbel(10, 1); - Assert.AreEqual(GUM2.Mean, 10.577215, 1e-04); + Assert.AreEqual(10.577215, GUM2.Mean, 1e-04); } /// @@ -254,10 +254,10 @@ public void Test_Mean() public void Test_Median() { var GUM = new Gumbel(); - Assert.AreEqual(GUM.Median, 103.66512, 1e-05); + Assert.AreEqual(103.66512, GUM.Median, 1e-05); var GUM2 = new Gumbel(10, 1); - Assert.AreEqual(GUM2.Median, 10.366512, 1e-04); + Assert.AreEqual(10.366512, GUM2.Median, 1e-04); } /// @@ -267,10 +267,10 @@ public void Test_Median() public void Test_StandardDeviation() { var GUM = new Gumbel(); - Assert.AreEqual(GUM.StandardDeviation, 12.82549, 1e-04); + Assert.AreEqual(12.82549, GUM.StandardDeviation, 1e-04); var GUM2 = new Gumbel(10, 1); - Assert.AreEqual(GUM2.StandardDeviation, 1.28254, 1e-04); + Assert.AreEqual(1.28254, GUM2.StandardDeviation, 1e-04); } /// @@ -280,10 +280,10 @@ public void Test_StandardDeviation() public void Test_Skewness() { var GUM = new Gumbel(); - Assert.AreEqual(GUM.Skewness, 1.1396); + Assert.AreEqual(1.1396,GUM.Skewness); var GUM2 = new Gumbel(10, 1); - Assert.AreEqual(GUM2.Skewness, 1.1396); + Assert.AreEqual(1.1396, GUM2.Skewness); } /// @@ -293,10 +293,10 @@ public void Test_Skewness() public void Test_Kurtosis() { var GUM = new Gumbel(); - Assert.AreEqual(GUM.Kurtosis, 5.4); + Assert.AreEqual(5.4, GUM.Kurtosis); var GUM2 = new Gumbel(10, 1); - Assert.AreEqual(GUM2.Kurtosis, 5.4); + Assert.AreEqual(5.4, GUM2.Kurtosis); } /// @@ -306,12 +306,12 @@ public void Test_Kurtosis() public void Test_MinMax() { var GUM = new Gumbel(); - Assert.AreEqual(GUM.Minimum, double.NegativeInfinity); - Assert.AreEqual(GUM.Maximum,double.PositiveInfinity); + Assert.AreEqual(double.NegativeInfinity,GUM.Minimum); + Assert.AreEqual(double.PositiveInfinity, GUM.Maximum); var GUM2 = new Gumbel(10, 1); - Assert.AreEqual(GUM2.Minimum, double.NegativeInfinity); - Assert.AreEqual(GUM2.Maximum, double.PositiveInfinity); + Assert.AreEqual(double.NegativeInfinity, GUM2.Minimum); + Assert.AreEqual(double.PositiveInfinity, GUM2.Maximum); } /// @@ -321,12 +321,12 @@ public void Test_MinMax() public void Test_PDF() { var GUM = new Gumbel(); - 
Assert.AreEqual(GUM.PDF(100), 0.0367879, 1e-04); - Assert.AreEqual(GUM.PDF(0), 0); - Assert.AreEqual(GUM.PDF(200), 4.5397e-06, 1e-10); + Assert.AreEqual(0.0367879, GUM.PDF(100), 1e-04); + Assert.AreEqual(0,GUM.PDF(0)); + Assert.AreEqual(4.5397e-06, GUM.PDF(200), 1e-10); var GUM2 = new Gumbel(10, 1); - Assert.AreEqual(GUM2.PDF(17), 9.1105e-04, 1e-09); + Assert.AreEqual(9.1105e-04, GUM2.PDF(17), 1e-09); } /// @@ -336,12 +336,12 @@ public void Test_PDF() public void Test_CDF() { var GUM = new Gumbel(); - Assert.AreEqual(GUM.CDF(100), 0.36787, 1e-04); - Assert.AreEqual(GUM.CDF(50), 3.5073e-65, 1e-68); - Assert.AreEqual(GUM.CDF(-10), 0); + Assert.AreEqual(0.36787, GUM.CDF(100), 1e-04); + Assert.AreEqual(3.5073e-65, GUM.CDF(50), 1e-68); + Assert.AreEqual(0,GUM.CDF(-10)); var GUM2 = new Gumbel(10, 2); - Assert.AreEqual(GUM2.CDF(5), 5.11929e-06, 1e-10); + Assert.AreEqual(5.11929e-06, GUM2.CDF(5), 1e-10); } /// @@ -351,10 +351,10 @@ public void Test_CDF() public void Test_InverseCDF() { var GUM = new Gumbel(); - Assert.AreEqual(GUM.InverseCDF(0), double.NegativeInfinity); - Assert.AreEqual(GUM.InverseCDF(1), double.PositiveInfinity); - Assert.AreEqual(GUM.InverseCDF(0.3), 98.14373, 1e-04); - Assert.AreEqual(GUM.InverseCDF(0.7), 110.309304, 1e-04); + Assert.AreEqual(double.NegativeInfinity, GUM.InverseCDF(0)); + Assert.AreEqual(double.PositiveInfinity, GUM.InverseCDF(1)); + Assert.AreEqual(98.14373, GUM.InverseCDF(0.3), 1e-04); + Assert.AreEqual(110.309304, GUM.InverseCDF(0.7), 1e-04); } } } diff --git a/Test_Numerics/Distributions/Univariate/Test_InverseChiSquared.cs b/Test_Numerics/Distributions/Univariate/Test_InverseChiSquared.cs index 753cc153..24975eae 100644 --- a/Test_Numerics/Distributions/Univariate/Test_InverseChiSquared.cs +++ b/Test_Numerics/Distributions/Univariate/Test_InverseChiSquared.cs @@ -83,12 +83,12 @@ public void Test_InverseChiSquaredDist() public void Test_Construction() { var IX = new InverseChiSquared(); - Assert.AreEqual(IX.DegreesOfFreedom, 10); - Assert.AreEqual(IX.Sigma, 1); + Assert.AreEqual(10,IX.DegreesOfFreedom); + Assert.AreEqual(1,IX.Sigma); var IX2 = new InverseChiSquared(2, 1); - Assert.AreEqual(IX2.DegreesOfFreedom, 2); - Assert.AreEqual(IX2.Sigma, 1); + Assert.AreEqual(2, IX2.DegreesOfFreedom); + Assert.AreEqual(1, IX2.Sigma); } /// @@ -114,10 +114,10 @@ public void Test_InvalidParameters() public void Test_ParametersToString() { var IX = new InverseChiSquared(); - Assert.AreEqual(IX.ParametersToString[0, 0], "Degrees of Freedom (ν)"); - Assert.AreEqual(IX.ParametersToString[1, 0], "Scale (σ)"); - Assert.AreEqual(IX.ParametersToString[0, 1], "10"); - Assert.AreEqual(IX.ParametersToString[1, 1], "1"); + Assert.AreEqual("Degrees of Freedom (ν)", IX.ParametersToString[0, 0]); + Assert.AreEqual("Scale (σ)", IX.ParametersToString[1, 0]); + Assert.AreEqual("10", IX.ParametersToString[0, 1]); + Assert.AreEqual("1", IX.ParametersToString[1, 1]); } /// @@ -127,10 +127,10 @@ public void Test_ParametersToString() public void Test_Mean() { var IX = new InverseChiSquared(); - Assert.AreEqual(IX.Mean, 1.25); + Assert.AreEqual(1.25, IX.Mean); var IX2 = new InverseChiSquared(2, 2); - Assert.AreEqual(IX2.Mean, double.NaN); + Assert.AreEqual(double.NaN, IX2.Mean); } /// @@ -140,10 +140,10 @@ public void Test_Mean() public void Test_Median() { var IX = new InverseChiSquared(); - Assert.AreEqual(IX.Median, 0.93418,1e-04); + Assert.AreEqual(0.93418, IX.Median,1e-04); var IX2 = new InverseChiSquared(7, 1); - Assert.AreEqual(IX2.Median, 0.906544, 1e-04); + Assert.AreEqual(0.906544, 
IX2.Median, 1e-04); } /// @@ -153,10 +153,10 @@ public void Test_Median() public void Test_Mode() { var IX = new InverseChiSquared(); - Assert.AreEqual(IX.Mode, 0.8333, 1e-04); + Assert.AreEqual(0.8333, IX.Mode, 1e-04); var IX2 = new InverseChiSquared(2, 2); - Assert.AreEqual(IX2.Mode, 1); + Assert.AreEqual(1, IX2.Mode); } /// @@ -166,10 +166,10 @@ public void Test_Mode() public void Test_StandardDeviation() { var IX = new InverseChiSquared(); - Assert.AreEqual(IX.StandardDeviation, 0.72168, 1e-04); + Assert.AreEqual(0.72168, IX.StandardDeviation, 1e-04); var IX2 = new InverseChiSquared(2, 2); - Assert.AreEqual(IX2.StandardDeviation, double.NaN); + Assert.AreEqual(double.NaN,IX2.StandardDeviation ); } /// @@ -179,10 +179,10 @@ public void Test_StandardDeviation() public void Test_Skewness() { var IX = new InverseChiSquared(); - Assert.AreEqual(IX.Skewness, 3.46410, 1e-04); + Assert.AreEqual(3.46410, IX.Skewness, 1e-04); var IX2 = new InverseChiSquared(2, 2); - Assert.AreEqual(IX2.Skewness, double.NaN); + Assert.AreEqual(double.NaN,IX2.Skewness); } /// @@ -192,10 +192,10 @@ public void Test_Skewness() public void Test_Kurtosis() { var IX = new InverseChiSquared(); - Assert.AreEqual(IX.Kurtosis, 45); + Assert.AreEqual(45,IX.Kurtosis); var IX2 = new InverseChiSquared(2,2); - Assert.AreEqual(IX2.Kurtosis, double.NaN); + Assert.AreEqual(double.NaN,IX2.Kurtosis ); } /// @@ -205,8 +205,8 @@ public void Test_Kurtosis() public void Test_MinMax() { var IX = new InverseChiSquared(); - Assert.AreEqual(IX.Minimum, 0); - Assert.AreEqual(IX.Maximum, double.PositiveInfinity); + Assert.AreEqual(0,IX.Minimum); + Assert.AreEqual(double.PositiveInfinity,IX.Maximum ); } /// @@ -216,13 +216,13 @@ public void Test_MinMax() public void Test_PDF() { var IX = new InverseChiSquared(1, 1); - Assert.AreEqual(IX.PDF(1), 0.2419,1e-04); + Assert.AreEqual(0.2419, IX.PDF(1), 1e-04); var IX2 = new InverseChiSquared(2, 1); - Assert.AreEqual(IX2.PDF(2), 0.15163, 1e-04); + Assert.AreEqual(0.15163, IX2.PDF(2), 1e-04); var IX3 = new InverseChiSquared(); - Assert.AreEqual(IX3.PDF(2), 0.16700, 1e-04); + Assert.AreEqual(0.16700, IX3.PDF(2), 1e-04); } /// @@ -232,7 +232,7 @@ public void Test_PDF() public void Test_CDF() { var IX = new InverseChiSquared(7,1); - Assert.AreEqual(IX.CDF(5), 1.1184e-05, 1e-09); + Assert.AreEqual(1.1184e-05, IX.CDF(5), 1e-09); } /// @@ -242,9 +242,9 @@ public void Test_CDF() public void Test_InverseCDF() { var IX = new InverseChiSquared(); - Assert.AreEqual(IX.InverseCDF(0), 0); - Assert.AreEqual(IX.InverseCDF(1), double.PositiveInfinity); - Assert.AreEqual(IX.InverseCDF(0.3), 1.17807,1e-04); + Assert.AreEqual(0,IX.InverseCDF(0)); + Assert.AreEqual(double.PositiveInfinity,IX.InverseCDF(1)); + Assert.AreEqual(1.17807, IX.InverseCDF(0.3), 1e-04); } } } diff --git a/Test_Numerics/Distributions/Univariate/Test_InverseGamma.cs b/Test_Numerics/Distributions/Univariate/Test_InverseGamma.cs index 91c63d1b..c8a2042c 100644 --- a/Test_Numerics/Distributions/Univariate/Test_InverseGamma.cs +++ b/Test_Numerics/Distributions/Univariate/Test_InverseGamma.cs @@ -78,12 +78,12 @@ public void Test_InverseGammaDist() public void Test_Construction() { var IG = new InverseGamma(); - Assert.AreEqual(IG.Beta, 0.5); - Assert.AreEqual(IG.Alpha, 2); + Assert.AreEqual(0.5,IG.Beta); + Assert.AreEqual(2,IG.Alpha); var IG2 = new InverseGamma(2, 4); - Assert.AreEqual(IG2.Beta, 2); - Assert.AreEqual(IG2.Alpha, 4); + Assert.AreEqual(2,IG2.Beta); + Assert.AreEqual(4, IG2.Alpha); } /// @@ -109,10 +109,10 @@ public void 
Test_InvalidParameters() public void Test_ParametersToString() { var IG = new InverseGamma(); - Assert.AreEqual(IG.ParametersToString[0, 0], "Scale (β)"); - Assert.AreEqual(IG.ParametersToString[1, 0], "Shape (α)"); - Assert.AreEqual(IG.ParametersToString[0, 1], "0.5"); - Assert.AreEqual(IG.ParametersToString[1, 1], "2"); + Assert.AreEqual("Scale (β)",IG.ParametersToString[0, 0] ); + Assert.AreEqual("Shape (α)", IG.ParametersToString[1, 0]); + Assert.AreEqual("0.5", IG.ParametersToString[0, 1]); + Assert.AreEqual("2", IG.ParametersToString[1, 1]); } /// @@ -122,10 +122,10 @@ public void Test_ParametersToString() public void Test_Mean() { var IG = new InverseGamma(); - Assert.AreEqual(IG.Mean, 0.5); + Assert.AreEqual(0.5, IG.Mean); var IG2 = new InverseGamma(1, 1); - Assert.AreEqual(IG2.Mean, double.NaN); + Assert.AreEqual(double.NaN,IG2.Mean ); } /// @@ -135,7 +135,7 @@ public void Test_Mean() public void Test_Median() { var IG = new InverseGamma(); - Assert.AreEqual(IG.Median, 0.2979,1e-04); + Assert.AreEqual(0.2979, IG.Median, 1e-04); } /// @@ -145,10 +145,10 @@ public void Test_Median() public void Test_Mode() { var IG = new InverseGamma(); - Assert.AreEqual(IG.Mode, 0.1666, 1e-04); + Assert.AreEqual(0.1666, IG.Mode, 1e-04); var IG2 = new InverseGamma(1, 1); - Assert.AreEqual(IG2.Mode, 0.5); + Assert.AreEqual(0.5, IG2.Mode); } /// @@ -158,10 +158,10 @@ public void Test_Mode() public void Test_StandardDeviation() { var IG = new InverseGamma(); - Assert.AreEqual(IG.StandardDeviation, double.NaN); + Assert.AreEqual(double.NaN, IG.StandardDeviation); var IG2 = new InverseGamma(0.5, 3); - Assert.AreEqual(IG2.StandardDeviation, 0.25); + Assert.AreEqual(0.25, IG2.StandardDeviation); } /// @@ -171,10 +171,10 @@ public void Test_StandardDeviation() public void Test_Skewness() { var IG = new InverseGamma(); - Assert.AreEqual(IG.Skewness, double.NaN); + Assert.AreEqual(double.NaN, IG.Skewness); var IG2 = new InverseGamma(0.5, 4); - Assert.AreEqual(IG2.Skewness, 5.65685, 1e-04); + Assert.AreEqual(5.65685, IG2.Skewness, 1e-04); } /// @@ -184,10 +184,10 @@ public void Test_Skewness() public void Test_Kurtosis() { var IG = new InverseGamma(); - Assert.AreEqual(IG.Kurtosis, double.NaN); + Assert.AreEqual(double.NaN,IG.Kurtosis ); var IG2 = new InverseGamma(0.5, 5); - Assert.AreEqual(IG2.Kurtosis, 45); + Assert.AreEqual(45, IG2.Kurtosis); } /// @@ -197,12 +197,12 @@ public void Test_Kurtosis() public void Test_MinMax() { var IG = new InverseGamma(); - Assert.AreEqual(IG.Minimum, 0); - Assert.AreEqual(IG.Maximum, double.PositiveInfinity); + Assert.AreEqual(0, IG.Minimum); + Assert.AreEqual(double.PositiveInfinity, IG.Maximum); var IG2 = new InverseGamma(2, 2); - Assert.AreEqual(IG2.Minimum, 0); - Assert.AreEqual(IG2.Maximum, double.PositiveInfinity); + Assert.AreEqual(0, IG2.Minimum); + Assert.AreEqual(double.PositiveInfinity, IG2.Maximum); } /// @@ -212,13 +212,13 @@ public void Test_MinMax() public void Test_PDF() { var IG = new InverseGamma(2,4); - Assert.AreEqual(IG.PDF(-2), 0); - Assert.AreEqual(IG.PDF(5), 0.00057200, 1e-07); - Assert.AreEqual(IG.PDF(0.42), 1.74443, 1e-04); + Assert.AreEqual(0,IG.PDF(-2)); + Assert.AreEqual(0.00057200, IG.PDF(5), 1e-07); + Assert.AreEqual(1.74443, IG.PDF(0.42), 1e-04); var IG2 = new InverseGamma(0.42,2.4); - Assert.AreEqual(IG2.PDF(0), double.NaN); - Assert.AreEqual(IG2.PDF(0.3), 1.48386, 1e-05); + Assert.AreEqual(double.NaN,IG2.PDF(0) ); + Assert.AreEqual(1.48386, IG2.PDF(0.3), 1e-05); } /// @@ -228,11 +228,11 @@ public void Test_PDF() public void Test_CDF() { var 
IG = new InverseGamma(); - Assert.AreEqual(IG.CDF(-1), 0); - Assert.AreEqual(IG.CDF(double.PositiveInfinity), 1); + Assert.AreEqual(0,IG.CDF(-1)); + Assert.AreEqual(1, IG.CDF(double.PositiveInfinity)); var IG2 = new InverseGamma(2, 2); - Assert.AreEqual(IG2.CDF(2), 0.73575,1e-04); + Assert.AreEqual(0.73575, IG2.CDF(2), 1e-04); } /// @@ -242,11 +242,11 @@ public void Test_CDF() public void Test_InverseCDF() { var IG = new InverseGamma(); - Assert.AreEqual(IG.InverseCDF(0), 0); - Assert.AreEqual(IG.InverseCDF(1),double.PositiveInfinity); + Assert.AreEqual(0, IG.InverseCDF(0)); + Assert.AreEqual(double.PositiveInfinity, IG.InverseCDF(1)); var IG2 = new InverseGamma(2, 2); - Assert.AreEqual(IG2.InverseCDF(0.3), 0.81993,1e-04); + Assert.AreEqual(0.81993, IG2.InverseCDF(0.3), 1e-04); } } } diff --git a/Test_Numerics/Distributions/Univariate/Test_KappaFour.cs b/Test_Numerics/Distributions/Univariate/Test_KappaFour.cs index b2fa34d2..dfb9538b 100644 --- a/Test_Numerics/Distributions/Univariate/Test_KappaFour.cs +++ b/Test_Numerics/Distributions/Univariate/Test_KappaFour.cs @@ -78,10 +78,10 @@ public void Test_K4_LMOM() Assert.AreEqual(h, true_h, 0.0001d); var lmom = kappa4.LinearMomentsFromParameters(kappa4.GetParameters); - Assert.AreEqual(lmom[0], 9.95751634d, 0.0001d); - Assert.AreEqual(lmom[1], 1.98224114d, 0.0001d); - Assert.AreEqual(lmom[2], 0.06380885d, 0.0001d); - Assert.AreEqual(lmom[3], 0.12442297d, 0.0001d); + Assert.AreEqual(9.95751634d, lmom[0], 0.0001d); + Assert.AreEqual(1.98224114d, lmom[1], 0.0001d); + Assert.AreEqual(0.06380885d, lmom[2], 0.0001d); + Assert.AreEqual(0.12442297d, lmom[3], 0.0001d); } /// @@ -159,16 +159,16 @@ public void Test_K4_PartialDerivatives() public void Test_Construction() { var k4 = new KappaFour(); - Assert.AreEqual(k4.Xi, 100); - Assert.AreEqual(k4.Alpha, 10); - Assert.AreEqual(k4.Kappa, 0); - Assert.AreEqual(k4.Hondo, 0); + Assert.AreEqual(100,k4.Xi ); + Assert.AreEqual(10, k4.Alpha); + Assert.AreEqual(0, k4.Kappa); + Assert.AreEqual(0, k4.Hondo); var k4ii = new KappaFour(100, 10, 1, 1); - Assert.AreEqual(k4ii.Xi, 100); - Assert.AreEqual(k4ii.Alpha, 10); - Assert.AreEqual(k4ii.Kappa, 1); - Assert.AreEqual(k4ii.Hondo, 1); + Assert.AreEqual(100, k4ii.Xi); + Assert.AreEqual(10, k4ii.Alpha); + Assert.AreEqual(1, k4ii.Kappa); + Assert.AreEqual(1, k4ii.Hondo); } /// @@ -194,14 +194,14 @@ public void Test_InvalidParameters() public void Test_ParametersToString() { var k4 = new KappaFour(); - Assert.AreEqual(k4.ParametersToString[0, 0], "Location (ξ)"); - Assert.AreEqual(k4.ParametersToString[1, 0], "Scale (α)"); - Assert.AreEqual(k4.ParametersToString[2, 0], "Shape (κ)"); - Assert.AreEqual(k4.ParametersToString[3, 0], "Shape (h)"); - Assert.AreEqual(k4.ParametersToString[0, 1], "100"); - Assert.AreEqual(k4.ParametersToString[1, 1], "10"); - Assert.AreEqual(k4.ParametersToString[2, 1], "0"); - Assert.AreEqual(k4.ParametersToString[3, 1], "0"); + Assert.AreEqual("Location (ξ)",k4.ParametersToString[0, 0] ); + Assert.AreEqual("Scale (α)", k4.ParametersToString[1, 0]); + Assert.AreEqual("Shape (κ)", k4.ParametersToString[2, 0]); + Assert.AreEqual("Shape (h)", k4.ParametersToString[3, 0]); + Assert.AreEqual("100", k4.ParametersToString[0, 1]); + Assert.AreEqual("10", k4.ParametersToString[1, 1]); + Assert.AreEqual("0", k4.ParametersToString[2, 1]); + Assert.AreEqual("0", k4.ParametersToString[3, 1]); } } } diff --git a/Test_Numerics/Distributions/Univariate/Test_LnNormal.cs b/Test_Numerics/Distributions/Univariate/Test_LnNormal.cs index 24ea8ab1..e0cceabe 100644 
--- a/Test_Numerics/Distributions/Univariate/Test_LnNormal.cs +++ b/Test_Numerics/Distributions/Univariate/Test_LnNormal.cs @@ -81,8 +81,8 @@ public void Test_LnNormal_MOM_Fit() double u2 = LN.Sigma; double true_u1 = 10.7676d; double true_u2 = 0.4544d; - Assert.AreEqual((u1 - true_u1) / true_u1 < 0.01d, true); - Assert.AreEqual((u2 - true_u2) / true_u2 < 0.01d, true); + Assert.IsLessThan(0.01d,(u1 - true_u1) / true_u1); + Assert.IsLessThan(0.01d,(u2 - true_u2) / true_u2 ); } /// @@ -105,8 +105,8 @@ public void Test_LnNormal_MLE_Fit() double u2 = LN.Sigma; double true_u1 = 10.7711d; double true_u2 = 0.4562d; - Assert.AreEqual((u1 - true_u1) / true_u1 < 0.01d, true); - Assert.AreEqual((u2 - true_u2) / true_u2 < 0.01d, true); + Assert.IsLessThan(0.01d, (u1 - true_u1) / true_u1); + Assert.IsLessThan(0.01d, (u2 - true_u2) / true_u2); } /// @@ -126,10 +126,10 @@ public void Test_LnNormal_Quantile() var LN = new LnNormal() { Mu = 10.7676d, Sigma = 0.4544d }; double q100 = LN.InverseCDF(0.99d); double true_q100 = 136611d; - Assert.AreEqual((q100 - true_q100) / true_q100 < 0.01d, true); + Assert.IsLessThan(0.01d, (q100 - true_q100) / true_q100); double p = LN.CDF(q100); double true_p = 0.99d; - Assert.AreEqual((p - true_p) / true_p < 0.01d, true); + Assert.IsLessThan(0.01d, (p - true_p) / true_p); } /// @@ -151,7 +151,7 @@ public void Test_LnNormal_StandardError() LN.Estimate(sample, ParameterEstimationMethod.MaximumLikelihood); double qVar99 = Math.Sqrt(LN.QuantileVariance(0.99d, 85, ParameterEstimationMethod.MaximumLikelihood)); double true_qVar99 = 13113d; - Assert.AreEqual((qVar99 - true_qVar99) / true_qVar99 < 0.01d, true); + Assert.IsLessThan(0.01d, (qVar99 - true_qVar99) / true_qVar99); } /// @@ -178,12 +178,12 @@ public void Test_ConditionalExpectation() public void Test_Construction() { var LN = new LnNormal(); - Assert.AreEqual(LN.Mean, 10, 1E-4); - Assert.AreEqual(LN.StandardDeviation, 10, 1E-4); + Assert.AreEqual(10, LN.Mean, 1E-4); + Assert.AreEqual(10, LN.StandardDeviation, 1E-4); var LN2 = new LnNormal(1, 1); - Assert.AreEqual(LN2.Mean, 1, 1E-4); - Assert.AreEqual(LN2.StandardDeviation, 1, 1E-4); + Assert.AreEqual(1, LN2.Mean, 1E-4); + Assert.AreEqual(1, LN2.StandardDeviation, 1E-4); } /// @@ -223,10 +223,10 @@ public void Test_Moments() public void Test_Mean() { var LN = new LnNormal(); - Assert.AreEqual(LN.Mean, 1.142e26, 1e30); + Assert.AreEqual(1.142e26, LN.Mean, 1e30); var LN2 = new LnNormal(1, 1); - Assert.AreEqual(LN2.Mean, 1,1e-04); + Assert.AreEqual(1, LN2.Mean,1e-04); } /// @@ -236,10 +236,10 @@ public void Test_Mean() public void Test_Median() { var LN = new LnNormal(); - Assert.AreEqual(LN.Median, 7.07106,1e-05); + Assert.AreEqual(7.07106, LN.Median,1e-05); var LN2 = new LnNormal(1, 1); - Assert.AreEqual(LN2.Median, 0.707106, 1e-05); + Assert.AreEqual(0.707106, LN2.Median, 1e-05); } /// @@ -249,10 +249,10 @@ public void Test_Median() public void Test_Mode() { var LN = new LnNormal(); - Assert.AreEqual(LN.Mode, 3.5355, 1e-04); + Assert.AreEqual(3.5355, LN.Mode, 1e-04); var LN2 = new LnNormal(1, 1); - Assert.AreEqual(LN2.Mode, 0.35355,1e-04); + Assert.AreEqual(0.35355, LN2.Mode,1e-04); } /// @@ -262,10 +262,10 @@ public void Test_Mode() public void Test_StandardDeviation() { var LN = new LnNormal(); - Assert.AreEqual(LN.StandardDeviation, 5.92e47, 1e49); + Assert.AreEqual(5.92e47, LN.StandardDeviation, 1e49); var LN2 = new LnNormal(1, 1); - Assert.AreEqual(LN2.StandardDeviation, 1,1e-4); + Assert.AreEqual(1, LN2.StandardDeviation,1e-4); } /// @@ -275,10 +275,10 @@ public 
void Test_StandardDeviation() public void Test_Skewness() { var LN = new LnNormal(); - Assert.AreEqual(LN.Skewness, 1.39e65, 1e67); + Assert.AreEqual(1.39e65, LN.Skewness, 1e67); var LN2 = new LnNormal(1, 1); - Assert.AreEqual(LN2.Skewness, 4, 1e-04); + Assert.AreEqual(4, LN2.Skewness, 1e-04); } /// @@ -288,7 +288,7 @@ public void Test_Skewness() public void Test_Kurtosis() { var LN = new LnNormal(1,1); - Assert.AreEqual(LN.Kurtosis, 41, 1e-04); + Assert.AreEqual(41, LN.Kurtosis, 1e-04); } /// @@ -298,12 +298,12 @@ public void Test_Kurtosis() public void Test_MinMax() { var LN = new LnNormal(); - Assert.AreEqual(LN.Minimum, 0); - Assert.AreEqual(LN.Maximum,double.PositiveInfinity); + Assert.AreEqual(0,LN.Minimum); + Assert.AreEqual(double.PositiveInfinity,LN.Maximum); var LN2 = new LnNormal(1, 1); - Assert.AreEqual(LN2.Minimum, 0); - Assert.AreEqual(LN2.Maximum, double.PositiveInfinity); + Assert.AreEqual(0,LN2.Minimum); + Assert.AreEqual(double.PositiveInfinity,LN2.Maximum ); } /// @@ -313,11 +313,11 @@ public void Test_MinMax() public void Test_PDF() { var LN = new LnNormal(); - Assert.AreEqual(LN.PDF(1), 0.03033, 1e-04); - Assert.AreEqual(LN.PDF(-1), 0); + Assert.AreEqual(0.03033, LN.PDF(1), 1e-04); + Assert.AreEqual(0,LN.PDF(-1)); var LN2 = new LnNormal(2.5, 2.5); - Assert.AreEqual(LN2.PDF(0.5), 0.303322, 1e-04); + Assert.AreEqual(0.303322, LN2.PDF(0.5), 1e-04); } /// @@ -327,8 +327,8 @@ public void Test_PDF() public void Test_CDF() { var LN = new LnNormal(2.5,2.5); - Assert.AreEqual(LN.CDF(0.5), 0.06465, 1e-05); - Assert.AreEqual(LN.CDF(0.8), 0.17046, 1e-05); + Assert.AreEqual(0.06465, LN.CDF(0.5), 1e-05); + Assert.AreEqual(0.17046, LN.CDF(0.8), 1e-05); } /// @@ -338,9 +338,9 @@ public void Test_CDF() public void Test_InverseCDF() { var LN = new LnNormal(); - Assert.AreEqual(LN.InverseCDF(0), 0); - Assert.AreEqual(LN.InverseCDF(1),double.PositiveInfinity); - Assert.AreEqual(LN.InverseCDF(0.5), 7.07106,1e-04); + Assert.AreEqual(0,LN.InverseCDF(0)); + Assert.AreEqual(double.PositiveInfinity,LN.InverseCDF(1)); + Assert.AreEqual(7.07106, LN.InverseCDF(0.5),1e-04); } } } diff --git a/Test_Numerics/Distributions/Univariate/Test_LogNormal.cs b/Test_Numerics/Distributions/Univariate/Test_LogNormal.cs index 95a9b47e..78466327 100644 --- a/Test_Numerics/Distributions/Univariate/Test_LogNormal.cs +++ b/Test_Numerics/Distributions/Univariate/Test_LogNormal.cs @@ -80,8 +80,8 @@ public void Test_LogNormal_MOM_Fit() double u2 = LogN.Sigma; double true_u1 = 10.716952223744224d; double true_u2 = 0.45007398831588075d; - Assert.AreEqual((u1 - true_u1) / true_u1 < 0.01d, true); - Assert.AreEqual((u2 - true_u2) / true_u2 < 0.01d, true); + Assert.IsLessThan(0.01d, (u1 - true_u1) / true_u1); + Assert.IsLessThan(0.01d, (u2 - true_u2) / true_u2); } /// @@ -101,10 +101,10 @@ public void Test_LogNormal_LMOM_Fit() Assert.AreEqual(u1, true_u1, 0.0001d); Assert.AreEqual(u2, true_u2, 0.0001d); var lmom = norm.LinearMomentsFromParameters(norm.GetParameters); - Assert.AreEqual(lmom[0], 0.96723909d, 0.0001d); - Assert.AreEqual(lmom[1], 0.09452119d, 0.0001d); - Assert.AreEqual(lmom[2], 0.00000000d, 0.0001d); - Assert.AreEqual(lmom[3], 0.12260172d, 0.0001d); + Assert.AreEqual(0.96723909d, lmom[0], 0.0001d); + Assert.AreEqual(0.09452119d, lmom[1], 0.0001d); + Assert.AreEqual(0.00000000d, lmom[2], 0.0001d); + Assert.AreEqual(0.12260172d, lmom[3], 0.0001d); } /// @@ -127,8 +127,8 @@ public void Test_LogNormal_MLE_Fit() double u2 = LogN.Sigma; double true_u1 = 10.716950857801747d; double true_u2 = 0.44742859657407796d; - 
Assert.AreEqual((u1 - true_u1) / true_u1 < 0.01d, true); - Assert.AreEqual((u2 - true_u2) / true_u2 < 0.01d, true); + Assert.IsLessThan(0.01d, (u1 - true_u1) / true_u1); + Assert.IsLessThan(0.01d, (u2 - true_u2) / true_u2); var LN = new LnNormal(); LN.Estimate(sample, ParameterEstimationMethod.MaximumLikelihood); } @@ -150,10 +150,10 @@ public void Test_LogNormal_Quantile() var LogN = new LogNormal() { Mu = 10.7676d, Sigma = 0.4544d, Base = Math.E }; double q100 = LogN.InverseCDF(0.99d); double true_q100 = 136611d; - Assert.AreEqual((q100 - true_q100) / true_q100 < 0.01d, true); + Assert.IsLessThan(0.01d, (q100 - true_q100) / true_q100); double p = LogN.CDF(q100); double true_p = 0.99d; - Assert.AreEqual((p - true_p) / true_p < 0.01d, true); + Assert.IsLessThan(0.01d, (p - true_p) / true_p); } /// @@ -175,7 +175,7 @@ public void Test_LogNormal_StandardError() var LogN = new LogNormal() { Mu = 10.7711d, Sigma = 0.4562d, Base = Math.E }; double qVar99 = Math.Sqrt(LogN.QuantileVariance(0.99d, 85, ParameterEstimationMethod.MaximumLikelihood)); double true_qVar99 = 13113d; - Assert.AreEqual((qVar99 - true_qVar99) / true_qVar99 < 0.01d, true); + Assert.IsLessThan(0.01d, (qVar99 - true_qVar99) / true_qVar99); } /// @@ -185,12 +185,12 @@ public void Test_LogNormal_StandardError() public void Test_Construction() { var LogN = new LogNormal(); - Assert.AreEqual(LogN.Mu, 3); - Assert.AreEqual(LogN.Sigma, 0.5); + Assert.AreEqual(3,LogN.Mu); + Assert.AreEqual(0.5,LogN.Sigma); var LogN2 = new LogNormal(1, 1); - Assert.AreEqual(LogN2.Mu, 1); - Assert.AreEqual(LogN2.Sigma, 1); + Assert.AreEqual(1,LogN2.Mu); + Assert.AreEqual(1,LogN2.Sigma); } /// @@ -216,10 +216,10 @@ public void Test_InvalidParameters() public void Test_ParametersToString() { var LogN = new LogNormal(); - Assert.AreEqual(LogN.ParametersToString[0, 0], "Mean (of log) (µ)"); - Assert.AreEqual(LogN.ParametersToString[1, 0], "Std Dev (of log) (σ)"); - Assert.AreEqual(LogN.ParametersToString[0, 1], "3"); - Assert.AreEqual(LogN.ParametersToString[1, 1], "0.5"); + Assert.AreEqual("Mean (of log) (µ)",LogN.ParametersToString[0, 0]); + Assert.AreEqual("Std Dev (of log) (σ)",LogN.ParametersToString[1, 0] ); + Assert.AreEqual("3",LogN.ParametersToString[0, 1] ); + Assert.AreEqual("0.5", LogN.ParametersToString[1, 1]); } /// @@ -243,12 +243,12 @@ public void Test_Moments() public void Test_MinMax() { var LogN = new LogNormal(); - Assert.AreEqual(LogN.Minimum, 0); - Assert.AreEqual(LogN.Maximum, double.PositiveInfinity); + Assert.AreEqual(0,LogN.Minimum); + Assert.AreEqual(double.PositiveInfinity,LogN.Maximum ); var LogN2 = new LogNormal(1, 1); - Assert.AreEqual(LogN2.Minimum, 0); - Assert.AreEqual(LogN2.Maximum, double.PositiveInfinity); + Assert.AreEqual(0, LogN2.Minimum); + Assert.AreEqual(double.PositiveInfinity, LogN2.Maximum); } /// @@ -258,10 +258,10 @@ public void Test_MinMax() public void Test_PDF() { var LogN = new LogNormal(1.5,0.1); - Assert.AreEqual(LogN.PDF(0.1), 3.32e-135,1e-04); + Assert.AreEqual(3.32e-135, LogN.PDF(0.1), 1e-04); var LogN2 = new LogNormal(-0.1, 0.1); - Assert.AreEqual(LogN.PDF(0.8), 9.12888e-56, 1e-04); + Assert.AreEqual(9.12888e-56, LogN.PDF(0.8), 1e-04); } /// @@ -271,10 +271,10 @@ public void Test_PDF() public void Test_CDF() { var LogN = new LogNormal(1.5, 0.1); - Assert.AreEqual(LogN.CDF(0.1), 0); + Assert.AreEqual(0,LogN.CDF(0.1)); var LogN2 = new LogNormal(1.5, 1.5); - Assert.AreEqual(LogN2.CDF(0.5), 0.11493, 1e-05); + Assert.AreEqual(0.11493, LogN2.CDF(0.5), 1e-05); } /// @@ -284,10 +284,10 @@ public void 
Test_CDF() public void Test_InverseCDF() { var LogN = new LogNormal(2.5, 2.5); - Assert.AreEqual(LogN.InverseCDF(0.8), 40183.99248, 1e-04); + Assert.AreEqual(40183.99248, LogN.InverseCDF(0.8), 1e-04); var LogN2 = new LogNormal(1.5, 2.5); - Assert.AreEqual(LogN.InverseCDF(0.8), 40183.99248, 1e-05); + Assert.AreEqual(40183.99248, LogN.InverseCDF(0.8), 1e-05); } } } diff --git a/Test_Numerics/Distributions/Univariate/Test_LogPearsonTypeIII.cs b/Test_Numerics/Distributions/Univariate/Test_LogPearsonTypeIII.cs index 9d368a2c..0fd4fb62 100644 --- a/Test_Numerics/Distributions/Univariate/Test_LogPearsonTypeIII.cs +++ b/Test_Numerics/Distributions/Univariate/Test_LogPearsonTypeIII.cs @@ -94,15 +94,15 @@ public void Test_LP3_IndirectMOM() double true_mean = 191.38768d; double true_stDev = 47.62977d; double true_skew = 0.71589d; - Assert.AreEqual((xi - true_xi) / true_xi < 0.01d, true); - Assert.AreEqual((beta - true_beta) / true_beta < 0.01d, true); - Assert.AreEqual((alpha - true_alpha) / true_alpha < 0.01d, true); - Assert.AreEqual((meanOfLog - true_meanOfLog) / true_meanOfLog < 0.01d, true); - Assert.AreEqual((stDevOfLog - true_stDevOfLog) / true_stDevOfLog < 0.01d, true); - Assert.AreEqual((skewOfLog - true_skewOfLog) / true_skewOfLog < 0.01d, true); - Assert.AreEqual((mean - true_mean) / true_mean < 0.01d, true); - Assert.AreEqual((stDev - true_stDev) / true_stDev < 0.01d, true); - Assert.AreEqual((skew - true_skew) / true_skew < 0.01d, true); + Assert.IsLessThan(0.01d,(xi - true_xi) / true_xi); + Assert.IsLessThan(0.01d,(beta - true_beta) / true_beta); + Assert.IsLessThan(0.01d, (alpha - true_alpha) / true_alpha); + Assert.IsLessThan(0.01d, (meanOfLog - true_meanOfLog) / true_meanOfLog); + Assert.IsLessThan(0.01d, (stDevOfLog - true_stDevOfLog) / true_stDevOfLog); + Assert.IsLessThan(0.01d, (skewOfLog - true_skewOfLog) / true_skewOfLog); + Assert.IsLessThan(0.01d, (mean - true_mean) / true_mean); + Assert.IsLessThan(0.01d, (stDev - true_stDev) / true_stDev); + Assert.IsLessThan(0.01d, (skew - true_skew) / true_skew ); } /// @@ -139,15 +139,15 @@ public void Test_LP3_MLE() double true_mean = 191.30891d; double true_stDev = 47.32124d; double true_skew = 0.72396d; - Assert.AreEqual((xi - true_xi) / true_xi < 0.01d, true); - Assert.AreEqual((beta - true_beta) / true_beta < 0.01d, true); - Assert.AreEqual((alpha - true_alpha) / true_alpha < 0.01d, true); - Assert.AreEqual((meanOfLog - true_meanOfLog) / true_meanOfLog < 0.01d, true); - Assert.AreEqual((stDevOfLog - true_stDevOfLog) / true_stDevOfLog < 0.01d, true); - Assert.AreEqual((skewOfLog - true_skewOfLog) / true_skewOfLog < 0.01d, true); - Assert.AreEqual((mean - true_mean) / true_mean < 0.01d, true); - Assert.AreEqual((stDev - true_stDev) / true_stDev < 0.01d, true); - Assert.AreEqual((skew - true_skew) / true_skew < 0.01d, true); + Assert.IsLessThan(0.01d, (xi - true_xi) / true_xi); + Assert.IsLessThan(0.01d, (beta - true_beta) / true_beta); + Assert.IsLessThan(0.01d, (alpha - true_alpha) / true_alpha); + Assert.IsLessThan(0.01d, (meanOfLog - true_meanOfLog) / true_meanOfLog); + Assert.IsLessThan(0.01d, (stDevOfLog - true_stDevOfLog) / true_stDevOfLog); + Assert.IsLessThan(0.01d, (skewOfLog - true_skewOfLog) / true_skewOfLog); + Assert.IsLessThan(0.01d, (mean - true_mean) / true_mean); + Assert.IsLessThan(0.01d, (stDev - true_stDev) / true_stDev); + Assert.IsLessThan(0.01d, (skew - true_skew) / true_skew); } /// @@ -167,10 +167,10 @@ public void Test_LP3_Quantile() var LP3 = new LogPearsonTypeIII(2.26878d, 0.10621d, -0.02925d); double q1000 
= LP3.InverseCDF(0.99d); double true_q1000 = 326.25d; - Assert.AreEqual((q1000 - true_q1000) / true_q1000 < 0.01d, true); + Assert.IsLessThan(0.01d, (q1000 - true_q1000) / true_q1000); double p = LP3.CDF(q1000); double true_p = 0.99d; - Assert.AreEqual((p - true_p) / true_p < 0.01d, true); + Assert.IsLessThan(0.01d, (p - true_p) / true_p); } /// @@ -192,13 +192,13 @@ public void Test_LP3_StandardError() var LP3 = new LogPearsonTypeIII(2.26878d, 0.10699d, -0.04061d); double qVar999 = Math.Sqrt(LP3.QuantileVariance(0.99d, 69, ParameterEstimationMethod.MethodOfMoments)); double true_qVar999 = 25.053d; - Assert.AreEqual((qVar999 - true_qVar999) / true_qVar999 < 0.01d, true); + Assert.IsLessThan(0.01d, (qVar999 - true_qVar999) / true_qVar999); // Maximum Likelihood LP3 = new LogPearsonTypeIII(2.26878d, 0.10621d, -0.02925d); qVar999 = Math.Sqrt(LP3.QuantileVariance(0.99d, 69, ParameterEstimationMethod.MaximumLikelihood)); true_qVar999 = 25d; - Assert.AreEqual((qVar999 - true_qVar999) / true_qVar999 < 0.01d, true); + Assert.IsLessThan(0.01d, (qVar999 - true_qVar999) / true_qVar999); } @@ -209,9 +209,9 @@ public void Test_LP3_StandardError() public void Test_Construction() { var LP3 = new LogPearsonTypeIII(); - Assert.AreEqual(LP3.Mu, 3); - Assert.AreEqual(LP3.Sigma, 0.5); - Assert.AreEqual(LP3.Gamma, 0); + Assert.AreEqual(3,LP3.Mu); + Assert.AreEqual(0.5,LP3.Sigma); + Assert.AreEqual(0, LP3.Gamma); } /// @@ -234,12 +234,12 @@ public void Test_InvalidParameters() public void Test_ParametersToString() { var LP3 = new LogPearsonTypeIII(); - Assert.AreEqual(LP3.ParametersToString[0, 0], "Mean (of log) (µ)"); - Assert.AreEqual(LP3.ParametersToString[1, 0], "Std Dev (of log) (σ)"); - Assert.AreEqual(LP3.ParametersToString[2, 0], "Skew (of log) (γ)"); - Assert.AreEqual(LP3.ParametersToString[0, 1], "3"); - Assert.AreEqual(LP3.ParametersToString[1, 1], "0.5"); - Assert.AreEqual(LP3.ParametersToString[2, 1], "0"); + Assert.AreEqual("Mean (of log) (µ)",LP3.ParametersToString[0, 0] ); + Assert.AreEqual("Std Dev (of log) (σ)", LP3.ParametersToString[1, 0]); + Assert.AreEqual("Skew (of log) (γ)", LP3.ParametersToString[2, 0]); + Assert.AreEqual("3", LP3.ParametersToString[0, 1]); + Assert.AreEqual("0.5", LP3.ParametersToString[1, 1]); + Assert.AreEqual("0", LP3.ParametersToString[2, 1]); } /// @@ -263,7 +263,7 @@ public void Test_Moments() public void Test_Median() { var LP3 = new LogPearsonTypeIII(); - Assert.AreEqual(LP3.Median, 1000, 1e-04); + Assert.AreEqual(1000, LP3.Median, 1e-04); } /// @@ -273,10 +273,10 @@ public void Test_Median() public void Test_Mode() { var LP3 = new LogPearsonTypeIII(); - Assert.AreEqual(LP3.Mode, 1000, 1e-04); + Assert.AreEqual(1000, LP3.Mode, 1e-04); var LP3ii = new LogPearsonTypeIII(1, 1, 1); - Assert.AreEqual(LP3ii.Mode, 3.16227, 1e-04); + Assert.AreEqual(3.16227, LP3ii.Mode, 1e-04); } /// @@ -286,13 +286,13 @@ public void Test_Mode() public void Test_Minimum() { var LP3 = new LogPearsonTypeIII(); - Assert.AreEqual(LP3.Minimum, 0); + Assert.AreEqual(0, LP3.Minimum); var LP3ii = new LogPearsonTypeIII(1,1,1); - Assert.AreEqual(LP3ii.Minimum, 0.1, 1e-05); + Assert.AreEqual(0.1, LP3ii.Minimum, 1e-05); var LP3iii = new LogPearsonTypeIII(1, -1, 1); - Assert.AreEqual(LP3iii.Minimum, 0); + Assert.AreEqual(0, LP3iii.Minimum); } /// @@ -302,13 +302,13 @@ public void Test_Minimum() public void Test_Maximum() { var LP3 = new LogPearsonTypeIII(); - Assert.AreEqual(LP3.Maximum, double.PositiveInfinity); + Assert.AreEqual(double.PositiveInfinity,LP3.Maximum ); var LP3ii = new 
LogPearsonTypeIII(1,1,1);
- Assert.AreEqual(LP3ii.Maximum, double.PositiveInfinity);
+ Assert.AreEqual(double.PositiveInfinity, LP3ii.Maximum);
 var LP3iii = new LogPearsonTypeIII(1, -1, 1);
- Assert.AreEqual(LP3iii.Maximum, 1000,1e-04);
+ Assert.AreEqual(1000, LP3iii.Maximum, 1e-04);
 }
 ///
@@ -318,8 +318,8 @@ public void Test_Maximum()
 public void Test_PDF()
 {
 var LP3 = new LogPearsonTypeIII();
- Assert.AreEqual(LP3.PDF(-1), 0);
- Assert.AreEqual(LP3.PDF(1),5.2774e-09,1e-13);
+ Assert.AreEqual(0, LP3.PDF(-1));
+ Assert.AreEqual(5.2774e-09, LP3.PDF(1), 1e-13);
 }
 ///
@@ -329,8 +329,8 @@ public void Test_PDF()
 public void Test_CDF()
 {
 var LP3 = new LogPearsonTypeIII();
- Assert.AreEqual(LP3.CDF(-1), 0);
- Assert.AreEqual(LP3.CDF(1), 9.8658e-10,1e-13);
+ Assert.AreEqual(0, LP3.CDF(-1));
+ Assert.AreEqual(9.8658e-10, LP3.CDF(1), 1e-13);
 }
 ///
@@ -340,9 +340,9 @@ public void Test_CDF()
 public void Test_InverseCDF()
 {
 var LP3 = new LogPearsonTypeIII();
- Assert.AreEqual(LP3.InverseCDF(0), 0);
- Assert.AreEqual(LP3.InverseCDF(1), double.PositiveInfinity);
- Assert.AreEqual(LP3.InverseCDF(0.3), 546.7637,1e-04);
+ Assert.AreEqual(0, LP3.InverseCDF(0));
+ Assert.AreEqual(double.PositiveInfinity, LP3.InverseCDF(1));
+ Assert.AreEqual(546.7637, LP3.InverseCDF(0.3), 1e-04);
 }
 ///
@@ -352,9 +352,9 @@ public void Test_InverseCDF()
 public void ValidateWilsonHilfertyInverseCDF()
 {
 var LP3 = new LogPearsonTypeIII();
- Assert.AreEqual(LP3.WilsonHilfertyInverseCDF(0), 0);
- Assert.AreEqual(LP3.WilsonHilfertyInverseCDF(1),double.PositiveInfinity);
- Assert.AreEqual(LP3.WilsonHilfertyInverseCDF(0.4), 747.01005,1e-05);
+ Assert.AreEqual(0, LP3.WilsonHilfertyInverseCDF(0));
+ Assert.AreEqual(double.PositiveInfinity, LP3.WilsonHilfertyInverseCDF(1));
+ Assert.AreEqual(747.01005, LP3.WilsonHilfertyInverseCDF(0.4), 1e-05);
 }
 }
 }
diff --git a/Test_Numerics/Distributions/Univariate/Test_Logistic.cs b/Test_Numerics/Distributions/Univariate/Test_Logistic.cs
index 744c18d0..48ad38d9 100644
--- a/Test_Numerics/Distributions/Univariate/Test_Logistic.cs
+++ b/Test_Numerics/Distributions/Univariate/Test_Logistic.cs
@@ -80,8 +80,8 @@ public void Test_Logistic_MOM_Fit()
 double a = LO.Alpha;
 double true_x = 12665d;
 double true_a = 2596.62d;
- Assert.AreEqual((x - true_x) / true_x < 0.01d, true);
- Assert.AreEqual((a - true_a) / true_a < 0.01d, true);
+ Assert.IsLessThan(0.01d, (x - true_x) / true_x);
+ Assert.IsLessThan(0.01d, (a - true_a) / true_a);
 }
 ///
@@ -104,8 +104,8 @@ public void Test_Logistic_MLE_Fit()
 double a = LO.Alpha;
 double true_x = 12628.59d;
 double true_a = 2708.64d;
- Assert.AreEqual((x - true_x) / true_x < 0.01d, true);
- Assert.AreEqual((a - true_a) / true_a < 0.01d, true);
+ Assert.IsLessThan(0.01d, (x - true_x) / true_x);
+ Assert.IsLessThan(0.01d, (a - true_a) / true_a);
 }
 ///
@@ -125,10 +125,10 @@ public void Test_Logistic_Quantile()
 var LO = new Logistic(12665d, 2596.62d);
 double q100 = LO.InverseCDF(0.99d);
 double true_100 = 24597d;
- Assert.AreEqual((q100 - true_100) / true_100 < 0.01d, true);
+ Assert.IsLessThan(0.01d, (q100 - true_100) / true_100);
 double p = LO.CDF(q100);
 double true_p = 0.99d;
- Assert.AreEqual(p == true_p, true);
+ Assert.AreEqual(true_p, p);
 }
 ///
@@ -150,13 +150,13 @@ public void Test_Logistic_StandardError()
 var LO = new Logistic(12665d, 2596.62d);
 double se100 = Math.Sqrt(LO.QuantileVariance(0.99d, 48, ParameterEstimationMethod.MethodOfMoments));
 double true_se100 = 1684d;
- Assert.AreEqual((se100 - true_se100) / true_se100 < 0.01d, true);
+ Assert.IsLessThan(0.01d, (se100 - 
true_se100) / true_se100); // Maximum Likelihood LO = new Logistic(12628.59d, 2708.64d); se100 = Math.Sqrt(LO.QuantileVariance(0.99d, 48, ParameterEstimationMethod.MaximumLikelihood)); true_se100 = 1648d; - Assert.AreEqual((se100 - true_se100) / true_se100 < 0.01d, true); + Assert.IsLessThan(0.01d,(se100 - true_se100) / true_se100); } /// @@ -166,12 +166,12 @@ public void Test_Logistic_StandardError() public void Test_Construction() { var LO = new Logistic(); - Assert.AreEqual(LO.Xi, 0); - Assert.AreEqual(LO.Alpha, 0.1); + Assert.AreEqual(0,LO.Xi); + Assert.AreEqual(0.1,LO.Alpha); var LO2 = new Logistic(1, 1); - Assert.AreEqual(LO2.Xi, 1); - Assert.AreEqual(LO2.Alpha, 1); + Assert.AreEqual(1,LO2.Xi); + Assert.AreEqual(1, LO2.Alpha); } /// @@ -197,10 +197,10 @@ public void Test_InvalidParameters() public void Test_ParametersToString() { var LO = new Logistic(); - Assert.AreEqual(LO.ParametersToString[0, 0], "Location (ξ)"); - Assert.AreEqual(LO.ParametersToString[1, 0], "Scale (α)"); - Assert.AreEqual(LO.ParametersToString[0, 1], "0"); - Assert.AreEqual(LO.ParametersToString[1, 1], "0.1"); + Assert.AreEqual("Location (ξ)",LO.ParametersToString[0, 0] ); + Assert.AreEqual("Scale (α)", LO.ParametersToString[1, 0]); + Assert.AreEqual("0", LO.ParametersToString[0, 1]); + Assert.AreEqual("0.1", LO.ParametersToString[1, 1]); } /// @@ -224,10 +224,10 @@ public void Test_Moments() public void Test_Mean() { var LO = new Logistic(); - Assert.AreEqual(LO.Mean, 0); + Assert.AreEqual(0, LO.Mean); var LO2 = new Logistic(1,1); - Assert.AreEqual(LO2.Mean, 1); + Assert.AreEqual(1, LO2.Mean); } /// @@ -237,10 +237,10 @@ public void Test_Mean() public void Test_Median() { var LO = new Logistic(); - Assert.AreEqual(LO.Median,0); + Assert.AreEqual(0, LO.Median); var LO2 = new Logistic(1, 1); - Assert.AreEqual(LO2.Median, 1); + Assert.AreEqual(1, LO2.Median); } /// @@ -250,10 +250,10 @@ public void Test_Median() public void Test_Mode() { var LO = new Logistic(); - Assert.AreEqual(LO.Mode, 0); + Assert.AreEqual(0, LO.Mode); var LO2 = new Logistic(1, 1); - Assert.AreEqual(LO2.Mode, 1); + Assert.AreEqual(1, LO2.Mode); } /// @@ -263,10 +263,10 @@ public void Test_Mode() public void Test_StandardDeviation() { var LO = new Logistic(); - Assert.AreEqual(LO.StandardDeviation, 0.18137, 1E-04); + Assert.AreEqual(0.18137, LO.StandardDeviation, 1E-04); var LO2 = new Logistic(1, 1); - Assert.AreEqual(LO2.StandardDeviation, 1.81379, 1e-04); + Assert.AreEqual(1.81379, LO2.StandardDeviation, 1e-04); } /// @@ -276,10 +276,10 @@ public void Test_StandardDeviation() public void Test_Skewness() { var LO = new Logistic(); - Assert.AreEqual(LO.Skewness, 0); + Assert.AreEqual(0, LO.Skewness); var LO2 = new Logistic(1, 1); - Assert.AreEqual(LO2.Skewness, 0); + Assert.AreEqual(0, LO2.Skewness); } /// @@ -289,10 +289,10 @@ public void Test_Skewness() public void Test_Kurtosis() { var LO = new Logistic(); - Assert.AreEqual(LO.Kurtosis, 4.2); + Assert.AreEqual(4.2, LO.Kurtosis); var LO2 = new Logistic(1, 1); - Assert.AreEqual(LO2.Kurtosis, 4.2); + Assert.AreEqual(4.2, LO2.Kurtosis); } /// @@ -302,12 +302,12 @@ public void Test_Kurtosis() public void Test_MinMax() { var LO = new Logistic(); - Assert.AreEqual(LO.Minimum, double.NegativeInfinity); - Assert.AreEqual(LO.Maximum,double.PositiveInfinity); + Assert.AreEqual(double.NegativeInfinity,LO.Minimum ); + Assert.AreEqual(double.PositiveInfinity, LO.Maximum); var LO2 = new Logistic(1, 1); - Assert.AreEqual(LO2.Minimum, double.NegativeInfinity); - Assert.AreEqual(LO2.Maximum, 
double.PositiveInfinity); + Assert.AreEqual(double.NegativeInfinity, LO2.Minimum); + Assert.AreEqual(double.PositiveInfinity, LO2.Maximum); } /// @@ -317,9 +317,9 @@ public void Test_MinMax() public void Test_PDF() { var LO = new Logistic(5,2); - Assert.AreEqual(LO.PDF(-5), 0.00332, 1e-04); - Assert.AreEqual(LO.PDF(0), 0.03505, 1e-04); - Assert.AreEqual(LO.PDF(5), 0.125); + Assert.AreEqual(0.00332, LO.PDF(-5), 1e-04); + Assert.AreEqual(0.03505, LO.PDF(0), 1e-04); + Assert.AreEqual(0.125,LO.PDF(5) ); } /// @@ -329,9 +329,9 @@ public void Test_PDF() public void Test_CDF() { var LO = new Logistic(5,2); - Assert.AreEqual(LO.CDF(-5), 0.00669, 1e-05); - Assert.AreEqual(LO.CDF(0), 0.07585, 1e-04); - Assert.AreEqual(LO.CDF(5), 0.5); + Assert.AreEqual(0.00669, LO.CDF(-5), 1e-05); + Assert.AreEqual(0.07585, LO.CDF(0), 1e-04); + Assert.AreEqual(0.5,LO.CDF(5) ); } /// @@ -341,9 +341,9 @@ public void Test_CDF() public void Test_InverseCDF() { var LO = new Logistic(5, 2); - Assert.AreEqual(LO.InverseCDF(0), double.NegativeInfinity); - Assert.AreEqual(LO.InverseCDF(1),double.PositiveInfinity); - Assert.AreEqual(LO.InverseCDF(0.3), 3.3054, 1e-04); + Assert.AreEqual(double.NegativeInfinity, LO.InverseCDF(0)); + Assert.AreEqual(double.PositiveInfinity, LO.InverseCDF(1)); + Assert.AreEqual(3.3054, LO.InverseCDF(0.3), 1e-04); } } } diff --git a/Test_Numerics/Distributions/Univariate/Test_NoncentralT.cs b/Test_Numerics/Distributions/Univariate/Test_NoncentralT.cs index f0ba56a6..7fb40330 100644 --- a/Test_Numerics/Distributions/Univariate/Test_NoncentralT.cs +++ b/Test_Numerics/Distributions/Univariate/Test_NoncentralT.cs @@ -114,12 +114,12 @@ public void Test_NoncentralT_InverseCDF() public void Test_Construction() { var t = new NoncentralT(); - Assert.AreEqual(t.DegreesOfFreedom, 10); - Assert.AreEqual(t.Noncentrality, 0); + Assert.AreEqual(10,t.DegreesOfFreedom ); + Assert.AreEqual(0, t.Noncentrality); var t2 = new NoncentralT(1, 1); - Assert.AreEqual(t2.DegreesOfFreedom, 1); - Assert.AreEqual(t2.Noncentrality, 1); + Assert.AreEqual(1, t2.DegreesOfFreedom); + Assert.AreEqual(1, t2.Noncentrality); } /// @@ -145,10 +145,10 @@ public void Test_InvalidParameters() public void Test_ParametersToString() { var t = new NoncentralT(); - Assert.AreEqual(t.ParametersToString[0, 0], "Degrees of Freedom (ν)"); - Assert.AreEqual(t.ParametersToString[1, 0], "Noncentrality (μ)"); - Assert.AreEqual(t.ParametersToString[0, 1], "10"); - Assert.AreEqual(t.ParametersToString[1, 1], "0"); + Assert.AreEqual("Degrees of Freedom (ν)", t.ParametersToString[0, 0]); + Assert.AreEqual("Noncentrality (μ)", t.ParametersToString[1, 0]); + Assert.AreEqual("10", t.ParametersToString[0, 1]); + Assert.AreEqual("0", t.ParametersToString[1, 1]); } /// @@ -158,10 +158,10 @@ public void Test_ParametersToString() public void Test_Mean() { var t = new NoncentralT(); - Assert.AreEqual(t.Mean, 0); + Assert.AreEqual(0, t.Mean); var t2 = new NoncentralT(0, 1); - Assert.AreEqual(t2.Mean, double.NaN); + Assert.AreEqual(double.NaN, t2.Mean); } /// @@ -171,10 +171,10 @@ public void Test_Mean() public void Test_Median() { var t = new NoncentralT(); - Assert.AreEqual(t.Median, 0,1e-04); + Assert.AreEqual(0, t.Median, 1e-04); var t2 = new NoncentralT(1, 1); - Assert.AreEqual(t2.Median, 1.3202,1e-04); + Assert.AreEqual(1.3202, t2.Median, 1e-04); } /// @@ -184,10 +184,10 @@ public void Test_Median() public void Test_Mode() { var t = new NoncentralT(); - Assert.AreEqual(t.Mode, 0, 1E-4); + Assert.AreEqual(0, t.Mode, 1E-4); var t3 = new NoncentralT(10, 1); - 
Assert.AreEqual(t3.Mode, 0.9329, 1e-04); + Assert.AreEqual(0.9329, t3.Mode, 1e-04); } /// @@ -197,10 +197,10 @@ public void Test_Mode() public void Test_StandardDeviation() { var t = new NoncentralT(); - Assert.AreEqual(t.StandardDeviation,1.1180,1e-04); + Assert.AreEqual(1.1180, t.StandardDeviation,1e-04); var t2 = new NoncentralT(1, 0); - Assert.AreEqual(t2.StandardDeviation, double.NaN); + Assert.AreEqual(double.NaN,t2.StandardDeviation); } /// @@ -210,7 +210,7 @@ public void Test_StandardDeviation() public void Test_Skewness() { var t = new NoncentralT(); - Assert.AreEqual(t.Skewness, 0.0, 1E-4); + Assert.AreEqual(0.0, t.Skewness, 1E-4); } /// @@ -220,7 +220,7 @@ public void Test_Skewness() public void Test_Kurtosis() { var t = new NoncentralT(); - Assert.AreEqual(t.Kurtosis, 4.0, 1E-4); + Assert.AreEqual(4.0, t.Kurtosis, 1E-4); } /// @@ -230,12 +230,12 @@ public void Test_Kurtosis() public void Test_MinMax() { var t = new NoncentralT(); - Assert.AreEqual(t.Minimum,double.NegativeInfinity); - Assert.AreEqual(t.Maximum,double.PositiveInfinity); + Assert.AreEqual(double.NegativeInfinity,t.Minimum); + Assert.AreEqual(double.PositiveInfinity, t.Maximum); var t2 = new NoncentralT(1, 1); - Assert.AreEqual(t2.Minimum, double.NegativeInfinity); - Assert.AreEqual(t2.Maximum, double.PositiveInfinity); + Assert.AreEqual(double.NegativeInfinity, t2.Minimum); + Assert.AreEqual(double.PositiveInfinity, t2.Maximum); } /// @@ -245,8 +245,8 @@ public void Test_MinMax() public void Test_PDF() { var t = new NoncentralT(); - Assert.AreEqual(t.PDF(0), 0.38910,1e-04); - Assert.AreEqual(t.PDF(1),0.23036,1e-04); + Assert.AreEqual(0.38910, t.PDF(0),1e-04); + Assert.AreEqual(0.23036, t.PDF(1),1e-04); } /// @@ -256,7 +256,7 @@ public void Test_PDF() public void Test_CDF() { var t = new NoncentralT(); - Assert.AreEqual(t.CDF(1), 0.82955,1e-04); + Assert.AreEqual(0.82955, t.CDF(1),1e-04); } /// @@ -266,9 +266,9 @@ public void Test_CDF() public void Test_InverseCDF() { var t = new NoncentralT(); - Assert.AreEqual(t.InverseCDF(0), double.NegativeInfinity); - Assert.AreEqual(t.InverseCDF(1),double.PositiveInfinity); - Assert.AreEqual(t.InverseCDF(0.4), -0.26018,1e-04); + Assert.AreEqual(double.NegativeInfinity,t.InverseCDF(0) ); + Assert.AreEqual(double.PositiveInfinity, t.InverseCDF(1)); + Assert.AreEqual(-0.26018, t.InverseCDF(0.4), 1e-04); } } } diff --git a/Test_Numerics/Distributions/Univariate/Test_Normal.cs b/Test_Numerics/Distributions/Univariate/Test_Normal.cs index 0fd5972f..3703e7a2 100644 --- a/Test_Numerics/Distributions/Univariate/Test_Normal.cs +++ b/Test_Numerics/Distributions/Univariate/Test_Normal.cs @@ -82,8 +82,8 @@ public void Test_Normal_MOM_Fit() double u2 = norm.Sigma; double true_u1 = 12665d; double true_u2 = 4710d; - Assert.AreEqual((u1 - true_u1) / true_u1 < 0.01d, true); - Assert.AreEqual((u2 - true_u2) / true_u2 < 0.01d, true); + Assert.IsLessThan(0.01d,(u1 - true_u1) / true_u1); + Assert.IsLessThan(0.01d,(u2 - true_u2) / true_u2 ); } @@ -104,10 +104,10 @@ public void Test_Normal_LMOM_Fit() Assert.AreEqual(u1, true_u1, 0.0001d); Assert.AreEqual(u2, true_u2, 0.0001d); var lmom = norm.LinearMomentsFromParameters(norm.GetParameters); - Assert.AreEqual(lmom[0], 9.9575163d, 0.0001d); - Assert.AreEqual(lmom[1], 1.9822411d, 0.0001d); - Assert.AreEqual(lmom[2], 0.0000000d, 0.0001d); - Assert.AreEqual(lmom[3], 0.1226017d, 0.0001d); + Assert.AreEqual(9.9575163d, lmom[0], 0.0001d); + Assert.AreEqual(1.9822411d, lmom[1], 0.0001d); + Assert.AreEqual(0.0000000d, lmom[2], 0.0001d); + 
Assert.AreEqual(0.1226017d, lmom[3], 0.0001d); } /// @@ -148,10 +148,10 @@ public void Test_Normal_Quantile() var N = new Normal(12665d, 4710d); double q100 = N.InverseCDF(0.99d); double true_q100 = 23624d; - Assert.AreEqual((q100 - true_q100) / true_q100 < 0.01d, true); + Assert.IsLessThan(0.01d, (q100 - true_q100) / true_q100); double p = N.CDF(q100); double true_p = 0.99d; - Assert.AreEqual((p - true_p) / true_p < 0.01d, true); + Assert.IsLessThan(0.01d, (p - true_p) / true_p); } /// @@ -174,7 +174,7 @@ public void Test_Normal_StandardError() var N = new Normal(12665d, 4710d); double qVar99 = Math.Sqrt(N.QuantileVariance(0.99d, 48, ParameterEstimationMethod.MaximumLikelihood)); double true_qVar99 = 1309d; - Assert.AreEqual((qVar99 - true_qVar99) / true_qVar99 < 0.01d, true); + Assert.IsLessThan(0.01d, (qVar99 - true_qVar99) / true_qVar99); } /// @@ -184,12 +184,12 @@ public void Test_Normal_StandardError() public void Test_Construction() { var N = new Normal(); - Assert.AreEqual(N.Mu, 0); - Assert.AreEqual(N.Sigma, 1); + Assert.AreEqual(0,N.Mu); + Assert.AreEqual(1,N.Sigma); var N2 = new Normal(1, 1); - Assert.AreEqual(N2.Mu, 1); - Assert.AreEqual(N2.Sigma,1); + Assert.AreEqual(1,N2.Mu); + Assert.AreEqual(1,N2.Sigma); } /// @@ -218,10 +218,10 @@ public void Test_InvalidParameters() public void Test_ParametersToString() { var N = new Normal(); - Assert.AreEqual(N.ParametersToString[0, 0], "Mean (µ)"); - Assert.AreEqual(N.ParametersToString[1, 0], "Std Dev (σ)"); - Assert.AreEqual(N.ParametersToString[0, 1], "0"); - Assert.AreEqual(N.ParametersToString[1,1], "1"); + Assert.AreEqual("Mean (µ)",N.ParametersToString[0, 0]); + Assert.AreEqual("Std Dev (σ)",N.ParametersToString[1, 0] ); + Assert.AreEqual("0",N.ParametersToString[0, 1]); + Assert.AreEqual("1",N.ParametersToString[1,1] ); } /// @@ -245,10 +245,10 @@ public void Test_Moments() public void Test_Mean() { var N = new Normal(); - Assert.AreEqual(N.Mean, 0); + Assert.AreEqual(0,N.Mean); var N2 = new Normal(5, 9); - Assert.AreEqual(N2.Mean, 5); + Assert.AreEqual(5, N2.Mean); } /// @@ -258,10 +258,10 @@ public void Test_Mean() public void Test_Median() { var N = new Normal(); - Assert.AreEqual(N.Median, 0); + Assert.AreEqual(0, N.Median); var N2 = new Normal(5, 9); - Assert.AreEqual(N2.Median, 5); + Assert.AreEqual(5, N2.Median); } /// @@ -271,10 +271,10 @@ public void Test_Median() public void Test_Mode() { var N = new Normal(); - Assert.AreEqual(N.Mode, 0); + Assert.AreEqual(0, N.Mode); var N2 = new Normal(5, 9); - Assert.AreEqual(N2.Mode, 5); + Assert.AreEqual(5, N2.Mode); } /// @@ -284,10 +284,10 @@ public void Test_Mode() public void Test_StandardDeviation() { var N = new Normal(); - Assert.AreEqual(N.StandardDeviation, 1); + Assert.AreEqual(1, N.StandardDeviation); var N2 = new Normal(5, 9); - Assert.AreEqual(N2.StandardDeviation, 9); + Assert.AreEqual(9, N2.StandardDeviation); } /// @@ -297,10 +297,10 @@ public void Test_StandardDeviation() public void Test_Skewness() { var N = new Normal(); - Assert.AreEqual(N.Skewness, 0); + Assert.AreEqual(0, N.Skewness); var N2 = new Normal(5, 9); - Assert.AreEqual(N2.Skewness, 0); + Assert.AreEqual(0, N2.Skewness); } /// @@ -310,10 +310,10 @@ public void Test_Skewness() public void Test_Kurtosis() { var N = new Normal(); - Assert.AreEqual(N.Kurtosis, 3); + Assert.AreEqual(3, N.Kurtosis); var N2 = new Normal(5, 9); - Assert.AreEqual(N2.Kurtosis, 3); + Assert.AreEqual(3, N2.Kurtosis); } /// @@ -323,12 +323,12 @@ public void Test_Kurtosis() public void Test_MinMax() { var N = new Normal(); - 
Assert.AreEqual(N.Minimum, double.NegativeInfinity); - Assert.AreEqual(N.Maximum, double.PositiveInfinity); + Assert.AreEqual(double.NegativeInfinity,N.Minimum ); + Assert.AreEqual(double.PositiveInfinity, N.Maximum); var N2 = new Normal(5, 9); - Assert.AreEqual(N2.Minimum,double.NegativeInfinity); - Assert.AreEqual(N2.Maximum,double.PositiveInfinity); + Assert.AreEqual(double.NegativeInfinity, N2.Minimum); + Assert.AreEqual(double.PositiveInfinity, N2.Maximum); } /// @@ -338,11 +338,11 @@ public void Test_MinMax() public void Test_PDF() { var N = new Normal(); - Assert.AreEqual(N.PDF(0), 0.39894, 1e-04); - Assert.AreEqual(N.PDF(1), 0.24197, 1e-04); + Assert.AreEqual(0.39894, N.PDF(0), 1e-04); + Assert.AreEqual(0.24197, N.PDF(1), 1e-04); var N2 = new Normal(5, 9); - Assert.AreEqual(N2.PDF(-1), 0.03549, 1e-04); + Assert.AreEqual(0.03549, N2.PDF(-1), 1e-04); } /// @@ -352,9 +352,9 @@ public void Test_PDF() public void Test_CDF() { var N = new Normal(5,2); - Assert.AreEqual(N.CDF(0), 0.006209, 1e-04); - Assert.AreEqual(N.CDF(4), 0.30853, 1e-04); - Assert.AreEqual(N.CDF(5), 0.5); + Assert.AreEqual(0.006209, N.CDF(0), 1e-04); + Assert.AreEqual(0.30853, N.CDF(4), 1e-04); + Assert.AreEqual(0.5,N.CDF(5)); } } } diff --git a/Test_Numerics/Distributions/Univariate/Test_Pareto.cs b/Test_Numerics/Distributions/Univariate/Test_Pareto.cs index 31ebf8fc..105578aa 100644 --- a/Test_Numerics/Distributions/Univariate/Test_Pareto.cs +++ b/Test_Numerics/Distributions/Univariate/Test_Pareto.cs @@ -95,12 +95,12 @@ public void Test_ParetoDist() public void Test_Construction() { var p = new Pareto(); - Assert.AreEqual(p.Xm, 1); - Assert.AreEqual(p.Alpha, 10); + Assert.AreEqual(1, p.Xm); + Assert.AreEqual(10, p.Alpha); var p2 = new Pareto(1, 1); - Assert.AreEqual(p2.Xm, 1); - Assert.AreEqual(p2.Alpha, 1); + Assert.AreEqual(1, p2.Xm); + Assert.AreEqual(1, p2.Alpha); } /// @@ -126,10 +126,10 @@ public void Test_InvalidParameters() public void Test_ParametersToString() { var p = new Pareto(); - Assert.AreEqual(p.ParametersToString[0, 0], "Scale (Xm)"); - Assert.AreEqual(p.ParametersToString[1, 0], "Shape (α)"); - Assert.AreEqual(p.ParametersToString[0, 1], "1"); - Assert.AreEqual(p.ParametersToString[1, 1], "10"); + Assert.AreEqual("Scale (Xm)", p.ParametersToString[0, 0]); + Assert.AreEqual("Shape (α)", p.ParametersToString[1, 0]); + Assert.AreEqual("1", p.ParametersToString[0, 1]); + Assert.AreEqual("10", p.ParametersToString[1, 1]); } /// @@ -153,10 +153,10 @@ public void Test_Moments() public void Test_Mean() { var p = new Pareto(); - Assert.AreEqual(p.Mean, 1.1111, 1e-04); + Assert.AreEqual(1.1111, p.Mean, 1e-04); var p2 = new Pareto(1,1); - Assert.AreEqual(p2.Mean, double.PositiveInfinity); + Assert.AreEqual(double.PositiveInfinity,p2.Mean ); } /// @@ -166,10 +166,10 @@ public void Test_Mean() public void Test_Median() { var p = new Pareto(); - Assert.AreEqual(p.Median, 1.07177, 1e-04); + Assert.AreEqual(1.07177, p.Median, 1e-04); var p2 = new Pareto(1, 1); - Assert.AreEqual(p2.Median, 2); + Assert.AreEqual(2, p2.Median); } /// @@ -179,10 +179,10 @@ public void Test_Median() public void Test_Mode() { var p = new Pareto(); - Assert.AreEqual(p.Mode, 1); + Assert.AreEqual(1, p.Mode); var p2 = new Pareto(2, 1); - Assert.AreEqual(p2.Mode, 2); + Assert.AreEqual(2, p2.Mode); } /// @@ -192,10 +192,10 @@ public void Test_Mode() public void Test_StandardDeviation() { var p = new Pareto(); - Assert.AreEqual(p.StandardDeviation, 0.12422, 1e-04); + Assert.AreEqual(0.12422, p.StandardDeviation, 1e-04); var p2 = new Pareto(1, 
2); - Assert.AreEqual(p2.StandardDeviation, double.PositiveInfinity); + Assert.AreEqual(double.PositiveInfinity,p2.StandardDeviation ); } /// @@ -205,10 +205,10 @@ public void Test_StandardDeviation() public void Test_Skewness() { var p = new Pareto(); - Assert.AreEqual(p.Skewness, 2.81105, 1e-04); + Assert.AreEqual(2.81105, p.Skewness, 1e-04); var p2 = new Pareto(1, 3); - Assert.AreEqual(p2.Skewness, double.NaN); + Assert.AreEqual(double.NaN,p2.Skewness); } /// @@ -218,10 +218,10 @@ public void Test_Skewness() public void Test_Kurtosis() { var p = new Pareto(); - Assert.AreEqual(p.Kurtosis, 17.82857, 1e-04); + Assert.AreEqual(17.82857, p.Kurtosis, 1e-04); var p2 = new Pareto(1, 4); - Assert.AreEqual(p2.Kurtosis,double.NaN); + Assert.AreEqual(double.NaN,p2.Kurtosis); } /// @@ -231,12 +231,12 @@ public void Test_Kurtosis() public void Test_MinMax() { var p = new Pareto(); - Assert.AreEqual(p.Minimum, 1); - Assert.AreEqual(p.Maximum,double.PositiveInfinity); + Assert.AreEqual(1, p.Minimum); + Assert.AreEqual(double.PositiveInfinity, p.Maximum); var p2 = new Pareto(2,3); - Assert.AreEqual(p2.Minimum, 2); - Assert.AreEqual(p2.Maximum,double.PositiveInfinity); + Assert.AreEqual(2, p2.Minimum); + Assert.AreEqual(double.PositiveInfinity, p2.Maximum); } /// @@ -246,12 +246,12 @@ public void Test_MinMax() public void Test_PDF() { var p = new Pareto(1,1); - Assert.AreEqual(p.PDF(1), 1); - Assert.AreEqual(p.PDF(1.5), 4d / 9d); + Assert.AreEqual(1,p.PDF(1)); + Assert.AreEqual(4d / 9d,p.PDF(1.5) ); var p2 = new Pareto(3, 2); - Assert.AreEqual(p2.PDF(3), 2d / 3d); - Assert.AreEqual(p2.PDF(5), 18d / 125d); + Assert.AreEqual(2d / 3d,p2.PDF(3)); + Assert.AreEqual(18d / 125d,p2.PDF(5) ); } /// @@ -261,8 +261,8 @@ public void Test_PDF() public void Test_CDF() { var p = new Pareto(); - Assert.AreEqual(p.CDF(0), 0); - Assert.AreEqual(p.CDF(2), 0.9990, 1e-04); + Assert.AreEqual(0, p.CDF(0)); + Assert.AreEqual(0.9990, p.CDF(2), 1e-04); } /// @@ -272,8 +272,8 @@ public void Test_CDF() public void Test_InverseCDF() { var p = new Pareto(); - Assert.AreEqual(p.InverseCDF(0), 1); - Assert.AreEqual(p.InverseCDF(0.3), 1.0363, 1e-04); + Assert.AreEqual(1, p.InverseCDF(0)); + Assert.AreEqual(1.0363, p.InverseCDF(0.3), 1e-04); } } } diff --git a/Test_Numerics/Distributions/Univariate/Test_PearsonTypeIII.cs b/Test_Numerics/Distributions/Univariate/Test_PearsonTypeIII.cs index b4c190f8..a8d139f4 100644 --- a/Test_Numerics/Distributions/Univariate/Test_PearsonTypeIII.cs +++ b/Test_Numerics/Distributions/Univariate/Test_PearsonTypeIII.cs @@ -94,15 +94,15 @@ public void Test_P3_MOM() double true_mean = 191.31739d; double true_stDev = 47.96161d; double true_skew = 0.86055d; - Assert.AreEqual((xi - true_xi) / true_xi < 0.01d, true); - Assert.AreEqual((beta - true_beta) / true_beta < 0.01d, true); - Assert.AreEqual((alpha - true_alpha) / true_alpha < 0.01d, true); - Assert.AreEqual((mu - true_mu) / true_mu < 0.01d, true); - Assert.AreEqual((sigma - true_sigma) / true_sigma < 0.01d, true); - Assert.AreEqual((gamma - true_gamma) / true_gamma < 0.01d, true); - Assert.AreEqual((mean - true_mean) / true_mean < 0.01d, true); - Assert.AreEqual((stDev - true_stDev) / true_stDev < 0.01d, true); - Assert.AreEqual((skew - true_skew) / true_skew < 0.01d, true); + Assert.IsLessThan(0.01d,(xi - true_xi) / true_xi); + Assert.IsLessThan(0.01d,(beta - true_beta) / true_beta); + Assert.IsLessThan(0.01d, (alpha - true_alpha) / true_alpha); + Assert.IsLessThan(0.01d, (mu - true_mu) / true_mu); + Assert.IsLessThan(0.01d, (sigma - true_sigma) / 
true_sigma); + Assert.IsLessThan(0.01d, (gamma - true_gamma) / true_gamma); + Assert.IsLessThan(0.01d, (mean - true_mean) / true_mean); + Assert.IsLessThan(0.01d, (stDev - true_stDev) / true_stDev); + Assert.IsLessThan(0.01d, (skew - true_skew) / true_skew); } /// @@ -124,10 +124,10 @@ public void Test_P3_LMOM_Fit() Assert.AreEqual(a, true_a, 0.001d); Assert.AreEqual(b, true_b, 0.001d); var lmom = P3.LinearMomentsFromParameters(P3.GetParameters); - Assert.AreEqual(lmom[0], 1648.806d, 0.001d); - Assert.AreEqual(lmom[1], 138.2366d, 0.001d); - Assert.AreEqual(lmom[2], 0.1033889d, 0.001d); - Assert.AreEqual(lmom[3], 0.1258521d, 0.001d); + Assert.AreEqual(1648.806d, lmom[0], 0.001d); + Assert.AreEqual(138.2366d, lmom[1], 0.001d); + Assert.AreEqual(0.1033889d, lmom[2], 0.001d); + Assert.AreEqual(0.1258521d, lmom[3], 0.001d); } /// @@ -164,15 +164,15 @@ public void Test_P3_MLE() double true_mean = 191.31739d; double true_stDev = 47.01925d; double true_skew = 0.61897d; - Assert.AreEqual((xi - true_xi) / true_xi < 0.01d, true); - Assert.AreEqual((beta - true_beta) / true_beta < 0.01d, true); - Assert.AreEqual((alpha - true_alpha) / true_alpha < 0.01d, true); - Assert.AreEqual((mu - true_mu) / true_mu < 0.01d, true); - Assert.AreEqual((sigma - true_sigma) / true_sigma < 0.01d, true); - Assert.AreEqual((gamma - true_gamma) / true_gamma < 0.01d, true); - Assert.AreEqual((mean - true_mean) / true_mean < 0.01d, true); - Assert.AreEqual((stDev - true_stDev) / true_stDev < 0.01d, true); - Assert.AreEqual((skew - true_skew) / true_skew < 0.01d, true); + Assert.IsLessThan(0.01d, (xi - true_xi) / true_xi); + Assert.IsLessThan(0.01d, (beta - true_beta) / true_beta); + Assert.IsLessThan(0.01d, (alpha - true_alpha) / true_alpha); + Assert.IsLessThan(0.01d, (mu - true_mu) / true_mu); + Assert.IsLessThan(0.01d, (sigma - true_sigma) / true_sigma); + Assert.IsLessThan(0.01d, (gamma - true_gamma) / true_gamma); + Assert.IsLessThan(0.01d, (mean - true_mean) / true_mean); + Assert.IsLessThan(0.01d, (stDev - true_stDev) / true_stDev); + Assert.IsLessThan(0.01d, (skew - true_skew) / true_skew); } /// @@ -192,7 +192,7 @@ public void Test_P3_Quantile() var P3 = new PearsonTypeIII(191.31739d, 47.01925d, -0.61897d); double q999 = P3.InverseCDF(0.99d); double true_q999 = 321.48d; - Assert.AreEqual((q999 - true_q999) / true_q999 < 0.01d, true); + Assert.IsLessThan(0.01d, (q999 - true_q999) / true_q999); } /// @@ -214,13 +214,13 @@ public void Test_P3_StandardError() var P3 = new PearsonTypeIII(191.31739d, 47.96161d, 0.86055d); double qVar999 = Math.Sqrt(P3.QuantileVariance(0.99d, 69, ParameterEstimationMethod.MethodOfMoments)); double true_qVar999 = 27.175d; - Assert.AreEqual((qVar999 - true_qVar999) / true_qVar999 < 0.01d, true); + Assert.IsLessThan(0.01d, (qVar999 - true_qVar999) / true_qVar999); // Maximum Likelihood P3 = new PearsonTypeIII(191.31739d, 47.01925d, 0.61897d); qVar999 = Math.Sqrt(P3.QuantileVariance(0.99d, 69, ParameterEstimationMethod.MaximumLikelihood)); true_qVar999 = 20.045d; - Assert.AreEqual((qVar999 - true_qVar999) / true_qVar999 < 0.01d, true); + Assert.IsLessThan(0.01d, (qVar999 - true_qVar999) / true_qVar999); } /// @@ -230,14 +230,14 @@ public void Test_P3_StandardError() public void Test_Construction() { var P3 = new PearsonTypeIII(); - Assert.AreEqual(P3.Mu, 100); - Assert.AreEqual(P3.Sigma, 10); - Assert.AreEqual(P3.Gamma, 0); + Assert.AreEqual(100,P3.Mu); + Assert.AreEqual(10,P3.Sigma); + Assert.AreEqual(0, P3.Gamma); var P3ii = new PearsonTypeIII(1, 1, 1); - Assert.AreEqual(P3ii.Mu, 1); - 
Assert.AreEqual(P3ii.Sigma, 1); - Assert.AreEqual(P3ii.Gamma, 1); + Assert.AreEqual(1,P3ii.Mu); + Assert.AreEqual(1, P3ii.Sigma); + Assert.AreEqual(1, P3ii.Gamma); } /// @@ -263,12 +263,12 @@ public void Test_InvalidParameters() public void Test_ParametersToString() { var P3 = new PearsonTypeIII(); - Assert.AreEqual(P3.ParametersToString[0, 0], "Mean (µ)"); - Assert.AreEqual(P3.ParametersToString[1, 0], "Std Dev (σ)"); - Assert.AreEqual(P3.ParametersToString[2, 0], "Skew (γ)"); - Assert.AreEqual(P3.ParametersToString[0, 1], "100"); - Assert.AreEqual(P3.ParametersToString[1, 1], "10"); - Assert.AreEqual(P3.ParametersToString[2, 1], "0"); + Assert.AreEqual("Mean (µ)",P3.ParametersToString[0, 0]); + Assert.AreEqual("Std Dev (σ)", P3.ParametersToString[1, 0]); + Assert.AreEqual("Skew (γ)", P3.ParametersToString[2, 0]); + Assert.AreEqual("100", P3.ParametersToString[0, 1]); + Assert.AreEqual("10", P3.ParametersToString[1, 1]); + Assert.AreEqual("0", P3.ParametersToString[2, 1]); } /// @@ -292,10 +292,10 @@ public void Test_Moments() public void Test_Mean() { var P3 = new PearsonTypeIII(); - Assert.AreEqual(P3.Mean, 100); + Assert.AreEqual(100, P3.Mean); var P3ii = new PearsonTypeIII(100, 1, 1); - Assert.AreEqual(P3ii.Mean, 100); + Assert.AreEqual(100, P3ii.Mean); } /// @@ -305,7 +305,7 @@ public void Test_Mean() public void Test_Median() { var P3 = new PearsonTypeIII(); - Assert.AreEqual(P3.Median, 100); + Assert.AreEqual(100, P3.Median); } /// @@ -315,10 +315,10 @@ public void Test_Median() public void Test_Mode() { var P3 = new PearsonTypeIII(); - Assert.AreEqual(P3.Mode, 100); + Assert.AreEqual(100, P3.Mode); var P3ii = new PearsonTypeIII(1, 1, 1); - Assert.AreEqual(P3ii.Mode, 0.5); + Assert.AreEqual(0.5, P3ii.Mode); } /// @@ -328,10 +328,10 @@ public void Test_Mode() public void Test_StandardDeviation() { var P3 = new PearsonTypeIII(); - Assert.AreEqual(P3.StandardDeviation, 10); + Assert.AreEqual(10, P3.StandardDeviation); var P3ii = new PearsonTypeIII(1, 1, 1); - Assert.AreEqual(P3ii.StandardDeviation, 1); + Assert.AreEqual(1, P3ii.StandardDeviation); } /// @@ -341,10 +341,10 @@ public void Test_StandardDeviation() public void Test_Skewness() { var P3 = new PearsonTypeIII(); - Assert.AreEqual(P3.Skewness, 0); + Assert.AreEqual(0, P3.Skewness); var P3ii = new PearsonTypeIII(1, 1, 1); - Assert.AreEqual(P3.Skewness, 0); + Assert.AreEqual(0, P3.Skewness); } /// @@ -354,10 +354,10 @@ public void Test_Skewness() public void Test_Kurtosis() { var P3 = new PearsonTypeIII(); - Assert.AreEqual(P3.Kurtosis, 3); + Assert.AreEqual(3, P3.Kurtosis); var P3ii = new PearsonTypeIII(1, 1, 1); - Assert.AreEqual(P3ii.Kurtosis, 4.5); + Assert.AreEqual(4.5, P3ii.Kurtosis); } /// @@ -367,10 +367,10 @@ public void Test_Kurtosis() public void Test_Minimum() { var P3 = new PearsonTypeIII(); - Assert.AreEqual(P3.Minimum,double.NegativeInfinity); + Assert.AreEqual(double.NegativeInfinity,P3.Minimum); var P3ii = new PearsonTypeIII(1, 1, 1); - Assert.AreEqual(P3ii.Minimum, -1); + Assert.AreEqual(-1, P3ii.Minimum); } /// @@ -380,10 +380,10 @@ public void Test_Minimum() public void Test_Maximum() { var P3 = new PearsonTypeIII(); - Assert.AreEqual(P3.Maximum,double.PositiveInfinity); + Assert.AreEqual(double.PositiveInfinity, P3.Maximum); var P3ii = new PearsonTypeIII(1, 1, 1); - Assert.AreEqual(P3ii.Maximum, double.PositiveInfinity); + Assert.AreEqual(double.PositiveInfinity, P3ii.Maximum); } } diff --git a/Test_Numerics/Distributions/Univariate/Test_Pert.cs b/Test_Numerics/Distributions/Univariate/Test_Pert.cs index 
0342bf32..799b90b2 100644 --- a/Test_Numerics/Distributions/Univariate/Test_Pert.cs +++ b/Test_Numerics/Distributions/Univariate/Test_Pert.cs @@ -90,9 +90,9 @@ public void Test_PertDist2() var p = P.CDF(-0.5); var q = P.InverseCDF(p); - Assert.AreEqual(d, 0.5273438, 1E-5); - Assert.AreEqual(p, 0.1035156, 1E-5); - Assert.AreEqual(q, -0.5, 1E-5); + Assert.AreEqual(0.5273438, d, 1E-5); + Assert.AreEqual(0.1035156, p, 1E-5); + Assert.AreEqual(-0.5, q, 1E-5); } @@ -152,9 +152,9 @@ public void Test_Pert_MLE() public void Test_Construction() { var p = new Pert(); - Assert.AreEqual(p.Min, 0); - Assert.AreEqual(p.Max, 1); - Assert.AreEqual(p.Mode, 0.5); + Assert.AreEqual(0,p.Min); + Assert.AreEqual(1, p.Max); + Assert.AreEqual(0.5, p.Mode); } /// @@ -183,12 +183,12 @@ public void Test_InvalidParameters() public void Test_ParametersToString() { var p = new Pert(); - Assert.AreEqual(p.ParametersToString[0, 0], "Min (a)"); - Assert.AreEqual(p.ParametersToString[1,0], "Most Likely (c)"); - Assert.AreEqual(p.ParametersToString[2, 0], "Max (b)"); - Assert.AreEqual(p.ParametersToString[0, 1], "0"); - Assert.AreEqual(p.ParametersToString[1, 1], "0.5"); - Assert.AreEqual(p.ParametersToString[2, 1], "1"); + Assert.AreEqual("Min (a)",p.ParametersToString[0, 0] ); + Assert.AreEqual("Most Likely (c)", p.ParametersToString[1, 0]); + Assert.AreEqual("Max (b)", p.ParametersToString[2, 0]); + Assert.AreEqual("0", p.ParametersToString[0, 1]); + Assert.AreEqual("0.5", p.ParametersToString[1, 1]); + Assert.AreEqual("1", p.ParametersToString[2, 1]); } /// @@ -212,10 +212,10 @@ public void Test_Moments() public void Test_Mean() { var p = new Pert(); - Assert.AreEqual(p.Mean, 0.5); + Assert.AreEqual(0.5, p.Mean); var p2 = new Pert(0, 0, 0); - Assert.AreEqual(p2.Mean, 0); + Assert.AreEqual(0, p2.Mean); } /// @@ -225,10 +225,10 @@ public void Test_Mean() public void Test_Median() { var p = new Pert(); - Assert.AreEqual(p.Median, 0.5); + Assert.AreEqual(0.5, p.Median); var p2 = new Pert(0,0,0); - Assert.AreEqual(p2.Median, 0); + Assert.AreEqual(0, p2.Median); } /// @@ -238,7 +238,7 @@ public void Test_Median() public void Test_Mode() { var p = new Pert(); - Assert.AreEqual(p.Mode, 0.5); + Assert.AreEqual(0.5, p.Mode); } /// @@ -248,7 +248,7 @@ public void Test_Mode() public void Test_StandardDeviation() { var p = new Pert(); - Assert.AreEqual(p.StandardDeviation, 0.1889, 1e-04); + Assert.AreEqual(0.1889, p.StandardDeviation, 1e-04); } /// @@ -258,7 +258,7 @@ public void Test_StandardDeviation() public void Test_Skewness() { var p = new Pert(); - Assert.AreEqual(p.Skewness, 0); + Assert.AreEqual(0,p.Skewness); } /// @@ -268,7 +268,7 @@ public void Test_Skewness() public void Test_Kurtosis() { var p = new Pert(); - Assert.AreEqual(p.Kurtosis, 2.3333,1e-04); + Assert.AreEqual(2.3333, p.Kurtosis, 1e-04); } /// @@ -278,12 +278,12 @@ public void Test_Kurtosis() public void Test_MinMax() { var p = new Pert(); - Assert.AreEqual(p.Minimum, 0); - Assert.AreEqual(p.Maximum, 1); + Assert.AreEqual(0,p.Minimum); + Assert.AreEqual(1, p.Maximum); var p2 = new Pert(1, 1.5, 2); - Assert.AreEqual(p2.Minimum, 1); - Assert.AreEqual(p2.Maximum, 2); + Assert.AreEqual(1, p2.Minimum); + Assert.AreEqual(2, p2.Maximum); } } } diff --git a/Test_Numerics/Distributions/Univariate/Test_PertPercentileDists.cs b/Test_Numerics/Distributions/Univariate/Test_PertPercentileDists.cs index 27ec0076..370fad88 100644 --- a/Test_Numerics/Distributions/Univariate/Test_PertPercentileDists.cs +++ b/Test_Numerics/Distributions/Univariate/Test_PertPercentileDists.cs @@ 
-82,9 +82,9 @@ public void Test_PertPercentile_AtRisk() Assert.AreEqual(pert.StandardDeviation, true_stdDev, 1E-3); Assert.AreEqual(pert.Skewness, true_skew, 1E-3); Assert.AreEqual(pert.Kurtosis, true_kurt, 1E-3); - Assert.AreEqual(pert.CDF(true_icdf05), 0.05, 1E-3); - Assert.AreEqual(pert.CDF(true_icdf50), 0.5, 1E-3); - Assert.AreEqual(pert.CDF(true_icdf95), 0.95, 1E-3); + Assert.AreEqual(0.05,pert.CDF(true_icdf05), 1E-3); + Assert.AreEqual(0.5, pert.CDF(true_icdf50), 1E-3); + Assert.AreEqual(0.95, pert.CDF(true_icdf95), 1E-3); Assert.AreEqual(pert.InverseCDF(0.05d), true_icdf05, 1E-3); Assert.AreEqual(pert.InverseCDF(0.5d), true_icdf50, 1E-3); Assert.AreEqual(pert.InverseCDF(0.95d), true_icdf95, 1E-3); @@ -111,9 +111,9 @@ public void Test_PertPercentileZ_AtRisk() Assert.AreEqual(pert.Mean, true_mean, 1E-2); Assert.AreEqual(pert.Median, true_median, 1E-2); Assert.AreEqual(pert.Mode, true_mode, 1E-2); - Assert.AreEqual(pert.CDF(true_icdf05), 0.05, 1E-2); - Assert.AreEqual(pert.CDF(true_icdf50), 0.5, 1E-2); - Assert.AreEqual(pert.CDF(true_icdf95), 0.95, 1E-2); + Assert.AreEqual(0.05, pert.CDF(true_icdf05), 1E-2); + Assert.AreEqual(0.5, pert.CDF(true_icdf50), 1E-2); + Assert.AreEqual(0.95, pert.CDF(true_icdf95), 1E-2); Assert.AreEqual(pert.InverseCDF(0.05d), true_icdf05, 1E-2); Assert.AreEqual(pert.InverseCDF(0.5d), true_icdf50, 1E-2); Assert.AreEqual(pert.InverseCDF(0.95d), true_icdf95, 1E-2); @@ -134,9 +134,9 @@ public void Test_PertPercentile() var p2 = pert.CDF(fiftieth); var p3 = pert.CDF(ninetyfifth); - Assert.AreEqual(pert.CDF(fifth), 0.05, 1E-1); - Assert.AreEqual(pert.CDF(fiftieth), 0.5, 1E-1); - Assert.AreEqual(pert.CDF(ninetyfifth), 0.95, 1E-1); + Assert.AreEqual(0.05, pert.CDF(fifth), 1E-1); + Assert.AreEqual(0.5, pert.CDF(fiftieth), 1E-1); + Assert.AreEqual(0.95, pert.CDF(ninetyfifth), 1E-1); Assert.AreEqual(pert.InverseCDF(0.05d), fifth, 1E-1); Assert.AreEqual(pert.InverseCDF(0.5d), fiftieth, 1E-1); Assert.AreEqual(pert.InverseCDF(0.95d), ninetyfifth, 1E-1); @@ -149,9 +149,9 @@ public void Test_PertPercentile() p2 = pert.CDF(fiftieth); p3 = pert.CDF(ninetyfifth); - Assert.AreEqual(pert.CDF(fifth), 0.05, 1E-1); - Assert.AreEqual(pert.CDF(fiftieth), 0.5, 1E-1); - Assert.AreEqual(pert.CDF(ninetyfifth), 0.95, 1E-1); + Assert.AreEqual(0.05, pert.CDF(fifth), 1E-1); + Assert.AreEqual(0.5, pert.CDF(fiftieth), 1E-1); + Assert.AreEqual(0.95, pert.CDF(ninetyfifth), 1E-1); Assert.AreEqual(pert.InverseCDF(0.05d), fifth, 1E-1); Assert.AreEqual(pert.InverseCDF(0.5d), fiftieth, 1E-1); Assert.AreEqual(pert.InverseCDF(0.95d), ninetyfifth, 1E-1); @@ -164,9 +164,9 @@ public void Test_PertPercentile() p2 = pert.CDF(fiftieth); p3 = pert.CDF(ninetyfifth); - Assert.AreEqual(pert.CDF(fifth), 0.05, 1E-1); - Assert.AreEqual(pert.CDF(fiftieth), 0.5, 1E-1); - Assert.AreEqual(pert.CDF(ninetyfifth), 0.95, 1E-1); + Assert.AreEqual(0.05, pert.CDF(fifth), 1E-1); + Assert.AreEqual(0.5, pert.CDF(fiftieth), 1E-1); + Assert.AreEqual(0.95, pert.CDF(ninetyfifth), 1E-1); Assert.AreEqual(pert.InverseCDF(0.05d), fifth, 1E-1); Assert.AreEqual(pert.InverseCDF(0.5d), fiftieth, 1E-1); Assert.AreEqual(pert.InverseCDF(0.95d), ninetyfifth, 1E-1); @@ -180,9 +180,9 @@ public void Test_PertPercentile() p2 = pert.CDF(fiftieth); p3 = pert.CDF(ninetyfifth); - Assert.AreEqual(pert.CDF(fifth), 0.05, 1E-1); - Assert.AreEqual(pert.CDF(fiftieth), 0.5, 1E-1); - Assert.AreEqual(pert.CDF(ninetyfifth), 0.95, 1E-1); + Assert.AreEqual(0.05, pert.CDF(fifth), 1E-1); + Assert.AreEqual(0.5, pert.CDF(fiftieth), 1E-1); + Assert.AreEqual(0.95, 
pert.CDF(ninetyfifth), 1E-1); Assert.AreEqual(pert.InverseCDF(0.05d), fifth, 1E-1); Assert.AreEqual(pert.InverseCDF(0.5d), fiftieth, 1E-1); Assert.AreEqual(pert.InverseCDF(0.95d), ninetyfifth, 1E-1); @@ -205,9 +205,9 @@ public void Test_PertPercentileZ() var p2 = pert.CDF(fiftieth); var p3 = pert.CDF(ninetyfifth); - Assert.AreEqual(pert.CDF(fifth), 0.05, 1E-1); - Assert.AreEqual(pert.CDF(fiftieth), 0.5, 1E-1); - Assert.AreEqual(pert.CDF(ninetyfifth), 0.95, 1E-1); + Assert.AreEqual(0.05, pert.CDF(fifth), 1E-1); + Assert.AreEqual(0.5, pert.CDF(fiftieth), 1E-1); + Assert.AreEqual(0.95, pert.CDF(ninetyfifth), 1E-1); Assert.AreEqual(pert.InverseCDF(0.05d), fifth, 1E-1); Assert.AreEqual(pert.InverseCDF(0.5d), fiftieth, 1E-1); Assert.AreEqual(pert.InverseCDF(0.95d), ninetyfifth, 1E-1); @@ -220,9 +220,9 @@ public void Test_PertPercentileZ() p2 = pert.CDF(fiftieth); p3 = pert.CDF(ninetyfifth); - Assert.AreEqual(pert.CDF(fifth), 0.05, 1E-1); - Assert.AreEqual(pert.CDF(fiftieth), 0.5, 1E-1); - Assert.AreEqual(pert.CDF(ninetyfifth), 0.95, 1E-1); + Assert.AreEqual(0.05, pert.CDF(fifth), 1E-1); + Assert.AreEqual(0.5, pert.CDF(fiftieth), 1E-1); + Assert.AreEqual(0.95, pert.CDF(ninetyfifth), 1E-1); Assert.AreEqual(pert.InverseCDF(0.05d), fifth, 1E-1); Assert.AreEqual(pert.InverseCDF(0.5d), fiftieth, 1E-1); Assert.AreEqual(pert.InverseCDF(0.95d), ninetyfifth, 1E-1); @@ -235,9 +235,9 @@ public void Test_PertPercentileZ() p2 = pert.CDF(fiftieth); p3 = pert.CDF(ninetyfifth); - Assert.AreEqual(pert.CDF(fifth), 0.05, 1E-1); - Assert.AreEqual(pert.CDF(fiftieth), 0.5, 1E-1); - Assert.AreEqual(pert.CDF(ninetyfifth), 0.95, 1E-1); + Assert.AreEqual(0.05, pert.CDF(fifth), 1E-1); + Assert.AreEqual(0.5, pert.CDF(fiftieth), 1E-1); + Assert.AreEqual(0.95, pert.CDF(ninetyfifth), 1E-1); Assert.AreEqual(pert.InverseCDF(0.05d), fifth, 1E-1); Assert.AreEqual(pert.InverseCDF(0.5d), fiftieth, 1E-1); Assert.AreEqual(pert.InverseCDF(0.95d), ninetyfifth, 1E-1); @@ -251,9 +251,9 @@ public void Test_PertPercentileZ() p2 = pert.CDF(fiftieth); p3 = pert.CDF(ninetyfifth); - Assert.AreEqual(pert.CDF(fifth), 0.05, 1E-1); - Assert.AreEqual(pert.CDF(fiftieth), 0.5, 1E-1); - Assert.AreEqual(pert.CDF(ninetyfifth), 0.95, 1E-1); + Assert.AreEqual(0.05, pert.CDF(fifth), 1E-1); + Assert.AreEqual(0.5, pert.CDF(fiftieth), 1E-1); + Assert.AreEqual(0.95, pert.CDF(ninetyfifth), 1E-1); Assert.AreEqual(pert.InverseCDF(0.05d), fifth, 1E-1); Assert.AreEqual(pert.InverseCDF(0.5d), fiftieth, 1E-1); Assert.AreEqual(pert.InverseCDF(0.95d), ninetyfifth, 1E-1); diff --git a/Test_Numerics/Distributions/Univariate/Test_Poisson.cs b/Test_Numerics/Distributions/Univariate/Test_Poisson.cs index 0197d3c3..9ab9b4bd 100644 --- a/Test_Numerics/Distributions/Univariate/Test_Poisson.cs +++ b/Test_Numerics/Distributions/Univariate/Test_Poisson.cs @@ -92,10 +92,10 @@ public void Test_PoissonDist() public void Test_Construction() { var P = new Poisson(); - Assert.AreEqual(P.Lambda, 1); + Assert.AreEqual(1,P.Lambda); var P2 = new Poisson(10); - Assert.AreEqual(P2.Lambda, 10); + Assert.AreEqual(10,P2.Lambda); } /// @@ -121,8 +121,8 @@ public void Test_InvalidParameters() public void Test_ParametersToString() { var P = new Poisson(); - Assert.AreEqual(P.ParametersToString[0, 0], "Rate (λ)"); - Assert.AreEqual(P.ParametersToString[0, 1], "1"); + Assert.AreEqual("Rate (λ)",P.ParametersToString[0, 0] ); + Assert.AreEqual("1",P.ParametersToString[0, 1]); } /// @@ -146,10 +146,10 @@ public void Test_Moments() public void Test_Mean() { var P = new Poisson(); - 
Assert.AreEqual(P.Mean, 1); + Assert.AreEqual(1,P.Mean); var P2 = new Poisson(10); - Assert.AreEqual(P2.Mean, 10); + Assert.AreEqual(10,P2.Mean ); } /// @@ -159,7 +159,7 @@ public void Test_Mean() public void Test_Median() { var P = new Poisson(); - Assert.AreEqual(P.Median, 1, 1E-4); + Assert.AreEqual(1, P.Median, 1E-4); } /// @@ -169,10 +169,10 @@ public void Test_Median() public void Test_Mode() { var P = new Poisson(); - Assert.AreEqual(P.Mode, 1); + Assert.AreEqual(1, P.Mode); var P2 = new Poisson(2.4); - Assert.AreEqual(P2.Mode, 2); + Assert.AreEqual(2,P2.Mode); } /// @@ -182,10 +182,10 @@ public void Test_Mode() public void Test_StandardDeviation() { var P = new Poisson(); - Assert.AreEqual(P.StandardDeviation, 1); + Assert.AreEqual(1,P.StandardDeviation); var P2 = new Poisson(4); - Assert.AreEqual(P2.StandardDeviation, 2); + Assert.AreEqual(2, P2.StandardDeviation); } /// @@ -195,10 +195,10 @@ public void Test_StandardDeviation() public void Test_Skewness() { var P = new Poisson(); - Assert.AreEqual(P.Skewness, 1); + Assert.AreEqual(1, P.Skewness); var P2 = new Poisson(4); - Assert.AreEqual(P2.Skewness, 0.5); + Assert.AreEqual(0.5, P2.Skewness); } /// @@ -208,10 +208,10 @@ public void Test_Skewness() public void Test_Kurtosis() { var P = new Poisson(); - Assert.AreEqual(P.Kurtosis, 4); + Assert.AreEqual(4, P.Kurtosis); var P2 = new Poisson(4); - Assert.AreEqual(P2.Kurtosis, 3.25); + Assert.AreEqual(3.25, P2.Kurtosis); } /// @@ -221,12 +221,12 @@ public void Test_Kurtosis() public void Test_MinMax() { var P = new Poisson(); - Assert.AreEqual(P.Minimum, 0); - Assert.AreEqual(P.Maximum,double.PositiveInfinity); + Assert.AreEqual(0, P.Minimum); + Assert.AreEqual(double.PositiveInfinity,P.Maximum); var P2 = new Poisson(4); - Assert.AreEqual(P2.Minimum, 0); - Assert.AreEqual(P2.Maximum, double.PositiveInfinity); + Assert.AreEqual(0, P2.Minimum); + Assert.AreEqual(double.PositiveInfinity, P2.Maximum); } /// @@ -236,12 +236,12 @@ public void Test_MinMax() public void Test_PDF() { var P = new Poisson(1.5); - Assert.AreEqual(P.PDF(1), 0.33469, 1e-04); - Assert.AreEqual(P.PDF(10), 0.00000354, 1e-08); + Assert.AreEqual(0.33469, P.PDF(1), 1e-04); + Assert.AreEqual(0.00000354, P.PDF(10), 1e-08); var P2 = new Poisson(5.4); - Assert.AreEqual(P2.PDF(1), 0.024389, 1e-05); - Assert.AreEqual(P2.PDF(10), 0.02624, 1e-05); + Assert.AreEqual(0.024389, P2.PDF(1), 1e-05); + Assert.AreEqual(0.02624, P2.PDF(10), 1e-05); } /// @@ -251,12 +251,12 @@ public void Test_PDF() public void Test_CDF() { var P = new Poisson(1.5); - Assert.AreEqual(P.CDF(1), 0.55782, 1e-04); - Assert.AreEqual(P.CDF(10), 0.999999, 1e-06); + Assert.AreEqual(0.55782, P.CDF(1), 1e-04); + Assert.AreEqual(0.999999, P.CDF(10), 1e-06); var P2 = new Poisson(10.8); - Assert.AreEqual(P2.CDF(1), 0.00024, 1e-05); - Assert.AreEqual(P2.CDF(10), 0.483969, 1e-05); + Assert.AreEqual(0.00024, P2.CDF(1), 1e-05); + Assert.AreEqual(0.483969, P2.CDF(10), 1e-05); } /// @@ -266,8 +266,8 @@ public void Test_CDF() public void Test_InverseCDF() { var P = new Poisson(); - Assert.AreEqual(P.InverseCDF(0), 0); - Assert.AreEqual(P.InverseCDF(1), double.PositiveInfinity); + Assert.AreEqual(0,P.InverseCDF(0)); + Assert.AreEqual(double.PositiveInfinity,P.InverseCDF(1)); } } } diff --git a/Test_Numerics/Distributions/Univariate/Test_Rayleigh.cs b/Test_Numerics/Distributions/Univariate/Test_Rayleigh.cs index dce618fa..66b5c9c6 100644 --- a/Test_Numerics/Distributions/Univariate/Test_Rayleigh.cs +++ b/Test_Numerics/Distributions/Univariate/Test_Rayleigh.cs @@ -84,10 +84,10 
@@ public void Test_RayleighDist() public void Test_Construction() { var R = new Rayleigh(); - Assert.AreEqual(R.Sigma, 10); + Assert.AreEqual(10,R.Sigma); var R2 = new Rayleigh(2); - Assert.AreEqual(R2.Sigma, 2); + Assert.AreEqual(2, R2.Sigma); } /// @@ -113,8 +113,8 @@ public void Test_InvalidParameters() public void Test_ParametersToString() { var R = new Rayleigh(); - Assert.AreEqual(R.ParametersToString[0, 0], "Scale (σ)"); - Assert.AreEqual(R.ParametersToString[0, 1], "10"); + Assert.AreEqual("Scale (σ)", R.ParametersToString[0, 0]); + Assert.AreEqual("10", R.ParametersToString[0, 1]); } /// @@ -138,10 +138,10 @@ public void Test_Moments() public void Test_Mean() { var R = new Rayleigh(); - Assert.AreEqual(R.Mean, 12.53314, 1e-04); + Assert.AreEqual(12.53314, R.Mean, 1e-04); var R2 = new Rayleigh(1); - Assert.AreEqual(R2.Mean, 1.25331, 1e-04); + Assert.AreEqual(1.25331, R2.Mean, 1e-04); } /// @@ -151,10 +151,10 @@ public void Test_Mean() public void Test_Median() { var R = new Rayleigh(); - Assert.AreEqual(R.Median, 11.7741, 1e-04); + Assert.AreEqual(11.7741, R.Median, 1e-04); var R2 = new Rayleigh(1); - Assert.AreEqual(R2.Median, 1.1774, 1e-04); + Assert.AreEqual(1.1774, R2.Median, 1e-04); } /// @@ -164,10 +164,10 @@ public void Test_Median() public void Test_Mode() { var R = new Rayleigh(); - Assert.AreEqual(R.Mode, 10); + Assert.AreEqual(10, R.Mode); var R2 = new Rayleigh(1); - Assert.AreEqual(R2.Mode, 1); + Assert.AreEqual(1, R2.Mode); } /// @@ -177,10 +177,10 @@ public void Test_Mode() public void Test_StandardDeviation() { var R = new Rayleigh(); - Assert.AreEqual(R.StandardDeviation, 6.55136, 1e-05); + Assert.AreEqual(6.55136, R.StandardDeviation, 1e-05); var R2 = new Rayleigh(1); - Assert.AreEqual(R2.StandardDeviation, 0.65513, 1e-04); + Assert.AreEqual(0.65513, R2.StandardDeviation, 1e-04); } /// @@ -190,10 +190,10 @@ public void Test_StandardDeviation() public void Test_Skewness() { var R = new Rayleigh(); - Assert.AreEqual(R.Skewness, 0.63111, 1e-04); + Assert.AreEqual(0.63111, R.Skewness, 1e-04); var R2 = new Rayleigh(1); - Assert.AreEqual(R2.Skewness, 0.63111, 1e-04); + Assert.AreEqual(0.63111, R2.Skewness, 1e-04); } /// @@ -203,10 +203,10 @@ public void Test_Skewness() public void Test_Kurtosis() { var R = new Rayleigh(); - Assert.AreEqual(R.Kurtosis, 3.24508,1e-05); + Assert.AreEqual(3.24508, R.Kurtosis, 1e-05); var R2 = new Rayleigh(1); - Assert.AreEqual(R2.Kurtosis, 3.24508,1e-05); + Assert.AreEqual(3.24508, R2.Kurtosis,1e-05); } /// @@ -216,8 +216,8 @@ public void Test_Kurtosis() public void Test_MinMax() { var R = new Rayleigh(); - Assert.AreEqual(R.Minimum, 0); - Assert.AreEqual(R.Maximum, double.PositiveInfinity); + Assert.AreEqual(0,R.Minimum); + Assert.AreEqual(double.PositiveInfinity,R.Maximum); } /// @@ -227,11 +227,11 @@ public void Test_MinMax() public void Test_PDF() { var R = new Rayleigh(); - Assert.AreEqual(R.PDF(-1), 0); - Assert.AreEqual(R.PDF(1), 9.9501e-03, 1e-06); + Assert.AreEqual(0,R.PDF(-1)); + Assert.AreEqual(9.9501e-03, R.PDF(1), 1e-06); var R2 = new Rayleigh(1); - Assert.AreEqual(R.PDF(2), 0.019603, 1e-05); + Assert.AreEqual(0.019603, R.PDF(2), 1e-05); } /// @@ -241,8 +241,8 @@ public void Test_PDF() public void Test_CDF() { var R = new Rayleigh(); - Assert.AreEqual(R.CDF(-1), 0); - Assert.AreEqual(R.CDF(1), 4.9875e-03,1e-04); + Assert.AreEqual(0, R.CDF(-1)); + Assert.AreEqual(4.9875e-03, R.CDF(1),1e-04); } /// @@ -252,9 +252,9 @@ public void Test_CDF() public void Test_InverseCDF() { var R = new Rayleigh(); - Assert.AreEqual(R.InverseCDF(0), 
0); - Assert.AreEqual(R.InverseCDF(1), double.PositiveInfinity); - Assert.AreEqual(R.InverseCDF(0.4), 10.1076, 1e-04); + Assert.AreEqual(0, R.InverseCDF(0)); + Assert.AreEqual(double.PositiveInfinity,R.InverseCDF(1) ); + Assert.AreEqual(10.1076, R.InverseCDF(0.4), 1e-04); } } } diff --git a/Test_Numerics/Distributions/Univariate/Test_StudentT.cs b/Test_Numerics/Distributions/Univariate/Test_StudentT.cs index eddf7d04..e8e66234 100644 --- a/Test_Numerics/Distributions/Univariate/Test_StudentT.cs +++ b/Test_Numerics/Distributions/Univariate/Test_StudentT.cs @@ -111,14 +111,14 @@ public void Test_StudentT_InverseCDF() public void Test_Construction() { var t = new StudentT(); - Assert.AreEqual(t.Mu, 0); - Assert.AreEqual(t.Sigma, 1); - Assert.AreEqual(t.DegreesOfFreedom, 10); + Assert.AreEqual(0, t.Mu); + Assert.AreEqual(1, t.Sigma); + Assert.AreEqual(10, t.DegreesOfFreedom); var t2 = new StudentT(10, 10, 10); - Assert.AreEqual(t2.Mu, 10); - Assert.AreEqual(t2.Sigma, 10); - Assert.AreEqual(t2.DegreesOfFreedom, 10); + Assert.AreEqual(10, t2.Mu); + Assert.AreEqual(10, t2.Sigma); + Assert.AreEqual(10, t2.DegreesOfFreedom); } /// @@ -144,12 +144,12 @@ public void Test_InvalidParameters() public void Test_ParametersToString() { var t = new StudentT(); - Assert.AreEqual(t.ParametersToString[0, 0], "Location (µ)"); - Assert.AreEqual(t.ParametersToString[1, 0], "Scale (σ)"); - Assert.AreEqual(t.ParametersToString[2, 0], "Degrees of Freedom (ν)"); - Assert.AreEqual(t.ParametersToString[0, 1], "0"); - Assert.AreEqual(t.ParametersToString[1,1],"1"); - Assert.AreEqual(t.ParametersToString[2, 1], "10"); + Assert.AreEqual("Location (µ)", t.ParametersToString[0, 0]); + Assert.AreEqual("Scale (σ)", t.ParametersToString[1, 0]); + Assert.AreEqual("Degrees of Freedom (ν)", t.ParametersToString[2, 0]); + Assert.AreEqual("0", t.ParametersToString[0, 1]); + Assert.AreEqual("1", t.ParametersToString[1, 1]); + Assert.AreEqual("10", t.ParametersToString[2, 1]); } /// @@ -173,10 +173,10 @@ public void Test_Moments() public void Test_Mean() { var t = new StudentT(); - Assert.AreEqual(t.Mean, 0); + Assert.AreEqual(0, t.Mean); var t2 = new StudentT(1, 1, 1); - Assert.AreEqual(t2.Mean, double.NaN); + Assert.AreEqual(double.NaN, t2.Mean); } /// @@ -186,10 +186,10 @@ public void Test_Mean() public void Test_Median() { var t = new StudentT(); - Assert.AreEqual(t.Median, 0); + Assert.AreEqual(0, t.Median); var t2 = new StudentT(1, 1, 1); - Assert.AreEqual(t2.Median, 1); + Assert.AreEqual(1, t2.Median); } /// @@ -199,10 +199,10 @@ public void Test_Median() public void Test_Mode() { var t = new StudentT(); - Assert.AreEqual(t.Mode, 0); + Assert.AreEqual(0, t.Mode); var t2 = new StudentT(1,1,1); - Assert.AreEqual(t2.Mode, 1); + Assert.AreEqual(1, t2.Mode); } /// @@ -212,13 +212,13 @@ public void Test_Mode() public void Test_StandardDeviation() { var t = new StudentT(); - Assert.AreEqual(t.StandardDeviation, 1.11803, 1e-04); + Assert.AreEqual(1.11803, t.StandardDeviation, 1e-04); var t2 = new StudentT(1, 1, 2); - Assert.AreEqual(t2.StandardDeviation,double.PositiveInfinity); + Assert.AreEqual(double.PositiveInfinity, t2.StandardDeviation); var t3 = new StudentT(1, 1, 1); - Assert.AreEqual(t3.StandardDeviation, double.NaN); + Assert.AreEqual(double.NaN, t3.StandardDeviation); } /// @@ -228,10 +228,10 @@ public void Test_StandardDeviation() public void Test_Skewness() { var t = new StudentT(); - Assert.AreEqual(t.Skewness, 0); + Assert.AreEqual(0, t.Skewness); var t2 = new StudentT(1, 1, 1); - Assert.AreEqual(t2.Skewness, 
double.NaN); + Assert.AreEqual(double.NaN,t2.Skewness ); } /// @@ -241,13 +241,13 @@ public void Test_Skewness() public void Test_Kurtosis() { var t = new StudentT(); - Assert.AreEqual(t.Kurtosis, 4); + Assert.AreEqual(4, t.Kurtosis ); var t2 = new StudentT(1, 1, 4); - Assert.AreEqual(t2.Kurtosis, double.PositiveInfinity); + Assert.AreEqual(double.PositiveInfinity, t2.Kurtosis); var t3 = new StudentT(1, 1, 2); - Assert.AreEqual(t3.Kurtosis, double.NaN); + Assert.AreEqual(double.NaN, t3.Kurtosis); } /// @@ -257,8 +257,8 @@ public void Test_Kurtosis() public void Test_MinMax() { var t = new StudentT(); - Assert.AreEqual(t.Minimum, double.NegativeInfinity); - Assert.AreEqual(t.Maximum, double.PositiveInfinity); + Assert.AreEqual(double.NegativeInfinity, t.Minimum); + Assert.AreEqual(double.PositiveInfinity, t.Maximum); } } } diff --git a/Test_Numerics/Distributions/Univariate/Test_Triangular.cs b/Test_Numerics/Distributions/Univariate/Test_Triangular.cs index 348e38db..49756414 100644 --- a/Test_Numerics/Distributions/Univariate/Test_Triangular.cs +++ b/Test_Numerics/Distributions/Univariate/Test_Triangular.cs @@ -136,14 +136,14 @@ public void Test_Triangular_MLE() public void Test_Construction() { var T = new Triangular(); - Assert.AreEqual(T.Min, 0); - Assert.AreEqual(T.Mode, 0.5); - Assert.AreEqual(T.Max, 1); + Assert.AreEqual(0,T.Min); + Assert.AreEqual(0.5, T.Mode); + Assert.AreEqual(1, T.Max); var T2 = new Triangular(-1,1,2); - Assert.AreEqual(T2.Min, -1); - Assert.AreEqual(T2.Mode, 1); - Assert.AreEqual(T2.Max, 2); + Assert.AreEqual(-1, T2.Min); + Assert.AreEqual(1, T2.Mode); + Assert.AreEqual(2, T2.Max); } /// @@ -172,12 +172,12 @@ public void Test_InvalidParameters() public void Test_ParametersToString() { var T = new Triangular(); - Assert.AreEqual(T.ParametersToString[0, 0], "Min (a)"); - Assert.AreEqual(T.ParametersToString[1, 0], "Most Likely (c)"); - Assert.AreEqual(T.ParametersToString[2, 0], "Max (b)"); - Assert.AreEqual(T.ParametersToString[0, 1], "0"); - Assert.AreEqual(T.ParametersToString[1, 1], "0.5"); - Assert.AreEqual(T.ParametersToString[2, 1], "1"); + Assert.AreEqual("Min (a)",T.ParametersToString[0, 0] ); + Assert.AreEqual("Most Likely (c)",T.ParametersToString[1, 0] ); + Assert.AreEqual("Max (b)",T.ParametersToString[2, 0] ); + Assert.AreEqual("0", T.ParametersToString[0, 1]); + Assert.AreEqual("0.5", T.ParametersToString[1, 1]); + Assert.AreEqual("1", T.ParametersToString[2, 1]); } /// @@ -201,10 +201,10 @@ public void Test_Moments() public void Test_Mean() { var T = new Triangular(); - Assert.AreEqual(T.Mean, 0.5); + Assert.AreEqual(0.5, T.Mean); var T2 = new Triangular(1, 3, 6); - Assert.AreEqual(T2.Mean, 3.3333, 1e-04); + Assert.AreEqual(3.3333, T2.Mean, 1e-04); } /// @@ -214,10 +214,10 @@ public void Test_Mean() public void Test_Median() { var T = new Triangular(); - Assert.AreEqual(T.Median, 0.5); + Assert.AreEqual(0.5, T.Median); var T2 = new Triangular(1,3,6); - Assert.AreEqual(T2.Median, 3.26138, 1e-05); + Assert.AreEqual(3.26138, T2.Median, 1e-05); } /// @@ -227,10 +227,10 @@ public void Test_Median() public void Test_Mode() { var T = new Triangular(); - Assert.AreEqual(T.Mode, 0.5); + Assert.AreEqual(0.5, T.Mode); var T2 = new Triangular(1, 3, 6); - Assert.AreEqual(T2.Mode, 3); + Assert.AreEqual(3, T2.Mode); } /// @@ -240,10 +240,10 @@ public void Test_Mode() public void Test_StandardDeviation() { var T = new Triangular(); - Assert.AreEqual(T.StandardDeviation, 0.20412, 1e-04); + Assert.AreEqual(0.20412, T.StandardDeviation, 1e-04); var T2 = new 
Triangular(1, 3, 6); - Assert.AreEqual(T2.StandardDeviation, 1.02739, 1e-04); + Assert.AreEqual(1.02739, T2.StandardDeviation, 1e-04); } /// @@ -253,7 +253,7 @@ public void Test_StandardDeviation() public void Test_Skewness() { var T = new Triangular(); - Assert.AreEqual(T.Skewness, 0); + Assert.AreEqual(0, T.Skewness); } /// @@ -263,10 +263,10 @@ public void Test_Skewness() public void Test_Kurtosis() { var T = new Triangular(); - Assert.AreEqual(T.Kurtosis, 12d / 5d); + Assert.AreEqual(12d / 5d, T.Kurtosis); var T2 = new Triangular(1, 3, 6); - Assert.AreEqual(T2.Kurtosis, 12d / 5d); + Assert.AreEqual(12d / 5d, T2.Kurtosis); } /// @@ -276,12 +276,12 @@ public void Test_Kurtosis() public void Test_MinMax() { var T = new Triangular(); - Assert.AreEqual(T.Minimum, 0); - Assert.AreEqual(T.Maximum, 1); + Assert.AreEqual(0, T.Minimum); + Assert.AreEqual(1, T.Maximum); var T2 = new Triangular(1, 3, 6); - Assert.AreEqual(T2.Minimum, 1); - Assert.AreEqual(T2.Maximum, 6); + Assert.AreEqual(1, T2.Minimum); + Assert.AreEqual(6, T2.Maximum); } /// @@ -291,13 +291,13 @@ public void Test_MinMax() public void Test_PDF() { var T = new Triangular(); - Assert.AreEqual(T.PDF(-1), 0); - Assert.AreEqual(T.PDF(0.4), 1.6); - Assert.AreEqual(T.PDF(0.6), 1.6); - Assert.AreEqual(T.PDF(0.5), 2); + Assert.AreEqual(0,T.PDF(-1)); + Assert.AreEqual(1.6, T.PDF(0.4)); + Assert.AreEqual(1.6, T.PDF(0.6)); + Assert.AreEqual(2, T.PDF(0.5)); var T2 = new Triangular(1, 3, 6); - Assert.AreEqual(T2.PDF(2), 0.2, 1e-04); + Assert.AreEqual(0.2, T2.PDF(2), 1e-04); } /// @@ -307,13 +307,13 @@ public void Test_PDF() public void Test_CDF() { var T = new Triangular(); - Assert.AreEqual(T.CDF(-1), 0); - Assert.AreEqual(T.CDF(2), 1); - Assert.AreEqual(T.CDF(0.4), 0.32,1e-04); - Assert.AreEqual(T.CDF(0.6), 0.68,1e-04); + Assert.AreEqual(0, T.CDF(-1)); + Assert.AreEqual(1, T.CDF(2)); + Assert.AreEqual(0.32, T.CDF(0.4), 1e-04); + Assert.AreEqual(0.68, T.CDF(0.6), 1e-04); var T2 = new Triangular(1,3, 6); - Assert.AreEqual(T2.CDF(2), 0.1, 1e-04); + Assert.AreEqual(0.1, T2.CDF(2), 1e-04); } /// @@ -323,10 +323,10 @@ public void Test_CDF() public void Test_InverseCDF() { var T = new Triangular(); - Assert.AreEqual(T.InverseCDF(0), 0); - Assert.AreEqual(T.InverseCDF(1), 1); - Assert.AreEqual(T.InverseCDF(0.2), 0.31622, 1e-04); - Assert.AreEqual(T.InverseCDF(0.5), 0.5); + Assert.AreEqual(0, T.InverseCDF(0)); + Assert.AreEqual(1,T.InverseCDF(1)); + Assert.AreEqual(0.31622, T.InverseCDF(0.2), 1e-04); + Assert.AreEqual(0.5, T.InverseCDF(0.5)); } } } diff --git a/Test_Numerics/Distributions/Univariate/Test_TruncatedDistribution.cs b/Test_Numerics/Distributions/Univariate/Test_TruncatedDistribution.cs index 7bf07f0f..9af0500e 100644 --- a/Test_Numerics/Distributions/Univariate/Test_TruncatedDistribution.cs +++ b/Test_Numerics/Distributions/Univariate/Test_TruncatedDistribution.cs @@ -59,27 +59,27 @@ public void Test_TruncatedNormalDist() var p = tn.CDF(1.5); var q = tn.InverseCDF(p); - Assert.AreEqual(d, 0.9786791, 1E-5); - Assert.AreEqual(p, 0.3460251, 1E-5); - Assert.AreEqual(q, 1.5, 1E-5); + Assert.AreEqual(0.9786791, d, 1E-5); + Assert.AreEqual(0.3460251, p, 1E-5); + Assert.AreEqual(1.5, q, 1E-5); tn = new TruncatedDistribution(new Normal(10, 3), 8, 25); d = tn.PDF(12.75); p = tn.CDF(12.75); q = tn.InverseCDF(p); - Assert.AreEqual(d, 0.1168717, 1E-5); - Assert.AreEqual(p, 0.7596566, 1E-5); - Assert.AreEqual(q, 12.75, 1E-5); + Assert.AreEqual(0.1168717, d, 1E-5); + Assert.AreEqual(0.7596566, p, 1E-5); + Assert.AreEqual(12.75, q, 1E-5); tn = new 
TruncatedDistribution(new Normal(0, 3), 0, 9); d = tn.PDF(4.5); p = tn.CDF(4.5); q = tn.InverseCDF(p); - Assert.AreEqual(d, 0.08657881, 1E-5); - Assert.AreEqual(p, 0.868731, 1E-5); - Assert.AreEqual(q, 4.5, 1E-5); + Assert.AreEqual(0.08657881, d, 1E-5); + Assert.AreEqual(0.868731, p, 1E-5); + Assert.AreEqual(4.5, q, 1E-5); } @@ -90,16 +90,16 @@ public void Test_TruncatedNormalDist() public void Test_Construction() { var tn = new TruncatedDistribution(new Normal(0.5, 0.2), 0, 1); - Assert.AreEqual(((Normal)tn.BaseDistribution).Mu, 0.5); - Assert.AreEqual(((Normal)tn.BaseDistribution).Sigma, 0.2); - Assert.AreEqual(tn.Min, 0); - Assert.AreEqual(tn.Max, 1); + Assert.AreEqual(0.5,((Normal)tn.BaseDistribution).Mu); + Assert.AreEqual(0.2, ((Normal)tn.BaseDistribution).Sigma); + Assert.AreEqual(0, tn.Min); + Assert.AreEqual(1, tn.Max); var tn2 = new TruncatedDistribution(new Normal(1, 1), 1, 2); - Assert.AreEqual(((Normal)tn2.BaseDistribution).Mu, 1); - Assert.AreEqual(((Normal)tn2.BaseDistribution).Sigma, 1); - Assert.AreEqual(tn2.Min, 1); - Assert.AreEqual(tn2.Max, 2); + Assert.AreEqual(1, ((Normal)tn2.BaseDistribution).Mu); + Assert.AreEqual(1, ((Normal)tn2.BaseDistribution).Sigma); + Assert.AreEqual(1, tn2.Min); + Assert.AreEqual(2, tn2.Max); } /// @@ -109,14 +109,14 @@ public void Test_Construction() public void Test_ParametersToString() { var tn = new TruncatedDistribution(new Normal(0.5, 0.2), 0, 1); - Assert.AreEqual(tn.ParametersToString[0, 0], "Mean (µ)"); - Assert.AreEqual(tn.ParametersToString[1, 0], "Std Dev (σ)"); - Assert.AreEqual(tn.ParametersToString[2, 0], "Min"); - Assert.AreEqual(tn.ParametersToString[3, 0], "Max"); - Assert.AreEqual(tn.ParametersToString[0, 1], "0.5"); - Assert.AreEqual(tn.ParametersToString[1, 1], "0.2"); - Assert.AreEqual(tn.ParametersToString[2, 1], "0"); - Assert.AreEqual(tn.ParametersToString[3, 1], "1"); + Assert.AreEqual("Mean (µ)",tn.ParametersToString[0, 0]); + Assert.AreEqual("Std Dev (σ)", tn.ParametersToString[1, 0]); + Assert.AreEqual("Min", tn.ParametersToString[2, 0]); + Assert.AreEqual("Max", tn.ParametersToString[3, 0]); + Assert.AreEqual("0.5", tn.ParametersToString[0, 1]); + Assert.AreEqual("0.2", tn.ParametersToString[1, 1]); + Assert.AreEqual("0", tn.ParametersToString[2, 1]); + Assert.AreEqual("1", tn.ParametersToString[3, 1]); } /// @@ -140,7 +140,7 @@ public void Test_Moments() public void Test_Mean() { var tn = new TruncatedDistribution(new Normal(0.5, 0.2), 0, 1); - Assert.AreEqual(tn.Mean, 0.5, 1e-4); + Assert.AreEqual(0.5, tn.Mean, 1e-4); } /// @@ -150,7 +150,7 @@ public void Test_Mean() public void Test_Median() { var tn = new TruncatedDistribution(new Normal(0.5, 0.2), 0, 1); - Assert.AreEqual(tn.Median, 0.5, 1e-4); + Assert.AreEqual(0.5, tn.Median, 1e-4); } /// @@ -160,7 +160,7 @@ public void Test_Median() public void Test_Mode() { var tn = new TruncatedDistribution(new Normal(0.5, 0.2), 0, 1); - Assert.AreEqual(tn.Mode, 0.5, 1e-4); + Assert.AreEqual(0.5, tn.Mode, 1e-4); } /// @@ -170,7 +170,7 @@ public void Test_Mode() public void Test_StandardDeviation() { var tn = new TruncatedDistribution(new Normal(0.5, 0.2), 0, 1); - Assert.AreEqual(tn.StandardDeviation, 0.19091, 1e-4); + Assert.AreEqual(0.19091, tn.StandardDeviation, 1e-4); } /// @@ -180,7 +180,7 @@ public void Test_StandardDeviation() public void Test_Skewness() { var tn = new TruncatedDistribution(new Normal(0.5, 0.2), 0, 1); - Assert.AreEqual(tn.Skewness, 0, 1E-4); + Assert.AreEqual(0, tn.Skewness, 1E-4); } /// @@ -190,7 +190,7 @@ public void Test_Skewness() public 
void Test_Kurtosis() { var tn = new TruncatedDistribution(new Normal(0.5, 0.2), 0, 1); - Assert.AreEqual(tn.Kurtosis, 2.62422, 1e-04); + Assert.AreEqual(2.62422, tn.Kurtosis, 1e-04); } /// @@ -200,8 +200,8 @@ public void Test_Kurtosis() public void Test_MinMax() { var tn = new TruncatedDistribution(new Normal(0.5, 0.2), 0, 1); - Assert.AreEqual(tn.Minimum, 0); - Assert.AreEqual(tn.Maximum, 1); + Assert.AreEqual(0, tn.Minimum); + Assert.AreEqual(1, tn.Maximum); } } } diff --git a/Test_Numerics/Distributions/Univariate/Test_TruncatedNormal.cs b/Test_Numerics/Distributions/Univariate/Test_TruncatedNormal.cs index d7fb8b80..92d1a714 100644 --- a/Test_Numerics/Distributions/Univariate/Test_TruncatedNormal.cs +++ b/Test_Numerics/Distributions/Univariate/Test_TruncatedNormal.cs @@ -66,27 +66,27 @@ public void Test_TruncatedNormalDist() var p = tn.CDF(1.5); var q = tn.InverseCDF(p); - Assert.AreEqual(d, 0.9786791, 1E-5); - Assert.AreEqual(p, 0.3460251, 1E-5); - Assert.AreEqual(q, 1.5, 1E-5); + Assert.AreEqual(0.9786791, d, 1E-5); + Assert.AreEqual(0.3460251, p, 1E-5); + Assert.AreEqual(1.5, q, 1E-5); tn = new TruncatedNormal(10, 3, 8, 25); d = tn.PDF(12.75); p = tn.CDF(12.75); q = tn.InverseCDF(p); - Assert.AreEqual(d, 0.1168717, 1E-5); - Assert.AreEqual(p, 0.7596566, 1E-5); - Assert.AreEqual(q, 12.75, 1E-5); + Assert.AreEqual(0.1168717, d ,1E-5); + Assert.AreEqual(0.7596566, p, 1E-5); + Assert.AreEqual(12.75, q, 1E-5); tn = new TruncatedNormal(0, 3, 0, 9); d = tn.PDF(4.5); p = tn.CDF(4.5); q = tn.InverseCDF(p); - Assert.AreEqual(d, 0.08657881, 1E-5); - Assert.AreEqual(p, 0.868731, 1E-5); - Assert.AreEqual(q, 4.5, 1E-5); + Assert.AreEqual(0.08657881, d, 1E-5); + Assert.AreEqual(0.868731, p, 1E-5); + Assert.AreEqual(4.5, q, 1E-5); } @@ -97,16 +97,16 @@ public void Test_TruncatedNormalDist() public void Test_Construction() { var tn = new TruncatedNormal(); - Assert.AreEqual(tn.Mu, 0.5); - Assert.AreEqual(tn.Sigma, 0.2); - Assert.AreEqual(tn.Min, 0); - Assert.AreEqual(tn.Max, 1); + Assert.AreEqual(0.5,tn.Mu ); + Assert.AreEqual(0.2,tn.Sigma ); + Assert.AreEqual(0,tn.Min); + Assert.AreEqual(1,tn.Max); var tn2 = new TruncatedNormal(1, 1, 1, 2); - Assert.AreEqual(tn2.Mu, 1); - Assert.AreEqual(tn2.Sigma, 1); - Assert.AreEqual(tn2.Min, 1); - Assert.AreEqual(tn2.Max, 2); + Assert.AreEqual(1,tn2.Mu); + Assert.AreEqual(1,tn2.Sigma); + Assert.AreEqual(1,tn2.Min); + Assert.AreEqual(2,tn2.Max); } /// @@ -135,14 +135,14 @@ public void Test_InvalidParameters() public void Test_ParametersToString() { var tn = new TruncatedNormal(); - Assert.AreEqual(tn.ParametersToString[0, 0], "Mean (µ)"); - Assert.AreEqual(tn.ParametersToString[1, 0], "Std Dev (σ)"); - Assert.AreEqual(tn.ParametersToString[2, 0], "Min"); - Assert.AreEqual(tn.ParametersToString[3, 0], "Max"); - Assert.AreEqual(tn.ParametersToString[0, 1], "0.5"); - Assert.AreEqual(tn.ParametersToString[1, 1], "0.2"); - Assert.AreEqual(tn.ParametersToString[2, 1], "0"); - Assert.AreEqual(tn.ParametersToString[3, 1], "1"); + Assert.AreEqual("Mean (µ)",tn.ParametersToString[0, 0]); + Assert.AreEqual("Std Dev (σ)",tn.ParametersToString[1, 0]); + Assert.AreEqual("Min",tn.ParametersToString[2, 0] ); + Assert.AreEqual("Max", tn.ParametersToString[3, 0] ); + Assert.AreEqual("0.5",tn.ParametersToString[0, 1]); + Assert.AreEqual("0.2", tn.ParametersToString[1, 1]); + Assert.AreEqual("0", tn.ParametersToString[2, 1]); + Assert.AreEqual("1", tn.ParametersToString[3, 1]); } /// @@ -166,7 +166,7 @@ public void Test_Moments() public void Test_Mean() { var tn = new 
TruncatedNormal(); - Assert.AreEqual(tn.Mean, 0.5); + Assert.AreEqual(0.5,tn.Mean); } /// @@ -176,7 +176,7 @@ public void Test_Mean() public void Test_Median() { var tn = new TruncatedNormal(); - Assert.AreEqual(tn.Median, 0.5); + Assert.AreEqual(0.5, tn.Median); } /// @@ -186,7 +186,7 @@ public void Test_Median() public void Test_Mode() { var tn = new TruncatedNormal(); - Assert.AreEqual(tn.Mode, 0.5); + Assert.AreEqual(0.5, tn.Mode); } /// @@ -196,7 +196,7 @@ public void Test_Mode() public void Test_StandardDeviation() { var tn = new TruncatedNormal(); - Assert.AreEqual(tn.StandardDeviation, 0.19091,1e-05); + Assert.AreEqual(0.19091, tn.StandardDeviation, 1e-05); } /// @@ -206,7 +206,7 @@ public void Test_StandardDeviation() public void Test_Skewness() { var tn = new TruncatedNormal(); - Assert.AreEqual(tn.Skewness, 0); + Assert.AreEqual(0, tn.Skewness); } /// @@ -216,7 +216,7 @@ public void Test_Skewness() public void Test_Kurtosis() { var tn = new TruncatedNormal(); - Assert.AreEqual(tn.Kurtosis, 2.62422, 1e-04); + Assert.AreEqual(2.62422, tn.Kurtosis, 1e-04); } /// @@ -226,8 +226,8 @@ public void Test_Kurtosis() public void Test_MinMax() { var tn = new TruncatedNormal(); - Assert.AreEqual(tn.Minimum, 0); - Assert.AreEqual(tn.Maximum, 1); + Assert.AreEqual(0, tn.Minimum); + Assert.AreEqual(1, tn.Maximum); } } } diff --git a/Test_Numerics/Distributions/Univariate/Test_Uniform.cs b/Test_Numerics/Distributions/Univariate/Test_Uniform.cs index 7b2cd14d..56f17684 100644 --- a/Test_Numerics/Distributions/Univariate/Test_Uniform.cs +++ b/Test_Numerics/Distributions/Univariate/Test_Uniform.cs @@ -105,12 +105,12 @@ public void Test_Uniform_R() public void Test_Construction() { var U = new Uniform(); - Assert.AreEqual(U.Min, 0); - Assert.AreEqual(U.Max, 1); + Assert.AreEqual(0,U.Min); + Assert.AreEqual(1, U.Max); var U2 = new Uniform(2,10); - Assert.AreEqual(U2.Min, 2); - Assert.AreEqual(U2.Max, 10); + Assert.AreEqual(2, U2.Min); + Assert.AreEqual(10, U2.Max); } /// @@ -142,10 +142,10 @@ public void Test_InvalidParameters() public void Test_ParametersToString() { var U = new Uniform(); - Assert.AreEqual(U.ParametersToString[0, 0], "Min"); - Assert.AreEqual(U.ParametersToString[1, 0], "Max"); - Assert.AreEqual(U.ParametersToString[0, 1], "0"); - Assert.AreEqual(U.ParametersToString[1, 1], "1"); + Assert.AreEqual("Min",U.ParametersToString[0, 0]); + Assert.AreEqual("Max", U.ParametersToString[1, 0]); + Assert.AreEqual("0", U.ParametersToString[0, 1]); + Assert.AreEqual("1", U.ParametersToString[1, 1]); } /// @@ -169,10 +169,10 @@ public void Test_Moments() public void Test_Mean() { var U = new Uniform(); - Assert.AreEqual(U.Mean, 0.5); + Assert.AreEqual(0.5, U.Mean); var U2 = new Uniform(2, 10); - Assert.AreEqual(U2.Mean, 6); + Assert.AreEqual(6, U2.Mean); } /// @@ -182,10 +182,10 @@ public void Test_Mean() public void Test_Median() { var U = new Uniform(); - Assert.AreEqual(U.Median, 0.5); + Assert.AreEqual(0.5, U.Median); var U2 = new Uniform(2, 10); - Assert.AreEqual(U2.Median, 6); + Assert.AreEqual(6, U2.Median); } /// @@ -195,10 +195,10 @@ public void Test_Median() public void Test_Mode() { var U = new Uniform(); - Assert.AreEqual(U.Mode,double.NaN); + Assert.AreEqual(double.NaN,U.Mode); var U2 = new Uniform(2, 10); - Assert.AreEqual(U2.Mode,double.NaN); + Assert.AreEqual(double.NaN, U2.Mode); } /// @@ -208,10 +208,10 @@ public void Test_Mode() public void Test_StandardDeviation() { var U = new Uniform(); - Assert.AreEqual(U.StandardDeviation, 0.288675, 1e-05); + Assert.AreEqual(0.288675, 
U.StandardDeviation, 1e-05); var U2 = new Uniform(2, 10); - Assert.AreEqual(U2.StandardDeviation, 2.3094, 1e-04); + Assert.AreEqual(2.3094, U2.StandardDeviation, 1e-04); } /// @@ -221,10 +221,10 @@ public void Test_StandardDeviation() public void Test_Skewness() { var U = new Uniform(); - Assert.AreEqual(U.Skewness, 0); + Assert.AreEqual(0, U.Skewness); var U2 = new Uniform(2, 10); - Assert.AreEqual(U2.Skewness, 0); + Assert.AreEqual(0, U2.Skewness); } /// @@ -234,10 +234,10 @@ public void Test_Skewness() public void Test_Kurtosis() { var U = new Uniform(); - Assert.AreEqual(U.Kurtosis, 9d / 5d); + Assert.AreEqual(9d / 5d,U.Kurtosis); var U2 = new Uniform(2, 10); - Assert.AreEqual(U2.Kurtosis, 9d / 5d); + Assert.AreEqual(9d / 5d, U2.Kurtosis); } /// @@ -247,12 +247,12 @@ public void Test_Kurtosis() public void Test_MinMax() { var U = new Uniform(); - Assert.AreEqual(U.Minimum, 0); - Assert.AreEqual(U.Maximum, 1); + Assert.AreEqual(0, U.Minimum); + Assert.AreEqual(1, U.Maximum); var U2 = new Uniform(2, 10); - Assert.AreEqual(U2.Minimum, 2); - Assert.AreEqual(U2.Maximum, 10); + Assert.AreEqual(2, U2.Minimum); + Assert.AreEqual(10, U2.Maximum); } /// @@ -262,9 +262,9 @@ public void Test_MinMax() public void Test_PDF() { var U = new Uniform(); - Assert.AreEqual(U.PDF(-1),0); - Assert.AreEqual(U.PDF(2),0); - Assert.AreEqual(U.PDF(1), 1); + Assert.AreEqual(0, U.PDF(-1)); + Assert.AreEqual(0, U.PDF(2)); + Assert.AreEqual(1, U.PDF(1)); } /// @@ -274,9 +274,9 @@ public void Test_PDF() public void Test_CDF() { var U = new Uniform(); - Assert.AreEqual(U.CDF(0),0); - Assert.AreEqual(U.CDF(1),1); - Assert.AreEqual(U.CDF(0.5), 0.5); + Assert.AreEqual(0,U.CDF(0)); + Assert.AreEqual(1,U.CDF(1)); + Assert.AreEqual(0.5,U.CDF(0.5)); } /// @@ -286,9 +286,9 @@ public void Test_CDF() public void Test_InverseCDF() { var U = new Uniform(); - Assert.AreEqual(U.InverseCDF(0), 0); - Assert.AreEqual(U.InverseCDF(1), 1); - Assert.AreEqual(U.InverseCDF(0.3), 0.3); + Assert.AreEqual(0,U.InverseCDF(0)); + Assert.AreEqual(1,U.InverseCDF(1)); + Assert.AreEqual(0.3,U.InverseCDF(0.3)); } } } diff --git a/Test_Numerics/Distributions/Univariate/Test_UniformDiscrete.cs b/Test_Numerics/Distributions/Univariate/Test_UniformDiscrete.cs index bb12c566..60264d36 100644 --- a/Test_Numerics/Distributions/Univariate/Test_UniformDiscrete.cs +++ b/Test_Numerics/Distributions/Univariate/Test_UniformDiscrete.cs @@ -90,12 +90,12 @@ public void Test_UniformDiscreteDist() public void Test_Construction() { var U = new UniformDiscrete(); - Assert.AreEqual(U.Min, 0); - Assert.AreEqual(U.Max, 1); + Assert.AreEqual(0,U.Min); + Assert.AreEqual(1,U.Max); var U2 = new UniformDiscrete(2, 10); - Assert.AreEqual(U2.Min, 2); - Assert.AreEqual(U2.Max, 10); + Assert.AreEqual(2, U2.Min); + Assert.AreEqual(10, U2.Max); } /// @@ -127,10 +127,10 @@ public void Test_InvalidParameters() public void Test_ParametersToString() { var U = new UniformDiscrete(); - Assert.AreEqual(U.ParametersToString[0, 0], "Min"); - Assert.AreEqual(U.ParametersToString[1, 0], "Max"); - Assert.AreEqual(U.ParametersToString[0, 1], "0"); - Assert.AreEqual(U.ParametersToString[1, 1], "1"); + Assert.AreEqual("Min",U.ParametersToString[0, 0] ); + Assert.AreEqual("Max", U.ParametersToString[1, 0]); + Assert.AreEqual("0", U.ParametersToString[0, 1]); + Assert.AreEqual("1", U.ParametersToString[1, 1]); } /// @@ -140,10 +140,10 @@ public void Test_ParametersToString() public void Test_Mean() { var U = new UniformDiscrete(); - Assert.AreEqual(U.Mean, 0.5); + Assert.AreEqual(0.5, U.Mean); var U2 
= new UniformDiscrete(2, 10); - Assert.AreEqual(U2.Mean, 6); + Assert.AreEqual(6, U2.Mean); } /// @@ -153,10 +153,10 @@ public void Test_Mean() public void Test_Median() { var U = new UniformDiscrete(); - Assert.AreEqual(U.Median, 0.5); + Assert.AreEqual(0.5, U.Median); var U2 = new UniformDiscrete(2, 10); - Assert.AreEqual(U2.Median, 6); + Assert.AreEqual(6, U2.Median); } /// @@ -166,10 +166,10 @@ public void Test_Median() public void Test_Mode() { var U = new UniformDiscrete(); - Assert.AreEqual(U.Mode, double.NaN); + Assert.AreEqual(double.NaN,U.Mode); var U2 = new UniformDiscrete(2, 10); - Assert.AreEqual(U2.Mode, double.NaN); + Assert.AreEqual(double.NaN, U2.Mode); } /// @@ -179,10 +179,10 @@ public void Test_Mode() public void Test_StandardDeviation() { var U = new UniformDiscrete(); - Assert.AreEqual(U.StandardDeviation, 0.288675, 1e-05); + Assert.AreEqual(0.288675, U.StandardDeviation, 1e-05); var U2 = new UniformDiscrete(2, 10); - Assert.AreEqual(U2.StandardDeviation, 2.3094, 1e-04); + Assert.AreEqual(2.3094, U2.StandardDeviation, 1e-04); } /// @@ -192,10 +192,10 @@ public void Test_StandardDeviation() public void Test_Skewness() { var U = new UniformDiscrete(); - Assert.AreEqual(U.Skewness, 0); + Assert.AreEqual(0, U.Skewness); var U2 = new UniformDiscrete(2, 10); - Assert.AreEqual(U2.Skewness, 0); + Assert.AreEqual(0, U2.Skewness); } /// @@ -205,10 +205,10 @@ public void Test_Skewness() public void Test_Kurtosis() { var U = new UniformDiscrete(); - Assert.AreEqual(U.Kurtosis, 1); + Assert.AreEqual(1, U.Kurtosis); var U2 = new UniformDiscrete(2, 10); - Assert.AreEqual(U2.Kurtosis, 1.77); + Assert.AreEqual(1.77, U2.Kurtosis); } /// @@ -218,12 +218,12 @@ public void Test_Kurtosis() public void Test_MinMax() { var U = new UniformDiscrete(); - Assert.AreEqual(U.Minimum, 0); - Assert.AreEqual(U.Maximum, 1); + Assert.AreEqual(0, U.Minimum); + Assert.AreEqual(1, U.Maximum); var U2 = new UniformDiscrete(2, 10); - Assert.AreEqual(U2.Minimum, 2); - Assert.AreEqual(U2.Maximum, 10); + Assert.AreEqual(2, U2.Minimum); + Assert.AreEqual(10, U2.Maximum); } /// @@ -233,9 +233,9 @@ public void Test_MinMax() public void Test_PDF() { var U = new UniformDiscrete(); - Assert.AreEqual(U.PDF(-1), 0); - Assert.AreEqual(U.PDF(2), 0); - Assert.AreEqual(U.PDF(1), 0.5); + Assert.AreEqual(0, U.PDF(-1)); + Assert.AreEqual(0, U.PDF(2)); + Assert.AreEqual(0.5, U.PDF(1)); } /// @@ -245,9 +245,9 @@ public void Test_PDF() public void Test_CDF() { var U = new UniformDiscrete(); - Assert.AreEqual(U.CDF(0), 0.5); - Assert.AreEqual(U.CDF(1), 1); - Assert.AreEqual(U.CDF(0.5), 0.75); + Assert.AreEqual(0.5, U.CDF(0)); + Assert.AreEqual(1, U.CDF(1)); + Assert.AreEqual(0.75, U.CDF(0.5)); } /// @@ -257,9 +257,9 @@ public void Test_CDF() public void Test_InverseCDF() { var U = new UniformDiscrete(); - Assert.AreEqual(U.InverseCDF(0), 0); - Assert.AreEqual(U.InverseCDF(1), 1); - Assert.AreEqual(U.InverseCDF(0.3), 0); + Assert.AreEqual(0, U.InverseCDF(0)); + Assert.AreEqual(1,U.InverseCDF(1)); + Assert.AreEqual(0,U.InverseCDF(0.3)); } } diff --git a/Test_Numerics/Distributions/Univariate/Test_Weibull.cs b/Test_Numerics/Distributions/Univariate/Test_Weibull.cs index 1b9ea373..79284819 100644 --- a/Test_Numerics/Distributions/Univariate/Test_Weibull.cs +++ b/Test_Numerics/Distributions/Univariate/Test_Weibull.cs @@ -71,8 +71,8 @@ public void Test_Weibull_MLE_Fit() double kappa = W.Kappa; double true_L = 9.589d; double true_k = 1.907d; - Assert.AreEqual((lamda - true_L) / true_L < 0.01d, true); - Assert.AreEqual((kappa - true_k) / 
true_k < 0.01d, true); + Assert.IsLessThan(0.01d,(lamda - true_L) / true_L); + Assert.IsLessThan(0.01d,(kappa - true_k) / true_k); } /// @@ -92,10 +92,10 @@ public void Test_Weibull_Quantile() var W = new Weibull(9.589d, 1.907d); double q100 = W.InverseCDF(0.99d); double true_q100 = 21.358d; - Assert.AreEqual((q100 - true_q100) / true_q100 < 0.01d, true); + Assert.IsLessThan(0.01d, (q100 - true_q100) / true_q100); double p = W.CDF(q100); double true_p = 0.99d; - Assert.AreEqual((p - true_p) / true_p < 0.01d, true); + Assert.IsLessThan(0.01d, (p - true_p) / true_p); } /// @@ -117,7 +117,7 @@ public void Test_Weibull_StandardError() var GUM = new Gumbel(8049.6d, 4478.6d); double qVar99 = Math.Sqrt(GUM.QuantileVariance(0.99d, 53, ParameterEstimationMethod.MaximumLikelihood)); double true_qVar99 = 2486.5d; - Assert.AreEqual((qVar99 - true_qVar99) / true_qVar99 < 0.01d, true); + Assert.IsLessThan(0.01d, (qVar99 - true_qVar99) / true_qVar99); } /// @@ -137,8 +137,8 @@ public void Test_Weibull_GOF() modeled[i] = W.CDF(sample[i]); double true_AIC = 294.5878d; double true_BIC = 298.1566d; - Assert.AreEqual((AIC - true_AIC) / true_AIC < 0.01d, true); - Assert.AreEqual((BIC - true_BIC) / true_BIC < 0.01d, true); + Assert.IsLessThan(0.01d, (AIC - true_AIC) / true_AIC); + Assert.IsLessThan(0.01d, (BIC - true_BIC) / true_BIC); } /// @@ -148,12 +148,12 @@ public void Test_Weibull_GOF() public void Test_Construction() { var W = new Weibull(); - Assert.AreEqual(W.Lambda, 10); - Assert.AreEqual(W.Kappa, 2); + Assert.AreEqual(10,W.Lambda); + Assert.AreEqual(2, W.Kappa); var W2 = new Weibull(1, 1); - Assert.AreEqual(W2.Lambda, 1); - Assert.AreEqual(W2.Kappa, 1); + Assert.AreEqual(1, W2.Lambda); + Assert.AreEqual(1, W2.Kappa); } /// @@ -182,10 +182,10 @@ public void Test_InvalidParameters() public void Test_ParametersToString() { var W = new Weibull(); - Assert.AreEqual(W.ParametersToString[0, 0], "Scale (λ)"); - Assert.AreEqual(W.ParametersToString[1, 0], "Shape (κ)"); - Assert.AreEqual(W.ParametersToString[0, 1], "10"); - Assert.AreEqual(W.ParametersToString[1, 1], "2"); + Assert.AreEqual("Scale (λ)",W.ParametersToString[0, 0] ); + Assert.AreEqual("Shape (κ)", W.ParametersToString[1, 0]); + Assert.AreEqual("10", W.ParametersToString[0, 1]); + Assert.AreEqual("2", W.ParametersToString[1, 1]); } /// @@ -209,10 +209,10 @@ public void Test_Moments() public void Test_Mean() { var W = new Weibull(0.1, 1); - Assert.AreEqual(W.Mean, 0.1); + Assert.AreEqual(0.1, W.Mean); var W2 = new Weibull(1, 1); - Assert.AreEqual(W2.Mean, 1); + Assert.AreEqual(1, W2.Mean); } /// @@ -222,10 +222,10 @@ public void Test_Mean() public void Test_Median() { var W = new Weibull(0.1, 1); - Assert.AreEqual(W.Median, 0.06931, 1e-04); + Assert.AreEqual(0.06931, W.Median, 1e-04); var W2 = new Weibull(1, 1); - Assert.AreEqual(W2.Median, 0.69314, 1e-04); + Assert.AreEqual(0.69314, W2.Median, 1e-04); } /// @@ -235,10 +235,10 @@ public void Test_Median() public void Test_Mode() { var W = new Weibull(0.1, 1); - Assert.AreEqual(W.Mode, 0); + Assert.AreEqual(0, W.Mode); var W2 = new Weibull(10, 10); - Assert.AreEqual(W2.Mode, 9.89519, 1e-05); + Assert.AreEqual(9.89519, W2.Mode, 1e-05); } /// @@ -248,10 +248,10 @@ public void Test_Mode() public void Test_StandardDeviation() { var W = new Weibull(0.1, 1); - Assert.AreEqual(W.StandardDeviation, 0.1); + Assert.AreEqual(0.1, W.StandardDeviation); var W2 = new Weibull(1, 1); - Assert.AreEqual(W2.StandardDeviation, 1); + Assert.AreEqual(1, W2.StandardDeviation); } /// @@ -261,10 +261,10 @@ public void 
Test_StandardDeviation() public void Test_Skewness() { var W = new Weibull(0.1, 1); - Assert.AreEqual(W.Skewness, 2,1e-04); + Assert.AreEqual(2, W.Skewness, 1e-04); var W2 = new Weibull(1, 1); - Assert.AreEqual(W2.Skewness, 2); + Assert.AreEqual(2, W2.Skewness); } /// @@ -274,10 +274,10 @@ public void Test_Skewness() public void Test_Kurtosis() { var W = new Weibull(); - Assert.AreEqual(W.Kurtosis, 3.24508,1e-04); + Assert.AreEqual(3.24508, W.Kurtosis, 1e-04); var W2 = new Weibull(1, 1); - Assert.AreEqual(W2.Kurtosis, 9); + Assert.AreEqual(9, W2.Kurtosis); } /// @@ -287,8 +287,8 @@ public void Test_Kurtosis() public void Test_MinMax() { var W = new Weibull(); - Assert.AreEqual(W.Minimum, 0); - Assert.AreEqual(W.Maximum,double.PositiveInfinity); + Assert.AreEqual(0, W.Minimum); + Assert.AreEqual(double.PositiveInfinity, W.Maximum); } /// @@ -298,9 +298,9 @@ public void Test_MinMax() public void Test_PDF() { var W = new Weibull(1, 1); - Assert.AreEqual(W.PDF(0), 1); - Assert.AreEqual(W.PDF(1), 0.36787, 1e-05); - Assert.AreEqual(W.PDF(10), 0.00004539, 1e-08); + Assert.AreEqual(1, W.PDF(0)); + Assert.AreEqual(0.36787, W.PDF(1), 1e-05); + Assert.AreEqual(0.00004539, W.PDF(10), 1e-08); } /// @@ -310,9 +310,9 @@ public void Test_PDF() public void Test_CDF() { var W = new Weibull(1, 1); - Assert.AreEqual(W.CDF(0), 0); - Assert.AreEqual(W.CDF(1), 0.63212, 1e-05); - Assert.AreEqual(W.CDF(10), 0.99995, 1e-05); + Assert.AreEqual(0, W.CDF(0)); + Assert.AreEqual(0.63212, W.CDF(1), 1e-05); + Assert.AreEqual(0.99995, W.CDF(10), 1e-05); } /// @@ -322,9 +322,9 @@ public void Test_CDF() public void Test_InverseCDF() { var W = new Weibull(); - Assert.AreEqual(W.InverseCDF(0),0); - Assert.AreEqual(W.InverseCDF(1),double.PositiveInfinity); - Assert.AreEqual(W.InverseCDF(0.4), 7.1472, 1e-04); + Assert.AreEqual(0, W.InverseCDF(0)); + Assert.AreEqual(double.PositiveInfinity, W.InverseCDF(1)); + Assert.AreEqual(7.1472, W.InverseCDF(0.4), 1e-04); } } } diff --git a/Test_Numerics/Functions/Test_Functions.cs b/Test_Numerics/Functions/Test_Functions.cs index 3028e140..06cf0a27 100644 --- a/Test_Numerics/Functions/Test_Functions.cs +++ b/Test_Numerics/Functions/Test_Functions.cs @@ -232,12 +232,12 @@ public void Test_Tabular_Function() // Given X double X = 50.0; double Y = func.Function(X); - Assert.AreEqual(Y, 100.0); + Assert.AreEqual(100.0, Y); // Given Y double Y2 = 100d; double X2 = func.InverseFunction(Y2); - Assert.AreEqual(X, 50.0); + Assert.AreEqual(50.0, X2); // Given X - Interpolation double X3 = 75.0d; diff --git a/Test_Numerics/Machine Learning/Supervised/Test_DecisionTree.cs b/Test_Numerics/Machine Learning/Supervised/Test_DecisionTree.cs index 7410df34..8f8040c2 100644 --- a/Test_Numerics/Machine Learning/Supervised/Test_DecisionTree.cs +++ b/Test_Numerics/Machine Learning/Supervised/Test_DecisionTree.cs @@ -90,7 +90,7 @@ public void Test_DecisionTree_Iris() var accuracy = GoodnessOfFit.Accuracy(Y_test.Array, prediction); // Accuracy should be greater than or equal to 90% - Assert.IsTrue(accuracy >= 90); + Assert.IsGreaterThanOrEqualTo(90, accuracy); } @@ -135,7 +135,7 @@ public void Test_DecisionTree_Regression() var lmR2 = GoodnessOfFit.RSquared(Y_test.Array, lmPredict); // Linear regress is better - Assert.IsTrue(treeR2 < lmR2); + Assert.IsLessThan(lmR2, treeR2); } diff --git a/Test_Numerics/Machine Learning/Supervised/Test_RandomForest.cs b/Test_Numerics/Machine Learning/Supervised/Test_RandomForest.cs index 51d43289..3aba7072 100644 --- a/Test_Numerics/Machine Learning/Supervised/Test_RandomForest.cs +++
b/Test_Numerics/Machine Learning/Supervised/Test_RandomForest.cs @@ -89,7 +89,7 @@ public void Test_RandomForest_Iris() var accuracy = GoodnessOfFit.Accuracy(Y_test.Array, prediction.GetColumn(1)); // Accuracy should be greater than or equal to 90% - Assert.IsTrue(accuracy >= 90); + Assert.IsGreaterThanOrEqualTo(90,accuracy); } @@ -133,7 +133,7 @@ public void Test_RandomForest_Regression() var lmR2 = GoodnessOfFit.RSquared(Y_test.Array, lmPredict); // Random Forest is better - Assert.IsTrue(rfR2 > lmR2); + Assert.IsGreaterThan(lmR2,rfR2); } } diff --git a/Test_Numerics/Machine Learning/Supervised/Test_kNN.cs b/Test_Numerics/Machine Learning/Supervised/Test_kNN.cs index eda31b02..45f44a50 100644 --- a/Test_Numerics/Machine Learning/Supervised/Test_kNN.cs +++ b/Test_Numerics/Machine Learning/Supervised/Test_kNN.cs @@ -162,7 +162,7 @@ public void Test_kNN_Regression() var lmR2 = GoodnessOfFit.RSquared(Y_test.Array, lmPredict); // kNN is better - Assert.IsTrue(knnR2 > lmR2); + Assert.IsGreaterThan(lmR2,knnR2 ); } diff --git a/Test_Numerics/Mathematics/Differentiation/Test_Differentiation.cs b/Test_Numerics/Mathematics/Differentiation/Test_Differentiation.cs index 8525b4b7..8185c3bb 100644 --- a/Test_Numerics/Mathematics/Differentiation/Test_Differentiation.cs +++ b/Test_Numerics/Mathematics/Differentiation/Test_Differentiation.cs @@ -242,7 +242,7 @@ public void Test_ForwardDifference_CustomStepSize() double derivLargeStep = NumericalDerivative.ForwardDifference(FX, 2.0, 1E-2); // Small step should be more accurate - Assert.IsTrue(Math.Abs(derivSmallStep - 12.0) < Math.Abs(derivLargeStep - 12.0)); + Assert.IsLessThan(Math.Abs(derivLargeStep - 12.0),Math.Abs(derivSmallStep - 12.0) ); } #endregion @@ -286,8 +286,8 @@ public void Test_CentralDifference_HigherAccuracy() double errorBackward = Math.Abs(backward - expected); double errorCentral = Math.Abs(central - expected); - Assert.IsTrue(errorCentral < errorForward); - Assert.IsTrue(errorCentral < errorBackward); + Assert.IsLessThan(errorForward, errorCentral); + Assert.IsLessThan(errorBackward, errorCentral); } /// @@ -439,7 +439,7 @@ public void Test_RiddersMethod_HighAccuracy() // f(x) = x³, f'(2) = 12 double derivFX = NumericalDerivative.RiddersMethod(FX, 2.0, out double errFX); Assert.AreEqual(12.0, derivFX, 1E-6); - Assert.IsTrue(errFX < 1E-4); // Error estimate should be small + Assert.IsLessThan(1E-4, errFX); // Error estimate should be small // f(x) = e^x, f'(4) = e⁴ double derivEX = NumericalDerivative.RiddersMethod(EX, 4.0, out double errEX); @@ -470,7 +470,7 @@ public void Test_RiddersMethod_MoreAccurateThanCentral() // For smooth transcendental functions, Ridders should be significantly better // Expect at least 10× improvement (often 100-1000× for exponentials) - Assert.IsTrue(errorRidders < errorCentral * 0.5); + Assert.IsLessThan(errorCentral * 0.5, errorRidders); } /// @@ -485,8 +485,8 @@ public void Test_RiddersMethod_ErrorEstimate() // Error estimate should be in a reasonable range // For smooth polynomial functions, error estimate should be conservative - Assert.IsTrue(err >= 0); - Assert.IsTrue(err < 1.0); // Should be reasonably small for polynomial + Assert.IsGreaterThanOrEqualTo(0,err); + Assert.IsLessThan(1.0, err); // Should be reasonably small for polynomial } #endregion @@ -547,8 +547,8 @@ public void Test_Gradient_WithLowerBounds() var grad = NumericalDerivative.Gradient(FXY, point, lowerBounds); // Should still compute reasonable gradients - Assert.IsTrue(Math.Abs(grad[0]) < 1.0); - Assert.IsTrue(Math.Abs(grad[1]) < 
1.0); + Assert.IsLessThan(1.0, Math.Abs(grad[0])); + Assert.IsLessThan(1.0, Math.Abs(grad[1])); } /// @@ -651,8 +651,8 @@ public void Test_Jacobian_WithBounds_LowerBound() var jac = NumericalDerivative.Jacobian(VectorFunction, point, lowerBounds); // Should compute reasonable values despite being near boundary - Assert.IsTrue(!double.IsNaN(jac[0, 0])); - Assert.IsTrue(!double.IsNaN(jac[1, 1])); + Assert.IsFalse(double.IsNaN(jac[0, 0])); + Assert.IsFalse(double.IsNaN(jac[1, 1])); } /// @@ -667,8 +667,8 @@ public void Test_Jacobian_WithBounds_UpperBound() var jac = NumericalDerivative.Jacobian(VectorFunction, point, upperBounds); // Should compute reasonable values despite being near boundary - Assert.IsTrue(!double.IsNaN(jac[0, 0])); - Assert.IsTrue(!double.IsNaN(jac[1, 1])); + Assert.IsFalse(double.IsNaN(jac[0, 0])); + Assert.IsFalse(double.IsNaN(jac[1, 1])); } /// @@ -789,8 +789,8 @@ public void Test_Hessian_Rosenbrock() var hess = NumericalDerivative.Hessian(Rosenbrock, new[] { 1.0, 1.0 }); // All diagonal elements should be positive at minimum - Assert.IsTrue(hess[0, 0] > 0); - Assert.IsTrue(hess[1, 1] > 0); + Assert.IsGreaterThan(0,hess[0, 0]); + Assert.IsGreaterThan(0,hess[1, 1]); // Hessian should be symmetric Assert.AreEqual(hess[0, 1], hess[1, 0], 1E-8); @@ -808,8 +808,8 @@ public void Test_Hessian_WithLowerBounds() var hess = NumericalDerivative.Hessian(FXY, point, lowerBounds); // Should compute without errors - Assert.IsTrue(!double.IsNaN(hess[0, 0])); - Assert.IsTrue(!double.IsNaN(hess[1, 1])); + Assert.IsFalse(double.IsNaN(hess[0, 0])); + Assert.IsFalse(double.IsNaN(hess[1, 1])); } /// @@ -824,8 +824,8 @@ public void Test_Hessian_WithUpperBounds() var hess = NumericalDerivative.Hessian(FXY, point, upperBounds); // Should compute without errors - Assert.IsTrue(!double.IsNaN(hess[0, 0])); - Assert.IsTrue(!double.IsNaN(hess[1, 1])); + Assert.IsFalse(double.IsNaN(hess[0, 0])); + Assert.IsFalse(double.IsNaN(hess[1, 1])); } #endregion @@ -842,7 +842,7 @@ public void Test_CalculateStepSize_FirstDerivative() double h2 = NumericalDerivative.CalculateStepSize(10.0, 1); // Step size should scale with magnitude of input - Assert.IsTrue(h2 > h1); + Assert.IsGreaterThan(h1,h2); // Should be reasonable for finite differences Assert.IsTrue(h1 > 1E-10 && h1 < 1E-5); @@ -858,7 +858,7 @@ public void Test_CalculateStepSize_SecondDerivative() double h2 = NumericalDerivative.CalculateStepSize(1.0, 2); // Second derivative needs larger step size - Assert.IsTrue(h2 > h1); + Assert.IsGreaterThan(h1, h2); } /// @@ -870,8 +870,8 @@ public void Test_CalculateStepSize_AtZero() double h = NumericalDerivative.CalculateStepSize(0.0); // Should return reasonable value even at zero - Assert.IsTrue(h > 0); - Assert.IsTrue(h < 1E-4); + Assert.IsGreaterThan(0, h ); + Assert.IsLessThan(1E-4,h); } #endregion @@ -889,7 +889,7 @@ public void Test_Derivative_LargeValues() double deriv = NumericalDerivative.Derivative(x => x * x, 1E6); double expected = 2E6; double relativeError = Math.Abs((deriv - expected) / expected); - Assert.IsTrue(relativeError < 1E-6); // 0.0001% relative error + Assert.IsLessThan(1E-6,relativeError); // 0.0001% relative error } /// diff --git a/Test_Numerics/Mathematics/Integration/Test_Vegas.cs b/Test_Numerics/Mathematics/Integration/Test_Vegas.cs index 89af2575..1c25940e 100644 --- a/Test_Numerics/Mathematics/Integration/Test_Vegas.cs +++ b/Test_Numerics/Mathematics/Integration/Test_Vegas.cs @@ -209,12 +209,12 @@ public void Test_PowerTransform_RareUpperTailEvent() // Analytical 
approximation: P(Sum > mean + 3σ) = P(Z > 3) ≈ 0.00135 // We expect something in the ballpark of 1e-3 to 2e-3 - Assert.IsTrue(failureProbability > 5E-4, $"Probability too small: {failureProbability:E6}"); - Assert.IsTrue(failureProbability < 5E-3, $"Probability too large: {failureProbability:E6}"); + Assert.IsGreaterThan(5E-4, failureProbability, $"Probability too small: {failureProbability:E6}"); + Assert.IsLessThan(5E-3, failureProbability, $"Probability too large: {failureProbability:E6}"); // Standard error should be reasonable (less than 50% of estimate) double relativeError = vegas.StandardError / Math.Abs(failureProbability); - Assert.IsTrue(relativeError < 0.5, $"Relative error too large: {relativeError:P1}"); + Assert.IsLessThan(0.5, relativeError, $"Relative error too large: {relativeError:P1}"); } /// @@ -255,14 +255,14 @@ public void Test_PowerTransform_VeryRareEvent() var failureProbability = vegas.Result; // Should be in ballpark of 1e-6 to 1e-5 - Assert.IsTrue(failureProbability > 1E-7, $"Probability too small: {failureProbability:E2}"); - Assert.IsTrue(failureProbability < 1E-4, $"Probability too large: {failureProbability:E2}"); + Assert.IsGreaterThan(1E-7, failureProbability, $"Probability too small: {failureProbability:E2}"); + Assert.IsLessThan(1E-4,failureProbability, $"Probability too large: {failureProbability:E2}"); // For very rare events, relative error can be higher but should be finite double relativeError = vegas.StandardError / Math.Abs(failureProbability); - Assert.IsTrue(relativeError < 1.0, $"Relative error too large: {relativeError:P1}"); - Assert.IsTrue(!double.IsNaN(failureProbability), "Result should not be NaN"); - Assert.IsTrue(!double.IsInfinity(failureProbability), "Result should not be infinite"); + Assert.IsLessThan(1.0, relativeError, $"Relative error too large: {relativeError:P1}"); + Assert.IsFalse(double.IsNaN(failureProbability), "Result should not be NaN"); + Assert.IsFalse(double.IsInfinity(failureProbability), "Result should not be infinite"); } /// @@ -311,13 +311,13 @@ public void Test_PowerTransform_ProbabilityRange() vegas.Integrate(); // Verify samples were generated - Assert.IsTrue(sampleCount > 0, "No samples generated"); + Assert.IsGreaterThan(0,sampleCount , "No samples generated"); // With γ=4, should see strong tail focus (many samples near 1.0) - Assert.IsTrue(maxObserved > 0.99, $"Max probability too low: {maxObserved}"); + Assert.IsGreaterThan(0.99,maxObserved, $"Max probability too low: {maxObserved}"); // Should still have some diversity (not all at 1.0) - Assert.IsTrue(minObserved < 0.5, $"Min probability too high: {minObserved}"); + Assert.IsLessThan(0.5,minObserved, $"Min probability too high: {minObserved}"); } diff --git a/Test_Numerics/Mathematics/Linear Algebra/Test_EigenValueDecomposition.cs b/Test_Numerics/Mathematics/Linear Algebra/Test_EigenValueDecomposition.cs index 80671717..8bfc4c58 100644 --- a/Test_Numerics/Mathematics/Linear Algebra/Test_EigenValueDecomposition.cs +++ b/Test_Numerics/Mathematics/Linear Algebra/Test_EigenValueDecomposition.cs @@ -185,7 +185,7 @@ public void SymEig_3x3_RepeatedEigenvalues_AllTwos() // Max eigen residual var maxRes = MaxEigenResidual(A, V, w); - Assert.IsTrue(maxRes < 1e-12, $"Max eigen residual too large: {maxRes}"); + Assert.IsLessThan(1e-12, maxRes, $"Max eigen residual too large: {maxRes}"); } /// @@ -232,7 +232,7 @@ public void SymEig_8x8_TridiagonalToeplitz_KnownSpectrum() // Max eigen residual var maxRes = MaxEigenResidual(A, V, w); - Assert.IsTrue(maxRes < 1e-8, $"Max 
eigen residual too large: {maxRes}"); + Assert.IsLessThan(1e-8, maxRes, $"Max eigen residual too large: {maxRes}"); } /// @@ -268,7 +268,7 @@ public void SymEig_5x5_NearlyDiagonal_SmallCoupling() // Max eigen residual var maxRes = MaxEigenResidual(A, V, w); - Assert.IsTrue(maxRes < 1e-8, $"Max eigen residual too large: {maxRes}"); + Assert.IsLessThan(1e-8, maxRes, $"Max eigen residual too large: {maxRes}"); } // ---------- Helpers ---------- diff --git a/Test_Numerics/Mathematics/Linear Algebra/Test_GaussJordanElimination.cs b/Test_Numerics/Mathematics/Linear Algebra/Test_GaussJordanElimination.cs index 8359847e..1de9a5f5 100644 --- a/Test_Numerics/Mathematics/Linear Algebra/Test_GaussJordanElimination.cs +++ b/Test_Numerics/Mathematics/Linear Algebra/Test_GaussJordanElimination.cs @@ -59,7 +59,7 @@ public void Test_GaussJordanElim() for (int i = 0; i < A.NumberOfRows; i++) { for (int j = 0; j < A.NumberOfColumns - 1; j++) - Assert.AreEqual(A[i, j] == true_IA[i, j], true); + Assert.AreEqual(true_IA[i, j], A[i, j]); } /// Recreated Gauss Jordan test in R to compare the inverted A matrices. diff --git a/Test_Numerics/Mathematics/Special Functions/Test_Gamma.cs b/Test_Numerics/Mathematics/Special Functions/Test_Gamma.cs index fd28ae2c..8c67223d 100644 --- a/Test_Numerics/Mathematics/Special Functions/Test_Gamma.cs +++ b/Test_Numerics/Mathematics/Special Functions/Test_Gamma.cs @@ -68,7 +68,7 @@ public void Test_Function() for (int i = 0; i < testValid.Length; i++) { testResults[i] = Gamma.Function(testX[i]); - Assert.AreEqual(Math.Abs(testValid[i] - testResults[i]) / testValid[i] < 0.01, true); + Assert.IsLessThan(0.01d, Math.Abs(testValid[i] - testResults[i]) / testValid[i]); } } @@ -85,7 +85,7 @@ public void Test_Lanczos() for (int i = 0; i < testValid.Length; i++) { testResults[i] = Gamma.Lanczos(testX[i]); - Assert.AreEqual(Math.Abs(testValid[i] - testResults[i]) / testValid[i] < 0.01, true); + Assert.IsLessThan(0.01d, Math.Abs(testValid[i] - testResults[i]) / testValid[i]); } } @@ -157,7 +157,7 @@ public void Test_Trigamma() for (int i = 0; i < testValid.Length; i++) { testResults[i] = Gamma.Trigamma(testX[i]); - Assert.AreEqual(Math.Abs(testValid[i] - testResults[i]) / testValid[i] < 0.01, true); + Assert.IsLessThan(0.01d, Math.Abs(testValid[i] - testResults[i]) / testValid[i]); } } @@ -187,7 +187,7 @@ public void Test_LogGamma() for (int i = 0; i < testValid.Length; i++) { testResults[i] = Gamma.LogGamma(testX[i]); - Assert.AreEqual(Math.Abs(testValid[i] - testResults[i]) / testValid[i] < 0.01, true); + Assert.IsLessThan(0.01d, Math.Abs(testValid[i] - testResults[i]) / testValid[i]); } } @@ -227,7 +227,7 @@ public void Test_Incomplete() for (int i = 0; i < testValid.Length; i++) { testResults[i] = Gamma.Incomplete(testX[i], testA[i]); - Assert.AreEqual(Math.Abs(testValid[i] - testResults[i]) / testValid[i] < 0.01, true); + Assert.IsLessThan(0.01d, Math.Abs(testValid[i] - testResults[i]) / testValid[i]); } } @@ -460,7 +460,7 @@ public void Test_InverseUpperIncomplete() double x = Gamma.UpperIncomplete(lambda, i); double j = Gamma.InverseUpperIncomplete(lambda, x); - Assert.IsTrue(Math.Abs(i - j) < 1e-2 * Math.Abs(j)); + Assert.IsLessThan(1e-2 * Math.Abs(j), Math.Abs(i - j)); } } } diff --git a/Test_Numerics/Mathematics/Special Functions/Test_SpecialFunctions.cs b/Test_Numerics/Mathematics/Special Functions/Test_SpecialFunctions.cs index eef18c7f..2e6f3f7d 100644 --- a/Test_Numerics/Mathematics/Special Functions/Test_SpecialFunctions.cs +++ b/Test_Numerics/Mathematics/Special
Functions/Test_SpecialFunctions.cs @@ -208,7 +208,7 @@ public void Test_CombinationsNum() double possible = Math.Pow(2, 5) - 1; // Length of cc should be the possible number of combinations * the number of elements in each combination (5) - Assert.AreEqual(possible * 5, cc.Length); + Assert.HasCount((int)possible * 5, cc); // How many of subsets of combinations there should be // For example, there are 5 ways to have only one #1 in the array, with the other 4 elements being #0 diff --git a/Test_Numerics/Sampling/Test_Stratification.cs b/Test_Numerics/Sampling/Test_Stratification.cs index 8a6f04d4..7ce29f83 100644 --- a/Test_Numerics/Sampling/Test_Stratification.cs +++ b/Test_Numerics/Sampling/Test_Stratification.cs @@ -117,7 +117,7 @@ public void Test_XToProbability() weights += probs[i].Weight; } // Check weights sum to 1.0 - Assert.AreEqual(weights, 1.0d, 1E-8); + Assert.AreEqual(1.0d,weights, 1E-8); } /// @@ -143,7 +143,7 @@ public void Test_XToExceedanceProbability() weights += probs[i].Weight; } // Check weights sum to 1.0 - Assert.AreEqual(weights, 1.0d, 1E-8); + Assert.AreEqual(1.0d, weights, 1E-8); } @@ -203,7 +203,7 @@ public void Test_Probabilities() weights += bins[i].Weight; } // Check weights sum to 1.0 - Assert.AreEqual(weights, 1.0d, 1E-8); + Assert.AreEqual(1.0d, weights, 1E-8); } /// @@ -225,7 +225,7 @@ public void Test_Probabilities_Multi() weights += bins[i].Weight; } // Check weights sum to 1.0 - Assert.AreEqual(weights, 1.0d, 1E-8); + Assert.AreEqual(1.0d, weights, 1E-8); } /// @@ -244,7 +244,7 @@ public void Test_Probabilities_Log10() weights += bins[i].Weight; } // Check weights sum to 1.0 - Assert.AreEqual(weights, 1.0d, 1E-8); + Assert.AreEqual(1.0d, weights, 1E-8); } /// @@ -264,7 +264,7 @@ public void Test_Probabilities_Normal() weights += bins[i].Weight; } // Check weights sum to 1.0 - Assert.AreEqual(weights, 1.0d, 1E-8); + Assert.AreEqual(1.0d, weights, 1E-8); } /// @@ -283,7 +283,7 @@ public void Test_ExceedanceProbabilities() weights += bins[i].Weight; } // Check weights sum to 1.0 - Assert.AreEqual(weights, 1.0d, 1E-8); + Assert.AreEqual(1.0d, weights, 1E-8); } /// @@ -305,7 +305,7 @@ public void Test_ExceedanceProbabilities_Multi() weights += bins[i].Weight; } // Check weights sum to 1.0 - Assert.AreEqual(weights, 1.0d, 1E-8); + Assert.AreEqual(1.0d, weights, 1E-8); } /// @@ -324,7 +324,7 @@ public void Test_ExceedanceProbabilities_Log10() weights += bins[i].Weight; } // Check weights sum to 1.0 - Assert.AreEqual(weights, 1.0d, 1E-8); + Assert.AreEqual(1.0d, weights, 1E-8); } /// @@ -343,7 +343,7 @@ public void Test_ExceedanceProbabilities_Normal() weights += bins[i].Weight; } // Check weights sum to 1.0 - Assert.AreEqual(weights, 1.0d, 1E-8); + Assert.AreEqual(1.0d, weights, 1E-8); } } } diff --git a/Test_Numerics/Serialization/JsonConverterDemo.cs b/Test_Numerics/Serialization/JsonConverterDemo.cs index 50e936c4..c98258fd 100644 --- a/Test_Numerics/Serialization/JsonConverterDemo.cs +++ b/Test_Numerics/Serialization/JsonConverterDemo.cs @@ -39,14 +39,14 @@ namespace Test_Numerics.Serialization /// Demonstration of custom JSON converters for complex types. /// [TestClass] - public static class JsonConverterDemo + public class JsonConverterDemo { /// /// Demonstrates how the custom converters handle 2D arrays and complex distribution objects. 
/// /// [TestMethod] - public static void RunDemo() + public void RunDemo() { Console.WriteLine("=== JSON Converter Demo ===\n"); diff --git a/Test_Numerics/Serialization/Test_JsonSerialization.cs b/Test_Numerics/Serialization/Test_JsonSerialization.cs index 91ec8246..94cad976 100644 --- a/Test_Numerics/Serialization/Test_JsonSerialization.cs +++ b/Test_Numerics/Serialization/Test_JsonSerialization.cs @@ -99,14 +99,16 @@ public void Test_UncertaintyAnalysisResults_ArraySerialization() // Assert Assert.IsNotNull(deserialized.ModeCurve); - Assert.AreEqual(original.ModeCurve.Length, deserialized.ModeCurve.Length); + var modeCurveLen = original.ModeCurve.Length; + Assert.HasCount(modeCurveLen, deserialized.ModeCurve); for (int i = 0; i < original.ModeCurve.Length; i++) { Assert.AreEqual(original.ModeCurve[i], deserialized.ModeCurve[i], 1e-10); } Assert.IsNotNull(deserialized.MeanCurve); - Assert.AreEqual(original.MeanCurve.Length, deserialized.MeanCurve.Length); + var meanCurveLen = original.MeanCurve.Length; + Assert.HasCount(meanCurveLen, deserialized.MeanCurve); for (int i = 0; i < original.MeanCurve.Length; i++) { Assert.AreEqual(original.MeanCurve[i], deserialized.MeanCurve[i], 1e-10); @@ -163,7 +165,8 @@ public void Test_UncertaintyAnalysisResults_ParameterSetsSerialization() // Assert Assert.IsNotNull(deserialized.ParameterSets); - Assert.AreEqual(original.ParameterSets.Length, deserialized.ParameterSets.Length); + var parameterSetsLen = original.ParameterSets.Length; + Assert.HasCount(parameterSetsLen, deserialized.ParameterSets); for (int i = 0; i < original.ParameterSets.Length; i++) { @@ -173,7 +176,8 @@ public void Test_UncertaintyAnalysisResults_ParameterSetsSerialization() if (original.ParameterSets[i].Values != null) { Assert.IsNotNull(deserialized.ParameterSets[i].Values); - Assert.AreEqual(original.ParameterSets[i].Values.Length, deserialized.ParameterSets[i].Values.Length); + var parameterValuesLen = original.ParameterSets[i].Values.Length; + Assert.HasCount(parameterValuesLen, deserialized.ParameterSets[i].Values); for (int j = 0; j < original.ParameterSets[i].Values.Length; j++) { @@ -246,11 +250,11 @@ public void Test_UncertaintyAnalysisResults_EmptyArrays() // Assert Assert.IsNotNull(deserialized); Assert.IsNotNull(deserialized.ParameterSets); - Assert.AreEqual(0, deserialized.ParameterSets.Length); + Assert.HasCount(0, deserialized.ParameterSets); Assert.IsNotNull(deserialized.ModeCurve); - Assert.AreEqual(0, deserialized.ModeCurve.Length); + Assert.HasCount(0, deserialized.ModeCurve); Assert.IsNotNull(deserialized.MeanCurve); - Assert.AreEqual(0, deserialized.MeanCurve.Length); + Assert.HasCount(0, deserialized.MeanCurve); } /// @@ -296,7 +300,8 @@ public void Test_MCMCResults_BasicSerialization() // Assert Assert.IsNotNull(deserialized); Assert.IsNotNull(deserialized.AcceptanceRates); - Assert.AreEqual(original.AcceptanceRates.Length, deserialized.AcceptanceRates.Length); + var acceptanceRatesLen = original.AcceptanceRates.Length; + Assert.HasCount(acceptanceRatesLen, deserialized.AcceptanceRates); for (int i = 0; i < original.AcceptanceRates.Length; i++) { @@ -325,12 +330,14 @@ public void Test_MCMCResults_MarkovChainsSerialization() // Assert Assert.IsNotNull(deserialized.MarkovChains); - Assert.AreEqual(original.MarkovChains.Length, deserialized.MarkovChains.Length); + var markovChainsLen = original.MarkovChains.Length; + Assert.HasCount(markovChainsLen, deserialized.MarkovChains); for (int i = 0; i < original.MarkovChains.Length; i++) { 
Assert.IsNotNull(deserialized.MarkovChains[i]); - Assert.AreEqual(original.MarkovChains[i].Count, deserialized.MarkovChains[i].Count); + var chainCount = original.MarkovChains[i].Count; + Assert.HasCount(chainCount, deserialized.MarkovChains[i]); for (int j = 0; j < original.MarkovChains[i].Count; j++) { @@ -368,7 +375,8 @@ public void Test_MCMCResults_OutputSerialization() // Assert Assert.IsNotNull(deserialized.Output); - Assert.AreEqual(original.Output.Count, deserialized.Output.Count); + var outputCount = original.Output.Count; + Assert.HasCount(outputCount, deserialized.Output); for (int i = 0; i < original.Output.Count; i++) { @@ -423,7 +431,8 @@ public void Test_MCMCResults_MeanLogLikelihoodSerialization() // Assert Assert.IsNotNull(deserialized.MeanLogLikelihood); - Assert.AreEqual(original.MeanLogLikelihood.Count, deserialized.MeanLogLikelihood.Count); + var meanLogLikelihoodCount = original.MeanLogLikelihood.Count; + Assert.HasCount(meanLogLikelihoodCount, deserialized.MeanLogLikelihood); for (int i = 0; i < original.MeanLogLikelihood.Count; i++) { @@ -461,9 +470,9 @@ public void Test_MCMCResults_EmptyChains() // Assert Assert.IsNotNull(deserialized); Assert.IsNotNull(deserialized.MarkovChains); - Assert.AreEqual(2, deserialized.MarkovChains.Length); - Assert.AreEqual(0, deserialized.MarkovChains[0].Count); - Assert.AreEqual(0, deserialized.MarkovChains[1].Count); + Assert.HasCount(2, deserialized.MarkovChains); + Assert.HasCount(0, deserialized.MarkovChains[0]); + Assert.HasCount(0, deserialized.MarkovChains[1]); } /// @@ -488,7 +497,8 @@ public void Test_MCMCResults_LargeDataSet() // Assert Assert.IsNotNull(deserialized); - Assert.AreEqual(original.MarkovChains.Length, deserialized.MarkovChains.Length); + var origChainsLen = original.MarkovChains.Length; + Assert.HasCount(origChainsLen, deserialized.MarkovChains); // Verify first and last elements to ensure proper serialization var firstOriginal = original.MarkovChains[0][0]; @@ -531,15 +541,15 @@ public void Test_JsonSerializerOptions_Configuration() // Assert // Verify that WriteIndented is false (no formatting whitespace) - Assert.IsFalse(jsonString.Contains("\n")); - Assert.IsFalse(jsonString.Contains(" ")); // No indentation + Assert.DoesNotContain("\n",jsonString); + Assert.DoesNotContain(" ",jsonString); // No indentation // Verify that null values are not included (DefaultIgnoreCondition) - Assert.IsFalse(jsonString.Contains("\"ParentDistribution\":null")); + Assert.DoesNotContain("\"ParentDistribution\":null", jsonString); // Verify that fields are included (IncludeFields = true) - Assert.IsTrue(jsonString.Contains("\"AIC\":")); - Assert.IsTrue(jsonString.Contains("\"BIC\":")); + Assert.Contains("\"AIC\":",jsonString); + Assert.Contains("\"BIC\":",jsonString); } #endregion diff --git a/Test_Numerics/Utilities/Test_ExtensionMethods.cs b/Test_Numerics/Utilities/Test_ExtensionMethods.cs index dfd296ab..a348919f 100644 --- a/Test_Numerics/Utilities/Test_ExtensionMethods.cs +++ b/Test_Numerics/Utilities/Test_ExtensionMethods.cs @@ -84,7 +84,7 @@ public void Test_NextIntegers() { var random = new Random(); var result = random.NextIntegers(5); - Assert.AreEqual(5, result.Length); + Assert.HasCount(5, result); } /// @@ -95,7 +95,7 @@ public void Test_NextIntegersMinMax() { var random = new Random(); var result = random.NextIntegers(0, 10, 5); - Assert.AreEqual(5, result.Length); + Assert.HasCount(5, result); for (int i = 0; i < result.Length; i++) Assert.IsTrue(result[i] >= 0 && result[i] < 10); } @@ -134,7 +134,7 @@ public 
void Test_NextDoubles() { var random = new Random(); var result = random.NextDoubles(5); - Assert.AreEqual(5, result.Length); + Assert.HasCount(5, result); for (int i = 0; i < result.Length; i++) Assert.IsTrue(result[i] >= 0 && result[i] < 1); } @@ -275,7 +275,7 @@ public void Test_Random_Subset() var array = new double[] { 1, 2, 3, 4, 5, 6, 7, 8, 9 }; var result = array.RandomSubset(5); CollectionAssert.IsSubsetOf(result, array); - Assert.AreEqual(5, result.Length); + Assert.HasCount(5, result); } /// @@ -420,7 +420,7 @@ public void Test_Vector_Random_Subset() var vector = new Vector(new double[] { 1, 2, 3, 4, 5, 6, 7, 8, 9 }); var result = vector.RandomSubset(5).Array; CollectionAssert.IsSubsetOf(result, vector.Array); - Assert.AreEqual(5, result.Length); + Assert.HasCount(5, result); } /// diff --git a/Test_Numerics/Utilities/Test_Tools.cs b/Test_Numerics/Utilities/Test_Tools.cs index 40a5b147..c1120d0d 100644 --- a/Test_Numerics/Utilities/Test_Tools.cs +++ b/Test_Numerics/Utilities/Test_Tools.cs @@ -234,7 +234,7 @@ public void Test_SumIndicator() List values = new List { 1.4, 2.3, 3.2, 3.3d }; List predictors = new List { 1, 1, 1, 0 }; var result = Tools.Sum(values,predictors); - Assert.AreEqual(result, 6.9); + Assert.AreEqual(6.9, result); } /// @@ -247,7 +247,7 @@ public void Test_SumProduct() List list2 = new List { 4, 5, 6 }; var result = Tools.SumProduct(list1, list2); - Assert.AreEqual(result, 32); + Assert.AreEqual(32, result); } /// @@ -258,7 +258,7 @@ public void Test_Mean() { List values = new List { 1, 2, 3 }; var result = Tools.Mean(values); - Assert.AreEqual(result, 2); + Assert.AreEqual(2, result); } /// @@ -270,7 +270,7 @@ public void Test_MeanIndicator() List values = new List { 1, 2, 3,4 }; List indicators = new List { 1, 1, 1, 0 }; var result = Tools.Mean(values,indicators); - Assert.AreEqual(result, 2); + Assert.AreEqual(2, result); } /// @@ -281,7 +281,7 @@ public void Test_Product() { List values = new List { 1, 2, 3 }; var result = Tools.Product(values); - Assert.AreEqual(result, 6); + Assert.AreEqual(6, result); } /// @@ -293,7 +293,7 @@ public void Test_ProductIndicator() List values = new List { 1, 2, 3,4 }; List indicators = new List { 1,1,1,0 }; var result = Tools.Product(values,indicators); - Assert.AreEqual(result, 6); + Assert.AreEqual(6, result); } /// @@ -304,7 +304,7 @@ public void Test_Min() { List values = new List { 1, 2, 3 }; var result = Tools.Min(values); - Assert.AreEqual(result, 1); + Assert.AreEqual(1, result); } /// @@ -315,7 +315,7 @@ public void Test_ArgMin() { List values = new List { 1, 2, 3 }; var result = Tools.ArgMin(values); - Assert.AreEqual(result, 0); + Assert.AreEqual(0, result); } /// @@ -326,7 +326,7 @@ public void Test_Max() { List values = new List { 1, 2, 3 }; var result = Tools.Max(values); - Assert.AreEqual(result, 3); + Assert.AreEqual(3, result); } @@ -338,7 +338,7 @@ public void Test_ArgMax() { List values = new List { 1, 2, 3 }; var result = Tools.ArgMax(values); - Assert.AreEqual(result, 2); + Assert.AreEqual(2, result ); } /// @@ -350,7 +350,7 @@ public void Test_MinIndicator() List values = new List { 1, 2, 3, 4}; List indicators = new List { 0, 1, 1, 1 }; var result = Tools.Min(values,indicators); - Assert.AreEqual(result, 2); + Assert.AreEqual(2,result); } /// @@ -362,7 +362,7 @@ public void Test_MaxIndicator() List values = new List { 1, 2, 3 }; List indicators = new List { 1, 0, 0 }; var result = Tools.Max(values, indicators); - Assert.AreEqual(result, 1); + Assert.AreEqual(1,result); } /// @@ -380,8 +380,8 @@ 
public void Test_LogSumExp() var result = Tools.LogSumExp(u, v); var result2 = Tools.LogSumExp(values); - Assert.AreEqual(result, 1000.70815, 1E-04); - Assert.AreEqual(result2, 1000.70815, 1E-04); + Assert.AreEqual(1000.70815, result, 1E-04); + Assert.AreEqual(1000.70815, result2, 1E-04); } /// @@ -411,7 +411,7 @@ public void Test_Compress() data[1] = 128; data[2] = 255; var result = Tools.Compress(data); - Assert.IsTrue(data.Length <= result.Length); + Assert.IsLessThanOrEqualTo(result.Length,data.Length ); } /// @@ -425,7 +425,7 @@ public void Test_Decompress() data[1] = 128; data[2] = 255; var result = Tools.Decompress(data); - Assert.IsTrue(data.Length >= result.Length); + Assert.IsGreaterThanOrEqualTo(result.Length,data.Length); } } From 355e3b1315f3da54d3e4527dd0dcdf433e136727 Mon Sep 17 00:00:00 2001 From: Tiki Gonzalez Date: Tue, 30 Dec 2025 15:57:32 -0700 Subject: [PATCH 2/3] staging merge --- .github/workflows/Snapshot.yml | 1 + .../Differentiation/NumericalDerivative.cs | 6 +- README.md | 11 + .../Univariate/Test_EmpiricalDistribution.cs | 6 +- .../Test_GeneralizedExtremeValue.cs | 34 +- .../Univariate/Test_GeneralizedLogistic.cs | 16 +- .../Univariate/Test_GeneralizedNormal.cs | 2 +- .../Univariate/Test_GeneralizedPareto.cs | 10 +- .../Univariate/Test_Geometric.cs | 22 +- .../Distributions/Univariate/Test_Gumbel.cs | 14 +- .../Univariate/Test_InverseChiSquared.cs | 20 +- .../Univariate/Test_InverseGamma.cs | 12 +- .../Univariate/Test_KappaFour.cs | 4 +- .../Distributions/Univariate/Test_LnNormal.cs | 10 +- .../Univariate/Test_LogNormal.cs | 10 +- .../Univariate/Test_LogPearsonTypeIII.cs | 12 +- .../Distributions/Univariate/Test_Logistic.cs | 22 +- .../Univariate/Test_NoncentralT.cs | 8 +- .../Distributions/Univariate/Test_Normal.cs | 8 +- .../Distributions/Univariate/Test_Pareto.cs | 12 +- .../Univariate/Test_PearsonTypeIII.cs | 4 +- .../Distributions/Univariate/Test_Pert.cs | 6 +- .../Univariate/Test_PertPercentileDists.cs | 12 +- .../Distributions/Univariate/Test_Poisson.cs | 14 +- .../Supervised/Test_DecisionTree.cs | 2 +- .../Supervised/Test_RandomForest.cs | 2 +- .../Machine Learning/Supervised/Test_kNN.cs | 2 +- .../Differentiation/Test_Differentiation.cs | 16 +- .../Test_SpecialFunctions.cs | 1 + .../Serialization/Test_JsonSerialization.cs | 8 + Test_Numerics/Utilities/Test_Tools.cs | 14 +- docs/data/interpolation.md | 291 +++++++ docs/data/time-series.md | 498 ++++++++++++ docs/distributions/copulas.md | 246 ++++++ docs/distributions/multivariate.md | 495 ++++++++++++ docs/distributions/parameter-estimation.md | 586 +++++++++++++++ docs/distributions/uncertainty-analysis.md | 658 ++++++++++++++++ docs/distributions/univariate.md | 568 ++++++++++++++ docs/getting-started.md | 347 +++++++++ docs/index.md | 216 ++++++ docs/machine-learning/overview.md | 711 ++++++++++++++++++ docs/mathematics/differentiation.md | 446 +++++++++++ docs/mathematics/integration.md | 608 +++++++++++++++ docs/mathematics/linear-algebra.md | 490 ++++++++++++ docs/mathematics/ode-solvers.md | 357 +++++++++ docs/mathematics/optimization.md | 592 +++++++++++++++ docs/mathematics/root-finding.md | 473 ++++++++++++ docs/mathematics/special-functions.md | 388 ++++++++++ docs/references.md | 203 +++++ docs/sampling/convergence-diagnostics.md | 515 +++++++++++++ docs/sampling/mcmc.md | 632 ++++++++++++++++ docs/sampling/random-generation.md | 440 +++++++++++ docs/statistics/descriptive.md | 530 +++++++++++++ docs/statistics/goodness-of-fit.md | 596 +++++++++++++++ docs/statistics/hypothesis-tests.md | 
454 +++++++++++ global.json | 6 + 56 files changed, 11517 insertions(+), 150 deletions(-) create mode 100644 docs/data/interpolation.md create mode 100644 docs/data/time-series.md create mode 100644 docs/distributions/copulas.md create mode 100644 docs/distributions/multivariate.md create mode 100644 docs/distributions/parameter-estimation.md create mode 100644 docs/distributions/uncertainty-analysis.md create mode 100644 docs/distributions/univariate.md create mode 100644 docs/getting-started.md create mode 100644 docs/index.md create mode 100644 docs/machine-learning/overview.md create mode 100644 docs/mathematics/differentiation.md create mode 100644 docs/mathematics/integration.md create mode 100644 docs/mathematics/linear-algebra.md create mode 100644 docs/mathematics/ode-solvers.md create mode 100644 docs/mathematics/optimization.md create mode 100644 docs/mathematics/root-finding.md create mode 100644 docs/mathematics/special-functions.md create mode 100644 docs/references.md create mode 100644 docs/sampling/convergence-diagnostics.md create mode 100644 docs/sampling/mcmc.md create mode 100644 docs/sampling/random-generation.md create mode 100644 docs/statistics/descriptive.md create mode 100644 docs/statistics/goodness-of-fit.md create mode 100644 docs/statistics/hypothesis-tests.md create mode 100644 global.json diff --git a/.github/workflows/Snapshot.yml b/.github/workflows/Snapshot.yml index c172bf9f..64ae896e 100644 --- a/.github/workflows/Snapshot.yml +++ b/.github/workflows/Snapshot.yml @@ -10,6 +10,7 @@ jobs: with: dotnet-version: '9.0.x' project-names: 'Numerics' + version: '1.0.0' run-tests: true nuget-source: 'https://www.hec.usace.army.mil/nexus/repository/consequences-nuget-public/' secrets: diff --git a/Numerics/Mathematics/Differentiation/NumericalDerivative.cs b/Numerics/Mathematics/Differentiation/NumericalDerivative.cs index 7cd04bff..312c2fa6 100644 --- a/Numerics/Mathematics/Differentiation/NumericalDerivative.cs +++ b/Numerics/Mathematics/Differentiation/NumericalDerivative.cs @@ -1206,7 +1206,7 @@ public static double CalculateStepSize(double x, int order = 1) /// Vector to clamp (modified in place). /// Optional lower bounds for each element. /// Optional upper bounds for each element. - private static void ClampInPlace(double[] v, double[] lowerBounds, double[] upperBounds) + private static void ClampInPlace(double[] v, double[]? lowerBounds, double[]? upperBounds) { if (lowerBounds == null && upperBounds == null) return; @@ -1228,7 +1228,7 @@ private static void ClampInPlace(double[] v, double[] lowerBounds, double[] uppe /// Index of the parameter to check. /// Optional lower bounds. /// Distance to lower bound, or positive infinity if unbounded. - private static double AvailableLeft(double[] x, int j, double[] lowerBounds) + private static double AvailableLeft(double[] x, int j, double[]? lowerBounds) { return lowerBounds == null ? double.PositiveInfinity : x[j] - lowerBounds[j]; } @@ -1240,7 +1240,7 @@ private static double AvailableLeft(double[] x, int j, double[] lowerBounds) /// Index of the parameter to check. /// Optional upper bounds. /// Distance to upper bound, or positive infinity if unbounded. - private static double AvailableRight(double[] x, int j, double[] upperBounds) + private static double AvailableRight(double[] x, int j, double[]? upperBounds) { return upperBounds == null ? 
double.PositiveInfinity : upperBounds[j] - x[j]; } diff --git a/README.md b/README.md index 7c9f2db9..ae83967c 100644 --- a/README.md +++ b/README.md @@ -1,6 +1,17 @@ # Numerics ***Numerics*** is a free and open-source library for .NET developed by the U.S. Army Corps of Engineers Risk Management Center (USACE-RMC). ***Numerics*** provides a comprehensive set of methods and algorithms for numerical computations and statistical analysis. The library includes routines for interpolation, regression, time series data, statistics, machine learning, probability distributions, bootstrap uncertainty analysis, Bayesian Markov Chain Monte Carlo, optimization, root finding, and more. +## Documentation + +📚 **[User Guide and API Documentation](docs/index.md)** - Comprehensive documentation with examples and mathematical explanations + +The documentation covers: +- **Distributions**: Univariate probability distributions, parameter estimation, uncertainty analysis, copulas +- **Statistics**: Descriptive statistics, goodness-of-fit metrics, hypothesis tests +- **Mathematics**: Numerical integration, differentiation, optimization, linear algebra, root finding +- **Data**: Interpolation methods, time series analysis +- **Sampling**: Random number generation, MCMC methods including RWMH, ARWMH, DE-MCz, HMC, and Gibbs sampling + ## Support The RMC is committed to maintaining and supporting the software, providing regular updates, bug fixes, and enhancements on an annual basis or as needed. diff --git a/Test_Numerics/Distributions/Univariate/Test_EmpiricalDistribution.cs b/Test_Numerics/Distributions/Univariate/Test_EmpiricalDistribution.cs index 9ca4a6a7..4406083c 100644 --- a/Test_Numerics/Distributions/Univariate/Test_EmpiricalDistribution.cs +++ b/Test_Numerics/Distributions/Univariate/Test_EmpiricalDistribution.cs @@ -145,7 +145,7 @@ public void Test_ConvolveTwoUniformDistributions() var convolved = EmpiricalDistribution.Convolve(dist1, dist2, 1000); // Assert number of points - Assert.HasCount(1000, convolved.XValues); + Assert.HasCount(1000, convolved.XValues, "Should have exactly 1000 points"); // Expected: Min ≈ 0, Max ≈ 2, Mean ≈ 1 Assert.AreEqual(0.0, convolved.Minimum, 0.05, "Minimum should be approximately 0"); @@ -191,7 +191,7 @@ public void Test_ConvolveTwoNormalDistributions() var convolved = EmpiricalDistribution.Convolve(dist1, dist2, 2048); // Assert number of points - Assert.HasCount(2048, convolved.XValues); + Assert.HasCount(2048, convolved.XValues, "Should have exactly 2048 points"); // Expected: For N(0,1) + N(0,1) = N(0, sqrt(2)) // Mean ≈ 0, StdDev ≈ 1.414 @@ -227,7 +227,7 @@ public void Test_ConvolveDifferentRanges() var convolved = EmpiricalDistribution.Convolve(dist1, dist2, 500); // Assert number of points - Assert.HasCount(500, convolved.XValues); + Assert.HasCount(500, convolved.XValues, "Should have exactly 500 points"); // Expected: Range ≈ [5, 25], Mean ≈ 15 Assert.AreEqual(5.0, convolved.Minimum, 0.5, "Minimum should be approximately 5"); diff --git a/Test_Numerics/Distributions/Univariate/Test_GeneralizedExtremeValue.cs b/Test_Numerics/Distributions/Univariate/Test_GeneralizedExtremeValue.cs index 36d411d5..ca9f0ce3 100644 --- a/Test_Numerics/Distributions/Univariate/Test_GeneralizedExtremeValue.cs +++ b/Test_Numerics/Distributions/Univariate/Test_GeneralizedExtremeValue.cs @@ -84,8 +84,8 @@ public void Test_GEV_MOM_Fit() double true_x = 11012d; double true_a = 6209.4d; double true_k = 0.0736d; - Assert.IsLessThan(0.01d,(x - true_x) / true_x ); - Assert.IsLessThan(0.01d,(a 
- true_a) / true_a); + Assert.IsLessThan(0.01d, (x - true_x) / true_x ); + Assert.IsLessThan(0.01d, (a - true_a) / true_a ); Assert.IsLessThan(0.01d, (k - true_k) / true_k); } @@ -117,9 +117,9 @@ public void Test_GEV_LMOM_Fit() Assert.AreEqual(k, true_k, 0.001d); var lmom = GEV.LinearMomentsFromParameters(GEV.GetParameters); Assert.AreEqual(1648.806d, lmom[0], 0.001d); - Assert.AreEqual(138.2366d, lmom[1], 0.001d); - Assert.AreEqual(0.1030703d, lmom[2], 0.001d); - Assert.AreEqual(0.1277244d, lmom[3], 0.001d); + Assert.AreEqual(138.2366d, lmom[1], 0.001d); + Assert.AreEqual(0.1030703d, lmom[2], 0.001d); + Assert.AreEqual(0.1277244d, lmom[3], 0.001d); } /// @@ -227,7 +227,7 @@ public void Test_Construction() var GEV = new GeneralizedExtremeValue(); Assert.AreEqual(100,GEV.Xi); Assert.AreEqual(10,GEV.Alpha); - Assert.AreEqual(0, GEV.Kappa); + Assert.AreEqual(0,GEV.Kappa); var GEV2 = new GeneralizedExtremeValue(-100, 1, 1); Assert.AreEqual(-100,GEV2.Xi); @@ -258,7 +258,7 @@ public void Test_InvalidParameters() public void Test_ParametersToString() { var GEV = new GeneralizedExtremeValue(); - Assert.AreEqual("Location (ξ)",GEV.ParametersToString[0, 0] ); + Assert.AreEqual("Location (ξ)", GEV.ParametersToString[0, 0]); Assert.AreEqual("Scale (α)", GEV.ParametersToString[1, 0]); Assert.AreEqual("Shape (κ)", GEV.ParametersToString[2, 0]); Assert.AreEqual("100", GEV.ParametersToString[0, 1]); @@ -288,13 +288,13 @@ public void Test_Mean() { var GEV = new GeneralizedExtremeValue(); var true_val = 100 + 10 * Tools.Euler; - Assert.AreEqual(GEV.Mean, true_val); + Assert.AreEqual(true_val, GEV.Mean); var GEV2 = new GeneralizedExtremeValue(100, 10, 0.9); Assert.AreEqual(100.42482, GEV2.Mean, 1e-04); var GEV3 = new GeneralizedExtremeValue(100, 10, 10); - Assert.AreEqual(double.NaN,GEV3.Mean); + Assert.AreEqual(double.NaN, GEV3.Mean); } /// @@ -317,10 +317,10 @@ public void Test_Median() public void Test_Mode() { var GEV = new GeneralizedExtremeValue(); - Assert.AreEqual(100,GEV.Mode); + Assert.AreEqual(100, GEV.Mode); var GEV2 = new GeneralizedExtremeValue(100, 10, 1); - Assert.AreEqual(95,GEV2.Mode); + Assert.AreEqual(95, GEV2.Mode); } /// @@ -336,7 +336,7 @@ public void Test_StandardDeviation() Assert.AreEqual(9.280898, GEV2.StandardDeviation, 1e-04); var GEV3 = new GeneralizedExtremeValue(100, 10, 1); - Assert.AreEqual(double.NaN,GEV3.StandardDeviation ); + Assert.AreEqual(double.NaN, GEV3.StandardDeviation); } /// @@ -362,13 +362,13 @@ public void Test_Skewness() public void Test_Kurtosis() { var GEV = new GeneralizedExtremeValue(); - Assert.AreEqual(3 + 12d / 5d,GEV.Kurtosis ); + Assert.AreEqual(3 + 12d / 5d, GEV.Kurtosis); var GEV2 = new GeneralizedExtremeValue(100, 10, 0.24); Assert.AreEqual(2.7659607, GEV2.Kurtosis, 1e-04); var GEV3 = new GeneralizedExtremeValue(100, 10, 1); - Assert.AreEqual(double.NaN,GEV3.Kurtosis); + Assert.AreEqual(double.NaN, GEV3.Kurtosis); } /// @@ -423,7 +423,7 @@ public void Test_CDF() var GEV2 = new GeneralizedExtremeValue(100, 10, 1); Assert.AreEqual(0.367879, GEV2.CDF(100), 1e-05); - Assert.AreEqual(1,GEV2.CDF(200)); + Assert.AreEqual(1, GEV2.CDF(200)); } /// @@ -433,9 +433,9 @@ public void Test_CDF() public void Test_InverseCDF() { var GEV = new GeneralizedExtremeValue(); - Assert.AreEqual(double.NegativeInfinity,GEV.InverseCDF(0) ); + Assert.AreEqual(double.NegativeInfinity, GEV.InverseCDF(0)); Assert.AreEqual(103.66512, GEV.InverseCDF(0.5), 1e-05); - Assert.AreEqual(double.PositiveInfinity,GEV.InverseCDF(1) ); + 
Assert.AreEqual(double.PositiveInfinity,GEV.InverseCDF(1)); } } } diff --git a/Test_Numerics/Distributions/Univariate/Test_GeneralizedLogistic.cs b/Test_Numerics/Distributions/Univariate/Test_GeneralizedLogistic.cs index 21859928..d5560ded 100644 --- a/Test_Numerics/Distributions/Univariate/Test_GeneralizedLogistic.cs +++ b/Test_Numerics/Distributions/Univariate/Test_GeneralizedLogistic.cs @@ -84,8 +84,8 @@ public void Test_GLO_MOM_Fit() double true_x = 31892d; double true_a = 9030d; double true_k = -0.05515d; - Assert.IsLessThan(0.01d,(x - true_x) / true_x); - Assert.IsLessThan(0.01d,(a - true_a) / true_a); + Assert.IsLessThan(0.01d, (x - true_x) / true_x ); + Assert.IsLessThan(0.01d, (a - true_a) / true_a ); Assert.IsLessThan(0.01d, (k - true_k) / true_k); } @@ -230,7 +230,7 @@ public void Test_InvalidParameters() public void Test_ParametersToString() { var l = new GeneralizedLogistic(); - Assert.AreEqual("Location (ξ)",l.ParametersToString[0, 0] ); + Assert.AreEqual("Location (ξ)", l.ParametersToString[0, 0]); Assert.AreEqual("Scale (α)", l.ParametersToString[1, 0]); Assert.AreEqual("Shape (κ)", l.ParametersToString[2, 0]); Assert.AreEqual("100", l.ParametersToString[0, 1]); @@ -265,7 +265,7 @@ public void Test_Mean() Assert.AreEqual(9.44703, l2.Mean, 1e-04); var l3 = new GeneralizedLogistic(100, 10, 1); - Assert.AreEqual(double.NaN,l3.Mean ); + Assert.AreEqual(double.NaN, l3.Mean); } /// @@ -307,7 +307,7 @@ public void Test_StandardDeviation() Assert.AreEqual(39.76482, l2.StandardDeviation, 1e-04); var l3 = new GeneralizedLogistic(100, 10, 1); - Assert.AreEqual(double.NaN,l3.StandardDeviation ); + Assert.AreEqual(double.NaN, l3.StandardDeviation); } /// @@ -333,13 +333,13 @@ public void Test_Skewness() public void Test_Kurtosis() { var l = new GeneralizedLogistic(); - Assert.AreEqual(21d / 5d,l.Kurtosis); + Assert.AreEqual(21d / 5d, l.Kurtosis); var l2 = new GeneralizedLogistic(100, 10, 0.24); Assert.AreEqual(199.733369, l2.Kurtosis, 1e-04); var l3 = new GeneralizedLogistic(100, 10, 0.25); - Assert.AreEqual(double.NaN,l3.Kurtosis ); + Assert.AreEqual(double.NaN, l3.Kurtosis); } /// @@ -375,7 +375,7 @@ public void Test_Maximum() public void Test_PDF() { var l = new GeneralizedLogistic(); - Assert.AreEqual(0.025,l.PDF(100) ); + Assert.AreEqual(0.025,l.PDF(100)); Assert.AreEqual(4.5395e-06, l.PDF(0), 1e-10); var l2 = new GeneralizedLogistic(100, 10, 1); diff --git a/Test_Numerics/Distributions/Univariate/Test_GeneralizedNormal.cs b/Test_Numerics/Distributions/Univariate/Test_GeneralizedNormal.cs index 708dfe74..b28d1e98 100644 --- a/Test_Numerics/Distributions/Univariate/Test_GeneralizedNormal.cs +++ b/Test_Numerics/Distributions/Univariate/Test_GeneralizedNormal.cs @@ -202,7 +202,7 @@ public void Test_ParametersToString() Assert.AreEqual("Scale (α)", n.ParametersToString[1, 0]); Assert.AreEqual("Shape (κ)", n.ParametersToString[2, 0]); Assert.AreEqual("100", n.ParametersToString[0, 1]); - Assert.AreEqual("10",n.ParametersToString[1, 1]); + Assert.AreEqual("10", n.ParametersToString[1, 1]); Assert.AreEqual("0", n.ParametersToString[2, 1]); } diff --git a/Test_Numerics/Distributions/Univariate/Test_GeneralizedPareto.cs b/Test_Numerics/Distributions/Univariate/Test_GeneralizedPareto.cs index c2b9d3be..501e3edc 100644 --- a/Test_Numerics/Distributions/Univariate/Test_GeneralizedPareto.cs +++ b/Test_Numerics/Distributions/Univariate/Test_GeneralizedPareto.cs @@ -84,8 +84,8 @@ public void Test_GPA_MOM_Fit() double true_x = 50169.23d; double true_a = 55443d; double true_k = 0.0956d; - 
Assert.IsLessThan(0.01d,(x - true_x) / true_x ); - Assert.IsLessThan(0.01d,(a - true_a) / true_a ); + Assert.IsLessThan(0.01d, (x - true_x) / true_x ); + Assert.IsLessThan(0.01d, (a - true_a) / true_a ); Assert.IsLessThan(0.01d, (k - true_k) / true_k); } @@ -364,13 +364,13 @@ public void Test_StandardDeviation() public void Test_Skewness() { var GPA = new GeneralizedPareto(); - Assert.AreEqual(2,GPA.Skewness); + Assert.AreEqual(2, GPA.Skewness); var GPA2 = new GeneralizedPareto(100, 10, 0.3); Assert.AreEqual(0.932039, GPA2.Skewness, 1e-04); var GPA3 = new GeneralizedPareto(100, 10, 1); - Assert.AreEqual(double.NaN,GPA3.Skewness ); + Assert.AreEqual(double.NaN, GPA3.Skewness); } /// @@ -424,7 +424,7 @@ public void Test_PDF() var GPA2 = new GeneralizedPareto(100, 10, 1); Assert.AreEqual(0,GPA2.PDF(200)); - Assert.AreEqual(0,GPA2.PDF(50)); + Assert.AreEqual(0, GPA2.PDF(50)); } /// diff --git a/Test_Numerics/Distributions/Univariate/Test_Geometric.cs b/Test_Numerics/Distributions/Univariate/Test_Geometric.cs index 279bf7c2..fe9293c7 100644 --- a/Test_Numerics/Distributions/Univariate/Test_Geometric.cs +++ b/Test_Numerics/Distributions/Univariate/Test_Geometric.cs @@ -126,7 +126,7 @@ public void Test_InvalidParameters() public void Test_ParametersToString() { var G = new Geometric(); - Assert.AreEqual("Probability (p)",G.ParametersToString[0, 0]); + Assert.AreEqual("Probability (p)", G.ParametersToString[0, 0]); Assert.AreEqual("0.5", G.ParametersToString[0, 1]); } @@ -150,7 +150,7 @@ public void Test_Mean() public void Test_Median() { var G = new Geometric(0.0001); - Assert.AreEqual(6931,G.Median); + Assert.AreEqual(6931, G.Median); var G2 = new Geometric(0.1); Assert.AreEqual(6, G2.Median); @@ -195,7 +195,7 @@ public void Test_Skewness() Assert.AreEqual(2.12132, G.Skewness, 1e-04); var G2 = new Geometric(0.3); - Assert.AreEqual(2.03188, G2.Skewness, 1e-04); + Assert.AreEqual(2.03188, G2.Skewness, 1e-04); } /// @@ -205,7 +205,7 @@ public void Test_Skewness() public void Test_Kurtosis() { var G = new Geometric(); - Assert.AreEqual(9.5,G.Kurtosis); + Assert.AreEqual(9.5, G.Kurtosis); var G2 = new Geometric(0.3); Assert.AreEqual(9.12857, G2.Kurtosis, 1e-04); @@ -218,7 +218,7 @@ public void Test_Kurtosis() public void Test_MinMax() { var G = new Geometric(); - Assert.AreEqual(0,G.Minimum); + Assert.AreEqual(0, G.Minimum); Assert.AreEqual(double.PositiveInfinity,G.Maximum); var G2 = new Geometric(0.3); @@ -233,8 +233,8 @@ public void Test_MinMax() public void Test_PDF() { var G = new Geometric(); - Assert.AreEqual(0.5,G.PDF(0)); - Assert.AreEqual(0.125,G.PDF(2)); + Assert.AreEqual(0.5, G.PDF(0)); + Assert.AreEqual(0.125, G.PDF(2)); Assert.AreEqual(0, G.PDF(-1)); var G2 = new Geometric(0.3); @@ -256,7 +256,7 @@ public void Test_CDF() var G2 = new Geometric(0.3); Assert.AreEqual(0.657, G2.CDF(2)); - Assert.AreEqual(1,G2.CDF(100), 1e-04); + Assert.AreEqual(1, G2.CDF(100), 1e-04); } /// @@ -266,11 +266,11 @@ public void Test_CDF() public void Test_InverseCDF() { var G = new Geometric(); - Assert.AreEqual(0,G.InverseCDF(0.3)); - Assert.AreEqual(1,G.InverseCDF(0.7), 1e-04); + Assert.AreEqual(0, G.InverseCDF(0.3)); + Assert.AreEqual(1, G.InverseCDF(0.7), 1e-04); var G2 = new Geometric(0.3); - Assert.AreEqual(1,G2.InverseCDF(0.5), 1e-04); + Assert.AreEqual(1, G2.InverseCDF(0.5), 1e-04); Assert.AreEqual(6,G2.InverseCDF(0.9)); } } diff --git a/Test_Numerics/Distributions/Univariate/Test_Gumbel.cs b/Test_Numerics/Distributions/Univariate/Test_Gumbel.cs index f9bb555b..0c0ff506 100644 --- 
a/Test_Numerics/Distributions/Univariate/Test_Gumbel.cs +++ b/Test_Numerics/Distributions/Univariate/Test_Gumbel.cs @@ -80,8 +80,8 @@ public void Test_GUM_MOM_Fit() double a = GUM.Alpha; double true_x = 8074.4d; double true_a = 4441.4d; - Assert.IsLessThan(0.01d,(x - true_x) / true_x); - Assert.IsLessThan(0.01d,(a - true_a) / true_a ); + Assert.IsLessThan(0.01d, (x - true_x) / true_x ); + Assert.IsLessThan(0.01d, (a - true_a) / true_a ); } /// @@ -102,8 +102,8 @@ public void Test_GUM_LMOM_Fit() var lmom = GUM.LinearMomentsFromParameters(GUM.GetParameters); Assert.AreEqual(1648.806d, lmom[0], 0.001d); Assert.AreEqual(138.2366d, lmom[1], 0.001d); - Assert.AreEqual(0.169925d, lmom[2], 0.001d); - Assert.AreEqual(0.150375d, lmom[3], 0.001d); + Assert.AreEqual(0.169925d, lmom[2], 0.001d); + Assert.AreEqual(0.150375d, lmom[3], 0.001d); } /// @@ -214,7 +214,7 @@ public void Test_InvalidParameters() public void Test_ParametersToString() { var GUM = new Gumbel(); - Assert.AreEqual("Location (ξ)",GUM.ParametersToString[0, 0] ); + Assert.AreEqual("Location (ξ)",GUM.ParametersToString[0, 0]); Assert.AreEqual("Scale (α)", GUM.ParametersToString[1, 0]); Assert.AreEqual("100", GUM.ParametersToString[0, 1]); Assert.AreEqual("10", GUM.ParametersToString[1, 1]); @@ -280,7 +280,7 @@ public void Test_StandardDeviation() public void Test_Skewness() { var GUM = new Gumbel(); - Assert.AreEqual(1.1396,GUM.Skewness); + Assert.AreEqual(1.1396, GUM.Skewness); var GUM2 = new Gumbel(10, 1); Assert.AreEqual(1.1396, GUM2.Skewness); @@ -322,7 +322,7 @@ public void Test_PDF() { var GUM = new Gumbel(); Assert.AreEqual(0.0367879, GUM.PDF(100), 1e-04); - Assert.AreEqual(0,GUM.PDF(0)); + Assert.AreEqual(0, GUM.PDF(0)); Assert.AreEqual(4.5397e-06, GUM.PDF(200), 1e-10); var GUM2 = new Gumbel(10, 1); diff --git a/Test_Numerics/Distributions/Univariate/Test_InverseChiSquared.cs b/Test_Numerics/Distributions/Univariate/Test_InverseChiSquared.cs index 24975eae..ee0651a9 100644 --- a/Test_Numerics/Distributions/Univariate/Test_InverseChiSquared.cs +++ b/Test_Numerics/Distributions/Univariate/Test_InverseChiSquared.cs @@ -83,8 +83,8 @@ public void Test_InverseChiSquaredDist() public void Test_Construction() { var IX = new InverseChiSquared(); - Assert.AreEqual(10,IX.DegreesOfFreedom); - Assert.AreEqual(1,IX.Sigma); + Assert.AreEqual(10, IX.DegreesOfFreedom); + Assert.AreEqual(1, IX.Sigma); var IX2 = new InverseChiSquared(2, 1); Assert.AreEqual(2, IX2.DegreesOfFreedom); @@ -140,7 +140,7 @@ public void Test_Mean() public void Test_Median() { var IX = new InverseChiSquared(); - Assert.AreEqual(0.93418, IX.Median,1e-04); + Assert.AreEqual(0.93418, IX.Median, 1e-04); var IX2 = new InverseChiSquared(7, 1); Assert.AreEqual(0.906544, IX2.Median, 1e-04); @@ -169,7 +169,7 @@ public void Test_StandardDeviation() Assert.AreEqual(0.72168, IX.StandardDeviation, 1e-04); var IX2 = new InverseChiSquared(2, 2); - Assert.AreEqual(double.NaN,IX2.StandardDeviation ); + Assert.AreEqual(double.NaN, IX2.StandardDeviation); } /// @@ -182,7 +182,7 @@ public void Test_Skewness() Assert.AreEqual(3.46410, IX.Skewness, 1e-04); var IX2 = new InverseChiSquared(2, 2); - Assert.AreEqual(double.NaN,IX2.Skewness); + Assert.AreEqual(double.NaN, IX2.Skewness); } /// @@ -192,10 +192,10 @@ public void Test_Skewness() public void Test_Kurtosis() { var IX = new InverseChiSquared(); - Assert.AreEqual(45,IX.Kurtosis); + Assert.AreEqual(45, IX.Kurtosis); var IX2 = new InverseChiSquared(2,2); - Assert.AreEqual(double.NaN,IX2.Kurtosis ); + Assert.AreEqual(double.NaN, 
IX2.Kurtosis); } /// @@ -205,8 +205,8 @@ public void Test_Kurtosis() public void Test_MinMax() { var IX = new InverseChiSquared(); - Assert.AreEqual(0,IX.Minimum); - Assert.AreEqual(double.PositiveInfinity,IX.Maximum ); + Assert.AreEqual(0, IX.Minimum); + Assert.AreEqual(double.PositiveInfinity, IX.Maximum); } /// @@ -242,7 +242,7 @@ public void Test_CDF() public void Test_InverseCDF() { var IX = new InverseChiSquared(); - Assert.AreEqual(0,IX.InverseCDF(0)); + Assert.AreEqual(0, IX.InverseCDF(0)); Assert.AreEqual(double.PositiveInfinity,IX.InverseCDF(1)); Assert.AreEqual(1.17807, IX.InverseCDF(0.3), 1e-04); } diff --git a/Test_Numerics/Distributions/Univariate/Test_InverseGamma.cs b/Test_Numerics/Distributions/Univariate/Test_InverseGamma.cs index c8a2042c..eb43498b 100644 --- a/Test_Numerics/Distributions/Univariate/Test_InverseGamma.cs +++ b/Test_Numerics/Distributions/Univariate/Test_InverseGamma.cs @@ -125,7 +125,7 @@ public void Test_Mean() Assert.AreEqual(0.5, IG.Mean); var IG2 = new InverseGamma(1, 1); - Assert.AreEqual(double.NaN,IG2.Mean ); + Assert.AreEqual(double.NaN,IG2.Mean); } /// @@ -184,7 +184,7 @@ public void Test_Skewness() public void Test_Kurtosis() { var IG = new InverseGamma(); - Assert.AreEqual(double.NaN,IG.Kurtosis ); + Assert.AreEqual(double.NaN, IG.Kurtosis); var IG2 = new InverseGamma(0.5, 5); Assert.AreEqual(45, IG2.Kurtosis); @@ -212,12 +212,12 @@ public void Test_MinMax() public void Test_PDF() { var IG = new InverseGamma(2,4); - Assert.AreEqual(0,IG.PDF(-2)); - Assert.AreEqual(0.00057200, IG.PDF(5), 1e-07); + Assert.AreEqual(0, IG.PDF(-2)); + Assert.AreEqual(0.00057200, IG.PDF(5), 1e-07); Assert.AreEqual(1.74443, IG.PDF(0.42), 1e-04); var IG2 = new InverseGamma(0.42,2.4); - Assert.AreEqual(double.NaN,IG2.PDF(0) ); + Assert.AreEqual(double.NaN, IG2.PDF(0)); Assert.AreEqual(1.48386, IG2.PDF(0.3), 1e-05); } @@ -228,7 +228,7 @@ public void Test_PDF() public void Test_CDF() { var IG = new InverseGamma(); - Assert.AreEqual(0,IG.CDF(-1)); + Assert.AreEqual(0, IG.CDF(-1)); Assert.AreEqual(1, IG.CDF(double.PositiveInfinity)); var IG2 = new InverseGamma(2, 2); diff --git a/Test_Numerics/Distributions/Univariate/Test_KappaFour.cs b/Test_Numerics/Distributions/Univariate/Test_KappaFour.cs index dfb9538b..9f26b6c5 100644 --- a/Test_Numerics/Distributions/Univariate/Test_KappaFour.cs +++ b/Test_Numerics/Distributions/Univariate/Test_KappaFour.cs @@ -159,7 +159,7 @@ public void Test_K4_PartialDerivatives() public void Test_Construction() { var k4 = new KappaFour(); - Assert.AreEqual(100,k4.Xi ); + Assert.AreEqual(100,k4.Xi); Assert.AreEqual(10, k4.Alpha); Assert.AreEqual(0, k4.Kappa); Assert.AreEqual(0, k4.Hondo); @@ -194,7 +194,7 @@ public void Test_InvalidParameters() public void Test_ParametersToString() { var k4 = new KappaFour(); - Assert.AreEqual("Location (ξ)",k4.ParametersToString[0, 0] ); + Assert.AreEqual("Location (ξ)",k4.ParametersToString[0, 0]); Assert.AreEqual("Scale (α)", k4.ParametersToString[1, 0]); Assert.AreEqual("Shape (κ)", k4.ParametersToString[2, 0]); Assert.AreEqual("Shape (h)", k4.ParametersToString[3, 0]); diff --git a/Test_Numerics/Distributions/Univariate/Test_LnNormal.cs b/Test_Numerics/Distributions/Univariate/Test_LnNormal.cs index e0cceabe..30177e06 100644 --- a/Test_Numerics/Distributions/Univariate/Test_LnNormal.cs +++ b/Test_Numerics/Distributions/Univariate/Test_LnNormal.cs @@ -81,8 +81,8 @@ public void Test_LnNormal_MOM_Fit() double u2 = LN.Sigma; double true_u1 = 10.7676d; double true_u2 = 0.4544d; - Assert.IsLessThan(0.01d,(u1 - 
true_u1) / true_u1); - Assert.IsLessThan(0.01d,(u2 - true_u2) / true_u2 ); + Assert.IsLessThan(0.01d, (u1 - true_u1) / true_u1 ); + Assert.IsLessThan( 0.01d, (u2 - true_u2) / true_u2 ); } /// @@ -303,7 +303,7 @@ public void Test_MinMax() var LN2 = new LnNormal(1, 1); Assert.AreEqual(0,LN2.Minimum); - Assert.AreEqual(double.PositiveInfinity,LN2.Maximum ); + Assert.AreEqual(double.PositiveInfinity,LN2.Maximum); } /// @@ -314,10 +314,10 @@ public void Test_PDF() { var LN = new LnNormal(); Assert.AreEqual(0.03033, LN.PDF(1), 1e-04); - Assert.AreEqual(0,LN.PDF(-1)); + Assert.AreEqual(0, LN.PDF(-1)); var LN2 = new LnNormal(2.5, 2.5); - Assert.AreEqual(0.303322, LN2.PDF(0.5), 1e-04); + Assert.AreEqual(0.303322, LN2.PDF(0.5), 1e-04); } /// diff --git a/Test_Numerics/Distributions/Univariate/Test_LogNormal.cs b/Test_Numerics/Distributions/Univariate/Test_LogNormal.cs index 78466327..39e11410 100644 --- a/Test_Numerics/Distributions/Univariate/Test_LogNormal.cs +++ b/Test_Numerics/Distributions/Univariate/Test_LogNormal.cs @@ -216,9 +216,9 @@ public void Test_InvalidParameters() public void Test_ParametersToString() { var LogN = new LogNormal(); - Assert.AreEqual("Mean (of log) (µ)",LogN.ParametersToString[0, 0]); - Assert.AreEqual("Std Dev (of log) (σ)",LogN.ParametersToString[1, 0] ); - Assert.AreEqual("3",LogN.ParametersToString[0, 1] ); + Assert.AreEqual("Mean (of log) (µ)",LogN.ParametersToString[0, 0] ); + Assert.AreEqual("Std Dev (of log) (σ)",LogN.ParametersToString[1, 0]); + Assert.AreEqual("3",LogN.ParametersToString[0, 1]); Assert.AreEqual("0.5", LogN.ParametersToString[1, 1]); } @@ -271,7 +271,7 @@ public void Test_PDF() public void Test_CDF() { var LogN = new LogNormal(1.5, 0.1); - Assert.AreEqual(0,LogN.CDF(0.1)); + Assert.AreEqual(0, LogN.CDF(0.1)); var LogN2 = new LogNormal(1.5, 1.5); Assert.AreEqual(0.11493, LogN2.CDF(0.5), 1e-05); @@ -287,7 +287,7 @@ public void Test_InverseCDF() Assert.AreEqual(40183.99248, LogN.InverseCDF(0.8), 1e-04); var LogN2 = new LogNormal(1.5, 2.5); - Assert.AreEqual(40183.99248, LogN.InverseCDF(0.8), 1e-05); + Assert.AreEqual(40183.99248, LogN.InverseCDF(0.8), 1e-05); } } } diff --git a/Test_Numerics/Distributions/Univariate/Test_LogPearsonTypeIII.cs b/Test_Numerics/Distributions/Univariate/Test_LogPearsonTypeIII.cs index 0fd4fb62..fd84945e 100644 --- a/Test_Numerics/Distributions/Univariate/Test_LogPearsonTypeIII.cs +++ b/Test_Numerics/Distributions/Univariate/Test_LogPearsonTypeIII.cs @@ -94,15 +94,15 @@ public void Test_LP3_IndirectMOM() double true_mean = 191.38768d; double true_stDev = 47.62977d; double true_skew = 0.71589d; - Assert.IsLessThan(0.01d,(xi - true_xi) / true_xi); - Assert.IsLessThan(0.01d,(beta - true_beta) / true_beta); + Assert.IsLessThan(0.01d, (xi - true_xi) / true_xi ); + Assert.IsLessThan(0.01d, (beta - true_beta) / true_beta ); Assert.IsLessThan(0.01d, (alpha - true_alpha) / true_alpha); Assert.IsLessThan(0.01d, (meanOfLog - true_meanOfLog) / true_meanOfLog); Assert.IsLessThan(0.01d, (stDevOfLog - true_stDevOfLog) / true_stDevOfLog); Assert.IsLessThan(0.01d, (skewOfLog - true_skewOfLog) / true_skewOfLog); Assert.IsLessThan(0.01d, (mean - true_mean) / true_mean); Assert.IsLessThan(0.01d, (stDev - true_stDev) / true_stDev); - Assert.IsLessThan(0.01d, (skew - true_skew) / true_skew ); + Assert.IsLessThan(0.01d, (skew - true_skew) / true_skew); } /// @@ -234,7 +234,7 @@ public void Test_InvalidParameters() public void Test_ParametersToString() { var LP3 = new LogPearsonTypeIII(); - Assert.AreEqual("Mean (of log) 
(µ)",LP3.ParametersToString[0, 0] ); + Assert.AreEqual("Mean (of log) (µ)", LP3.ParametersToString[0, 0]); Assert.AreEqual("Std Dev (of log) (σ)", LP3.ParametersToString[1, 0]); Assert.AreEqual("Skew (of log) (γ)", LP3.ParametersToString[2, 0]); Assert.AreEqual("3", LP3.ParametersToString[0, 1]); @@ -341,7 +341,7 @@ public void Test_InverseCDF() { var LP3 = new LogPearsonTypeIII(); Assert.AreEqual(0,LP3.InverseCDF(0)); - Assert.AreEqual(double.PositiveInfinity,LP3.InverseCDF(1) ); + Assert.AreEqual(double.PositiveInfinity,LP3.InverseCDF(1)); Assert.AreEqual(546.7637, LP3.InverseCDF(0.3), 1e-04); } @@ -354,7 +354,7 @@ public void ValidateWilsonHilfertyInverseCDF() var LP3 = new LogPearsonTypeIII(); Assert.AreEqual(0, LP3.WilsonHilfertyInverseCDF(0)); Assert.AreEqual(double.PositiveInfinity, LP3.WilsonHilfertyInverseCDF(1)); - Assert.AreEqual(747.01005, LP3.WilsonHilfertyInverseCDF(0.4),1e-05); + Assert.AreEqual(747.01005, LP3.WilsonHilfertyInverseCDF(0.4), 1e-05); } } } diff --git a/Test_Numerics/Distributions/Univariate/Test_Logistic.cs b/Test_Numerics/Distributions/Univariate/Test_Logistic.cs index 48ad38d9..0fa93603 100644 --- a/Test_Numerics/Distributions/Univariate/Test_Logistic.cs +++ b/Test_Numerics/Distributions/Univariate/Test_Logistic.cs @@ -80,8 +80,8 @@ public void Test_Logistic_MOM_Fit() double a = LO.Alpha; double true_x = 12665d; double true_a = 2596.62d; - Assert.IsLessThan(0.01d,(x - true_x) / true_x ); - Assert.IsLessThan(0.01d,(a - true_a) / true_a); + Assert.IsLessThan(0.01d, (x - true_x) / true_x ); + Assert.IsLessThan(0.01d, (a - true_a) / true_a ); } /// @@ -125,7 +125,7 @@ public void Test_Logistic_Quantile() var LO = new Logistic(12665d, 2596.62d); double q100 = LO.InverseCDF(0.99d); double true_100 = 24597d; - Assert.IsLessThan(0.01d, (q100 - true_100) / true_100 ); + Assert.IsLessThan(0.01d, (q100 - true_100) / true_100); double p = LO.CDF(q100); double true_p = 0.99d; Assert.AreEqual(p, true_p); @@ -150,13 +150,13 @@ public void Test_Logistic_StandardError() var LO = new Logistic(12665d, 2596.62d); double se100 = Math.Sqrt(LO.QuantileVariance(0.99d, 48, ParameterEstimationMethod.MethodOfMoments)); double true_se100 = 1684d; - Assert.IsLessThan(0.01d,(se100 - true_se100) / true_se100); + Assert.IsLessThan(0.01d, (se100 - true_se100) / true_se100); // Maximum Likelihood LO = new Logistic(12628.59d, 2708.64d); se100 = Math.Sqrt(LO.QuantileVariance(0.99d, 48, ParameterEstimationMethod.MaximumLikelihood)); true_se100 = 1648d; - Assert.IsLessThan(0.01d,(se100 - true_se100) / true_se100); + Assert.IsLessThan(0.01d, (se100 - true_se100) / true_se100); } /// @@ -166,8 +166,8 @@ public void Test_Logistic_StandardError() public void Test_Construction() { var LO = new Logistic(); - Assert.AreEqual(0,LO.Xi); - Assert.AreEqual(0.1,LO.Alpha); + Assert.AreEqual(0, LO.Xi); + Assert.AreEqual(0.1, LO.Alpha); var LO2 = new Logistic(1, 1); Assert.AreEqual(1,LO2.Xi); @@ -197,7 +197,7 @@ public void Test_InvalidParameters() public void Test_ParametersToString() { var LO = new Logistic(); - Assert.AreEqual("Location (ξ)",LO.ParametersToString[0, 0] ); + Assert.AreEqual("Location (ξ)", LO.ParametersToString[0, 0]); Assert.AreEqual("Scale (α)", LO.ParametersToString[1, 0]); Assert.AreEqual("0", LO.ParametersToString[0, 1]); Assert.AreEqual("0.1", LO.ParametersToString[1, 1]); @@ -302,7 +302,7 @@ public void Test_Kurtosis() public void Test_MinMax() { var LO = new Logistic(); - Assert.AreEqual(double.NegativeInfinity,LO.Minimum ); + Assert.AreEqual(double.NegativeInfinity,LO.Minimum); 
Assert.AreEqual(double.PositiveInfinity, LO.Maximum); var LO2 = new Logistic(1, 1); @@ -319,7 +319,7 @@ public void Test_PDF() var LO = new Logistic(5,2); Assert.AreEqual(0.00332, LO.PDF(-5), 1e-04); Assert.AreEqual(0.03505, LO.PDF(0), 1e-04); - Assert.AreEqual(0.125,LO.PDF(5) ); + Assert.AreEqual(0.125, LO.PDF(5)); } /// @@ -331,7 +331,7 @@ public void Test_CDF() var LO = new Logistic(5,2); Assert.AreEqual(0.00669, LO.CDF(-5), 1e-05); Assert.AreEqual(0.07585, LO.CDF(0), 1e-04); - Assert.AreEqual(0.5,LO.CDF(5) ); + Assert.AreEqual(0.5, LO.CDF(5)); } /// diff --git a/Test_Numerics/Distributions/Univariate/Test_NoncentralT.cs b/Test_Numerics/Distributions/Univariate/Test_NoncentralT.cs index 7fb40330..f8e17c82 100644 --- a/Test_Numerics/Distributions/Univariate/Test_NoncentralT.cs +++ b/Test_Numerics/Distributions/Univariate/Test_NoncentralT.cs @@ -114,7 +114,7 @@ public void Test_NoncentralT_InverseCDF() public void Test_Construction() { var t = new NoncentralT(); - Assert.AreEqual(10,t.DegreesOfFreedom ); + Assert.AreEqual(10,t.DegreesOfFreedom); Assert.AreEqual(0, t.Noncentrality); var t2 = new NoncentralT(1, 1); @@ -220,7 +220,7 @@ public void Test_Skewness() public void Test_Kurtosis() { var t = new NoncentralT(); - Assert.AreEqual(4.0, t.Kurtosis, 1E-4); + Assert.AreEqual(4.0, t.Kurtosis, 1E-4); } /// @@ -245,7 +245,7 @@ public void Test_MinMax() public void Test_PDF() { var t = new NoncentralT(); - Assert.AreEqual(0.38910, t.PDF(0),1e-04); + Assert.AreEqual(0.38910, t.PDF(0), 1e-04); Assert.AreEqual(0.23036, t.PDF(1),1e-04); } @@ -256,7 +256,7 @@ public void Test_PDF() public void Test_CDF() { var t = new NoncentralT(); - Assert.AreEqual(0.82955, t.CDF(1),1e-04); + Assert.AreEqual(0.82955, t.CDF(1), 1e-04); } /// diff --git a/Test_Numerics/Distributions/Univariate/Test_Normal.cs b/Test_Numerics/Distributions/Univariate/Test_Normal.cs index 3703e7a2..889f4e54 100644 --- a/Test_Numerics/Distributions/Univariate/Test_Normal.cs +++ b/Test_Numerics/Distributions/Univariate/Test_Normal.cs @@ -82,8 +82,8 @@ public void Test_Normal_MOM_Fit() double u2 = norm.Sigma; double true_u1 = 12665d; double true_u2 = 4710d; - Assert.IsLessThan(0.01d,(u1 - true_u1) / true_u1); - Assert.IsLessThan(0.01d,(u2 - true_u2) / true_u2 ); + Assert.IsLessThan( 0.01d, (u1 - true_u1) / true_u1 ); + Assert.IsLessThan( 0.01d, (u2 - true_u2) / true_u2 ); } @@ -323,7 +323,7 @@ public void Test_Kurtosis() public void Test_MinMax() { var N = new Normal(); - Assert.AreEqual(double.NegativeInfinity,N.Minimum ); + Assert.AreEqual(double.NegativeInfinity,N.Minimum); Assert.AreEqual(double.PositiveInfinity, N.Maximum); var N2 = new Normal(5, 9); @@ -354,7 +354,7 @@ public void Test_CDF() var N = new Normal(5,2); Assert.AreEqual(0.006209, N.CDF(0), 1e-04); Assert.AreEqual(0.30853, N.CDF(4), 1e-04); - Assert.AreEqual(0.5,N.CDF(5)); + Assert.AreEqual(0.5, N.CDF(5)); } } } diff --git a/Test_Numerics/Distributions/Univariate/Test_Pareto.cs b/Test_Numerics/Distributions/Univariate/Test_Pareto.cs index 105578aa..b160758a 100644 --- a/Test_Numerics/Distributions/Univariate/Test_Pareto.cs +++ b/Test_Numerics/Distributions/Univariate/Test_Pareto.cs @@ -156,7 +156,7 @@ public void Test_Mean() Assert.AreEqual(1.1111, p.Mean, 1e-04); var p2 = new Pareto(1,1); - Assert.AreEqual(double.PositiveInfinity,p2.Mean ); + Assert.AreEqual(double.PositiveInfinity, p2.Mean); } /// @@ -195,7 +195,7 @@ public void Test_StandardDeviation() Assert.AreEqual(0.12422, p.StandardDeviation, 1e-04); var p2 = new Pareto(1, 2); - 
Assert.AreEqual(double.PositiveInfinity,p2.StandardDeviation ); + Assert.AreEqual(double.PositiveInfinity, p2.StandardDeviation); } /// @@ -208,7 +208,7 @@ public void Test_Skewness() Assert.AreEqual(2.81105, p.Skewness, 1e-04); var p2 = new Pareto(1, 3); - Assert.AreEqual(double.NaN,p2.Skewness); + Assert.AreEqual(double.NaN, p2.Skewness); } /// @@ -218,10 +218,10 @@ public void Test_Skewness() public void Test_Kurtosis() { var p = new Pareto(); - Assert.AreEqual(17.82857, p.Kurtosis, 1e-04); + Assert.AreEqual(17.82857, p.Kurtosis, 1e-04); var p2 = new Pareto(1, 4); - Assert.AreEqual(double.NaN,p2.Kurtosis); + Assert.AreEqual(double.NaN, p2.Kurtosis); } /// @@ -246,7 +246,7 @@ public void Test_MinMax() public void Test_PDF() { var p = new Pareto(1,1); - Assert.AreEqual(1,p.PDF(1)); + Assert.AreEqual(1, p.PDF(1)); Assert.AreEqual(4d / 9d,p.PDF(1.5) ); var p2 = new Pareto(3, 2); diff --git a/Test_Numerics/Distributions/Univariate/Test_PearsonTypeIII.cs b/Test_Numerics/Distributions/Univariate/Test_PearsonTypeIII.cs index a8d139f4..45606a90 100644 --- a/Test_Numerics/Distributions/Univariate/Test_PearsonTypeIII.cs +++ b/Test_Numerics/Distributions/Univariate/Test_PearsonTypeIII.cs @@ -94,8 +94,8 @@ public void Test_P3_MOM() double true_mean = 191.31739d; double true_stDev = 47.96161d; double true_skew = 0.86055d; - Assert.IsLessThan(0.01d,(xi - true_xi) / true_xi); - Assert.IsLessThan(0.01d,(beta - true_beta) / true_beta); + Assert.IsLessThan(0.01d, (xi - true_xi) / true_xi ); + Assert.IsLessThan(0.01d, (beta - true_beta) / true_beta ); Assert.IsLessThan(0.01d, (alpha - true_alpha) / true_alpha); Assert.IsLessThan(0.01d, (mu - true_mu) / true_mu); Assert.IsLessThan(0.01d, (sigma - true_sigma) / true_sigma); diff --git a/Test_Numerics/Distributions/Univariate/Test_Pert.cs b/Test_Numerics/Distributions/Univariate/Test_Pert.cs index 799b90b2..bbc4ea78 100644 --- a/Test_Numerics/Distributions/Univariate/Test_Pert.cs +++ b/Test_Numerics/Distributions/Univariate/Test_Pert.cs @@ -183,7 +183,7 @@ public void Test_InvalidParameters() public void Test_ParametersToString() { var p = new Pert(); - Assert.AreEqual("Min (a)",p.ParametersToString[0, 0] ); + Assert.AreEqual("Min (a)",p.ParametersToString[0, 0]); Assert.AreEqual("Most Likely (c)", p.ParametersToString[1, 0]); Assert.AreEqual("Max (b)", p.ParametersToString[2, 0]); Assert.AreEqual("0", p.ParametersToString[0, 1]); @@ -258,7 +258,7 @@ public void Test_StandardDeviation() public void Test_Skewness() { var p = new Pert(); - Assert.AreEqual(0,p.Skewness); + Assert.AreEqual(0, p.Skewness); } /// @@ -278,7 +278,7 @@ public void Test_Kurtosis() public void Test_MinMax() { var p = new Pert(); - Assert.AreEqual(0,p.Minimum); + Assert.AreEqual(0, p.Minimum); Assert.AreEqual(1, p.Maximum); var p2 = new Pert(1, 1.5, 2); diff --git a/Test_Numerics/Distributions/Univariate/Test_PertPercentileDists.cs b/Test_Numerics/Distributions/Univariate/Test_PertPercentileDists.cs index 370fad88..bebd282d 100644 --- a/Test_Numerics/Distributions/Univariate/Test_PertPercentileDists.cs +++ b/Test_Numerics/Distributions/Univariate/Test_PertPercentileDists.cs @@ -82,8 +82,8 @@ public void Test_PertPercentile_AtRisk() Assert.AreEqual(pert.StandardDeviation, true_stdDev, 1E-3); Assert.AreEqual(pert.Skewness, true_skew, 1E-3); Assert.AreEqual(pert.Kurtosis, true_kurt, 1E-3); - Assert.AreEqual(0.05,pert.CDF(true_icdf05), 1E-3); - Assert.AreEqual(0.5, pert.CDF(true_icdf50), 1E-3); + Assert.AreEqual(0.05, pert.CDF(true_icdf05), 1E-3); + Assert.AreEqual(0.5, 
pert.CDF(true_icdf50), 1E-3); Assert.AreEqual(0.95, pert.CDF(true_icdf95), 1E-3); Assert.AreEqual(pert.InverseCDF(0.05d), true_icdf05, 1E-3); Assert.AreEqual(pert.InverseCDF(0.5d), true_icdf50, 1E-3); @@ -111,9 +111,9 @@ public void Test_PertPercentileZ_AtRisk() Assert.AreEqual(pert.Mean, true_mean, 1E-2); Assert.AreEqual(pert.Median, true_median, 1E-2); Assert.AreEqual(pert.Mode, true_mode, 1E-2); - Assert.AreEqual(0.05, pert.CDF(true_icdf05), 1E-2); - Assert.AreEqual(0.5, pert.CDF(true_icdf50), 1E-2); - Assert.AreEqual(0.95, pert.CDF(true_icdf95), 1E-2); + Assert.AreEqual(0.05, pert.CDF(true_icdf05), 1E-2); + Assert.AreEqual(0.5, pert.CDF(true_icdf50), 1E-2); + Assert.AreEqual(0.95, pert.CDF(true_icdf95), 1E-2); Assert.AreEqual(pert.InverseCDF(0.05d), true_icdf05, 1E-2); Assert.AreEqual(pert.InverseCDF(0.5d), true_icdf50, 1E-2); Assert.AreEqual(pert.InverseCDF(0.95d), true_icdf95, 1E-2); @@ -136,7 +136,7 @@ public void Test_PertPercentile() Assert.AreEqual(0.05, pert.CDF(fifth), 1E-1); Assert.AreEqual(0.5, pert.CDF(fiftieth), 1E-1); - Assert.AreEqual(0.95, pert.CDF(ninetyfifth), 1E-1); + Assert.AreEqual(0.95, pert.CDF(ninetyfifth), 1E-1); Assert.AreEqual(pert.InverseCDF(0.05d), fifth, 1E-1); Assert.AreEqual(pert.InverseCDF(0.5d), fiftieth, 1E-1); Assert.AreEqual(pert.InverseCDF(0.95d), ninetyfifth, 1E-1); diff --git a/Test_Numerics/Distributions/Univariate/Test_Poisson.cs b/Test_Numerics/Distributions/Univariate/Test_Poisson.cs index 9ab9b4bd..0f7694b3 100644 --- a/Test_Numerics/Distributions/Univariate/Test_Poisson.cs +++ b/Test_Numerics/Distributions/Univariate/Test_Poisson.cs @@ -92,7 +92,7 @@ public void Test_PoissonDist() public void Test_Construction() { var P = new Poisson(); - Assert.AreEqual(1,P.Lambda); + Assert.AreEqual(1, P.Lambda); var P2 = new Poisson(10); Assert.AreEqual(10,P2.Lambda); @@ -121,7 +121,7 @@ public void Test_InvalidParameters() public void Test_ParametersToString() { var P = new Poisson(); - Assert.AreEqual("Rate (λ)",P.ParametersToString[0, 0] ); + Assert.AreEqual("Rate (λ)",P.ParametersToString[0, 0]); Assert.AreEqual("1",P.ParametersToString[0, 1]); } @@ -149,7 +149,7 @@ public void Test_Mean() Assert.AreEqual(1,P.Mean); var P2 = new Poisson(10); - Assert.AreEqual(10,P2.Mean ); + Assert.AreEqual(10,P2.Mean); } /// @@ -172,7 +172,7 @@ public void Test_Mode() Assert.AreEqual(1, P.Mode); var P2 = new Poisson(2.4); - Assert.AreEqual(2,P2.Mode); + Assert.AreEqual(2, P2.Mode); } /// @@ -182,7 +182,7 @@ public void Test_Mode() public void Test_StandardDeviation() { var P = new Poisson(); - Assert.AreEqual(1,P.StandardDeviation); + Assert.AreEqual(1, P.StandardDeviation); var P2 = new Poisson(4); Assert.AreEqual(2, P2.StandardDeviation); @@ -266,8 +266,8 @@ public void Test_CDF() public void Test_InverseCDF() { var P = new Poisson(); - Assert.AreEqual(0,P.InverseCDF(0)); - Assert.AreEqual(double.PositiveInfinity,P.InverseCDF(1)); + Assert.AreEqual(0, P.InverseCDF(0)); + Assert.AreEqual(double.PositiveInfinity,P.InverseCDF(1) ); } } } diff --git a/Test_Numerics/Machine Learning/Supervised/Test_DecisionTree.cs b/Test_Numerics/Machine Learning/Supervised/Test_DecisionTree.cs index 8f8040c2..ec39e56c 100644 --- a/Test_Numerics/Machine Learning/Supervised/Test_DecisionTree.cs +++ b/Test_Numerics/Machine Learning/Supervised/Test_DecisionTree.cs @@ -135,7 +135,7 @@ public void Test_DecisionTree_Regression() var lmR2 = GoodnessOfFit.RSquared(Y_test.Array, lmPredict); // Linear regress is better - Assert.IsLessThan(lmR2,treeR2 ); + Assert.IsLessThan(lmR2,treeR2); } diff 
--git a/Test_Numerics/Machine Learning/Supervised/Test_RandomForest.cs b/Test_Numerics/Machine Learning/Supervised/Test_RandomForest.cs index 3aba7072..e779599d 100644 --- a/Test_Numerics/Machine Learning/Supervised/Test_RandomForest.cs +++ b/Test_Numerics/Machine Learning/Supervised/Test_RandomForest.cs @@ -89,7 +89,7 @@ public void Test_RandomForest_Iris() var accuracy = GoodnessOfFit.Accuracy(Y_test.Array, prediction.GetColumn(1)); // Accuracy should be greater than or equal to 90% - Assert.IsGreaterThanOrEqualTo(90,accuracy); + Assert.IsGreaterThanOrEqualTo(90, accuracy); } diff --git a/Test_Numerics/Machine Learning/Supervised/Test_kNN.cs b/Test_Numerics/Machine Learning/Supervised/Test_kNN.cs index 45f44a50..f62ed345 100644 --- a/Test_Numerics/Machine Learning/Supervised/Test_kNN.cs +++ b/Test_Numerics/Machine Learning/Supervised/Test_kNN.cs @@ -162,7 +162,7 @@ public void Test_kNN_Regression() var lmR2 = GoodnessOfFit.RSquared(Y_test.Array, lmPredict); // kNN is better - Assert.IsGreaterThan(lmR2,knnR2 ); + Assert.IsGreaterThan(lmR2, knnR2 ); } diff --git a/Test_Numerics/Mathematics/Differentiation/Test_Differentiation.cs b/Test_Numerics/Mathematics/Differentiation/Test_Differentiation.cs index 8185c3bb..84ac257a 100644 --- a/Test_Numerics/Mathematics/Differentiation/Test_Differentiation.cs +++ b/Test_Numerics/Mathematics/Differentiation/Test_Differentiation.cs @@ -242,7 +242,7 @@ public void Test_ForwardDifference_CustomStepSize() double derivLargeStep = NumericalDerivative.ForwardDifference(FX, 2.0, 1E-2); // Small step should be more accurate - Assert.IsLessThan(Math.Abs(derivLargeStep - 12.0),Math.Abs(derivSmallStep - 12.0) ); + Assert.IsLessThan(Math.Abs(derivLargeStep - 12.0), Math.Abs(derivSmallStep - 12.0)); } #endregion @@ -485,7 +485,7 @@ public void Test_RiddersMethod_ErrorEstimate() // Error estimate should be in a reasonable range // For smooth polynomial functions, error estimate should be conservative - Assert.IsGreaterThanOrEqualTo(0,err); + Assert.IsGreaterThanOrEqualTo(0, err); Assert.IsLessThan(1.0, err); // Should be reasonably small for polynomial } @@ -789,8 +789,8 @@ public void Test_Hessian_Rosenbrock() var hess = NumericalDerivative.Hessian(Rosenbrock, new[] { 1.0, 1.0 }); // All diagonal elements should be positive at minimum - Assert.IsGreaterThan(0,hess[0, 0]); - Assert.IsGreaterThan(0,hess[1, 1]); + Assert.IsGreaterThan(0, hess[0, 0]); + Assert.IsGreaterThan(0, hess[1, 1]); // Hessian should be symmetric Assert.AreEqual(hess[0, 1], hess[1, 0], 1E-8); @@ -842,7 +842,7 @@ public void Test_CalculateStepSize_FirstDerivative() double h2 = NumericalDerivative.CalculateStepSize(10.0, 1); // Step size should scale with magnitude of input - Assert.IsGreaterThan(h1,h2); + Assert.IsGreaterThan(h1, h2); // Should be reasonable for finite differences Assert.IsTrue(h1 > 1E-10 && h1 < 1E-5); @@ -870,8 +870,8 @@ public void Test_CalculateStepSize_AtZero() double h = NumericalDerivative.CalculateStepSize(0.0); // Should return reasonable value even at zero - Assert.IsGreaterThan(0, h ); - Assert.IsLessThan(1E-4,h); + Assert.IsGreaterThan(0, h); + Assert.IsLessThan(1E-4, h); } #endregion @@ -889,7 +889,7 @@ public void Test_Derivative_LargeValues() double deriv = NumericalDerivative.Derivative(x => x * x, 1E6); double expected = 2E6; double relativeError = Math.Abs((deriv - expected) / expected); - Assert.IsLessThan(1E-6,relativeError); // 0.0001% relative error + Assert.IsLessThan(1E-6, relativeError); // 0.0001% relative error } /// diff --git 
a/Test_Numerics/Mathematics/Special Functions/Test_SpecialFunctions.cs b/Test_Numerics/Mathematics/Special Functions/Test_SpecialFunctions.cs index 2e6f3f7d..be9786fe 100644 --- a/Test_Numerics/Mathematics/Special Functions/Test_SpecialFunctions.cs +++ b/Test_Numerics/Mathematics/Special Functions/Test_SpecialFunctions.cs @@ -207,6 +207,7 @@ public void Test_CombinationsNum() // Total number of possible combinations double possible = Math.Pow(2, 5) - 1; + var ccLen = cc.Length; // Length of cc should be the possible number of combinations * the number of elements in each combination (5) Assert.HasCount((int)possible * 5, cc); diff --git a/Test_Numerics/Serialization/Test_JsonSerialization.cs b/Test_Numerics/Serialization/Test_JsonSerialization.cs index 94cad976..6ec44ec2 100644 --- a/Test_Numerics/Serialization/Test_JsonSerialization.cs +++ b/Test_Numerics/Serialization/Test_JsonSerialization.cs @@ -163,6 +163,8 @@ public void Test_UncertaintyAnalysisResults_ParameterSetsSerialization() byte[] serialized = UncertaintyAnalysisResults.ToByteArray(original); var deserialized = UncertaintyAnalysisResults.FromByteArray(serialized); + var origLen = original.ParameterSets.Length; + var desLen = deserialized.ParameterSets.Length; // Assert Assert.IsNotNull(deserialized.ParameterSets); var parameterSetsLen = original.ParameterSets.Length; @@ -297,6 +299,8 @@ public void Test_MCMCResults_BasicSerialization() byte[] serialized = MCMCResults.ToByteArray(original); var deserialized = MCMCResults.FromByteArray(serialized); + var origLen = original.AcceptanceRates.Length; + var desLen = deserialized.AcceptanceRates.Length; // Assert Assert.IsNotNull(deserialized); Assert.IsNotNull(deserialized.AcceptanceRates); @@ -328,6 +332,8 @@ public void Test_MCMCResults_MarkovChainsSerialization() byte[] serialized = MCMCResults.ToByteArray(original); var deserialized = MCMCResults.FromByteArray(serialized); + var origLen = original.MarkovChains.Length; + var desLen = deserialized.MarkovChains.Length; // Assert Assert.IsNotNull(deserialized.MarkovChains); var markovChainsLen = original.MarkovChains.Length; @@ -495,6 +501,8 @@ public void Test_MCMCResults_LargeDataSet() byte[] serialized = MCMCResults.ToByteArray(original); var deserialized = MCMCResults.FromByteArray(serialized); + var origLen = original.MarkovChains.Length; + var deserializedLen = deserialized.MarkovChains.Length; // Assert Assert.IsNotNull(deserialized); var origChainsLen = original.MarkovChains.Length; diff --git a/Test_Numerics/Utilities/Test_Tools.cs b/Test_Numerics/Utilities/Test_Tools.cs index c1120d0d..eed3837c 100644 --- a/Test_Numerics/Utilities/Test_Tools.cs +++ b/Test_Numerics/Utilities/Test_Tools.cs @@ -338,7 +338,7 @@ public void Test_ArgMax() { List values = new List { 1, 2, 3 }; var result = Tools.ArgMax(values); - Assert.AreEqual(2, result ); + Assert.AreEqual(2, result); } /// @@ -350,7 +350,7 @@ public void Test_MinIndicator() List values = new List { 1, 2, 3, 4}; List indicators = new List { 0, 1, 1, 1 }; var result = Tools.Min(values,indicators); - Assert.AreEqual(2,result); + Assert.AreEqual(2, result); } /// @@ -362,7 +362,7 @@ public void Test_MaxIndicator() List values = new List { 1, 2, 3 }; List indicators = new List { 1, 0, 0 }; var result = Tools.Max(values, indicators); - Assert.AreEqual(1,result); + Assert.AreEqual(1, result); } /// @@ -380,8 +380,8 @@ public void Test_LogSumExp() var result = Tools.LogSumExp(u, v); var result2 = Tools.LogSumExp(values); - Assert.AreEqual(1000.70815, result, 1E-04); - 
Assert.AreEqual(1000.70815, result2, 1E-04); + Assert.AreEqual(1000.70815,result, 1E-04); + Assert.AreEqual(1000.70815,result2, 1E-04); } /// @@ -411,7 +411,7 @@ public void Test_Compress() data[1] = 128; data[2] = 255; var result = Tools.Compress(data); - Assert.IsLessThanOrEqualTo(result.Length,data.Length ); + Assert.IsLessThanOrEqualTo(result.Length, data.Length); } /// @@ -425,7 +425,7 @@ public void Test_Decompress() data[1] = 128; data[2] = 255; var result = Tools.Decompress(data); - Assert.IsGreaterThanOrEqualTo(result.Length,data.Length); + Assert.IsGreaterThanOrEqualTo(result.Length, data.Length); } } diff --git a/docs/data/interpolation.md b/docs/data/interpolation.md new file mode 100644 index 00000000..e052f486 --- /dev/null +++ b/docs/data/interpolation.md @@ -0,0 +1,291 @@ +# Data and Interpolation + +[← Back to Index](../index.md) + +The ***Numerics*** library provides interpolation methods for estimating values between known data points, essential for data analysis, curve fitting, and function approximation. + +## Available Interpolation Methods + +| Method | Class | Use Case | +|--------|-------|----------| +| **Linear** | `Linear` | Fast, simple, C⁰ continuous | +| **Cubic Spline** | `CubicSpline` | Smooth curves, C² continuous | +| **Polynomial** | `Polynomial` | Arbitrary order fitting | +| **Bilinear** | `Bilinear` | 2D interpolation on grids | + +## Linear Interpolation + +Simplest method - connects points with straight lines: + +```cs +using Numerics.Data.Interpolation; + +double[] xData = { 0, 1, 2, 3, 4, 5 }; +double[] yData = { 1, 3, 2, 5, 4, 6 }; + +var linear = new Linear(xData, yData); + +// Interpolate at new points +double y = linear.Interpolate(2.5); +Console.WriteLine($"y(2.5) = {y:F2}"); + +// Multiple points +double[] xNew = { 0.5, 1.5, 2.5, 3.5 }; +double[] yNew = linear.Interpolate(xNew); + +Console.WriteLine("Interpolated values:"); +for (int i = 0; i < xNew.Length; i++) +{ + Console.WriteLine($" y({xNew[i]}) = {yNew[i]:F2}"); +} +``` + +**Properties:** +- Fast: O(log n) per evaluation +- C⁰ continuous (values continuous, derivatives not) +- No overshooting +- Good for piecewise linear trends + +## Cubic Spline Interpolation + +Smooth curves passing through all data points: + +```cs +using Numerics.Data.Interpolation; + +double[] xData = { 0, 1, 2, 3, 4, 5 }; +double[] yData = { 1, 3, 2, 5, 4, 6 }; + +// Natural cubic spline (zero second derivatives at endpoints) +var spline = new CubicSpline(xData, yData); + +// Interpolate +double y = spline.Interpolate(2.5); +Console.WriteLine($"Spline y(2.5) = {y:F2}"); + +// Evaluate derivative +double dy = spline.Differentiate(2.5); +Console.WriteLine($"dy/dx(2.5) = {dy:F2}"); + +// Second derivative +double d2y = spline.Differentiate2(2.5); +Console.WriteLine($"d²y/dx²(2.5) = {d2y:F2}"); +``` + +**Properties:** +- C² continuous (smooth second derivative) +- Unique solution through all points +- Natural boundary conditions +- May overshoot between points +- Excellent for smooth physical phenomena + +### Boundary Conditions + +```cs +// Natural spline (second derivative = 0 at endpoints) +var naturalSpline = new CubicSpline(xData, yData, + boundaryType: CubicSpline.BoundaryType.Natural); + +// Clamped spline (specify first derivatives at endpoints) +double leftDerivative = 0.5; +double rightDerivative = 0.8; +var clampedSpline = new CubicSpline(xData, yData, + boundaryType: CubicSpline.BoundaryType.Clamped, + leftBoundaryValue: leftDerivative, + rightBoundaryValue: rightDerivative); + +// Not-a-knot spline (third 
derivative continuous at second and penultimate points) +var notAKnotSpline = new CubicSpline(xData, yData, + boundaryType: CubicSpline.BoundaryType.NotAKnot); +``` + +## Polynomial Interpolation + +Fits polynomial of specified degree: + +```cs +using Numerics.Data.Interpolation; + +double[] xData = { 0, 1, 2, 3, 4 }; +double[] yData = { 1, 3, 2, 5, 4 }; + +// Fit 3rd degree polynomial +var poly = new Polynomial(xData, yData, degree: 3); + +// Interpolate +double y = poly.Interpolate(2.5); +Console.WriteLine($"Polynomial y(2.5) = {y:F2}"); + +// Get polynomial coefficients +double[] coeffs = poly.Coefficients; +Console.WriteLine("Polynomial: y = " + + string.Join(" + ", coeffs.Select((c, i) => $"{c:F3}x^{i}"))); +``` + +**Warning:** High-degree polynomials (> 5) can exhibit Runge's phenomenon (oscillations). + +**Best practice:** Use splines instead of high-degree polynomials. + +## Bilinear Interpolation + +For 2D gridded data: + +```cs +using Numerics.Data.Interpolation; + +// Grid coordinates +double[] xGrid = { 0, 1, 2 }; +double[] yGrid = { 0, 1, 2 }; + +// Grid values z[i,j] = z(xGrid[i], yGrid[j]) +double[,] zGrid = { + { 1, 2, 3 }, + { 2, 3, 4 }, + { 3, 4, 5 } +}; + +var bilinear = new Bilinear(xGrid, yGrid, zGrid); + +// Interpolate at arbitrary point +double z = bilinear.Interpolate(0.5, 0.5); +Console.WriteLine($"z(0.5, 0.5) = {z:F2}"); + +// Multiple points +double[] xNew = { 0.5, 1.5 }; +double[] yNew = { 0.5, 1.5 }; +double[] zNew = bilinear.Interpolate(xNew, yNew); + +for (int i = 0; i < xNew.Length; i++) +{ + Console.WriteLine($"z({xNew[i]}, {yNew[i]}) = {zNew[i]:F2}"); +} +``` + +**Applications:** +- Image resizing +- Terrain elevation maps +- Temperature/pressure fields +- Geographic data + +## Practical Examples + +### Example 1: Stage-Discharge Rating Curve + +```cs +// Measured stage-discharge pairs +double[] stage = { 5.0, 5.5, 6.0, 6.5, 7.0, 7.5, 8.0 }; +double[] discharge = { 1000, 1500, 2200, 3100, 4200, 5500, 7000 }; + +// Create spline interpolator +var ratingCurve = new CubicSpline(stage, discharge); + +// Interpolate discharge for observed stage +double observedStage = 6.3; +double estimatedQ = ratingCurve.Interpolate(observedStage); + +Console.WriteLine($"Rating Curve Interpolation:"); +Console.WriteLine($"Stage: {observedStage:F1} ft → Discharge: {estimatedQ:F0} cfs"); + +// Extrapolation warning +if (observedStage < stage.Min() || observedStage > stage.Max()) +{ + Console.WriteLine("Warning: Extrapolating beyond data range"); +} +``` + +### Example 2: Time Series Gap Filling + +```cs +// Time series with gaps +var dates = new[] { 1.0, 2.0, 3.0, /* gap */ 6.0, 7.0, 8.0 }; +var values = new[] { 10.5, 11.2, 10.8, /* gap */ 12.1, 12.5, 11.9 }; + +var interpolator = new CubicSpline(dates, values); + +// Fill gaps +var missingDates = new[] { 4.0, 5.0 }; +foreach (var t in missingDates) +{ + double filled = interpolator.Interpolate(t); + Console.WriteLine($"Day {t}: {filled:F2} (interpolated)"); +} +``` + +### Example 3: Comparing Methods + +```cs +double[] x = { 0, 1, 2, 3, 4 }; +double[] y = { 0, 1, 0, 1, 0 }; // Oscillating data + +var linear = new Linear(x, y); +var spline = new CubicSpline(x, y); + +double testPoint = 1.5; +double yLinear = linear.Interpolate(testPoint); +double ySpline = spline.Interpolate(testPoint); + +Console.WriteLine($"At x = {testPoint}:"); +Console.WriteLine($" Linear: {yLinear:F3}"); +Console.WriteLine($" Spline: {ySpline:F3}"); +Console.WriteLine("\nLinear connects with straight line"); +Console.WriteLine("Spline creates smooth 
curve"); +``` + +### Example 4: 2D Surface Interpolation + +```cs +// Elevation data at grid points +double[] eastings = { 0, 100, 200 }; // meters +double[] northings = { 0, 100, 200 }; // meters +double[,] elevations = { + { 100, 105, 110 }, + { 102, 108, 115 }, + { 104, 112, 120 } +}; + +var terrain = new Bilinear(eastings, northings, elevations); + +// Interpolate elevation at arbitrary location +double x = 150, y = 150; +double z = terrain.Interpolate(x, y); + +Console.WriteLine($"Terrain elevation at ({x}, {y}): {z:F1} m"); + +// Create contour at specific elevation +double contourElevation = 110; +Console.WriteLine($"\nFinding points at {contourElevation} m elevation..."); +// Would need to scan grid and find where z = contourElevation +``` + +## Best Practices + +1. **Data spacing**: Interpolation works best with reasonably uniform spacing +2. **Extrapolation**: Avoid extrapolating beyond data range (highly unreliable) +3. **Smoothness**: Use splines for smooth physical phenomena, linear for piecewise trends +4. **Outliers**: Check for data errors before interpolating +5. **Monotonicity**: If data should be monotonic, consider specialized methods +6. **Periodic data**: Consider Fourier or trigonometric interpolation + +## Choosing an Interpolation Method + +| Data Characteristics | Recommended Method | +|---------------------|-------------------| +| Few points, simple trend | Linear | +| Smooth physical process | Cubic Spline | +| Need derivatives | Cubic Spline | +| Noisy data | Linear or smoothing spline | +| 2D regular grid | Bilinear | +| Piecewise constant | Nearest neighbor | +| Exact polynomial | Polynomial (low degree) | + +## Common Pitfalls + +1. **Runge's phenomenon**: High-degree polynomials oscillate wildly +2. **Extrapolation**: Results outside data range are unreliable +3. **Unequal spacing**: Some methods assume uniform spacing +4. **Monotonicity**: Splines may violate monotonicity of data +5. **Edge effects**: Interpolation near boundaries less accurate + +--- + +[← Back to Index](../index.md) diff --git a/docs/data/time-series.md b/docs/data/time-series.md new file mode 100644 index 00000000..11f1055c --- /dev/null +++ b/docs/data/time-series.md @@ -0,0 +1,498 @@ +# Time Series + +[← Previous: Interpolation](interpolation.md) | [Back to Index](../index.md) | [Next: Random Generation →](../sampling/random-generation.md) + +The ***Numerics*** library provides a comprehensive `TimeSeries` class for working with time-indexed data. This class supports regular and irregular time intervals, statistical operations, transformations, and analysis methods essential for hydrological and environmental data. 
+ +## Creating Time Series + +### Empty Time Series + +```cs +using Numerics.Data; + +// Create empty time series +var ts = new TimeSeries(); + +// Create with time interval +var dailyData = new TimeSeries(TimeInterval.Daily); +var monthlyData = new TimeSeries(TimeInterval.Monthly); +``` + +### Time Series with Date Range + +```cs +// Create time series with start and end dates +DateTime start = new DateTime(2020, 1, 1); +DateTime end = new DateTime(2020, 12, 31); + +// With NaN values (placeholder) +var ts1 = new TimeSeries(TimeInterval.Daily, start, end); + +// With fixed value +var ts2 = new TimeSeries(TimeInterval.Daily, start, end, fixedValue: 0.0); + +Console.WriteLine($"Created time series with {ts1.Count} daily values"); +``` + +### Time Series from Data + +```cs +// Create from array of values +double[] dailyFlow = { 125.0, 130.0, 135.0, 132.0, 138.0 }; +DateTime start = new DateTime(2024, 1, 1); + +var ts = new TimeSeries(TimeInterval.Daily, start, dailyFlow); + +Console.WriteLine("Daily Flow Data:"); +for (int i = 0; i < ts.Count; i++) +{ + Console.WriteLine($"{ts[i].Index:yyyy-MM-dd}: {ts[i].Value:F1} cfs"); +} +``` + +## Time Intervals + +Supported time intervals: + +```cs +public enum TimeInterval +{ + Irregular, // No fixed interval + OneMinute, + FiveMinute, + TenMinute, + FifteenMinute, + ThirtyMinute, + OneHour, + SixHour, + TwelveHour, + Daily, + Weekly, + Monthly, + Annual +} + +// Example usage +var hourlyData = new TimeSeries(TimeInterval.OneHour); +var yearlyData = new TimeSeries(TimeInterval.Annual); +``` + +## Accessing Data + +### Indexing + +```cs +var ts = new TimeSeries(TimeInterval.Daily, new DateTime(2024, 1, 1), + new[] { 10.0, 15.0, 20.0, 25.0, 30.0 }); + +// Access by index +double value = ts[2].Value; // 20.0 +DateTime date = ts[2].Index; // 2024-01-03 + +// Access by date +DateTime queryDate = new DateTime(2024, 1, 3); +var ordinate = ts.GetByIndex(queryDate); +Console.WriteLine($"Flow on {queryDate:yyyy-MM-dd}: {ordinate.Value:F1}"); + +// Properties +int count = ts.Count; +DateTime firstDate = ts.FirstIndex; +DateTime lastDate = ts.LastIndex; +double[] values = ts.Values.ToArray(); +``` + +### Missing Values + +```cs +// Check for missing values +bool hasMissing = ts.HasMissingValues; +int numMissing = ts.NumberOfMissingValues(); + +Console.WriteLine($"Missing values: {numMissing} out of {ts.Count}"); + +// Identify missing +for (int i = 0; i < ts.Count; i++) +{ + if (double.IsNaN(ts[i].Value)) + { + Console.WriteLine($"Missing value at {ts[i].Index:yyyy-MM-dd}"); + } +} +``` + +## Mathematical Operations + +### Basic Arithmetic + +```cs +var ts = new TimeSeries(TimeInterval.Daily, new DateTime(2024, 1, 1), + new[] { 10.0, 15.0, 20.0, 25.0, 30.0 }); + +// Add constant to all values +ts.Add(5.0); // Now: 15, 20, 25, 30, 35 + +// Subtract constant +ts.Subtract(3.0); // Now: 12, 17, 22, 27, 32 + +// Multiply by constant +ts.Multiply(2.0); // Now: 24, 34, 44, 54, 64 + +// Divide by constant +ts.Divide(2.0); // Back to: 12, 17, 22, 27, 32 + +Console.WriteLine("Transformed values:"); +foreach (var ord in ts) +{ + Console.WriteLine($"{ord.Index:yyyy-MM-dd}: {ord.Value:F1}"); +} +``` + +### Operations on Subsets + +```cs +// Apply operations to specific indexes +var indexes = new List { 0, 2, 4 }; // First, third, fifth values + +ts.Add(10.0, indexes); // Add 10 to selected values only +ts.Multiply(1.5, indexes); // Multiply selected values by 1.5 +``` + +### Transformations + +```cs +var ts = new TimeSeries(TimeInterval.Daily, new DateTime(2024, 1, 1), + 
new[] { 10.0, 15.0, 20.0, 25.0, 30.0 }); + +// Absolute value +ts.AbsoluteValue(); + +// Exponentiation +ts.Exponentiate(2.0); // Square all values + +// Logarithm +ts.LogTransform(baseValue: 10); // Log₁₀ transform + +// Standardize (z-score) +ts.Standardize(); // (x - μ) / σ + +// Inverse +ts.Inverse(); // 1 / x +``` + +## Time Series Analysis + +### Cumulative Sum + +```cs +double[] dailyRainfall = { 0.5, 1.2, 0.8, 0.0, 2.1, 1.5 }; +var rainfall = new TimeSeries(TimeInterval.Daily, new DateTime(2024, 1, 1), dailyRainfall); + +// Compute cumulative rainfall +var cumulative = rainfall.CumulativeSum(); + +Console.WriteLine("Day | Daily | Cumulative"); +for (int i = 0; i < rainfall.Count; i++) +{ + Console.WriteLine($"{i + 1,3} | {rainfall[i].Value,5:F1} | {cumulative[i].Value,10:F1}"); +} +``` + +### Differencing + +```cs +// First difference (change from previous) +var diff1 = ts.Difference(lag: 1, differences: 1); + +// Second difference +var diff2 = ts.Difference(lag: 1, differences: 2); + +// Seasonal difference (e.g., monthly lag for annual pattern) +var seasonalDiff = ts.Difference(lag: 12, differences: 1); + +Console.WriteLine("Original | First Diff | Second Diff"); +for (int i = 0; i < Math.Min(5, ts.Count); i++) +{ + double orig = i < ts.Count ? ts[i].Value : double.NaN; + double d1 = i < diff1.Count ? diff1[i].Value : double.NaN; + double d2 = i < diff2.Count ? diff2[i].Value : double.NaN; + + Console.WriteLine($"{orig,8:F2} | {d1,10:F2} | {d2,11:F2}"); +} +``` + +## Statistical Methods + +### Descriptive Statistics + +```cs +using Numerics.Data.Statistics; + +var ts = new TimeSeries(TimeInterval.Daily, new DateTime(2024, 1, 1), + new[] { 125.0, 130.0, 135.0, 132.0, 138.0, 145.0 }); + +// Compute statistics +double mean = Statistics.Mean(ts.Values.ToArray()); +double std = Statistics.StandardDeviation(ts.Values.ToArray()); +double min = ts.Values.Min(); +double max = ts.Values.Max(); + +Console.WriteLine($"Mean: {mean:F1}"); +Console.WriteLine($"Std Dev: {std:F1}"); +Console.WriteLine($"Range: [{min:F1}, {max:F1}]"); + +// Percentiles +double[] values = ts.Values.ToArray(); +double p25 = Statistics.Percentile(values, 25); +double p50 = Statistics.Percentile(values, 50); +double p75 = Statistics.Percentile(values, 75); + +Console.WriteLine($"25th percentile: {p25:F1}"); +Console.WriteLine($"Median: {p50:F1}"); +Console.WriteLine($"75th percentile: {p75:F1}"); +``` + +### Moving Average + +```cs +// Compute moving average +int window = 3; +var movingAvg = new TimeSeries(ts.TimeInterval); + +for (int i = window - 1; i < ts.Count; i++) +{ + double sum = 0; + for (int j = 0; j < window; j++) + { + sum += ts[i - j].Value; + } + double avg = sum / window; + movingAvg.Add(new SeriesOrdinate(ts[i].Index, avg)); +} + +Console.WriteLine("Original | 3-day Moving Average"); +for (int i = 0; i < ts.Count; i++) +{ + string ma = i < movingAvg.Count ? 
$"{movingAvg[i].Value:F1}" : "N/A"; + Console.WriteLine($"{ts[i].Value,8:F1} | {ma,22}"); +} +``` + +## Sorting and Filtering + +### Sorting + +```cs +// Sort by time (ascending or descending) +ts.SortByTime(ListSortDirection.Ascending); + +// Sort by value +ts.SortByValue(ListSortDirection.Descending); // Largest first +``` + +### Filtering by Date Range + +```cs +DateTime filterStart = new DateTime(2024, 6, 1); +DateTime filterEnd = new DateTime(2024, 8, 31); + +var filtered = new TimeSeries(ts.TimeInterval); + +foreach (var ordinate in ts) +{ + if (ordinate.Index >= filterStart && ordinate.Index <= filterEnd) + { + filtered.Add(ordinate); + } +} + +Console.WriteLine($"Filtered to {filtered.Count} values in date range"); +``` + +## Practical Examples + +### Example 1: Annual Peak Flow Analysis + +```cs +// Monthly flow data +double[] monthlyFlow = { 125, 135, 180, 220, 250, 280, 260, 230, 190, 150, 130, 120 }; +var flowData = new TimeSeries(TimeInterval.Monthly, new DateTime(2024, 1, 1), monthlyFlow); + +Console.WriteLine("Monthly Streamflow Analysis"); +Console.WriteLine("=" + new string('=', 50)); + +// Find annual peak +double peakFlow = flowData.Values.Max(); +int peakMonth = Array.IndexOf(monthlyFlow, peakFlow) + 1; + +Console.WriteLine($"Peak flow: {peakFlow:F0} cfs"); +Console.WriteLine($"Peak month: Month {peakMonth}"); + +// Compute seasonal statistics +var spring = monthlyFlow.Skip(2).Take(3).ToArray(); // MAM +var summer = monthlyFlow.Skip(5).Take(3).ToArray(); // JJA + +Console.WriteLine($"\nSeasonal Means:"); +Console.WriteLine($" Spring (MAM): {spring.Average():F0} cfs"); +Console.WriteLine($" Summer (JJA): {summer.Average():F0} cfs"); +``` + +### Example 2: Filling Missing Values + +```cs +// Time series with gaps +var dates = new[] { + new DateTime(2024, 1, 1), + new DateTime(2024, 1, 2), + new DateTime(2024, 1, 3), + new DateTime(2024, 1, 6), + new DateTime(2024, 1, 7) +}; +var values = new[] { 10.0, 12.0, 11.0, 15.0, 14.0 }; + +var ts = new TimeSeries(TimeInterval.Irregular); +for (int i = 0; i < dates.Length; i++) +{ + ts.Add(new SeriesOrdinate(dates[i], values[i])); +} + +Console.WriteLine("Original data has gaps:"); +for (int i = 0; i < ts.Count - 1; i++) +{ + TimeSpan gap = ts[i + 1].Index - ts[i].Index; + if (gap.TotalDays > 1) + { + Console.WriteLine($" Gap of {gap.TotalDays:F0} days after {ts[i].Index:yyyy-MM-dd}"); + } +} + +// Fill gaps with linear interpolation +var filled = new TimeSeries(TimeInterval.Daily, dates.Min(), dates.Max()); +foreach (var ord in filled) +{ + // Find surrounding values + var before = ts.Where(o => o.Index <= ord.Index).OrderBy(o => o.Index).LastOrDefault(); + var after = ts.Where(o => o.Index >= ord.Index).OrderBy(o => o.Index).FirstOrDefault(); + + if (before != null && after != null && before.Index != after.Index) + { + // Linear interpolation + double t = (ord.Index - before.Index).TotalDays / (after.Index - before.Index).TotalDays; + ord.Value = before.Value + t * (after.Value - before.Value); + } + else if (ts.Any(o => o.Index == ord.Index)) + { + ord.Value = ts.First(o => o.Index == ord.Index).Value; + } +} + +Console.WriteLine($"\nFilled series: {filled.Count} continuous daily values"); +``` + +### Example 3: Trend Detection + +```cs +double[] annualPeaks = { 1200, 1250, 1180, 1300, 1320, 1280, 1350, 1400, 1380, 1450 }; +var years = Enumerable.Range(2015, 10).Select(y => new DateTime(y, 1, 1)).ToArray(); + +var peakSeries = new TimeSeries(TimeInterval.Annual); +for (int i = 0; i < years.Length; i++) +{ + peakSeries.Add(new 
SeriesOrdinate(years[i], annualPeaks[i])); +} + +Console.WriteLine("Annual Peak Flow Trend Analysis"); +Console.WriteLine("=" + new string('=', 50)); + +// Linear regression for trend +double[] x = Enumerable.Range(0, peakSeries.Count).Select(i => (double)i).ToArray(); +double[] y = peakSeries.Values.ToArray(); + +double xMean = x.Average(); +double yMean = y.Average(); + +double slope = x.Zip(y, (xi, yi) => (xi - xMean) * (yi - yMean)).Sum() / + x.Sum(xi => Math.Pow(xi - xMean, 2)); +double intercept = yMean - slope * xMean; + +Console.WriteLine($"Trend: {slope:F1} cfs/year"); +Console.WriteLine($"Direction: {(slope > 0 ? "Increasing" : "Decreasing")}"); + +// Mann-Kendall test for significance +double mkStat = HypothesisTests.MannKendallTest(y); +Console.WriteLine($"Mann-Kendall statistic: {mkStat:F2}"); + +if (Math.Abs(mkStat) > 1.96) + Console.WriteLine("Trend is statistically significant (p < 0.05)"); +else + Console.WriteLine("Trend is not statistically significant"); +``` + +### Example 4: Seasonal Analysis + +```cs +// Multi-year daily data +int years = 3; +int daysPerYear = 365; +var random = new Random(123); + +var dailyTemp = new TimeSeries(TimeInterval.Daily, new DateTime(2022, 1, 1)); + +// Generate seasonal temperature pattern +for (int day = 0; day < years * daysPerYear; day++) +{ + // Sinusoidal pattern + noise + double seasonalTemp = 15 + 10 * Math.Sin(2 * Math.PI * day / 365.0); + double noise = (random.NextDouble() - 0.5) * 4; + dailyTemp.Add(new SeriesOrdinate( + new DateTime(2022, 1, 1).AddDays(day), + seasonalTemp + noise + )); +} + +Console.WriteLine("Seasonal Temperature Analysis"); +Console.WriteLine("=" + new string('=', 50)); + +// Compute monthly averages +var monthlyAvg = new Dictionary>(); +for (int m = 1; m <= 12; m++) + monthlyAvg[m] = new List(); + +foreach (var ord in dailyTemp) +{ + monthlyAvg[ord.Index.Month].Add(ord.Value); +} + +Console.WriteLine("\nMonth | Avg Temp (°C)"); +Console.WriteLine("------|-------------"); +for (int m = 1; m <= 12; m++) +{ + double avg = monthlyAvg[m].Average(); + Console.WriteLine($"{m,5} | {avg,13:F1}"); +} +``` + +## Best Practices + +1. **Choose appropriate time interval** - Use regular intervals when possible +2. **Handle missing values** - Check for and appropriately handle NaN values +3. **Validate dates** - Ensure dates are in correct order +4. **Consider time zones** - Be aware of daylight saving time issues +5. **Document transformations** - Keep track of applied operations +6. 
**Use appropriate statistics** - Account for autocorrelation in time series data + +## Common Operations Summary + +| Operation | Method | Use Case | +|-----------|--------|----------| +| Add constant | `Add(value)` | Baseline adjustment | +| Transform | `LogTransform()` | Variance stabilization | +| Standardize | `Standardize()` | Compare different scales | +| Cumulative | `CumulativeSum()` | Total accumulation | +| Difference | `Difference(lag)` | Remove trends | +| Moving average | Custom loop | Smoothing | +| Sort | `SortByTime()` | Ensure chronological order | + +--- + +[← Previous: Interpolation](interpolation.md) | [Back to Index](../index.md) | [Next: Random Generation →](../sampling/random-generation.md) diff --git a/docs/distributions/copulas.md b/docs/distributions/copulas.md new file mode 100644 index 00000000..6b464d7c --- /dev/null +++ b/docs/distributions/copulas.md @@ -0,0 +1,246 @@ +# Copulas + +[← Previous: Uncertainty Analysis](uncertainty-analysis.md) | [Back to Index](../index.md) + +Copulas separate the dependence structure of multivariate distributions from their marginal distributions. The ***Numerics*** library provides copula functions for modeling dependence between random variables in risk assessment and multivariate analysis [[1]](#1). + +## Overview + +A copula is a multivariate distribution with uniform marginals on [0,1]. For any multivariate distribution with marginals F₁, F₂, ..., Fₙ, there exists a copula C such that: + +``` +F(x₁, x₂, ..., xₙ) = C(F₁(x₁), F₂(x₂), ..., Fₙ(xₙ)) +``` + +This allows us to: +1. Model marginal distributions independently +2. Model dependence separately via copula +3. Combine them to form joint distribution + +## Available Copulas + +The library provides common copula families: + +### Gaussian Copula + +Models linear correlation with normal dependence structure: + +```cs +using Numerics.Distributions.Copulas; + +// Correlation matrix for 2D Gaussian copula +double rho = 0.7; // Correlation coefficient +var corrMatrix = new double[,] { { 1.0, rho }, { rho, 1.0 } }; + +var gaussianCopula = new GaussianCopula(corrMatrix); + +// Evaluate copula density +double u1 = 0.3, u2 = 0.7; +double density = gaussianCopula.PDF(new double[] { u1, u2 }); + +Console.WriteLine($"Gaussian copula density: {density:F4}"); +``` + +### Student's t Copula + +Similar to Gaussian but with tail dependence: + +```cs +// t-copula with 5 degrees of freedom +int nu = 5; +var tCopula = new StudentTCopula(corrMatrix, nu); + +double density = tCopula.PDF(new double[] { u1, u2 }); + +Console.WriteLine($"t-copula density: {density:F4}"); +Console.WriteLine("t-copula has stronger tail dependence than Gaussian"); +``` + +### Archimedean Copulas + +Family of copulas with specific dependence structures: + +```cs +// Clayton copula (lower tail dependence) +double theta = 2.0; // Dependence parameter +var claytonCopula = new ClaytonCopula(theta); + +// Gumbel copula (upper tail dependence) +var gumbelCopula = new GumbelCopula(theta); + +// Frank copula (no tail dependence) +var frankCopula = new FrankCopula(theta); +``` + +## Practical Example: Bivariate Distribution + +Construct a bivariate distribution with arbitrary marginals and specified dependence: + +```cs +using Numerics.Distributions; +using Numerics.Distributions.Copulas; + +// Step 1: Define marginal distributions +var margin1 = new LogNormal(4.0, 0.5); // Streamflow +var margin2 = new Gumbel(100, 20); // Peak stage + +// Step 2: Define dependence via copula +double rho = 0.8; // Strong positive correlation 
+var corrMatrix = new double[,] { { 1.0, rho }, { rho, 1.0 } }; +var copula = new GaussianCopula(corrMatrix); + +// Step 3: Sample from joint distribution +int n = 1000; +var samples = copula.Sample(n); + +// Transform uniforms to actual distributions +double[] flow = new double[n]; +double[] stage = new double[n]; + +for (int i = 0; i < n; i++) +{ + flow[i] = margin1.InverseCDF(samples[i, 0]); + stage[i] = margin2.InverseCDF(samples[i, 1]); +} + +Console.WriteLine($"Generated {n} correlated samples"); +Console.WriteLine($"Flow range: [{flow.Min():F1}, {flow.Max():F1}]"); +Console.WriteLine($"Stage range: [{stage.Min():F1}, {stage.Max():F1}]"); + +// Empirical correlation +var correlation = Correlation.Pearson(flow, stage); +Console.WriteLine($"Sample correlation: {correlation:F3}"); +``` + +## Applications in Risk Assessment + +### Joint Probability Analysis + +For dam safety, estimate probability of joint high flow and high stage: + +```cs +// Critical thresholds +double flowThreshold = 10000; // cfs +double stageThreshold = 150; // feet + +// Compute joint exceedance probability +double u1 = margin1.CDF(flowThreshold); +double u2 = margin2.CDF(stageThreshold); + +// P(Flow > threshold AND Stage > threshold) +double jointExceedance = copula.Survival(new double[] { u1, u2 }); + +Console.WriteLine($"Joint exceedance probability: {jointExceedance:E4}"); +Console.WriteLine($"Return period: {1.0 / jointExceedance:F1} years"); +``` + +### Conditional Distributions + +Given flow, what is the conditional distribution of stage? + +```cs +// Observed flow +double observedFlow = 12000; +double uFlow = margin1.CDF(observedFlow); + +// Conditional CDF for stage | flow +Func conditionalCDF = (stage) => +{ + double uStage = margin2.CDF(stage); + return copula.ConditionalCDF(uFlow, uStage, conditionIndex: 0); +}; + +// Find conditional quantiles +double[] condProbs = { 0.5, 0.9, 0.95 }; +Console.WriteLine($"Given flow = {observedFlow:F0} cfs:"); +foreach (var p in condProbs) +{ + // This would require numerical solution + Console.WriteLine($" {p:P0} quantile of stage"); +} +``` + +## Tail Dependence + +Different copulas have different tail dependence properties: + +```cs +// Gaussian: No tail dependence (λ_L = λ_U = 0) +// t-copula: Symmetric tail dependence +// Clayton: Lower tail dependence only +// Gumbel: Upper tail dependence only +// Frank: No tail dependence + +Console.WriteLine("Tail Dependence Properties:"); +Console.WriteLine(" Gaussian: No tail dependence"); +Console.WriteLine(" Student-t: Symmetric tail dependence"); +Console.WriteLine(" Clayton: Lower tail dependence (joint lows)"); +Console.WriteLine(" Gumbel: Upper tail dependence (joint highs)"); +Console.WriteLine(" Frank: No tail dependence"); + +// For flood analysis: Gumbel copula captures joint extremes +// For drought analysis: Clayton copula captures joint lows +``` + +## Fitting Copulas to Data + +```cs +double[] x = { /* observed data series 1 */ }; +double[] y = { /* observed data series 2 */ }; + +// Step 1: Transform to uniform margins (pseudo-observations) +var u = x.Select(xi => (double)Array.FindIndex(x.OrderBy(v => v).ToArray(), v => v == xi) / x.Length); +var v = y.Select(yi => (double)Array.FindIndex(y.OrderBy(w => w).ToArray(), w => w == yi) / y.Length); + +// Step 2: Fit copula to pseudo-observations +// Use maximum likelihood or rank correlation methods + +// Step 3: Select best copula using AIC/BIC +var candidates = new[] { "Gaussian", "t", "Clayton", "Gumbel", "Frank" }; + +Console.WriteLine("Fit each candidate 
copula and select best by AIC"); +``` + +## Vine Copulas + +For higher dimensions, vine copulas decompose multivariate dependence: + +```cs +// C-vine, D-vine, and R-vine structures available +// Allow flexible modeling of high-dimensional dependence +// Construct from pairwise bivariate copulas + +Console.WriteLine("Vine copulas enable flexible high-dimensional modeling"); +Console.WriteLine("Use for systems with > 2 correlated variables"); +``` + +## Best Practices + +1. **Check for dependence**: Use scatter plots and correlation tests before applying copulas +2. **Choose copula family**: Match tail behavior to application + - Joint extremes → Gumbel + - Joint lows → Clayton + - Moderate correlation → Gaussian + - Heavy tails → Student-t +3. **Validate fit**: Check if copula captures observed dependence structure +4. **Sample size**: Need sufficient data (n > 50-100) for reliable fitting +5. **Non-stationarity**: Check if dependence structure is time-varying + +## Limitations + +- Assumes marginals are correctly specified +- May not capture complex nonlinear dependencies +- Parameter estimation challenging with limited data +- Tail dependence difficult to estimate from finite samples + +--- + +## References + +[1] Nelsen, R. B. (2006). *An Introduction to Copulas* (2nd ed.). Springer. + +[2] Joe, H. (2014). *Dependence Modeling with Copulas*. CRC Press. + +--- + +[← Previous: Uncertainty Analysis](uncertainty-analysis.md) | [Back to Index](../index.md) diff --git a/docs/distributions/multivariate.md b/docs/distributions/multivariate.md new file mode 100644 index 00000000..650b6d2b --- /dev/null +++ b/docs/distributions/multivariate.md @@ -0,0 +1,495 @@ +# Multivariate Distributions + +[← Previous: Copulas](copulas.md) | [Back to Index](../index.md) + +The ***Numerics*** library provides the **Multivariate Normal** distribution for modeling correlated random variables. This distribution is fundamental in multivariate statistics, risk assessment, and uncertainty quantification. + +## Multivariate Normal Distribution + +The multivariate normal (Gaussian) distribution generalizes the univariate normal to multiple dimensions with a specified covariance structure [[1]](#1). 
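+One property worth keeping in mind before diving into the API (and an assumption-free check on the Monte Carlo portfolio example further down): any fixed linear combination w'X of a multivariate normal vector X is itself univariate normal, with mean w'μ and variance w'Σw. The minimal sketch below computes these two moments directly from an illustrative mean vector and covariance matrix (the same numbers used in the examples that follow); it does not call the library and the values are purely illustrative.
+
+```cs
+using System;
+
+// Illustrative mean vector and covariance matrix (same values as the examples below)
+double[] mu = { 100, 50, 75 };
+double[,] sigma = {
+    { 225,  75,  50 },
+    {  75, 100,  25 },
+    {  50,  25, 144 }
+};
+
+// Weights of the linear combination w'X
+double[] w = { 0.5, 0.3, 0.2 };
+
+// Mean of w'X = w'μ
+double mean = 0.0;
+for (int i = 0; i < w.Length; i++)
+    mean += w[i] * mu[i];
+
+// Variance of w'X = w'Σw
+double variance = 0.0;
+for (int i = 0; i < w.Length; i++)
+    for (int j = 0; j < w.Length; j++)
+        variance += w[i] * sigma[i, j] * w[j];
+
+Console.WriteLine($"w'X ~ N({mean:F2}, sd = {Math.Sqrt(variance):F2})");
+```
+
+The analytical moments computed this way can be compared against the sample mean and standard deviation of simulated portfolio returns in Example 1 below.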
+ +### Creating Multivariate Normal Distributions + +```cs +using Numerics.Distributions; +using Numerics.Mathematics.LinearAlgebra; + +// Example 1: Standard multivariate normal (dimension 3) +// Mean = [0, 0, 0], Covariance = Identity +var mvn1 = new MultivariateNormal(dimension: 3); + +// Example 2: Specified mean, identity covariance +double[] mean = { 100, 50, 75 }; +var mvn2 = new MultivariateNormal(mean); + +// Example 3: Full specification with covariance matrix +double[] mu = { 100, 50, 75 }; +double[,] sigma = { + { 225, 75, 50 }, // Var(X₁)=225, Cov(X₁,X₂)=75, Cov(X₁,X₃)=50 + { 75, 100, 25 }, // Var(X₂)=100, Cov(X₂,X₃)=25 + { 50, 25, 144 } // Var(X₃)=144 +}; + +var mvn3 = new MultivariateNormal(mu, sigma); + +Console.WriteLine($"Multivariate Normal Distribution:"); +Console.WriteLine($" Dimension: {mvn3.Dimension}"); +Console.WriteLine($" Mean vector: [{string.Join(", ", mvn3.Mean)}]"); +``` + +### Properties + +```cs +var mvn = new MultivariateNormal(mu, sigma); + +// Basic properties +int dim = mvn.Dimension; // Number of variables +double[] mean = mvn.Mean; // Mean vector μ +double[,] cov = mvn.Covariance; // Covariance matrix Σ + +// Individual variances (diagonal of covariance) +double var1 = cov[0, 0]; // Var(X₁) = 225 +double var2 = cov[1, 1]; // Var(X₂) = 100 +double var3 = cov[2, 2]; // Var(X₃) = 144 + +// Covariances (off-diagonal) +double cov12 = cov[0, 1]; // Cov(X₁, X₂) = 75 +double cov13 = cov[0, 2]; // Cov(X₁, X₃) = 50 +double cov23 = cov[1, 2]; // Cov(X₂, X₃) = 25 + +Console.WriteLine($"Standard deviations: [{Math.Sqrt(var1):F1}, {Math.Sqrt(var2):F1}, {Math.Sqrt(var3):F1}]"); +``` + +### Probability Density Function (PDF) + +```cs +double[] x = { 105, 55, 80 }; + +// Compute PDF at point x +double pdf = mvn.PDF(x); +double logPdf = mvn.LogPDF(x); + +Console.WriteLine($"PDF at x = [{string.Join(", ", x)}]:"); +Console.WriteLine($" f(x) = {pdf:E4}"); +Console.WriteLine($" log f(x) = {logPdf:F4}"); +``` + +**Formula:** +``` +f(x) = (2π)^(-k/2) |Σ|^(-1/2) exp[-½(x-μ)ᵀΣ⁻¹(x-μ)] + +Where: +- k = dimension +- μ = mean vector +- Σ = covariance matrix +- |Σ| = determinant of Σ +``` + +### Cumulative Distribution Function (CDF) + +```cs +double[] x = { 105, 55, 80 }; + +// Compute CDF at point x +// P(X₁ ≤ 105, X₂ ≤ 55, X₃ ≤ 80) +double cdf = mvn.CDF(x); + +Console.WriteLine($"CDF at x = [{string.Join(", ", x)}]:"); +Console.WriteLine($" P(X ≤ x) = {cdf:F6}"); + +// For dimensions > 2, uses Monte Carlo integration +// Can control accuracy with properties: +mvn.MaxEvaluations = 100000; // Max function evaluations +mvn.AbsoluteError = 1e-4; // Absolute error tolerance +mvn.RelativeError = 1e-4; // Relative error tolerance +``` + +### Bivariate CDF (Special Case) + +For two-dimensional case, exact calculation available: + +```cs +// Bivariate standard normal with correlation ρ +double z1 = 1.0; +double z2 = 1.5; +double rho = 0.6; + +double bivCDF = MultivariateNormal.BivariateCDF(z1, z2, rho); + +Console.WriteLine($"Bivariate CDF:"); +Console.WriteLine($" P(Z₁ ≤ {z1}, Z₂ ≤ {z2} | ρ={rho}) = {bivCDF:F6}"); + +// Useful for correlation analysis and joint probabilities +``` + +### Inverse CDF (Quantile Function) + +```cs +// Generate quantiles for each marginal +double[] probabilities = { 0.05, 0.50, 0.95 }; + +double[] quantiles = mvn.InverseCDF(probabilities); + +Console.WriteLine("Marginal quantiles:"); +for (int i = 0; i < mvn.Dimension; i++) +{ + double q05 = mvn.Mean[i] + Math.Sqrt(mvn.Covariance[i, i]) * + new Normal(0, 1).InverseCDF(0.05); + double q50 = mvn.Mean[i]; + 
double q95 = mvn.Mean[i] + Math.Sqrt(mvn.Covariance[i, i]) * + new Normal(0, 1).InverseCDF(0.95); + + Console.WriteLine($" X{i + 1}: 5%={q05:F2}, 50%={q50:F2}, 95%={q95:F2}"); +} +``` + +### Random Sample Generation + +```cs +// Generate random samples +int n = 1000; +int seed = 12345; + +double[,] samples = mvn.GenerateRandomValues(n, seed); + +Console.WriteLine($"Generated {n} samples from MVN distribution"); + +// Compute sample statistics +double[] sampleMeans = new double[mvn.Dimension]; +double[,] sampleCov = new double[mvn.Dimension, mvn.Dimension]; + +// Calculate sample means +for (int j = 0; j < mvn.Dimension; j++) +{ + for (int i = 0; i < n; i++) + { + sampleMeans[j] += samples[i, j]; + } + sampleMeans[j] /= n; +} + +Console.WriteLine($"Sample means: [{string.Join(", ", sampleMeans.Select(m => m.ToString("F2")))}]"); +Console.WriteLine($"True means: [{string.Join(", ", mvn.Mean.Select(m => m.ToString("F2")))}]"); +``` + +## Correlation Structure + +### Creating Correlation Matrix + +```cs +// Convert covariance to correlation +int dim = 3; +double[,] cov = mvn.Covariance; +double[,] corr = new double[dim, dim]; + +for (int i = 0; i < dim; i++) +{ + for (int j = 0; j < dim; j++) + { + corr[i, j] = cov[i, j] / Math.Sqrt(cov[i, i] * cov[j, j]); + } +} + +Console.WriteLine("Correlation matrix:"); +Console.WriteLine(" X₁ X₂ X₃"); +for (int i = 0; i < dim; i++) +{ + Console.Write($"X{i + 1}: "); + for (int j = 0; j < dim; j++) + { + Console.Write($"{corr[i, j],6:F3}"); + } + Console.WriteLine(); +} +``` + +### Building Covariance from Correlation + +```cs +// Given correlation and standard deviations +double[] stdDevs = { 15, 10, 12 }; +double[,] corrMatrix = { + { 1.0, 0.5, 0.3 }, + { 0.5, 1.0, 0.2 }, + { 0.3, 0.2, 1.0 } +}; + +// Convert to covariance: Σᵢⱼ = ρᵢⱼ·σᵢ·σⱼ +double[,] covMatrix = new double[3, 3]; +for (int i = 0; i < 3; i++) +{ + for (int j = 0; j < 3; j++) + { + covMatrix[i, j] = corrMatrix[i, j] * stdDevs[i] * stdDevs[j]; + } +} + +var mvn = new MultivariateNormal(new double[] { 100, 50, 75 }, covMatrix); + +Console.WriteLine("Created MVN from correlation structure"); +``` + +## Practical Applications + +### Example 1: Correlated Risk Factors + +```cs +using Numerics.Distributions; +using Numerics.Data.Statistics; + +// Model correlated financial returns +// X₁ = Stock A return, X₂ = Stock B return, X₃ = Market return + +double[] meanReturns = { 0.08, 0.10, 0.09 }; // 8%, 10%, 9% annual +double[] volatilities = { 0.20, 0.25, 0.15 }; // 20%, 25%, 15% annual + +// Correlation structure +double[,] corr = { + { 1.0, 0.6, 0.8 }, // Stock A vs B: 0.6, A vs Market: 0.8 + { 0.6, 1.0, 0.7 }, // B vs Market: 0.7 + { 0.8, 0.7, 1.0 } +}; + +// Build covariance matrix +double[,] cov = new double[3, 3]; +for (int i = 0; i < 3; i++) +{ + for (int j = 0; j < 3; j++) + { + cov[i, j] = corr[i, j] * volatilities[i] * volatilities[j]; + } +} + +var returns = new MultivariateNormal(meanReturns, cov); + +// Simulate portfolio returns +int nYears = 1000; +double[,] scenarios = returns.GenerateRandomValues(nYears, seed: 123); + +// Portfolio: 50% Stock A, 30% Stock B, 20% Market Index +double[] weights = { 0.5, 0.3, 0.2 }; + +double[] portfolioReturns = new double[nYears]; +for (int i = 0; i < nYears; i++) +{ + portfolioReturns[i] = weights[0] * scenarios[i, 0] + + weights[1] * scenarios[i, 1] + + weights[2] * scenarios[i, 2]; +} + +Console.WriteLine($"Portfolio Analysis ({nYears} simulations):"); +Console.WriteLine($" Mean return: {portfolioReturns.Average():P2}"); +Console.WriteLine($" 
Volatility: {Statistics.StandardDeviation(portfolioReturns):P2}"); +Console.WriteLine($" 5th percentile: {Statistics.Percentile(portfolioReturns.OrderBy(r => r).ToArray(), 5):P2}"); +Console.WriteLine($" 95th percentile: {Statistics.Percentile(portfolioReturns.OrderBy(r => r).ToArray(), 95):P2}"); +``` + +### Example 2: Multivariate Exceedance Probability + +```cs +// Joint probability of exceeding thresholds +// Example: High temperature AND low rainfall AND high wind speed + +double[] means = { 75, 2.5, 15 }; // Temp (°F), Rain (in), Wind (mph) +double[,] cov = { + { 100, -5, 10 }, // Temp variance=100, negative corr with rain + { -5, 1.5, -2 }, // Rain variance=1.5, negative corr with wind + { 10, -2, 25 } // Wind variance=25 +}; + +var weather = new MultivariateNormal(means, cov); + +// Critical thresholds +double[] thresholds = { 85, 1.0, 20 }; // High temp, low rain, high wind + +// P(Temp > 85 AND Rain < 1.0 AND Wind > 20) +// This is complex for MVN, but we can estimate with simulation + +int nSims = 100000; +double[,] samples = weather.GenerateRandomValues(nSims, seed: 456); + +int exceedances = 0; +for (int i = 0; i < nSims; i++) +{ + if (samples[i, 0] > thresholds[0] && // High temp + samples[i, 1] < thresholds[1] && // Low rain + samples[i, 2] > thresholds[2]) // High wind + { + exceedances++; + } +} + +double prob = (double)exceedances / nSims; + +Console.WriteLine($"Joint Exceedance Probability:"); +Console.WriteLine($" P(Temp>{thresholds[0]}, Rain<{thresholds[1]}, Wind>{thresholds[2]})"); +Console.WriteLine($" = {prob:P4} ({exceedances} out of {nSims})"); +Console.WriteLine($" Return period: {1.0 / prob:F0} events"); +``` + +### Example 3: Conditional Distributions + +```cs +// Conditional distribution of Y given X for bivariate normal + +double muX = 100, muY = 50; +double sigmaX = 15, sigmaY = 10; +double rho = 0.6; + +double[,] cov = { + { sigmaX * sigmaX, rho * sigmaX * sigmaY }, + { rho * sigmaX * sigmaY, sigmaY * sigmaY } +}; + +var bivariate = new MultivariateNormal(new[] { muX, muY }, cov); + +// Given X = 110, what is distribution of Y? 
+double xObs = 110; + +// Conditional mean: E[Y|X=x] = μ_Y + ρ(σ_Y/σ_X)(x - μ_X) +double muY_given_X = muY + rho * (sigmaY / sigmaX) * (xObs - muX); + +// Conditional variance: Var[Y|X=x] = σ_Y²(1 - ρ²) +double sigmaY_given_X = sigmaY * Math.Sqrt(1 - rho * rho); + +Console.WriteLine($"Conditional Distribution of Y given X={xObs}:"); +Console.WriteLine($" E[Y|X={xObs}] = {muY_given_X:F2}"); +Console.WriteLine($" SD[Y|X={xObs}] = {sigmaY_given_X:F2}"); + +// Create conditional distribution +var conditional = new Normal(muY_given_X, sigmaY_given_X); + +// 95% prediction interval +double lower = conditional.InverseCDF(0.025); +double upper = conditional.InverseCDF(0.975); + +Console.WriteLine($" 95% prediction interval: [{lower:F2}, {upper:F2}]"); +``` + +### Example 4: Mahalanobis Distance + +```cs +// Measure distance from mean accounting for correlations + +double[] means = { 100, 50, 75 }; +double[,] cov = { + { 225, 75, 50 }, + { 75, 100, 25 }, + { 50, 25, 144 } +}; + +var mvn = new MultivariateNormal(means, cov); + +// Test point +double[] x = { 120, 60, 90 }; + +// Mahalanobis distance: D² = (x-μ)ᵀΣ⁻¹(x-μ) +var diff = new Vector(x.Select((xi, i) => xi - means[i]).ToArray()); +var covMatrix = new Matrix(cov); +var covInv = covMatrix.Inverse(); + +double mahalanobis = Math.Sqrt(diff.DotProduct(covInv.Multiply(diff))); + +// Chi-squared test: D² ~ χ²(k) under null hypothesis +double chiSqCritical = 7.815; // 95th percentile of χ²(3) + +Console.WriteLine($"Mahalanobis Distance Analysis:"); +Console.WriteLine($" Point: [{string.Join(", ", x)}]"); +Console.WriteLine($" Distance: {mahalanobis:F3}"); +Console.WriteLine($" D²: {mahalanobis * mahalanobis:F3}"); +Console.WriteLine($" Critical value (95%): {chiSqCritical:F3}"); + +if (mahalanobis * mahalanobis > chiSqCritical) + Console.WriteLine(" → Point is an outlier (p < 0.05)"); +else + Console.WriteLine(" → Point is not an outlier"); +``` + +## Key Concepts + +### Positive Definiteness + +The covariance matrix must be positive definite: + +```cs +// Valid covariance matrix +double[,] validCov = { + { 4.0, 1.5 }, + { 1.5, 2.0 } +}; + +// Invalid - not positive definite +double[,] invalidCov = { + { 1.0, 2.0 }, + { 2.0, 1.0 } // Correlation would be > 1 +}; + +try +{ + var invalid = new MultivariateNormal(new[] { 0.0, 0.0 }, invalidCov); +} +catch (Exception ex) +{ + Console.WriteLine($"Error: {ex.Message}"); +} +``` + +### Marginal Distributions + +Each component follows univariate normal: + +```cs +var mvn = new MultivariateNormal(means, cov); + +// Marginal distribution of X₁ +double mu1 = mvn.Mean[0]; +double sigma1 = Math.Sqrt(mvn.Covariance[0, 0]); +var marginal1 = new Normal(mu1, sigma1); + +Console.WriteLine($"Marginal X₁ ~ N({mu1:F2}, {sigma1:F2})"); +``` + +### Independence + +Variables are independent if covariance matrix is diagonal: + +```cs +// Independent variables +double[,] indepCov = { + { 100, 0, 0 }, + { 0, 64, 0 }, + { 0, 0, 225 } +}; + +var independent = new MultivariateNormal(new[] { 0.0, 0.0, 0.0 }, indepCov); +Console.WriteLine("Independent MVN → Diagonal covariance matrix"); +``` + +## Best Practices + +1. **Validate covariance matrix** - Must be symmetric and positive definite +2. **Check correlations** - Ensure |ρ| ≤ 1 for all pairs +3. **Use Cholesky decomposition** - Efficient for generation and PDF +4. **Consider dimension** - CDF computation expensive for high dimensions +5. **Monte Carlo for CDF** - Set appropriate tolerance for accuracy +6. **Conditional distributions** - Use formula for bivariate case +7. 
**Outlier detection** - Use Mahalanobis distance + +## Computational Notes + +- **PDF**: O(k³) for Cholesky decomposition +- **CDF**: O(k) for k≤2, O(n·k) for k>2 (Monte Carlo with n evaluations) +- **Sampling**: O(k²) per sample using Cholesky +- **Storage**: O(k²) for covariance matrix + +--- + +## References + +[1] Tong, Y. L. (2012). *The Multivariate Normal Distribution*. Springer Science & Business Media. + +[2] Kotz, S., Balakrishnan, N., & Johnson, N. L. (2004). *Continuous Multivariate Distributions, Volume 1: Models and Applications* (2nd ed.). John Wiley & Sons. + +--- + +[← Previous: Copulas](copulas.md) | [Back to Index](../index.md) diff --git a/docs/distributions/parameter-estimation.md b/docs/distributions/parameter-estimation.md new file mode 100644 index 00000000..50141250 --- /dev/null +++ b/docs/distributions/parameter-estimation.md @@ -0,0 +1,586 @@ +# Parameter Estimation + +[← Previous: Univariate Distributions](univariate.md) | [Back to Index](../index.md) | [Next: Uncertainty Analysis →](uncertainty-analysis.md) + +Parameter estimation is the process of fitting probability distributions to observed data. The ***Numerics*** library provides multiple estimation methods, each with different statistical properties and appropriate use cases. The library supports Method of Moments (MOM), L-Moments (L-MOM), Maximum Likelihood Estimation (MLE), and Method of Percentiles. + +## Estimation Methods + +The `ParameterEstimationMethod` enum defines the available methods: + +```cs +public enum ParameterEstimationMethod +{ + MaximumLikelihood, // Maximum likelihood estimation + MethodOfMoments, // Product moments + MethodOfLinearMoments, // L-moments + MethodOfPercentiles // Least squares / percentiles +} +``` + +### Comparison of Methods + +| Method | Advantages | Disadvantages | Best For | +|--------|-----------|---------------|----------| +| **L-Moments** | Robust to outliers, unbiased for small samples, efficient | Computationally intensive | **Hydrological data, small samples** | +| **Maximum Likelihood** | Asymptotically efficient, optimal for large samples | Sensitive to outliers, can fail to converge | Large samples, well-behaved data | +| **Method of Moments** | Simple, fast | Inefficient, biased for small samples | Quick estimates, stable parameters | +| **Method of Percentiles** | Intuitive, robust | Less efficient | Expert judgment, special cases | + +**Recommendation for Hydrological Applications:** L-Moments are recommended by USGS [[1]](#1) for flood frequency analysis due to superior performance with small samples and robustness to outliers. 
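+To see the robustness claim in concrete terms, the short sketch below (the flow values are made up for illustration) contrasts how the conventional skewness and the L-skewness τ₃ shift when a single gross outlier is removed. It uses the same `Statistics.ProductMoments` and `Statistics.LinearMoments` helpers that appear throughout this page, with the same return ordering; the L-moment ratio typically moves far less than the product-moment skew.
+
+```cs
+using System;
+using System.Linq;
+using Numerics.Data.Statistics;
+
+// Small annual-peak sample with one gross outlier (illustrative values)
+double[] flows = { 1210, 1350, 1180, 1420, 1290, 1330, 1260, 4800 };
+double[] trimmed = flows.Where(x => x < 4000).ToArray();
+
+// Product moments: [mean, std dev, skewness, kurtosis]
+double[] pm = Statistics.ProductMoments(flows);
+double[] pmTrim = Statistics.ProductMoments(trimmed);
+
+// L-moments: [λ₁, λ₂, τ₃, τ₄]
+double[] lm = Statistics.LinearMoments(flows);
+double[] lmTrim = Statistics.LinearMoments(trimmed);
+
+Console.WriteLine($"Skewness with/without outlier:      {pm[2]:F3} / {pmTrim[2]:F3}");
+Console.WriteLine($"L-skewness τ₃ with/without outlier: {lm[2]:F3} / {lmTrim[2]:F3}");
+```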
+ +## Using the Estimate() Method + +The simplest way to fit a distribution is using the `Estimate()` method: + +```cs +using Numerics.Distributions; +using Numerics.Data.Statistics; + +double[] annualPeakFlows = { 12500, 15300, 11200, 18700, 14100, 16800, 13400, 17200 }; + +// Method 1: Using Estimate() with estimation method +var gev = new GeneralizedExtremeValue(); +gev.Estimate(annualPeakFlows, ParameterEstimationMethod.MethodOfLinearMoments); + +Console.WriteLine($"GEV Parameters (L-Moments):"); +Console.WriteLine($" Location (ξ): {gev.Xi:F2}"); +Console.WriteLine($" Scale (α): {gev.Alpha:F2}"); +Console.WriteLine($" Shape (κ): {gev.Kappa:F4}"); + +// Method 2: Using MLE +gev.Estimate(annualPeakFlows, ParameterEstimationMethod.MaximumLikelihood); + +Console.WriteLine($"GEV Parameters (MLE):"); +Console.WriteLine($" Location (ξ): {gev.Xi:F2}"); +Console.WriteLine($" Scale (α): {gev.Alpha:F2}"); +Console.WriteLine($" Shape (κ): {gev.Kappa:F4}"); +``` + +The `Estimate()` method automatically: +1. Computes the required moments or likelihood +2. Estimates parameters using the specified method +3. Sets the distribution parameters +4. Validates the parameters + +## Manual Parameter Estimation + +For more control, use the `ParametersFrom*` methods to compute parameters without setting them: + +### L-Moments Approach + +```cs +using Numerics.Distributions; +using Numerics.Data.Statistics; + +double[] data = { 12500, 15300, 11200, 18700, 14100, 16800, 13400, 17200 }; + +// Step 1: Compute L-moments from data +double[] lMoments = Statistics.LinearMoments(data); + +Console.WriteLine("Sample L-Moments:"); +Console.WriteLine($" λ₁ (mean): {lMoments[0]:F2}"); +Console.WriteLine($" λ₂ (L-scale): {lMoments[1]:F2}"); +Console.WriteLine($" τ₃ (L-skewness): {lMoments[2]:F4}"); +Console.WriteLine($" τ₄ (L-kurtosis): {lMoments[3]:F4}"); + +// Step 2: Estimate parameters from L-moments +var gev = new GeneralizedExtremeValue(); +double[] parameters = gev.ParametersFromLinearMoments(lMoments); + +Console.WriteLine($"\nEstimated Parameters:"); +for (int i = 0; i < parameters.Length; i++) +{ + Console.WriteLine($" Parameter {i}: {parameters[i]:F4}"); +} + +// Step 3: Set the parameters +gev.SetParameters(parameters); + +// Verify: Check theoretical L-moments from fitted distribution +double[] theoreticalLMoments = gev.LinearMomentsFromParameters(parameters); + +Console.WriteLine($"\nTheoretical L-Moments from fitted distribution:"); +Console.WriteLine($" λ₁: {theoreticalLMoments[0]:F2}"); +Console.WriteLine($" λ₂: {theoreticalLMoments[1]:F2}"); +Console.WriteLine($" τ₃: {theoreticalLMoments[2]:F4}"); +Console.WriteLine($" τ₄: {theoreticalLMoments[3]:F4}"); +``` + +### Product Moments Approach + +```cs +// Step 1: Compute product moments from data +double[] moments = Statistics.ProductMoments(data); + +Console.WriteLine("Sample Product Moments:"); +Console.WriteLine($" Mean: {moments[0]:F2}"); +Console.WriteLine($" Std Dev: {moments[1]:F2}"); +Console.WriteLine($" Skewness: {moments[2]:F4}"); +Console.WriteLine($" Kurtosis: {moments[3]:F4}"); + +// Step 2: Estimate parameters from moments +var normal = new Normal(); +double[] normParams = normal.ParametersFromMoments(moments); + +Console.WriteLine($"\nNormal Parameters:"); +Console.WriteLine($" μ: {normParams[0]:F2}"); +Console.WriteLine($" σ: {normParams[1]:F2}"); + +// Step 3: Set parameters +normal.SetParameters(normParams); + +// Verify: Check theoretical moments +double[] theoreticalMoments = normal.MomentsFromParameters(normParams); 
+Console.WriteLine($"\nTheoretical Moments:"); +Console.WriteLine($" Mean: {theoreticalMoments[0]:F2}"); +Console.WriteLine($" Std Dev: {theoreticalMoments[1]:F2}"); +Console.WriteLine($" Skewness: {theoreticalMoments[2]:F4}"); +``` + +## L-Moments (Linear Moments) + +L-moments are linear combinations of order statistics that provide robust alternatives to conventional moments [[2]](#2). They are especially valuable for: +- Small sample sizes (n < 50) +- Data with outliers +- Hydrological applications +- Extreme value analysis + +### Properties of L-Moments + +1. **More robust** than conventional moments - less influenced by outliers +2. **Less biased** for small samples +3. **More efficient** - smaller sampling variance +4. **Bounded** - L-moment ratios are bounded, unlike conventional moments +5. **Nearly unbiased** even for very small samples (n = 10) + +### Computing L-Moments + +```cs +using Numerics.Data.Statistics; + +double[] sample = { 10.5, 12.3, 11.8, 15.2, 13.7, 14.1, 16.8, 12.9 }; + +// Compute L-moments +double[] lMoments = Statistics.LinearMoments(sample); + +Console.WriteLine("L-Moments:"); +Console.WriteLine($" λ₁ (L-location/mean): {lMoments[0]:F3}"); +Console.WriteLine($" λ₂ (L-scale): {lMoments[1]:F3}"); +Console.WriteLine($" τ₃ (L-skewness): {lMoments[2]:F4}"); +Console.WriteLine($" τ₄ (L-kurtosis): {lMoments[3]:F4}"); + +// L-moment ratios +double tau3 = lMoments[2]; // L-skewness +double tau4 = lMoments[3]; // L-kurtosis + +// Interpret L-skewness +if (Math.Abs(tau3) < 0.1) + Console.WriteLine("Distribution is approximately symmetric"); +else if (tau3 > 0) + Console.WriteLine("Distribution is right-skewed"); +else + Console.WriteLine("Distribution is left-skewed"); +``` + +### L-Moment Diagrams + +L-moment diagrams plot L-skewness (τ₃) vs L-kurtosis (τ₄) to identify appropriate distributions [[2]](#2): + +```cs +// Theoretical L-moment ratios for distributions +var distributions = new (string Name, IUnivariateDistribution Dist)[] +{ + ("GEV", new GeneralizedExtremeValue(1000, 200, -0.1)), + ("Gumbel", new Gumbel(1000, 200)), + ("Normal", new Normal(1000, 200)), + ("LP3", new LogPearsonTypeIII(7.0, 0.2, 0.3)) +}; + +Console.WriteLine("Distribution | τ₃ (L-skew) | τ₄ (L-kurt)"); +Console.WriteLine("--------------------------------------------"); + +foreach (var (name, dist) in distributions) +{ + var lMom = dist.LinearMomentsFromParameters(dist.GetParameters); + Console.WriteLine($"{name,-12} | {lMom[2],11:F4} | {lMom[3],11:F4}"); +} + +// Compare with sample +double[] sampleLM = Statistics.LinearMoments(sample); +Console.WriteLine($"{"Sample",-12} | {sampleLM[2],11:F4} | {sampleLM[3],11:F4}"); +``` + +## Maximum Likelihood Estimation + +MLE finds parameters that maximize the likelihood of observing the data [[3]](#3): + +```cs +using Numerics.Distributions; + +double[] observations = { 12.5, 15.3, 11.2, 18.7, 14.1, 16.8, 13.4, 17.2 }; + +// Fit using MLE +var weibull = new Weibull(); +weibull.Estimate(observations, ParameterEstimationMethod.MaximumLikelihood); + +Console.WriteLine($"Weibull Parameters (MLE):"); +Console.WriteLine($" Scale (α): {weibull.Alpha:F3}"); +Console.WriteLine($" Shape (β): {weibull.Beta:F3}"); + +// Compute log-likelihood at fitted parameters +double logLikelihood = 0; +foreach (var x in observations) +{ + logLikelihood += weibull.LogPDF(x); +} + +Console.WriteLine($"Log-likelihood: {logLikelihood:F4}"); +``` + +### MLE Properties + +**Advantages:** +- Asymptotically efficient (minimum variance for large n) +- Invariant under transformation +- 
Provides likelihood for model comparison (AIC, BIC) + +**Disadvantages:** +- Can fail to converge for difficult distributions +- Sensitive to outliers +- Biased for small samples +- Computationally expensive (requires optimization) + +### When MLE May Fail + +```cs +// MLE can fail with difficult starting values or poor data +var gev = new GeneralizedExtremeValue(); + +try +{ + gev.Estimate(observations, ParameterEstimationMethod.MaximumLikelihood); + Console.WriteLine("MLE converged successfully"); +} +catch (Exception ex) +{ + Console.WriteLine($"MLE failed: {ex.Message}"); + + // Fall back to L-moments + Console.WriteLine("Falling back to L-moments..."); + gev.Estimate(observations, ParameterEstimationMethod.MethodOfLinearMoments); + Console.WriteLine("L-moments estimation successful"); +} +``` + +## Method of Moments + +MOM matches sample moments with theoretical moments: + +```cs +double[] data = { 100, 105, 98, 110, 95, 102, 108, 97, 103, 106 }; + +// Product moments +double[] moments = Statistics.ProductMoments(data); + +// Fit Normal distribution +var normal = new Normal(); +normal.Estimate(data, ParameterEstimationMethod.MethodOfMoments); + +// Or manually +double[] params = normal.ParametersFromMoments(moments); +normal.SetParameters(params); + +Console.WriteLine($"Normal Distribution (MOM):"); +Console.WriteLine($" μ = {normal.Mu:F2}"); +Console.WriteLine($" σ = {normal.Sigma:F2}"); +Console.WriteLine($" Sample mean = {moments[0]:F2}"); +Console.WriteLine($" Sample std dev = {moments[1]:F2}"); +``` + +## Distribution-Specific Estimation + +### Log-Pearson Type III (USGS Bulletin 17C) + +For USGS flood frequency analysis [[1]](#1): + +```cs +using Numerics.Distributions; +using Numerics.Data.Statistics; + +double[] annualPeaks = { 12500, 15300, 11200, 18700, 14100, 16800, 13400, 17200, 10500, 19300 }; + +// USGS recommends L-moments with Expected Moments Algorithm (EMA) adjustments +var lp3 = new LogPearsonTypeIII(); +lp3.Estimate(annualPeaks, ParameterEstimationMethod.MethodOfLinearMoments); + +Console.WriteLine("LP3 Parameters (Bulletin 17C method):"); +Console.WriteLine($" μ (log-space mean): {lp3.Mu:F4}"); +Console.WriteLine($" σ (log-space std dev): {lp3.Sigma:F4}"); +Console.WriteLine($" γ (log-space skew): {lp3.Gamma:F4}"); + +// Compute flood frequency curve +var returnPeriods = new int[] { 2, 5, 10, 25, 50, 100, 200, 500 }; + +Console.WriteLine("\nFlood Frequency Analysis:"); +Console.WriteLine("T (years) | AEP | Discharge"); +foreach (var T in returnPeriods) +{ + double aep = 1.0 / T; + double Q = lp3.InverseCDF(1 - aep); + Console.WriteLine($"{T,9} | {aep,7:F5} | {Q,10:F0}"); +} +``` + +### Generalized Extreme Value + +```cs +// Block maxima approach +double[] blockMaxima = { 125, 153, 112, 187, 141, 168 }; + +var gev = new GeneralizedExtremeValue(); + +// Try MLE first +try +{ + gev.Estimate(blockMaxima, ParameterEstimationMethod.MaximumLikelihood); + Console.WriteLine("Fitted using MLE"); +} +catch +{ + // Fall back to L-moments + gev.Estimate(blockMaxima, ParameterEstimationMethod.MethodOfLinearMoments); + Console.WriteLine("Fitted using L-moments (MLE failed)"); +} + +Console.WriteLine($"ξ = {gev.Xi:F2}, α = {gev.Alpha:F2}, κ = {gev.Kappa:F4}"); + +// Classify GEV type +if (Math.Abs(gev.Kappa) < 0.01) + Console.WriteLine("Approximately Gumbel (Type I)"); +else if (gev.Kappa < 0) + Console.WriteLine($"Weibull/Type III (bounded above at {gev.Xi - gev.Alpha / gev.Kappa:F1})"); +else + Console.WriteLine("Fréchet/Type II (heavy tail)"); +``` + +### Two-Parameter 
Distributions + +For simpler distributions: + +```cs +double[] data = { 10.5, 12.3, 11.8, 15.2, 13.7, 14.1, 16.8, 12.9 }; + +// Exponential - one parameter +var exponential = new Exponential(); +exponential.Estimate(data, ParameterEstimationMethod.MethodOfMoments); +Console.WriteLine($"Exponential λ = {exponential.Lambda:F4}"); + +// Log-Normal - two parameters +var lognormal = new LogNormal(); +lognormal.Estimate(data, ParameterEstimationMethod.MethodOfMoments); +Console.WriteLine($"LogNormal μ = {lognormal.Mu:F4}, σ = {lognormal.Sigma:F4}"); + +// Weibull - two parameters +var weibull = new Weibull(); +weibull.Estimate(data, ParameterEstimationMethod.MethodOfLinearMoments); +Console.WriteLine($"Weibull α = {weibull.Alpha:F4}, β = {weibull.Beta:F4}"); +``` + +## Practical Workflow Example + +Complete workflow for flood frequency analysis: + +```cs +using Numerics.Distributions; +using Numerics.Data.Statistics; + +// Step 1: Load and prepare data +double[] annualPeakFlows = LoadFloodData(); // Your data loading function +Console.WriteLine($"Sample size: {annualPeakFlows.Length}"); +Console.WriteLine($"Sample mean: {annualPeakFlows.Average():F0}"); +Console.WriteLine($"Sample std dev: {Statistics.StandardDeviation(annualPeakFlows):F0}"); + +// Step 2: Compute sample L-moments +double[] lMoments = Statistics.LinearMoments(annualPeakFlows); +Console.WriteLine($"\nSample L-moments:"); +Console.WriteLine($" λ₁ = {lMoments[0]:F0}"); +Console.WriteLine($" λ₂ = {lMoments[1]:F0}"); +Console.WriteLine($" τ₃ = {lMoments[2]:F4}"); +Console.WriteLine($" τ₄ = {lMoments[3]:F4}"); + +// Step 3: Fit multiple candidate distributions +var candidates = new List<(string Name, IUnivariateDistribution Dist)> +{ + ("LP3", new LogPearsonTypeIII()), + ("GEV", new GeneralizedExtremeValue()), + ("Gumbel", new Gumbel()), + ("PIII", new PearsonTypeIII()) +}; + +foreach (var (name, dist) in candidates) +{ + dist.Estimate(annualPeakFlows, ParameterEstimationMethod.MethodOfLinearMoments); + + Console.WriteLine($"\n{name} fitted:"); + var paramNames = dist.ParameterNamesShortForm; + var paramValues = dist.GetParameters; + for (int i = 0; i < dist.NumberOfParameters; i++) + { + Console.WriteLine($" {paramNames[i]} = {paramValues[i]:F4}"); + } +} + +// Step 4: Compare at key quantiles +var testProbs = new double[] { 0.5, 0.9, 0.98, 0.99, 0.998 }; + +Console.WriteLine($"\nQuantile Comparison:"); +Console.WriteLine($"AEP | " + string.Join(" | ", candidates.Select(c => $"{c.Name,8}"))); +Console.WriteLine(new string('-', 60)); + +foreach (var p in testProbs) +{ + double aep = 1 - p; + var quantiles = candidates.Select(c => c.Dist.InverseCDF(p)); + Console.WriteLine($"{aep:F3} | " + string.Join(" | ", quantiles.Select(q => $"{q,8:F0}"))); +} + +// Step 5: Select best distribution (using GOF or judgment) +// See goodness-of-fit documentation for formal selection +var selectedDist = candidates[0].Dist; // e.g., LP3 for USGS applications + +// Step 6: Compute design floods +Console.WriteLine($"\n100-year flood: {selectedDist.InverseCDF(0.99):F0} cfs"); +Console.WriteLine($"500-year flood: {selectedDist.InverseCDF(0.998):F0} cfs"); +``` + +## Estimation with Censored Data + +For data with detection limits or censoring: + +```cs +// Low flows below detection limit (left-censored) +double detectionLimit = 5.0; +var observed = data.Where(x => x >= detectionLimit).ToArray(); +int nCensored = data.Length - observed.Length; + +Console.WriteLine($"Observed: {observed.Length}, Censored: {nCensored}"); + +// Fit using only observed values 
+var lognormal = new LogNormal(); +lognormal.Estimate(observed, ParameterEstimationMethod.MethodOfMoments); + +// Note: This is a simple approach. For formal censored data analysis, +// use MLE with censored likelihood (requires custom implementation) +``` + +## Tips and Best Practices + +### 1. Sample Size Requirements + +```cs +// Rule of thumb: n > 10 * number of parameters +if (data.Length < 3 * dist.NumberOfParameters * 10) +{ + Console.WriteLine("Warning: Small sample size relative to parameters"); + Console.WriteLine("Consider using L-moments for improved efficiency"); +} +``` + +### 2. Checking Parameter Validity + +```cs +var gev = new GeneralizedExtremeValue(); +gev.Estimate(data, ParameterEstimationMethod.MethodOfLinearMoments); + +if (!gev.ParametersValid) +{ + Console.WriteLine("Warning: Invalid parameters estimated"); + Console.WriteLine("Try different estimation method or distribution"); +} +``` + +### 3. Comparing Estimation Methods + +```cs +var methods = new[] +{ + ParameterEstimationMethod.MethodOfLinearMoments, + ParameterEstimationMethod.MethodOfMoments, + ParameterEstimationMethod.MaximumLikelihood +}; + +foreach (var method in methods) +{ + var dist = new GeneralizedExtremeValue(); + try + { + dist.Estimate(data, method); + Console.WriteLine($"{method}: κ = {dist.Kappa:F4}"); + } + catch (Exception ex) + { + Console.WriteLine($"{method}: Failed - {ex.Message}"); + } +} +``` + +### 4. Outlier Detection + +```cs +// Identify potential outliers before estimation +double[] sorted = data.OrderBy(x => x).ToArray(); +double Q1 = Statistics.Quantile(sorted, 0.25); +double Q3 = Statistics.Quantile(sorted, 0.75); +double IQR = Q3 - Q1; + +var outliers = data.Where(x => x < Q1 - 1.5 * IQR || x > Q3 + 1.5 * IQR).ToArray(); + +if (outliers.Length > 0) +{ + Console.WriteLine($"Potential outliers detected: {outliers.Length}"); + Console.WriteLine("Consider using L-moments (more robust to outliers)"); +} +``` + +### 5. Historical Information + +When historical data exists outside the systematic record: + +```cs +// Combine systematic record with historical peaks +double[] systematicRecord = { 12.5, 15.3, 11.2, 18.7, 14.1 }; // Recent, complete +double[] historicalPeaks = { 22.3 }; // Known historical floods + +// This is a simplified approach +// For formal analysis, use Expected Moments Algorithm (EMA) +var combined = systematicRecord.Concat(historicalPeaks).ToArray(); + +var lp3 = new LogPearsonTypeIII(); +lp3.Estimate(combined, ParameterEstimationMethod.MethodOfLinearMoments); + +Console.WriteLine("Fitted with historical information included"); +``` + +## Common Pitfalls + +1. **Using MOM for small samples** - Use L-moments instead +2. **Ignoring convergence failures in MLE** - Always check and have fallback +3. **Not checking parameter validity** - Always validate after estimation +4. **Wrong distribution family** - Use L-moment diagrams for selection +5. **Ignoring outliers** - L-moments are more robust than MOM or MLE +6. **Insufficient sample size** - Need at least 10n observations where n is number of parameters + +--- + +## References + +[1] England, J. F., Jr., Cohn, T. A., Faber, B. A., Stedinger, J. R., Thomas, W. O., Jr., Veilleux, A. G., Kiang, J. E., & Mason, R. R., Jr. (2018). *Guidelines for Determining Flood Flow Frequency—Bulletin 17C* (ver. 1.1, May 2019): U.S. Geological Survey Techniques and Methods, book 4, chap. B5, 148 p. + +[2] Hosking, J. R. M. (1990). L-moments: Analysis and estimation of distributions using linear combinations of order statistics. 
*Journal of the Royal Statistical Society: Series B (Methodological)*, 52(1), 105-124. + +[3] Mood, A. M., Graybill, F. A., & Boes, D. C. (1974). *Introduction to the Theory of Statistics* (3rd ed.). McGraw-Hill. + +--- + +[← Previous: Univariate Distributions](univariate.md) | [Back to Index](../index.md) | [Next: Uncertainty Analysis →](uncertainty-analysis.md) diff --git a/docs/distributions/uncertainty-analysis.md b/docs/distributions/uncertainty-analysis.md new file mode 100644 index 00000000..cf1601a8 --- /dev/null +++ b/docs/distributions/uncertainty-analysis.md @@ -0,0 +1,658 @@ +# Uncertainty Analysis + +[← Previous: Parameter Estimation](parameter-estimation.md) | [Back to Index](../index.md) | [Next: Copulas →](copulas.md) + +Uncertainty analysis quantifies the confidence in estimated distribution parameters and derived quantities (like design floods or failure probabilities). The ***Numerics*** library provides comprehensive bootstrap resampling methods for assessing parameter and quantile uncertainty, which are essential for risk-informed decision making. + +## Bootstrap Analysis Overview + +Bootstrap resampling [[1]](#1) is a nonparametric method for estimating sampling distributions and confidence intervals. It works by: + +1. Resampling the original data with replacement +2. Fitting the distribution to each bootstrap sample +3. Computing statistics from the ensemble of fitted distributions +4. Using percentiles of the bootstrap distribution for confidence intervals + +The bootstrap is particularly valuable when: +- Analytical confidence intervals are unavailable +- Sample sizes are small to moderate +- Distribution of estimators is unknown or complex +- Dealing with extreme value distributions + +## Creating a Bootstrap Analysis + +```cs +using Numerics.Distributions; + +// Original data +double[] annualPeakFlows = { 12500, 15300, 11200, 18700, 14100, 16800, 13400, 17200 }; + +// Fit parent distribution +var gev = new GeneralizedExtremeValue(); +gev.Estimate(annualPeakFlows, ParameterEstimationMethod.MethodOfLinearMoments); + +// Create bootstrap analysis +var bootstrap = new BootstrapAnalysis( + distribution: gev, + estimationMethod: ParameterEstimationMethod.MethodOfLinearMoments, + sampleSize: annualPeakFlows.Length, + replications: 10000, + seed: 12345); + +Console.WriteLine("Bootstrap analysis configured:"); +Console.WriteLine($" Sample size: {bootstrap.SampleSize}"); +Console.WriteLine($" Replications: {bootstrap.Replications}"); +Console.WriteLine($" Estimation method: {bootstrap.EstimationMethod}"); +``` + +### Constructor Parameters + +- **distribution**: The fitted parent distribution (must implement `IBootstrappable`) +- **estimationMethod**: Parameter estimation method for each bootstrap sample +- **sampleSize**: Size of each bootstrap sample (typically original sample size) +- **replications**: Number of bootstrap replications (recommended: 10,000-50,000) +- **seed**: Random seed for reproducibility (default: 12345) + +## Complete Uncertainty Analysis + +The `Estimate()` method performs a comprehensive uncertainty analysis: + +```cs +using Numerics.Distributions; + +double[] data = { 12500, 15300, 11200, 18700, 14100, 16800, 13400, 17200 }; + +// Fit parent distribution +var gev = new GeneralizedExtremeValue(); +gev.Estimate(data, ParameterEstimationMethod.MethodOfLinearMoments); + +// Configure bootstrap +var bootstrap = new BootstrapAnalysis(gev, + ParameterEstimationMethod.MethodOfLinearMoments, + sampleSize: data.Length, + replications: 10000); + +// Define 
probabilities of interest +var probabilities = new double[] { 0.5, 0.9, 0.95, 0.98, 0.99, 0.998 }; + +// Perform complete uncertainty analysis +var results = bootstrap.Estimate( + probabilities: probabilities, + alpha: 0.1, // 90% confidence intervals (1-α) + distributions: null, // Will generate new bootstrap samples + recordParameterSets: true); + +Console.WriteLine("Uncertainty Analysis Complete:"); +Console.WriteLine($" Parent distribution: {results.ParentDistribution.DisplayName}"); +Console.WriteLine($" Confidence level: {(1 - 0.1) * 100:F0}%"); +``` + +### UncertaintyAnalysisResults Structure + +The results object contains: + +```cs +// Parent (point estimate) distribution +UnivariateDistributionBase ParentDistribution + +// Mode curve: quantiles from parent distribution +double[,] ModeCurve // [probability, quantile] + +// Mean curve: expected quantiles from bootstrap ensemble +double[,] MeanCurve // [probability, quantile] + +// Confidence intervals for quantiles +double[,] ConfidenceIntervals // [probability, lower, upper] + +// Bootstrap parameter sets (if recorded) +ParameterSet[] ParameterSets + +// Goodness-of-fit metrics +double AIC, BIC, DIC, RMSE, ERL +``` + +## Accessing Uncertainty Results + +### Mode and Mean Curves + +```cs +var results = bootstrap.Estimate(probabilities, alpha: 0.1); + +Console.WriteLine("Quantile Estimates:"); +Console.WriteLine("AEP | T (years) | Mode | Mean | 90% CI"); +Console.WriteLine("--------------------------------------------------------"); + +for (int i = 0; i < probabilities.Length; i++) +{ + double prob = probabilities[i]; + double aep = 1 - prob; + double T = 1.0 / aep; + + double mode = results.ModeCurve[i, 1]; // Point estimate + double mean = results.MeanCurve[i, 1]; // Expected value + double lower = results.ConfidenceIntervals[i, 1]; // Lower bound + double upper = results.ConfidenceIntervals[i, 2]; // Upper bound + + Console.WriteLine($"{aep:F3} | {T,9:F1} | {mode,8:F0} | {mean,8:F0} | [{lower,6:F0}, {upper,6:F0}]"); +} +``` + +### Visualizing Uncertainty + +```cs +// Create plotting data for frequency curve with uncertainty bounds +var plotProbs = Enumerable.Range(1, 99).Select(i => i / 100.0).ToArray(); + +var results = bootstrap.Estimate(plotProbs, alpha: 0.1); + +// Export for plotting +using (var writer = new System.IO.StreamWriter("frequency_curve.csv")) +{ + writer.WriteLine("Probability,AEP,ReturnPeriod,Mode,Mean,Lower90,Upper90"); + + for (int i = 0; i < plotProbs.Length; i++) + { + double p = plotProbs[i]; + double aep = 1 - p; + double T = 1.0 / aep; + + writer.WriteLine($"{p:F4},{aep:F6},{T:F2}," + + $"{results.ModeCurve[i, 1]:F2}," + + $"{results.MeanCurve[i, 1]:F2}," + + $"{results.ConfidenceIntervals[i, 1]:F2}," + + $"{results.ConfidenceIntervals[i, 2]:F2}"); + } +} + +Console.WriteLine("Frequency curve data exported to frequency_curve.csv"); +``` + +## Bootstrap Parameter Distributions + +Examine the distribution of estimated parameters: + +```cs +var bootstrap = new BootstrapAnalysis(gev, + ParameterEstimationMethod.MethodOfLinearMoments, + data.Length, 10000); + +// Generate bootstrap distributions +var bootstrapDists = bootstrap.Distributions(); + +// Extract parameters +var parameterSets = bootstrap.ParameterSets(bootstrapDists); + +// Get parameter matrix [replication, parameter] +double[,] params = bootstrap.Parameters(bootstrapDists); + +Console.WriteLine("Bootstrap Parameter Statistics:"); +Console.WriteLine($"Number of successful replications: {bootstrapDists.Count(d => d != null)}"); + +// Analyze each 
parameter +string[] paramNames = gev.ParameterNamesShortForm; +for (int j = 0; j < gev.NumberOfParameters; j++) +{ + var values = new List<double>(); + for (int i = 0; i < parameterSets.Length; i++) + { + if (parameterSets[i].Values != null) + values.Add(parameterSets[i].Values[j]); + } + + Console.WriteLine($"\n{paramNames[j]}:"); + Console.WriteLine($" Mean: {values.Average():F4}"); + Console.WriteLine($" Std Dev: {Statistics.StandardDeviation(values.ToArray()):F4}"); + Console.WriteLine($" 5th percentile: {Statistics.Quantile(values.OrderBy(x => x).ToArray(), 0.05):F4}"); + Console.WriteLine($" 95th percentile: {Statistics.Quantile(values.OrderBy(x => x).ToArray(), 0.95):F4}"); +} +``` + +## Confidence Interval Methods + +The ***Numerics*** library provides multiple methods for computing bootstrap confidence intervals, each with different properties. + +### 1. Percentile Method (Default) + +The simplest method; it uses percentiles of the bootstrap distribution directly: + +```cs +var bootstrap = new BootstrapAnalysis(gev, + ParameterEstimationMethod.MethodOfLinearMoments, + data.Length, 10000); + +var probabilities = new double[] { 0.9, 0.99 }; + +// Percentile confidence intervals +double[,] percentileCI = bootstrap.PercentileQuantileCI( + probabilities: probabilities, + alpha: 0.1); // 90% CI + +Console.WriteLine("Percentile Confidence Intervals:"); +for (int i = 0; i < probabilities.Length; i++) +{ + Console.WriteLine($"P={probabilities[i]:F2}: [{percentileCI[i, 0]:F0}, {percentileCI[i, 1]:F0}]"); +} +``` + +### 2. Bias-Corrected (BC) Method + +Corrects for bias in the bootstrap distribution: + +```cs +// Bias-corrected CI +double[,] bcCI = bootstrap.BiasCorrectedQuantileCI(probabilities, alpha: 0.1); + +Console.WriteLine("Bias-Corrected Confidence Intervals:"); +for (int i = 0; i < probabilities.Length; i++) +{ + Console.WriteLine($"P={probabilities[i]:F2}: [{bcCI[i, 0]:F0}, {bcCI[i, 1]:F0}]"); +} +``` + +### 3. Normal Approximation Method + +Assumes a normal distribution for the bootstrap statistics: + +```cs +// Normal approximation CI +double[,] normalCI = bootstrap.NormalQuantileCI(probabilities, alpha: 0.1); + +Console.WriteLine("Normal Approximation Confidence Intervals:"); +for (int i = 0; i < probabilities.Length; i++) +{ + Console.WriteLine($"P={probabilities[i]:F2}: [{normalCI[i, 0]:F0}, {normalCI[i, 1]:F0}]"); +} +``` + +### 4. BCa (Bias-Corrected and Accelerated) + +The most accurate but computationally intensive method [[2]](#2): + +```cs +// BCa method requires original sample data +double[,] bcaCI = bootstrap.BCaQuantileCI( + sampleData: data, + probabilities: probabilities, + alpha: 0.1); + +Console.WriteLine("BCa Confidence Intervals:"); +for (int i = 0; i < probabilities.Length; i++) +{ + Console.WriteLine($"P={probabilities[i]:F2}: [{bcaCI[i, 0]:F0}, {bcaCI[i, 1]:F0}]"); +} +``` + +### 5. 
Bootstrap-t Method + +Uses studentized bootstrap for improved coverage: + +```cs +// Bootstrap-t CI +double[,] btCI = bootstrap.BootstrapTQuantileCI(probabilities, alpha: 0.1); + +Console.WriteLine("Bootstrap-t Confidence Intervals:"); +for (int i = 0; i < probabilities.Length; i++) +{ + Console.WriteLine($"P={probabilities[i]:F2}: [{btCI[i, 0]:F0}, {btCI[i, 1]:F0}]"); +} +``` + +### Comparing CI Methods + +```cs +var methods = new[] +{ + ("Percentile", bootstrap.PercentileQuantileCI(probabilities, 0.1)), + ("Bias-Corrected", bootstrap.BiasCorrectedQuantileCI(probabilities, 0.1)), + ("Normal", bootstrap.NormalQuantileCI(probabilities, 0.1)), + ("BCa", bootstrap.BCaQuantileCI(data, probabilities, 0.1)), + ("Bootstrap-t", bootstrap.BootstrapTQuantileCI(probabilities, 0.1)) +}; + +Console.WriteLine("Comparison of CI Methods (90% intervals):"); +Console.WriteLine("Probability | Percentile | BC | Normal | BCa | Bootstrap-t"); +Console.WriteLine("------------------------------------------------------------------------------------------------------"); + +for (int i = 0; i < probabilities.Length; i++) +{ + Console.Write($"{probabilities[i],11:F2} | "); + foreach (var (name, ci) in methods) + { + Console.Write($"[{ci[i, 0],6:F0},{ci[i, 1],6:F0}] | "); + } + Console.WriteLine(); +} +``` + +## Bootstrap Moments + +Compute product moments and L-moments from bootstrap ensemble: + +```cs +// Product moments from bootstrap replications +double[,] productMoments = bootstrap.ProductMoments(); + +Console.WriteLine("Bootstrap Product Moments:"); +Console.WriteLine($"Mean of means: {productMoments[0, 0]:F2}"); +Console.WriteLine($"Mean of std devs: {productMoments[1, 0]:F2}"); +Console.WriteLine($"Mean of skewness: {productMoments[2, 0]:F4}"); +Console.WriteLine($"Mean of kurtosis: {productMoments[3, 0]:F4}"); + +// L-moments from bootstrap replications +double[,] lMoments = bootstrap.LinearMoments(); + +Console.WriteLine("\nBootstrap L-Moments:"); +Console.WriteLine($"Mean λ₁: {lMoments[0, 0]:F2}"); +Console.WriteLine($"Mean λ₂: {lMoments[1, 0]:F2}"); +Console.WriteLine($"Mean τ₃: {lMoments[2, 0]:F4}"); +Console.WriteLine($"Mean τ₄: {lMoments[3, 0]:F4}"); +``` + +## Expected Probability (Rare Events) + +For very rare events, compute expected probabilities: + +```cs +// Quantiles of interest (e.g., design floods) +var quantiles = new double[] { 15000, 20000, 25000, 30000 }; + +// Expected probabilities from bootstrap ensemble +double[] expectedProbs = bootstrap.ExpectedProbabilities(quantiles); + +Console.WriteLine("Expected Probabilities for Design Floods:"); +Console.WriteLine("Discharge | Expected AEP | Expected Return Period"); +Console.WriteLine("----------------------------------------------------"); + +for (int i = 0; i < quantiles.Length; i++) +{ + double aep = 1 - expectedProbs[i]; + double T = 1.0 / aep; + Console.WriteLine($"{quantiles[i],9:F0} | {aep,12:E4} | {T,22:F1} years"); +} +``` + +## Practical Example: Complete Flood Frequency Analysis with Uncertainty + +```cs +using Numerics.Distributions; +using Numerics.Data.Statistics; + +// Step 1: Load annual peak flow data +double[] annualPeaks = { 12500, 15300, 11200, 18700, 14100, 16800, 13400, 17200, 10500, 19300 }; + +Console.WriteLine($"Flood Frequency Analysis"); +Console.WriteLine($"Sample size: {annualPeaks.Length}"); +Console.WriteLine($"Sample mean: {annualPeaks.Average():F0} cfs"); +Console.WriteLine($"Sample std dev: {Statistics.StandardDeviation(annualPeaks):F0} cfs"); + +// Step 2: Compute sample L-moments +double[] sampleLM = 
Statistics.LinearMoments(annualPeaks); +Console.WriteLine($"\nSample L-moments:"); +Console.WriteLine($" λ₁: {sampleLM[0]:F0}"); +Console.WriteLine($" λ₂: {sampleLM[1]:F0}"); +Console.WriteLine($" τ₃: {sampleLM[2]:F4}"); + +// Step 3: Fit LP3 distribution (USGS standard) +var lp3 = new LogPearsonTypeIII(); +lp3.Estimate(annualPeaks, ParameterEstimationMethod.MethodOfLinearMoments); + +Console.WriteLine($"\nFitted LP3 Distribution:"); +Console.WriteLine($" μ: {lp3.Mu:F4}"); +Console.WriteLine($" σ: {lp3.Sigma:F4}"); +Console.WriteLine($" γ: {lp3.Gamma:F4}"); + +// Step 4: Bootstrap uncertainty analysis +Console.WriteLine($"\nPerforming bootstrap analysis..."); +var bootstrap = new BootstrapAnalysis( + distribution: lp3, + estimationMethod: ParameterEstimationMethod.MethodOfLinearMoments, + sampleSize: annualPeaks.Length, + replications: 10000, + seed: 12345); + +// Return periods of interest +var returnPeriods = new int[] { 2, 5, 10, 25, 50, 100, 200, 500 }; +var probabilities = returnPeriods.Select(T => 1.0 - 1.0 / T).ToArray(); + +// Perform uncertainty analysis with 90% confidence intervals +var results = bootstrap.Estimate(probabilities, alpha: 0.1, recordParameterSets: true); + +// Step 5: Present results +Console.WriteLine($"\nFlood Frequency Analysis Results (90% Confidence):"); +Console.WriteLine("─────────────────────────────────────────────────────────────────────"); +Console.WriteLine("Return Annual Point Expected 90% Confidence Interval"); +Console.WriteLine("Period Exceedance Estimate Value Lower Upper "); +Console.WriteLine("(years) Probability (cfs) (cfs) (cfs) (cfs) "); +Console.WriteLine("─────────────────────────────────────────────────────────────────────"); + +for (int i = 0; i < returnPeriods.Length; i++) +{ + int T = returnPeriods[i]; + double aep = 1.0 / T; + double point = results.ModeCurve[i, 1]; + double mean = results.MeanCurve[i, 1]; + double lower = results.ConfidenceIntervals[i, 1]; + double upper = results.ConfidenceIntervals[i, 2]; + + Console.WriteLine($"{T,6} {aep,11:F5} {point,8:F0} {mean,8:F0} {lower,8:F0} {upper,8:F0}"); +} + +Console.WriteLine("─────────────────────────────────────────────────────────────────────"); + +// Step 6: Parameter uncertainty +Console.WriteLine($"\nParameter Uncertainty:"); +Console.WriteLine($"Parameter sets recorded: {results.ParameterSets.Length}"); + +double[] muValues = results.ParameterSets.Select(ps => ps.Values[0]).ToArray(); +double[] sigmaValues = results.ParameterSets.Select(ps => ps.Values[1]).ToArray(); +double[] gammaValues = results.ParameterSets.Select(ps => ps.Values[2]).ToArray(); + +Console.WriteLine($"μ: {muValues.Average():F4} ± {Statistics.StandardDeviation(muValues):F4}"); +Console.WriteLine($"σ: {sigmaValues.Average():F4} ± {Statistics.StandardDeviation(sigmaValues):F4}"); +Console.WriteLine($"γ: {gammaValues.Average():F4} ± {Statistics.StandardDeviation(gammaValues):F4}"); + +// Step 7: Uncertainty bounds width +Console.WriteLine($"\nUncertainty Analysis Summary:"); +for (int i = 0; i < returnPeriods.Length; i++) +{ + double width = results.ConfidenceIntervals[i, 2] - results.ConfidenceIntervals[i, 1]; + double relativeWidth = width / results.ModeCurve[i, 1] * 100; + Console.WriteLine($"{returnPeriods[i]}-year: ±{relativeWidth:F1}% relative uncertainty"); +} +``` + +## Advanced Bootstrap Techniques + +### Reusing Bootstrap Samples + +Generate bootstrap distributions once and reuse for multiple analyses: + +```cs +// Generate bootstrap distributions once +var bootstrapDists = bootstrap.Distributions(); + 
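// Each element is a refitted bootstrap replicate of the parent distribution; +// an element is null when estimation failed for that bootstrap sample (hence the null checks below). +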
+Console.WriteLine($"Generated {bootstrapDists.Count(d => d != null)} valid bootstrap replications"); + +// Reuse for different probability sets +var probs1 = new double[] { 0.9, 0.95, 0.99 }; +var results1 = bootstrap.Estimate(probs1, alpha: 0.1, distributions: bootstrapDists); + +var probs2 = new double[] { 0.5, 0.75, 0.98 }; +var results2 = bootstrap.Estimate(probs2, alpha: 0.1, distributions: bootstrapDists); + +// Much faster since bootstrap samples are reused +``` + +### Custom Quantile Computations + +```cs +// Compute quantiles from bootstrap ensemble +var probsOfInterest = new double[] { 0.9, 0.95, 0.98, 0.99, 0.998 }; + +double[,] quantiles = bootstrap.Quantiles(probsOfInterest); + +Console.WriteLine("Bootstrap Quantiles:"); +Console.WriteLine("Prob | Mean | Std Dev | 5th %ile | 95th %ile"); +Console.WriteLine("------------------------------------------------------------"); + +for (int i = 0; i < probsOfInterest.Length; i++) +{ + Console.WriteLine($"{probsOfInterest[i]:F3} | {quantiles[i, 0],9:F0} | " + + $"{quantiles[i, 1],9:F0} | {quantiles[i, 2],9:F0} | {quantiles[i, 3],9:F0}"); +} +``` + +### Computing Probabilities + +Reverse direction - find probabilities for given quantiles: + +```cs +var designFlows = new double[] { 15000, 20000, 25000, 30000 }; + +double[,] probabilities = bootstrap.Probabilities(designFlows); + +Console.WriteLine("Probabilities for Design Flows:"); +Console.WriteLine("Flow | Mean Prob | Std Dev | 5th %ile | 95th %ile"); +Console.WriteLine("--------------------------------------------------------"); + +for (int i = 0; i < designFlows.Length; i++) +{ + double meanAEP = 1 - probabilities[i, 0]; + double meanT = 1.0 / meanAEP; + + Console.WriteLine($"{designFlows[i],5:F0} | {probabilities[i, 0],9:F4} | " + + $"{probabilities[i, 1],9:F4} | {probabilities[i, 2],9:F4} | {probabilities[i, 3],9:F4}"); + Console.WriteLine($" | T={meanT:F1} years"); +} +``` + +## Choosing Number of Replications + +```cs +// Assess convergence with different replication counts +var replicationCounts = new int[] { 1000, 5000, 10000, 20000 }; +var testProb = 0.99; // 100-year event + +Console.WriteLine("Bootstrap Convergence Analysis:"); +Console.WriteLine("Replications | 100-yr Estimate | CI Width | Time (relative)"); + +foreach (var nRep in replicationCounts) +{ + var boot = new BootstrapAnalysis(gev, + ParameterEstimationMethod.MethodOfLinearMoments, + data.Length, nRep); + + var result = boot.Estimate(new[] { testProb }, alpha: 0.1); + + double estimate = result.ModeCurve[0, 1]; + double width = result.ConfidenceIntervals[0, 2] - result.ConfidenceIntervals[0, 1]; + + Console.WriteLine($"{nRep,12} | {estimate,15:F0} | {width,8:F0} | {nRep / 1000.0,4:F1}×"); +} + +// Recommendation: 10,000-20,000 replications for most applications +// 50,000+ for critical infrastructure or very rare events +``` + +## Bootstrap vs. 
Analytical Uncertainty + +```cs +// Compare bootstrap CI with analytical (if available) +var normal = new Normal(100, 15); +normal.Estimate(data, ParameterEstimationMethod.MethodOfMoments); + +// Bootstrap CI +var bootstrap = new BootstrapAnalysis(normal, + ParameterEstimationMethod.MethodOfMoments, + data.Length, 10000); + +var probs = new double[] { 0.5, 0.9, 0.95 }; +var bootResults = bootstrap.Estimate(probs, alpha: 0.1); + +// Analytical CI (for normal distribution, if implemented) +// This would require separate implementation + +Console.WriteLine("Bootstrap vs Analytical Confidence Intervals:"); +Console.WriteLine("Both methods should give similar results for Normal distribution"); +Console.WriteLine("Bootstrap is more general and works for any distribution"); +``` + +## Best Practices + +### 1. Always Use Adequate Replications + +```cs +// Too few replications give unstable confidence intervals +int nReplications = Math.Max(10000, 20 * data.Length); +Console.WriteLine($"Recommended replications: {nReplications}"); +``` + +### 2. Check for Failed Replications + +```cs +var bootstrapDists = bootstrap.Distributions(); +int nFailed = bootstrapDists.Count(d => d == null); + +if (nFailed > bootstrapDists.Length * 0.05) +{ + Console.WriteLine($"Warning: {nFailed} replications failed ({100.0 * nFailed / bootstrapDists.Length:F1}%)"); + Console.WriteLine("Consider using more robust estimation method"); +} +``` + +### 3. Report Uncertainty Appropriately + +```cs +// Report point estimate ± uncertainty +double point = results.ModeCurve[0, 1]; +double lower = results.ConfidenceIntervals[0, 1]; +double upper = results.ConfidenceIntervals[0, 2]; + +Console.WriteLine($"100-year flood: {point:F0} cfs"); +Console.WriteLine($"90% CI: [{lower:F0}, {upper:F0}] cfs"); +Console.WriteLine($"Or: {point:F0} ± {(upper - point):F0}/-{(point - lower):F0} cfs"); +``` + +### 4. Consider Sample Size Effects + +```cs +// Uncertainty increases as sample size decreases +Console.WriteLine("Effect of Sample Size on Uncertainty:"); + +foreach (var n in new[] { 10, 20, 50, 100 }) +{ + // Simulate sampling from fitted distribution + var sample = gev.GenerateRandomValues(n); + var testDist = new GeneralizedExtremeValue(); + testDist.Estimate(sample, ParameterEstimationMethod.MethodOfLinearMoments); + + var testBoot = new BootstrapAnalysis(testDist, + ParameterEstimationMethod.MethodOfLinearMoments, n, 1000); + var testResults = testBoot.Estimate(new[] { 0.99 }, alpha: 0.1); + + double width = testResults.ConfidenceIntervals[0, 2] - testResults.ConfidenceIntervals[0, 1]; + Console.WriteLine($"n={n,3}: CI width = {width:F0}"); +} +``` + +## Common Pitfalls + +1. **Too few replications** - Use at least 10,000 for final analyses +2. **Wrong estimation method** - Use same method for parent and bootstrap +3. **Not checking convergence** - Monitor failed replications +4. **Ignoring small sample bias** - Bootstrap can't fix fundamental data limitations +5. **Overinterpreting precision** - CI width reflects sampling uncertainty only + +--- + +## References + +[1] Efron, B., & Tibshirani, R. J. (1993). *An Introduction to the Bootstrap*. Chapman & Hall/CRC. + +[2] Efron, B. (1987). Better bootstrap confidence intervals. *Journal of the American Statistical Association*, 82(397), 171-185. + +[3] Davison, A. C., & Hinkley, D. V. (1997). *Bootstrap Methods and Their Application*. Cambridge University Press. 
+ +--- + +[← Previous: Parameter Estimation](parameter-estimation.md) | [Back to Index](../index.md) | [Next: Copulas →](copulas.md) diff --git a/docs/distributions/univariate.md b/docs/distributions/univariate.md new file mode 100644 index 00000000..7725f2c5 --- /dev/null +++ b/docs/distributions/univariate.md @@ -0,0 +1,568 @@ +# Univariate Distributions + +[← Back to Index](../index.md) | [Next: Parameter Estimation →](parameter-estimation.md) + +The ***Numerics*** library provides over 40 univariate probability distributions for statistical analysis, risk assessment, and uncertainty quantification. All distributions implement a common interface with consistent methods for computing probability density functions (PDF), cumulative distribution functions (CDF), quantiles, and statistical moments. + +## Available Distributions + +### Continuous Distributions + +| Distribution | Parameters | Typical Applications | +|--------------|-----------|---------------------| +| **Normal** | μ (mean), σ (std dev) | General purpose, natural phenomena | +| **Log-Normal** | μ, σ | Right-skewed data, multiplicative processes | +| **Uniform** | a (min), b (max) | Maximum entropy, prior distributions | +| **Exponential** | λ (rate) | Time between events, survival analysis | +| **Gamma** | α (shape), β (scale) | Waiting times, rainfall | +| **Beta** | α, β | Probabilities, proportions, [0,1] bounded | +| **Weibull** | α (scale), β (shape) | Failure times, wind speed | +| **Gumbel** | ξ (location), α (scale) | Extreme values (maxima) | +| **Generalized Extreme Value (GEV)** | ξ, α, κ (shape) | Block maxima, floods, earthquakes | +| **Generalized Pareto (GP)** | ξ, α, κ | Exceedances over threshold | +| **Log-Pearson Type III (LP3)** | μ, σ, γ (skew) | USGS flood frequency analysis | +| **Pearson Type III (P3)** | μ, σ, γ | Flood frequency, rainfall | +| **Kappa-Four (K4)** | ξ, α, κ, h | Flexible 4-parameter family | +| **Generalized Logistic (GLO)** | ξ, α, κ | Growth models, extreme values | +| **Generalized Normal (GNO)** | ξ, α, κ | Flexible alternative to GEV | +| **Generalized Beta (GB)** | a, b, α, β | [a,b] bounded with flexibility | +| **Triangular** | a, b, c (mode) | Simple uncertainty modeling | +| **Rayleigh** | σ | Wind speed, wave height | +| **Cauchy** | x₀ (location), γ (scale) | Heavy-tailed phenomena | +| **Logistic** | μ, s | Growth processes, neural networks | +| **Student's t** | ν (degrees of freedom) | Heavy-tailed alternative to Normal | +| **Noncentral t** | ν, δ (noncentrality) | Power analysis, hypothesis testing | +| **Chi-Squared** | k (degrees of freedom) | Variance estimation, goodness-of-fit | +| **Inverse Gamma** | α, β | Bayesian priors for variance | +| **Inverse Chi-Squared** | ν | Bayesian inference | +| **Pareto** | xₘ (scale), α (shape) | Income distributions, city sizes | +| **PERT** | a, b, c | Project management, expert judgment | +| **PERT Percentile** | P₁₀, P₅₀, P₉₀ | Expert percentile elicitation | +| **PERT Percentile Z** | Similar to PERT Percentile | Alternative parametrization | +| **Truncated Normal** | μ, σ, a, b | Bounded normal distributions | +| **Truncated Distribution** | Any distribution + bounds | Bounded versions of distributions | +| **Mixture** | Multiple distributions | Multi-modal data | +| **Empirical** | Sample data | Non-parametric, data-driven | +| **Kernel Density** | Sample data, bandwidth | Smooth non-parametric estimation | +| **Deterministic** | Single value | Point estimates, constants | +| **Competing Risks** | Multiple distributions 
| Failure analysis with multiple causes | + +### Discrete Distributions + +| Distribution | Parameters | Typical Applications | +|--------------|-----------|---------------------| +| **Bernoulli** | p (success probability) | Binary outcomes | +| **Binomial** | n (trials), p (success prob) | Number of successes in n trials | +| **Poisson** | λ (rate) | Count data, rare events | +| **Geometric** | p | Number of trials until first success | +| **Uniform Discrete** | a, b | Discrete uniform outcomes | + +## Common Interface + +All univariate distributions in ***Numerics*** implement the `IUnivariateDistribution` interface, providing: + +### Statistical Properties +```cs +double Mean // E[X] +double Median // 50th percentile +double Mode // Most likely value +double Variance // Var(X) +double StandardDeviation // √Var(X) +double Skewness // Measure of asymmetry +double Kurtosis // Measure of tail heaviness +double Minimum // Support lower bound +double Maximum // Support upper bound +``` + +### Probability Functions +```cs +double PDF(double x) // Probability density (or mass for discrete) +double CDF(double x) // P(X ≤ x) +double InverseCDF(double p) // Quantile function (inverse CDF) +double CCDF(double x) // P(X > x) = 1 - CDF(x) +double HF(double x) // Hazard function +double LogPDF(double x) // ln(PDF(x)) +double LogCDF(double x) // ln(CDF(x)) +double LogCCDF(double x) // ln(CCDF(x)) +``` + +### Random Generation +```cs +double[] GenerateRandomValues(int sampleSize, int seed = -1) +``` + +## Creating Distributions + +### Method 1: Direct Construction with Parameters + +```cs +using Numerics.Distributions; + +// Normal distribution: N(100, 15) +var normal = new Normal(mu: 100, sigma: 15); + +// Generalized Extreme Value: GEV(1000, 200, -0.1) +var gev = new GeneralizedExtremeValue(xi: 1000, alpha: 200, kappa: -0.1); + +// Log-Normal distribution +var lognormal = new LogNormal(mu: 4.5, sigma: 0.5); + +// Gamma distribution +var gamma = new GammaDistribution(alpha: 5, beta: 2); +``` + +### Method 2: Using SetParameters + +```cs +// Create with default parameters, then set +var weibull = new Weibull(); +weibull.SetParameters(new double[] { 50, 2.5 }); // alpha=50, beta=2.5 + +// Or use named parameters +weibull.SetParameters(alpha: 50, beta: 2.5); +``` + +### Method 3: From Parameter Array + +Useful when parameters are computed: + +```cs +double[] gevParams = SomeEstimationFunction(data); +var gev = new GeneralizedExtremeValue(); +gev.SetParameters(gevParams); +``` + +## Using Distributions + +### Basic Probability Calculations + +```cs +using Numerics.Distributions; + +var normal = new Normal(100, 15); + +// Probability density at x = 110 +double pdf = normal.PDF(110); // f(110) +Console.WriteLine($"PDF at 110: {pdf:F6}"); + +// Cumulative probability P(X ≤ 110) +double cdf = normal.CDF(110); +Console.WriteLine($"P(X ≤ 110) = {cdf:F4}"); // 0.7475 + +// Exceedance probability P(X > 110) +double ccdf = normal.CCDF(110); // or 1 - cdf +Console.WriteLine($"P(X > 110) = {ccdf:F4}"); // 0.2525 + +// Find quantile: what value corresponds to 95th percentile? 
+double q95 = normal.InverseCDF(0.95); +Console.WriteLine($"95th percentile: {q95:F2}"); // 124.67 +``` + +### Statistical Properties + +```cs +var gev = new GeneralizedExtremeValue(xi: 1000, alpha: 200, kappa: -0.1); + +Console.WriteLine($"Mean: {gev.Mean:F2}"); +Console.WriteLine($"Std Dev: {gev.StandardDeviation:F2}"); +Console.WriteLine($"Skewness: {gev.Skewness:F3}"); +Console.WriteLine($"Kurtosis: {gev.Kurtosis:F3}"); +Console.WriteLine($"Median: {gev.Median:F2}"); +Console.WriteLine($"Mode: {gev.Mode:F2}"); +``` + +### Hazard Function + +The hazard function describes instantaneous failure rate: + +```cs +var weibull = new Weibull(alpha: 100, beta: 2.5); + +// Hazard at time t=50 +double hazard = weibull.HF(50); +Console.WriteLine($"Hazard rate at t=50: {hazard:F6}"); + +// For Weibull, hazard increases with time when β > 1 (wear-out) +``` + +### Log-Space Calculations + +For numerical stability with very small probabilities: + +```cs +var normal = new Normal(0, 1); + +// Regular CDF can underflow for extreme values +double x = -10; +double logCDF = normal.LogCDF(x); // ln(CDF(x)) +double cdf = Math.Exp(logCDF); + +Console.WriteLine($"CDF(-10) = {cdf:E10}"); +Console.WriteLine($"Log-CDF(-10) = {logCDF:F4}"); +``` + +## Hydrological Distributions + +### Log-Pearson Type III (LP3) + +The LP3 distribution is the standard for USGS flood frequency analysis [[1]](#1): + +```cs +using Numerics.Distributions; + +double[] annualPeakFlows = { 12500, 15300, 11200, 18700, 14100, 16800, 13400, 17200 }; + +// Fit LP3 using L-Moments (recommended for hydrologic data) +var lp3 = new LogPearsonTypeIII(); +lp3.Estimate(annualPeakFlows, ParameterEstimationMethod.MethodOfLinearMoments); + +// Or explicitly with ParametersFromLinearMoments +var lMoments = Statistics.LinearMoments(annualPeakFlows); +lp3.SetParameters(lp3.ParametersFromLinearMoments(lMoments)); + +Console.WriteLine($"LP3 Parameters:"); +Console.WriteLine($" μ: {lp3.Mu:F3}"); +Console.WriteLine($" σ: {lp3.Sigma:F3}"); +Console.WriteLine($" γ: {lp3.Gamma:F3}"); + +// Compute flood quantiles +double q100 = lp3.InverseCDF(0.99); // 100-year flood (1% annual exceedance) +double q500 = lp3.InverseCDF(0.998); // 500-year flood +Console.WriteLine($"100-year flood: {q100:F0} cfs"); +Console.WriteLine($"500-year flood: {q500:F0} cfs"); +``` + +### Generalized Extreme Value (GEV) + +GEV is widely used for extreme value analysis [[2]](#2): + +```cs +// Annual maximum flood data +double[] annualMaxima = { 12500, 15300, 11200, 18700, 14100 }; + +var gev = new GeneralizedExtremeValue(); +gev.Estimate(annualMaxima, ParameterEstimationMethod.MethodOfLinearMoments); + +Console.WriteLine($"GEV Parameters:"); +Console.WriteLine($" Location (ξ): {gev.Xi:F2}"); +Console.WriteLine($" Scale (α): {gev.Alpha:F2}"); +Console.WriteLine($" Shape (κ): {gev.Kappa:F4}"); + +// Interpret shape parameter +if (gev.Kappa < 0) + Console.WriteLine(" Type III (Weibull) - bounded upper tail"); +else if (gev.Kappa > 0) + Console.WriteLine(" Type II (Fréchet) - heavy upper tail"); +else + Console.WriteLine(" Type I (Gumbel) - exponential tail"); +``` + +### Generalized Pareto Distribution (GPD) + +For peaks-over-threshold analysis [[3]](#3): + +```cs +// Values exceeding a threshold +double threshold = 10000; +var exceedances = annualPeakFlows.Where(x => x > threshold).Select(x => x - threshold).ToArray(); + +var gpd = new GeneralizedPareto(); +gpd.Estimate(exceedances, ParameterEstimationMethod.MethodOfLinearMoments); + +// Adjust location parameter for threshold 
+gpd.SetParameters(threshold, gpd.Alpha, gpd.Kappa); + +Console.WriteLine($"GPD for exceedances over {threshold}:"); +Console.WriteLine($" ξ: {gpd.Xi:F2}"); +Console.WriteLine($" α: {gpd.Alpha:F2}"); +Console.WriteLine($" κ: {gpd.Kappa:F4}"); +``` + +## Special Distribution Features + +### Truncated Distributions + +Create truncated versions of any distribution: + +```cs +// Normal truncated to [0, 100] +var truncNormal = new TruncatedNormal(mu: 50, sigma: 15, lowerBound: 0, upperBound: 100); + +// Or truncate any distribution +var normal = new Normal(50, 15); +var truncated = new TruncatedDistribution(normal, 0, 100); + +double mean = truncated.Mean; // Different from untruncated mean +``` + +### Mixture Distributions + +Model multi-modal data with mixture distributions: + +```cs +// Mixture of two normals (bimodal) +var component1 = new Normal(100, 10); +var component2 = new Normal(150, 15); +var weights = new double[] { 0.6, 0.4 }; // 60% from first, 40% from second + +var mixture = new Mixture(new IUnivariateDistribution[] { component1, component2 }, weights); + +// PDF will show two peaks +double pdf = mixture.PDF(125); // Valley between modes +``` + +### Empirical Distribution + +Non-parametric distribution from data: + +```cs +double[] observations = { 12.5, 15.3, 11.2, 18.7, 14.1, 16.8, 13.4, 17.2 }; + +var empirical = new EmpiricalDistribution(observations); + +// Uses linear interpolation for quantiles +double median = empirical.InverseCDF(0.5); +double q90 = empirical.InverseCDF(0.9); + +Console.WriteLine($"Empirical median: {median:F2}"); +Console.WriteLine($"Empirical 90th percentile: {q90:F2}"); +``` + +### Kernel Density Estimation + +Smooth non-parametric density estimation: + +```cs +var kde = new KernelDensity(observations, bandwidth: 1.5); + +// Smooth PDF +double density = kde.PDF(15.0); + +// KDE-based CDF and quantiles +double cdf = kde.CDF(15.0); +double quantile = kde.InverseCDF(0.75); +``` + +### PERT Distributions + +For expert judgment and project management: + +```cs +// PERT from minimum, most likely, maximum +var pert = new Pert(min: 10, mode: 15, max: 25); + +// PERT from percentile judgments +var pertPercentile = new PertPercentile(p10: 12, p50: 15, p90: 22); + +// Use for duration or cost uncertainty +double expectedDuration = pert.Mean; +double variance = pert.Variance; +``` + +## Random Number Generation + +All distributions can generate random samples: + +```cs +var normal = new Normal(100, 15); + +// Generate 1000 random values +double[] samples = normal.GenerateRandomValues(sampleSize: 1000, seed: 12345); + +Console.WriteLine($"Sample mean: {samples.Average():F2}"); +Console.WriteLine($"Sample std dev: {Statistics.StandardDeviation(samples):F2}"); + +// Use -1 or 0 for seed to use system clock +double[] randomSamples = normal.GenerateRandomValues(1000, seed: -1); +``` + +## Practical Examples + +### Example 1: Computing Return Periods + +```cs +// Fit distribution to annual maximum flood data +double[] annualMaxFlows = { 12500, 15300, 11200, 18700, 14100, 16800 }; + +var gev = new GeneralizedExtremeValue(); +gev.Estimate(annualMaxFlows, ParameterEstimationMethod.MethodOfLinearMoments); + +// Compute floods for different return periods +var returnPeriods = new int[] { 2, 5, 10, 25, 50, 100, 200, 500 }; + +Console.WriteLine("Return Period Analysis:"); +Console.WriteLine("Return Period | Annual Exceedance Prob | Flood Magnitude"); +Console.WriteLine("-----------------------------------------------------------"); + +foreach (var T in returnPeriods) +{ + 
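// For a T-year event, the annual exceedance probability (AEP) is 1/T, + // so the flood quantile is read at the non-exceedance probability 1 - 1/T. + 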
double aep = 1.0 / T; // Annual exceedance probability + double nep = 1.0 - aep; // Non-exceedance probability + double flood = gev.InverseCDF(nep); + + Console.WriteLine($"{T,13} | {aep,22:F6} | {flood,15:F0}"); +} +``` + +### Example 2: Probability of Exceedance + +```cs +var lp3 = new LogPearsonTypeIII(mu: 10.2, sigma: 0.3, gamma: 0.4); + +// What's the probability a flood exceeds 50,000 cfs? +double threshold = 50000; +double exceedanceProb = lp3.CCDF(threshold); +double returnPeriod = 1.0 / exceedanceProb; + +Console.WriteLine($"Probability of exceeding {threshold:N0} cfs: {exceedanceProb:F6}"); +Console.WriteLine($"Equivalent return period: {returnPeriod:F1} years"); +``` + +### Example 3: Comparing Distributions + +```cs +double[] data = { 12.5, 15.3, 11.2, 18.7, 14.1, 16.8, 13.4, 17.2, 10.5, 19.3 }; + +// Fit multiple distributions +var normal = new Normal(); +normal.Estimate(data, ParameterEstimationMethod.MethodOfMoments); + +var lognormal = new LogNormal(); +lognormal.Estimate(data, ParameterEstimationMethod.MethodOfMoments); + +var gev = new GeneralizedExtremeValue(); +gev.Estimate(data, ParameterEstimationMethod.MethodOfLinearMoments); + +// Compare at various quantiles +var probs = new double[] { 0.5, 0.9, 0.95, 0.99 }; + +Console.WriteLine("Quantile Comparison:"); +Console.WriteLine("Probability | Normal | Log-Normal | GEV"); +Console.WriteLine("----------------------------------------------"); + +foreach (var p in probs) +{ + Console.WriteLine($"{p,11:F2} | {normal.InverseCDF(p),6:F1} | {lognormal.InverseCDF(p),10:F1} | {gev.InverseCDF(p),3:F1}"); +} +``` + +### Example 4: Reliability Analysis + +```cs +// Component with Weibull failure time distribution +var weibull = new Weibull(alpha: 1000, beta: 2.5); // hours + +// Reliability at time t (probability of survival) +double t = 500; // hours +double reliability = weibull.CCDF(t); +double failureProb = weibull.CDF(t); + +Console.WriteLine($"At t = {t} hours:"); +Console.WriteLine($" Reliability: {reliability:F4}"); +Console.WriteLine($" Failure probability: {failureProb:F4}"); +Console.WriteLine($" Hazard rate: {weibull.HF(t):E3}"); + +// Mean time to failure +Console.WriteLine($" MTTF: {weibull.Mean:F1} hours"); +``` + +### Example 5: Risk Assessment + +```cs +// Annual probability of dam failure +var failureProb = new Beta(alpha: 2, beta: 1998); // ~0.001 + +// Generate scenarios +double[] scenarios = failureProb.GenerateRandomValues(10000, seed: 12345); + +// Estimate risk metrics +Console.WriteLine($"Expected annual failure probability: {failureProb.Mean:E4}"); +Console.WriteLine($"95th percentile: {failureProb.InverseCDF(0.95):E4}"); +Console.WriteLine($"Scenarios > 0.002: {scenarios.Count(x => x > 0.002)} / 10000"); +``` + +## Distribution Selection Guidelines + +| Data Characteristics | Recommended Distribution(s) | +|---------------------|----------------------------| +| Symmetric, unbounded | Normal, Student's t (heavy tails) | +| Right-skewed, positive | Log-Normal, Gamma, Weibull | +| Left-skewed | Beta, Generalized Beta | +| Heavy tails | Student's t, Cauchy, Pareto | +| Bounded [a,b] | Uniform, Beta, Triangular, PERT | +| Extreme values (maxima) | GEV, Gumbel, Weibull | +| Extreme values (minima) | GEV (negative), Weibull (reversed) | +| Threshold exceedances | Generalized Pareto | +| Flood frequency | LP3, GEV, Pearson Type III | +| Failure/survival times | Weibull, Exponential, Gamma | +| Count data | Poisson, Binomial | +| Expert judgment | PERT, PERT Percentile, Triangular | +| Non-parametric | Empirical, 
Kernel Density | + +## Parameter Bounds and Validation + +All distributions validate their parameters: + +```cs +var gev = new GeneralizedExtremeValue(); + +// Check if parameters are valid +var parameters = new double[] { 1000, 200, 0.3 }; +var exception = gev.ValidateParameters(parameters, throwException: false); + +if (exception == null) +{ + gev.SetParameters(parameters); + Console.WriteLine("Parameters are valid"); +} +else +{ + Console.WriteLine($"Invalid parameters: {exception.Message}"); +} + +// Get parameter bounds +double[] minParams = gev.MinimumOfParameters; +double[] maxParams = gev.MaximumOfParameters; + +Console.WriteLine("Parameter bounds:"); +for (int i = 0; i < gev.NumberOfParameters; i++) +{ + Console.WriteLine($" Param {i}: [{minParams[i]}, {maxParams[i]}]"); +} +``` + +## Distribution Information + +```cs +var normal = new Normal(100, 15); + +// Display information +Console.WriteLine($"Distribution: {normal.DisplayName}"); +Console.WriteLine($"Short name: {normal.ShortDisplayName}"); +Console.WriteLine($"Type: {normal.Type}"); +Console.WriteLine($"Parameters: {normal.DisplayLabel}"); +Console.WriteLine($"Number of parameters: {normal.NumberOfParameters}"); + +// Parameter names +string[] paramNames = normal.ParameterNamesShortForm; +double[] paramValues = normal.GetParameters; + +for (int i = 0; i < normal.NumberOfParameters; i++) +{ + Console.WriteLine($" {paramNames[i]} = {paramValues[i]:F3}"); +} +``` + +--- + +## References + +[1] Bulletin 17C: Guidelines for Determining Flood Flow Frequency. (2017). U.S. Geological Survey Techniques and Methods, Book 4, Chapter B5. + +[2] Coles, S. (2001). *An Introduction to Statistical Modeling of Extreme Values*. Springer. + +[3] Hosking, J. R. M., & Wallis, J. R. (1997). *Regional Frequency Analysis: An Approach Based on L-Moments*. Cambridge University Press. + +--- + +[← Back to Index](../index.md) | [Next: Parameter Estimation →](parameter-estimation.md) diff --git a/docs/getting-started.md b/docs/getting-started.md new file mode 100644 index 00000000..91a9a551 --- /dev/null +++ b/docs/getting-started.md @@ -0,0 +1,347 @@ +# Getting Started + +This guide will help you get up and running with the ***Numerics*** library quickly. + +## Installation + +### NuGet Package + +The easiest way to install ***Numerics*** is via NuGet: + +```bash +dotnet add package Numerics +``` + +Or add the following to your `.csproj` file: + +```xml +<PackageReference Include="Numerics" Version="*" /> <!-- pin the desired version --> +``` + +### Manual Installation + +Download the compiled DLL from the releases page and add a reference to your project: + +```xml +<Reference Include="Numerics"> + <HintPath>path\to\Numerics.dll</HintPath> +</Reference> +``` + +## Required Namespaces + +Import the namespaces you need at the top of your C# files: + +```cs +// Core distributions +using Numerics.Distributions; + +// Statistical functions +using Numerics.Data.Statistics; + +// Numerical methods +using Numerics.Mathematics.Integration; +using Numerics.Mathematics.Differentiation; +using Numerics.Mathematics.Optimization; +using Numerics.Mathematics.LinearAlgebra; +using Numerics.Mathematics.RootFinding; + +// Sampling and MCMC +using Numerics.Sampling; +using Numerics.Sampling.MCMC; + +// Interpolation +using Numerics.Data.Interpolation; +``` + +## Working with Distributions + +### Creating a Distribution + +All univariate distributions implement a consistent interface. 
You can create distributions by specifying parameters directly: + +```cs +using Numerics.Distributions; + +// Normal distribution with mean=100, std=15 +var normal = new Normal(100, 15); + +// Generalized Extreme Value with location=1000, scale=200, shape=-0.1 +var gev = new GeneralizedExtremeValue(1000, 200, -0.1); + +// Log-Pearson Type III (commonly used in hydrology) +var lp3 = new LogPearsonTypeIII(3.0, 0.5, 0.2); +``` + +### Probability Functions + +Every distribution provides standard probability functions: + +```cs +var dist = new Normal(0, 1); // Standard normal + +// Probability Density Function (PDF) +double density = dist.PDF(1.5); + +// Cumulative Distribution Function (CDF) +double probability = dist.CDF(1.96); // ≈ 0.975 + +// Inverse CDF (Quantile Function) +double quantile = dist.InverseCDF(0.975); // ≈ 1.96 + +// Complementary CDF (Survival Function) +double exceedance = dist.CCDF(1.96); // ≈ 0.025 +``` + +### Distribution Properties + +Access statistical properties directly: + +```cs +var dist = new Normal(100, 15); + +Console.WriteLine($"Mean: {dist.Mean}"); +Console.WriteLine($"Median: {dist.Median}"); +Console.WriteLine($"Mode: {dist.Mode}"); +Console.WriteLine($"Variance: {dist.Variance}"); +Console.WriteLine($"Std Dev: {dist.StandardDeviation}"); +Console.WriteLine($"Skewness: {dist.Skew}"); +Console.WriteLine($"Kurtosis: {dist.Kurtosis}"); +``` + +### Random Number Generation + +Generate random samples from any distribution: + +```cs +var dist = new Normal(100, 15); + +// Single random value +double x = dist.InverseCDF(new Random().NextDouble()); + +// Multiple random values (more efficient) +double[] samples = new double[1000]; +dist.GenerateRandomValues(samples); + +// With specific seed for reproducibility +dist.GenerateRandomValues(samples, seed: 12345); +``` + +## Fitting Distributions to Data + +### Parameter Estimation Methods + +***Numerics*** supports three estimation methods: + +```cs +double[] data = { 10.2, 15.1, 12.3, 18.7, 14.2, 16.8, 13.1, 17.5 }; + +var normal = new Normal(); + +// Method of Moments (MOM) +if (normal is IMomentEstimation mom) +{ + normal.SetParameters(mom.ParametersFromMoments(data)); +} + +// L-Moments (LMOM) - preferred for heavy-tailed distributions +if (normal is ILinearMomentEstimation lmom) +{ + normal.SetParameters(lmom.ParametersFromLinearMoments(data)); +} + +// Maximum Likelihood Estimation (MLE) +if (normal is IMaximumLikelihoodEstimation mle) +{ + normal.SetParameters(mle.MLE(data)); +} +``` + +### Hydrologic Frequency Analysis Example + +```cs +using Numerics.Distributions; +using Numerics.Data.Statistics; + +// Annual maximum streamflow data (cfs) +double[] annualMax = { 12500, 15200, 11800, 18900, 14200, + 16500, 13400, 17800, 10900, 19500 }; + +// Fit Log-Pearson Type III using L-Moments +var lp3 = new LogPearsonTypeIII(); +lp3.SetParameters(lp3.ParametersFromLinearMoments(annualMax)); + +// Compute flood quantiles +Console.WriteLine("Return Period Analysis:"); +Console.WriteLine($" 10-year flood (10% AEP): {lp3.InverseCDF(0.90):N0} cfs"); +Console.WriteLine($" 50-year flood (2% AEP): {lp3.InverseCDF(0.98):N0} cfs"); +Console.WriteLine($" 100-year flood (1% AEP): {lp3.InverseCDF(0.99):N0} cfs"); +Console.WriteLine($" 500-year flood (0.2% AEP):{lp3.InverseCDF(0.998):N0} cfs"); + +// Assess goodness-of-fit +double aic = GoodnessOfFit.AIC(lp3.NumberOfParameters, lp3.LogLikelihood(annualMax)); +Console.WriteLine($"\nAIC: {aic:F2}"); +``` + +## Numerical Integration + +### One-Dimensional Integration + +```cs +using 
Numerics.Mathematics.Integration; + +// Define the function to integrate +Func<double, double> f = x => Math.Exp(-x * x); + +// Adaptive Simpson's Rule (good general-purpose method) +var simpson = new AdaptiveSimpsonsRule(f, -5, 5); +simpson.Integrate(); +Console.WriteLine($"Simpson's: {simpson.Result:F10}"); + +// Gauss-Kronrod (higher accuracy for smooth functions) +var gk = new AdaptiveGaussKronrod(f, -5, 5); +gk.Integrate(); +Console.WriteLine($"Gauss-Kronrod: {gk.Result:F10}"); +``` + +### Multi-Dimensional Integration + +```cs +using Numerics.Mathematics.Integration; + +// 3D function: f(x,y,z) = x*y*z over [0,1]³ +Func<double[], double> f3d = x => x[0] * x[1] * x[2]; +double[] lower = { 0, 0, 0 }; +double[] upper = { 1, 1, 1 }; + +// Monte Carlo integration +var mc = new MonteCarlo(f3d, lower, upper); +mc.Iterations = 100000; +mc.Integrate(); +Console.WriteLine($"Monte Carlo: {mc.Result:F6} ± {mc.Error:F6}"); + +// VEGAS adaptive importance sampling +var vegas = new Vegas(f3d, lower, upper); +vegas.Iterations = 10000; +vegas.Integrate(); +Console.WriteLine($"VEGAS: {vegas.Result:F6} ± {vegas.Error:F6}"); +``` + +## Optimization + +### Local Optimization + +```cs +using Numerics.Mathematics.Optimization; + +// Rosenbrock function (classic test function) +Func<double[], double> rosenbrock = x => +{ + double a = 1 - x[0]; + double b = x[1] - x[0] * x[0]; + return a * a + 100 * b * b; +}; + +// BFGS (quasi-Newton method) +var bfgs = new BFGS(rosenbrock, 2, new double[] { -1, -1 }); +bfgs.Minimize(); +Console.WriteLine($"BFGS minimum at: ({bfgs.BestParameterSet[0]:F6}, {bfgs.BestParameterSet[1]:F6})"); +Console.WriteLine($"Function value: {bfgs.BestFitness:E6}"); +``` + +### Global Optimization + +```cs +using Numerics.Mathematics.Optimization; + +// Multi-modal function with many local minima +Func<double[], double> rastrigin = x => +{ + double sum = 10 * x.Length; + for (int i = 0; i < x.Length; i++) + sum += x[i] * x[i] - 10 * Math.Cos(2 * Math.PI * x[i]); + return sum; +}; + +double[] lower = { -5.12, -5.12 }; +double[] upper = { 5.12, 5.12 }; + +// Differential Evolution +var de = new DifferentialEvolution(rastrigin, 2, lower, upper); +de.Minimize(); +Console.WriteLine($"DE minimum at: ({de.BestParameterSet[0]:F6}, {de.BestParameterSet[1]:F6})"); +``` + +## MCMC Sampling + +### Basic Bayesian Inference + +```cs +using Numerics.Sampling.MCMC; +using Numerics.Distributions; + +// Observed data +double[] observations = { 5.2, 4.8, 5.1, 5.5, 4.9, 5.3, 5.0, 5.2 }; + +// Prior distributions for mean and standard deviation +var priors = new List<IUnivariateDistribution> // list element type assumed here +{ + new Normal(5, 2), // Prior for mean: N(5, 2) + new Uniform(0.1, 5) // Prior for std: U(0.1, 5) +}; + +// Log-likelihood function +double LogLikelihood(double[] theta) +{ + double mu = theta[0]; + double sigma = theta[1]; + if (sigma <= 0) return double.NegativeInfinity; + + var model = new Normal(mu, sigma); + return model.LogLikelihood(observations); +} + +// Run DE-MCz sampler +var sampler = new DEMCz(priors, LogLikelihood); +sampler.Iterations = 20000; +sampler.WarmupIterations = 5000; +sampler.Sample(); + +// Analyze results +var output = sampler.Output; +Console.WriteLine($"Mean estimate: {output.Mean(0):F3} [{output.Percentile(0, 0.025):F3}, {output.Percentile(0, 0.975):F3}]"); +Console.WriteLine($"Std estimate: {output.Mean(1):F3} [{output.Percentile(1, 0.025):F3}, {output.Percentile(1, 0.975):F3}]"); +``` + +## Performance Tips + +1. **Use parallelization** for large-scale Monte Carlo simulations: + ```cs + sampler.ParallelizeChains = true; + ``` + +2. 
**Pre-allocate arrays** when generating many random values: + ```cs + double[] values = new double[100000]; + dist.GenerateRandomValues(values); + ``` + +3. **Choose appropriate integration method** based on function smoothness: + - Smooth functions → Gauss-Kronrod or Adaptive Simpson's + - Functions with discontinuities → Adaptive methods with smaller tolerance + - High dimensions → Monte Carlo methods + +4. **Set reasonable tolerances** for iterative methods to balance accuracy and speed. + +## Next Steps + +- Explore the [Univariate Distributions](distributions/univariate.md) reference +- Learn about [Parameter Estimation](distributions/parameter-estimation.md) methods +- Understand [Goodness-of-Fit](statistics/goodness-of-fit.md) metrics +- Dive into [MCMC Methods](sampling/mcmc.md) for Bayesian inference + +--- + +## References + +[1] Press, W. H., Teukolsky, S. A., Vetterling, W. T., & Flannery, B. P. (2007). *Numerical Recipes: The Art of Scientific Computing* (3rd ed.). Cambridge University Press. diff --git a/docs/index.md b/docs/index.md new file mode 100644 index 00000000..b6ca652b --- /dev/null +++ b/docs/index.md @@ -0,0 +1,216 @@ +# Numerics Library Documentation + +## Overview + +***Numerics*** is a comprehensive numerical computing library for .NET, developed by the U.S. Army Corps of Engineers Risk Management Center for quantitative risk assessment applications. The library provides robust implementations of probability distributions, statistical analysis, numerical methods, optimization algorithms, and Markov Chain Monte Carlo (MCMC) sampling techniques. + +The library is designed for engineers, scientists, and researchers who need reliable numerical computing capabilities, particularly in the domains of: + +- Hydrological frequency analysis +- Infrastructure risk assessment +- Monte Carlo simulation +- Bayesian parameter estimation +- Statistical model validation + +## Key Features + +### Probability Distributions +- 40+ univariate probability distributions with PDF, CDF, and inverse CDF +- Multiple parameter estimation methods (Method of Moments, L-Moments, Maximum Likelihood) +- Uncertainty analysis via bootstrap resampling +- Bivariate copulas for dependency modeling +- Multivariate normal distribution + +### Statistical Analysis +- Comprehensive goodness-of-fit metrics (NSE, KGE, RMSE, PBIAS, AIC/BIC) +- Descriptive statistics and hypothesis tests +- Autocorrelation and time series analysis +- Outlier detection (Multiple Grubbs-Beck Test) + +### Numerical Methods +- Adaptive numerical integration (Simpson's Rule, Gauss-Lobatto, Gauss-Kronrod) +- Multidimensional integration (Monte Carlo, MISER, VEGAS) +- Numerical differentiation (two-point, Ridder's method) +- Root finding algorithms (Bisection, Brent, Newton-Raphson) +- ODE solvers (Runge-Kutta methods) + +### Optimization +- Local optimization (BFGS, Nelder-Mead, Powell, Golden Section) +- Global optimization (Differential Evolution, SCE-UA, Simulated Annealing, Particle Swarm) +- Constrained optimization (Augmented Lagrangian) + +### MCMC Sampling +- Random Walk Metropolis-Hastings (RWMH) +- Adaptive Random Walk Metropolis-Hastings (ARWMH) +- Differential Evolution MCMC (DE-MCz, DE-MCzs) +- Hamiltonian Monte Carlo (HMC) +- Gibbs sampling +- Convergence diagnostics (Gelman-Rubin, Effective Sample Size) + +## Quick Start + +### Creating a Distribution + +```cs +using Numerics.Distributions; + +// Create a Normal distribution +var normal = new Normal(100, 15); + +// Compute probability functions +double pdf = 
normal.PDF(110); // f(110) +double cdf = normal.CDF(110); // P(X ≤ 110) +double quantile = normal.InverseCDF(0.95); // x such that P(X ≤ x) = 0.95 + +Console.WriteLine($"PDF(110) = {pdf:F6}"); +Console.WriteLine($"CDF(110) = {cdf:F4}"); +Console.WriteLine($"95th percentile = {quantile:F2}"); +``` + +### Fitting a Distribution to Data + +```cs +using Numerics.Distributions; + +double[] annualMaxFlows = { 1200, 1500, 1100, 1800, 1350, 1600, 1250, 1450 }; + +// Fit using L-Moments (recommended for hydrologic data) +var gev = new GeneralizedExtremeValue(); +gev.SetParameters(gev.ParametersFromLinearMoments(annualMaxFlows)); + +// Compute the 100-year flood (1% annual exceedance probability) +double q100 = gev.InverseCDF(0.99); +Console.WriteLine($"100-year flood estimate: {q100:F0} cfs"); +``` + +### Numerical Integration + +```cs +using Numerics.Mathematics.Integration; + +// Integrate f(x) = x² from 0 to 1 +Func f = x => x * x; +var asr = new AdaptiveSimpsonsRule(f, 0, 1); +asr.Integrate(); +double result = asr.Result; // 0.333... +Console.WriteLine($"∫₀¹ x² dx = {result:F10}"); +``` + +### MCMC Sampling + +```cs +using Numerics.Sampling.MCMC; +using Numerics.Distributions; + +// Define prior distributions +var priors = new List +{ + new Normal(0, 10), // Prior for parameter 1 + new Uniform(0, 100) // Prior for parameter 2 +}; + +// Define log-likelihood function +double LogLikelihood(double[] parameters) +{ + // Your likelihood calculation here + return -0.5 * Math.Pow(parameters[0] - 5, 2); +} + +// Create and run sampler +var sampler = new DEMCz(priors, LogLikelihood); +sampler.Iterations = 10000; +sampler.Sample(); + +// Get results +var results = sampler.Output; +``` + +## Documentation Structure + +📘 **Status Legend:** +- ✅ = Reviewed and updated with accurate code examples +- 📝 = Draft (needs verification against actual library) + +| Document | Status | Description | +|----------|--------|-------------| +| [Getting Started](getting-started.md) | ✅ | Installation and basic usage patterns | +| **Mathematics** | | | +| [Numerical Integration](mathematics/integration.md) | ✅ | Comprehensive guide to 1D, 2D, and multidimensional integration | +| [Numerical Differentiation](mathematics/differentiation.md) | ✅ | Derivatives, gradients, Hessians, and Jacobians | +| [Optimization](mathematics/optimization.md) | ✅ | Local and global optimization algorithms | +| [Root Finding](mathematics/root-finding.md) | ✅ | Equation solving methods | +| [Linear Algebra](mathematics/linear-algebra.md) | ✅ | Matrix and vector operations | +| [Special Functions](mathematics/special-functions.md) | ✅ | Gamma, Beta, Error functions | +| [ODE Solvers](mathematics/ode-solvers.md) | ✅ | Runge-Kutta methods | +| **Distributions** | | | +| [Univariate Distributions](distributions/univariate.md) | ✅ | Complete reference for univariate distributions | +| [Multivariate Distributions](distributions/multivariate.md) | ✅ | Multivariate Normal distribution | +| [Parameter Estimation](distributions/parameter-estimation.md) | ✅ | Fitting distributions to data | +| [Uncertainty Analysis](distributions/uncertainty-analysis.md) | ✅ | Bootstrap and confidence intervals | +| [Copulas](distributions/copulas.md) | ✅ | Dependency modeling with copulas | +| **Statistics** | | | +| [Descriptive Statistics](statistics/descriptive.md) | ✅ | Summary statistics functions | +| [Goodness-of-Fit](statistics/goodness-of-fit.md) | ✅ | Model evaluation metrics | +| [Hypothesis Tests](statistics/hypothesis-tests.md) | ✅ | Statistical hypothesis 
testing | +| **Data** | | | +| [Interpolation](data/interpolation.md) | ✅ | Interpolation methods | +| [Time Series](data/time-series.md) | ✅ | Time series data structures and analysis | +| **Machine Learning** | | | +| [Overview](machine-learning/overview.md) | ✅ | Supervised and unsupervised learning algorithms | +| **Sampling** | | | +| [MCMC Methods](sampling/mcmc.md) | ✅ | Markov Chain Monte Carlo samplers | +| [Convergence Diagnostics](sampling/convergence-diagnostics.md) | ✅ | MCMC convergence assessment | +| [Random Generation](sampling/random-generation.md) | ✅ | PRNGs, quasi-random, and sampling methods | +| [References](references.md) | ✅ | Complete bibliography | + +## Namespaces + +| Namespace | Description | +|-----------|-------------| +| `Numerics.Distributions` | Probability distributions and copulas | +| `Numerics.Data.Statistics` | Statistical functions and tests | +| `Numerics.Data.Interpolation` | Interpolation methods | +| `Numerics.Data.TimeSeries` | Time series data structures | +| `Numerics.Mathematics` | Base namespace for mathematical operations | +| `Numerics.Mathematics.Integration` | Numerical integration methods | +| `Numerics.Mathematics.Differentiation` | Numerical differentiation (via NumericalDerivative class) | +| `Numerics.Mathematics.Optimization` | Optimization algorithms | +| `Numerics.Mathematics.LinearAlgebra` | Matrix and vector operations | +| `Numerics.Mathematics.RootFinding` | Root finding algorithms | +| `Numerics.Mathematics.SpecialFunctions` | Gamma, Beta, Error functions | +| `Numerics.Sampling` | Random sampling and stratification | +| `Numerics.Sampling.MCMC` | MCMC samplers and diagnostics | + +## Installation + +Install via NuGet Package Manager: + +``` +Install-Package RMC.Numerics +``` + +Or using the .NET CLI: + +``` +dotnet add package RMC.Numerics +``` + +## Support and Contributing + +This library is developed and maintained by the U.S. Army Corps of Engineers Risk Management Center. For questions, bug reports, or feature requests, please contact the development team. + +## License + +This software is provided under a BSD-3-Clause license. See the LICENSE file for complete terms. + +--- + +## Key References + +[1] Press, W. H., Teukolsky, S. A., Vetterling, W. T., & Flannery, B. P. (2007). *Numerical Recipes: The Art of Scientific Computing* (3rd ed.). Cambridge University Press. + +[2] Hosking, J. R. M. (1990). L-moments: Analysis and estimation of distributions using linear combinations of order statistics. *Journal of the Royal Statistical Society: Series B*, 52(1), 105-124. + +[3] ter Braak, C. J. F., & Vrugt, J. A. (2008). Differential Evolution Markov Chain with snooker updater and fewer chains. *Statistics and Computing*, 18(4), 435-446. + +[4] Moriasi, D. N., et al. (2007). Model evaluation guidelines for systematic quantification of accuracy in watershed simulations. *Transactions of the ASABE*, 50(3), 885-900. diff --git a/docs/machine-learning/overview.md b/docs/machine-learning/overview.md new file mode 100644 index 00000000..85624eb8 --- /dev/null +++ b/docs/machine-learning/overview.md @@ -0,0 +1,711 @@ +# Machine Learning + +[← Back to Index](../index.md) + +The ***Numerics*** library provides machine learning algorithms for both supervised and unsupervised learning tasks. These implementations are designed for engineering and scientific applications including classification, regression, and clustering. 
+ +## Overview + +**Supervised Learning:** +- Generalized Linear Models (GLM) +- Decision Trees +- Random Forests +- k-Nearest Neighbors (KNN) +- Naive Bayes + +**Unsupervised Learning:** +- k-Means Clustering +- Gaussian Mixture Models (GMM) +- Jenks Natural Breaks + +--- + +## Supervised Learning + +### Generalized Linear Models (GLM) + +GLMs extend linear regression to non-normal response distributions [[1]](#1): + +```cs +using Numerics.MachineLearning; +using Numerics.Mathematics.LinearAlgebra; + +// Training data +double[,] X = { + { 1, 2.5, 1.2 }, // Observation 1: [intercept, feature1, feature2] + { 1, 3.1, 1.5 }, + { 1, 2.8, 1.1 }, + { 1, 3.5, 1.8 }, + { 1, 2.2, 0.9 } +}; + +double[] y = { 45.2, 52.3, 47.8, 58.1, 42.5 }; // Response variable + +// Create GLM +var glm = new GeneralizedLinearModel( + x: new Matrix(X), + y: new Vector(y), + family: GLMFamily.Normal, // Distribution family + linkFunction: LinkFunction.Identity // Link function +); + +// Set optimizer (optional) +glm.SetOptimizer(LocalMethod.NelderMead); + +// Train model +glm.Train(); + +Console.WriteLine("GLM Results:"); +Console.WriteLine($"Parameters: [{string.Join(", ", glm.Parameters.Select(p => p.ToString("F4")))}]"); +Console.WriteLine($"Standard Errors: [{string.Join(", ", glm.ParameterStandardErrors.Select(se => se.ToString("F4")))}]"); +Console.WriteLine($"p-values: [{string.Join(", ", glm.ParameterPValues.Select(p => p.ToString("F4")))}]"); + +// Model selection criteria +Console.WriteLine($"\nModel Selection:"); +Console.WriteLine($" AIC: {glm.AIC:F2}"); +Console.WriteLine($" BIC: {glm.BIC:F2}"); +Console.WriteLine($" Standard Error: {glm.StandardError:F4}"); + +// Make predictions +double[,] XNew = { + { 1, 3.0, 1.4 }, + { 1, 2.6, 1.0 } +}; + +double[] predictions = glm.Predict(new Matrix(XNew)); + +Console.WriteLine($"\nPredictions:"); +for (int i = 0; i < predictions.Length; i++) +{ + Console.WriteLine($" X_new[{i}] → {predictions[i]:F2}"); +} + +// Prediction intervals (alpha = 0.1 for 90% interval) +double[,] intervals = glm.Predict(new Matrix(XNew), alpha: 0.1); + +Console.WriteLine($"\n90% Prediction Intervals:"); +for (int i = 0; i < XNew.GetLength(0); i++) +{ + Console.WriteLine($" X_new[{i}]: [{intervals[i, 0]:F2}, {intervals[i, 1]:F2}]"); +} +``` + +**Supported Families:** +- `GLMFamily.Normal` - Gaussian (linear regression) +- `GLMFamily.Binomial` - Binary outcomes (logistic regression) +- `GLMFamily.Poisson` - Count data +- `GLMFamily.Gamma` - Positive continuous data + +**Link Functions:** +- `LinkFunction.Identity` - g(μ) = μ +- `LinkFunction.Log` - g(μ) = log(μ) +- `LinkFunction.Logit` - g(μ) = log(μ/(1-μ)) +- `LinkFunction.Probit` - g(μ) = Φ⁻¹(μ) + +### Decision Trees + +Classification and regression trees [[2]](#2): + +```cs +using Numerics.MachineLearning; + +// Classification example +double[,] X = { + { 5.1, 3.5, 1.4, 0.2 }, // Iris features + { 4.9, 3.0, 1.4, 0.2 }, + { 7.0, 3.2, 4.7, 1.4 }, + { 6.4, 3.2, 4.5, 1.5 }, + { 6.3, 3.3, 6.0, 2.5 }, + { 5.8, 2.7, 5.1, 1.9 } +}; + +double[] y = { 0, 0, 1, 1, 2, 2 }; // Classes: Setosa(0), Versicolor(1), Virginica(2) + +// Create decision tree +var tree = new DecisionTree( + X: X, + y: y, + maxDepth: 5, // Maximum tree depth + minSamplesSplit: 2, // Minimum samples to split node + minSamplesLeaf: 1 // Minimum samples in leaf +); + +// Train +tree.Train(); + +Console.WriteLine($"Decision Tree Trained: {tree.IsTrained}"); + +// Predict +double[] testSample = { 5.0, 3.0, 1.6, 0.2 }; +double[] prediction = tree.Predict(testSample); + 
+Console.WriteLine($"Prediction for test sample: Class {prediction[0]}"); + +// Predict multiple samples +double[,] testSamples = { + { 5.0, 3.0, 1.6, 0.2 }, + { 6.0, 3.0, 4.5, 1.5 }, + { 6.5, 3.0, 5.5, 2.0 } +}; + +double[] predictions = tree.Predict(testSamples); + +Console.WriteLine("\nBatch predictions:"); +for (int i = 0; i < predictions.Length; i++) +{ + Console.WriteLine($" Sample {i}: Class {predictions[i]}"); +} +``` + +### Random Forests + +Ensemble of decision trees for improved accuracy [[3]](#3): + +```cs +using Numerics.MachineLearning; + +double[,] X = { + // Same Iris data as above + { 5.1, 3.5, 1.4, 0.2 }, + { 4.9, 3.0, 1.4, 0.2 }, + { 7.0, 3.2, 4.7, 1.4 }, + { 6.4, 3.2, 4.5, 1.5 }, + { 6.3, 3.3, 6.0, 2.5 }, + { 5.8, 2.7, 5.1, 1.9 } +}; + +double[] y = { 0, 0, 1, 1, 2, 2 }; + +// Create random forest +var forest = new RandomForest( + X: X, + y: y, + nTrees: 100, // Number of trees + maxDepth: 5, + minSamplesSplit: 2, + minSamplesLeaf: 1, + maxFeatures: 2, // Features per split + bootstrap: true, // Bootstrap sampling + seed: 12345 +); + +// Train +forest.Train(); + +Console.WriteLine($"Random Forest Trained: {forest.IsTrained}"); +Console.WriteLine($"Number of trees: {forest.NTrees}"); + +// Predict with confidence intervals +double[] testSample = { 5.0, 3.0, 1.6, 0.2 }; +double[,] result = forest.Predict(testSample, alpha: 0.1); // 90% CI + +Console.WriteLine($"\nPrediction:"); +Console.WriteLine($" Predicted class: {result[0, 0]:F0}"); +Console.WriteLine($" 90% CI: [{result[0, 1]:F2}, {result[0, 2]:F2}]"); + +// Batch prediction +double[,] testSamples = { + { 5.0, 3.0, 1.6, 0.2 }, + { 6.0, 3.0, 4.5, 1.5 } +}; + +double[,] results = forest.Predict(testSamples, alpha: 0.1); + +Console.WriteLine($"\nBatch predictions:"); +for (int i = 0; i < testSamples.GetLength(0); i++) +{ + Console.WriteLine($" Sample {i}: Class {results[i, 0]:F0}, " + + $"CI [{results[i, 1]:F2}, {results[i, 2]:F2}]"); +} +``` + +**Advantages of Random Forests:** +- Reduces overfitting compared to single tree +- Provides prediction uncertainty +- Handles missing values well +- Works with mixed feature types + +### k-Nearest Neighbors (KNN) + +Non-parametric classification and regression [[4]](#4): + +```cs +using Numerics.MachineLearning; + +double[,] X = { + { 1.0, 2.0 }, + { 1.5, 1.8 }, + { 5.0, 8.0 }, + { 8.0, 8.0 }, + { 1.0, 0.6 }, + { 9.0, 11.0 } +}; + +double[] y = { 0, 0, 1, 1, 0, 1 }; // Binary classification + +// Create KNN classifier +var knn = new KNearestNeighbors( + X: X, + y: y, + k: 3, // Number of neighbors + weights: "uniform" // "uniform" or "distance" +); + +// KNN doesn't require explicit training +// Prediction happens at query time + +// Predict +double[] testPoint = { 2.0, 3.0 }; +double prediction = knn.Predict(testPoint); + +Console.WriteLine($"KNN Prediction for [{testPoint[0]}, {testPoint[1]}]: Class {prediction}"); + +// Predict with probability estimates +double[,] probs = knn.PredictProba(testPoint); + +Console.WriteLine($"Class probabilities:"); +for (int i = 0; i < probs.GetLength(0); i++) +{ + Console.WriteLine($" Class {i}: {probs[i, 0]:P1}"); +} +``` + +**Distance Metrics:** +- Euclidean (default) +- Manhattan +- Minkowski + +**Choosing k:** +- Small k: More sensitive to noise +- Large k: Smoother boundaries +- Rule of thumb: k = √n or use cross-validation + +### Naive Bayes + +Probabilistic classifier based on Bayes' theorem [[5]](#5): + +```cs +using Numerics.MachineLearning; + +// Text classification example (word counts) +double[,] X = { + { 2, 1, 0, 1 }, // 
Document 1: word counts + { 1, 1, 1, 0 }, + { 0, 3, 2, 1 }, + { 1, 0, 1, 2 } +}; + +double[] y = { 0, 0, 1, 1 }; // Classes: spam(1), ham(0) + +// Create Naive Bayes +var nb = new NaiveBayes(X: X, y: y); + +// Train +nb.Train(); + +Console.WriteLine("Naive Bayes trained"); + +// Predict +double[] testDoc = { 1, 2, 0, 1 }; +double prediction = nb.Predict(testDoc); + +Console.WriteLine($"Prediction: Class {prediction}"); + +// Class probabilities +double[] probabilities = nb.PredictProba(testDoc); + +Console.WriteLine($"Class probabilities:"); +Console.WriteLine($" Class 0 (ham): {probabilities[0]:P1}"); +Console.WriteLine($" Class 1 (spam): {probabilities[1]:P1}"); +``` + +**Assumptions:** +- Features are conditionally independent given class +- Works well despite violation of independence +- Fast training and prediction +- Good for text classification + +--- + +## Unsupervised Learning + +### k-Means Clustering + +Partition data into k clusters [[6]](#6): + +```cs +using Numerics.MachineLearning; + +// 2D data points +double[,] X = { + { 1.0, 2.0 }, + { 1.5, 1.8 }, + { 5.0, 8.0 }, + { 8.0, 8.0 }, + { 1.0, 0.6 }, + { 9.0, 11.0 }, + { 8.0, 2.0 }, + { 10.0, 2.0 }, + { 9.0, 3.0 } +}; + +// Create k-means with 3 clusters +var kmeans = new KMeans(X: X, k: 3); + +// Configure +kmeans.MaxIterations = 100; +kmeans.Tolerance = 1e-4; +kmeans.Seed = 12345; + +// Fit +kmeans.Fit(); + +Console.WriteLine($"k-Means Clustering (k={kmeans.K}):"); +Console.WriteLine($"Converged: {kmeans.HasConverged}"); +Console.WriteLine($"Iterations: {kmeans.Iterations}"); +Console.WriteLine($"Inertia: {kmeans.Inertia:F2}"); + +// Cluster centers +Console.WriteLine($"\nCluster Centers:"); +for (int i = 0; i < kmeans.K; i++) +{ + Console.WriteLine($" Cluster {i}: [{kmeans.Means[i, 0]:F2}, {kmeans.Means[i, 1]:F2}]"); +} + +// Cluster labels +Console.WriteLine($"\nCluster Assignments:"); +for (int i = 0; i < X.GetLength(0); i++) +{ + Console.WriteLine($" Point [{X[i, 0]:F1}, {X[i, 1]:F1}] → Cluster {kmeans.Labels[i]}"); +} + +// Predict cluster for new point +double[] newPoint = { 2.0, 3.0 }; +int cluster = kmeans.Predict(newPoint); + +Console.WriteLine($"\nNew point [{newPoint[0]}, {newPoint[1]}] → Cluster {cluster}"); + +// Cluster sizes +var clusterSizes = kmeans.Labels.GroupBy(l => l).Select(g => g.Count()).ToArray(); +Console.WriteLine($"\nCluster sizes: [{string.Join(", ", clusterSizes)}]"); +``` + +**Choosing k:** +- Elbow method (plot inertia vs. 
k) +- Silhouette analysis +- Domain knowledge + +**Initialization Methods:** +- Random selection +- k-means++ (default, better initialization) + +### Gaussian Mixture Models (GMM) + +Probabilistic clustering with soft assignments [[7]](#7): + +```cs +using Numerics.MachineLearning; + +double[,] X = { + // Same data as k-means example + { 1.0, 2.0 }, { 1.5, 1.8 }, { 5.0, 8.0 }, + { 8.0, 8.0 }, { 1.0, 0.6 }, { 9.0, 11.0 } +}; + +// Create GMM with 2 components +var gmm = new GaussianMixtureModel( + X: X, + nComponents: 2, + covarianceType: "full" // "full", "tied", "diag", "spherical" +); + +// Configure +gmm.MaxIterations = 100; +gmm.Tolerance = 1e-3; +gmm.Seed = 12345; + +// Fit using EM algorithm +gmm.Fit(); + +Console.WriteLine($"GMM Clustering ({gmm.NComponents} components):"); +Console.WriteLine($"Converged: {gmm.HasConverged}"); +Console.WriteLine($"Log-likelihood: {gmm.LogLikelihood:F2}"); +Console.WriteLine($"BIC: {gmm.BIC:F2}"); +Console.WriteLine($"AIC: {gmm.AIC:F2}"); + +// Component parameters +Console.WriteLine($"\nComponent Parameters:"); +for (int i = 0; i < gmm.NComponents; i++) +{ + Console.WriteLine($" Component {i}:"); + Console.WriteLine($" Weight: {gmm.Weights[i]:F3}"); + Console.WriteLine($" Mean: [{string.Join(", ", gmm.Means[i].Select(m => m.ToString("F2")))}]"); +} + +// Predict (hard assignment) +double[] newPoint = { 2.0, 3.0 }; +int component = gmm.Predict(newPoint); + +Console.WriteLine($"\nNew point [{newPoint[0]}, {newPoint[1]}] → Component {component}"); + +// Predict probabilities (soft assignment) +double[] probabilities = gmm.PredictProba(newPoint); + +Console.WriteLine($"Component probabilities:"); +for (int i = 0; i < probabilities.Length; i++) +{ + Console.WriteLine($" Component {i}: {probabilities[i]:P1}"); +} +``` + +**Advantages over k-Means:** +- Soft clustering (probabilistic assignments) +- Flexible cluster shapes (elliptical vs. spherical) +- Provides uncertainty quantification +- Can model overlapping clusters + +### Jenks Natural Breaks + +Optimal classification for univariate data [[8]](#8): + +```cs +using Numerics.MachineLearning; + +// Data values (e.g., elevation, rainfall, etc.) 
+double[] data = { 10, 12, 15, 18, 22, 25, 28, 35, 40, 45, 50, 55, 60, 70, 80 }; + +// Find natural breaks with 4 classes +int nClasses = 4; +var jenks = new JenksNaturalBreaks(data, nClasses); + +jenks.Compute(); + +Console.WriteLine($"Jenks Natural Breaks ({nClasses} classes):"); +Console.WriteLine($"Class breaks: [{string.Join(", ", jenks.Breaks.Select(b => b.ToString("F1")))}]"); +Console.WriteLine($"Goodness of variance fit: {jenks.GoodnessOfVarianceFit:F4}"); + +// Classify data +int[] classes = jenks.Classify(data); + +Console.WriteLine($"\nData classification:"); +for (int i = 0; i < Math.Min(10, data.Length); i++) +{ + Console.WriteLine($" Value {data[i]:F1} → Class {classes[i]}"); +} + +// Class statistics +for (int c = 0; c < nClasses; c++) +{ + var classData = data.Where((v, i) => classes[i] == c).ToArray(); + Console.WriteLine($"\nClass {c}:"); + Console.WriteLine($" Range: [{classData.Min():F1}, {classData.Max():F1}]"); + Console.WriteLine($" Count: {classData.Length}"); + Console.WriteLine($" Mean: {classData.Average():F1}"); +} +``` + +**Applications:** +- Choropleth map classification +- Data binning for visualization +- Natural grouping identification +- Minimizes within-class variance + +--- + +## Practical Examples + +### Example 1: Regression with GLM + +```cs +using Numerics.MachineLearning; +using Numerics.Mathematics.LinearAlgebra; + +// Predict home prices +double[,] features = { + { 1, 1500, 3, 20 }, // [intercept, sqft, bedrooms, age] + { 1, 1800, 4, 15 }, + { 1, 1200, 2, 30 }, + { 1, 2000, 4, 10 }, + { 1, 1600, 3, 25 } +}; + +double[] prices = { 250000, 320000, 190000, 380000, 270000 }; // $ + +var glm = new GeneralizedLinearModel( + new Matrix(features), + new Vector(prices), + GLMFamily.Normal, + LinkFunction.Identity +); + +glm.Train(); + +Console.WriteLine("Home Price Prediction Model:"); +Console.WriteLine($"Coefficients:"); +Console.WriteLine($" Intercept: ${glm.Parameters[0]:F0}"); +Console.WriteLine($" Per sqft: ${glm.Parameters[1]:F2}"); +Console.WriteLine($" Per bedroom: ${glm.Parameters[2]:F0}"); +Console.WriteLine($" Per year age: ${glm.Parameters[3]:F0}"); + +// Predict new home +double[,] newHome = { { 1, 1700, 3, 12 } }; +double predicted = glm.Predict(new Matrix(newHome))[0]; +double[,] interval = glm.Predict(new Matrix(newHome), alpha: 0.1); + +Console.WriteLine($"\nPrediction for 1700 sqft, 3BR, 12 years:"); +Console.WriteLine($" Predicted price: ${predicted:F0}"); +Console.WriteLine($" 90% Interval: [${interval[0, 0]:F0}, ${interval[0, 1]:F0}]"); +``` + +### Example 2: Classification Pipeline + +```cs +// Iris classification +double[,] X_train = LoadIrisFeatures(); // Load training data +double[] y_train = LoadIrisLabels(); +double[,] X_test = LoadIrisTestFeatures(); +double[] y_test = LoadIrisTestLabels(); + +// Train random forest +var rf = new RandomForest(X_train, y_train, nTrees: 100, seed: 42); +rf.Train(); + +// Evaluate +double[,] predictions = rf.Predict(X_test); +int correct = 0; +for (int i = 0; i < y_test.Length; i++) +{ + if (predictions[i, 0] == y_test[i]) + correct++; +} + +double accuracy = (double)correct / y_test.Length; + +Console.WriteLine($"Random Forest Classification:"); +Console.WriteLine($" Accuracy: {accuracy:P1}"); +Console.WriteLine($" Correct: {correct}/{y_test.Length}"); +``` + +### Example 3: Customer Segmentation + +```cs +// Customer data: [annual_spending, visit_frequency, avg_basket_size] +double[,] customers = { + { 1200, 24, 50 }, // Regular customer + { 5000, 52, 95 }, // High-value customer + { 300, 6, 45 
}, // Occasional customer + { 4800, 48, 100 }, // High-value customer + { 800, 12, 65 }, // Regular customer + { 250, 4, 55 }, // Occasional customer + { 6000, 60, 105 } // VIP customer +}; + +// Cluster into 3 segments +var kmeans = new KMeans(customers, k: 3); +kmeans.Fit(); + +Console.WriteLine("Customer Segmentation:"); +for (int i = 0; i < 3; i++) +{ + var segment = Enumerable.Range(0, customers.GetLength(0)) + .Where(j => kmeans.Labels[j] == i) + .ToArray(); + + Console.WriteLine($"\nSegment {i} ({segment.Length} customers):"); + Console.WriteLine($" Avg spending: ${segment.Average(j => customers[j, 0]):F0}"); + Console.WriteLine($" Avg visits: {segment.Average(j => customers[j, 1]):F0}/year"); + Console.WriteLine($" Avg basket: ${segment.Average(j => customers[j, 2]):F0}"); +} +``` + +## Model Selection and Evaluation + +### Cross-Validation + +```cs +// Simple k-fold cross-validation +int k = 5; +int n = X.GetLength(0); +int foldSize = n / k; + +double[] accuracies = new double[k]; + +for (int fold = 0; fold < k; fold++) +{ + // Split data into train/test + var trainIndices = Enumerable.Range(0, n) + .Where(i => i < fold * foldSize || i >= (fold + 1) * foldSize) + .ToArray(); + + var testIndices = Enumerable.Range(fold * foldSize, foldSize).ToArray(); + + // Train and evaluate + // ... (extract train/test sets, train model, compute accuracy) + + accuracies[fold] = ComputeAccuracy(testIndices); +} + +Console.WriteLine($"Cross-Validation Results:"); +Console.WriteLine($" Mean accuracy: {accuracies.Average():P1}"); +Console.WriteLine($" Std dev: {Statistics.StandardDeviation(accuracies):F4}"); +``` + +### Model Comparison + +```cs +// Compare models on same dataset +var models = new[] { + ("Decision Tree", new DecisionTree(X, y)), + ("Random Forest", new RandomForest(X, y, nTrees: 50)), + ("KNN (k=3)", new KNearestNeighbors(X, y, k: 3)) +}; + +Console.WriteLine("Model Comparison:"); +foreach (var (name, model) in models) +{ + model.Train(); + double accuracy = EvaluateModel(model, X_test, y_test); + Console.WriteLine($" {name}: {accuracy:P1}"); +} +``` + +## Best Practices + +### Supervised Learning +1. **Split data** - Use train/test split or cross-validation +2. **Normalize features** - Especially for distance-based methods (KNN) +3. **Handle imbalanced classes** - Use stratified sampling or class weights +4. **Tune hyperparameters** - Grid search or random search +5. **Validate assumptions** - Check residuals for GLM +6. **Ensemble methods** - Random Forests often outperform single trees + +### Unsupervised Learning +1. **Scale features** - Clustering sensitive to feature scales +2. **Choose k carefully** - Use elbow method or silhouette scores +3. **Multiple runs** - k-Means sensitive to initialization +4. **Validate clusters** - Inspect cluster characteristics +5. **Consider GMM** - When clusters overlap or have different shapes + +--- + +## References + +[1] Nelder, J. A., & Wedderburn, R. W. M. (1972). Generalized linear models. *Journal of the Royal Statistical Society: Series A*, 135(3), 370-384. + +[2] Breiman, L., Friedman, J., Stone, C. J., & Olshen, R. A. (1984). *Classification and Regression Trees*. CRC Press. + +[3] Breiman, L. (2001). Random forests. *Machine Learning*, 45(1), 5-32. + +[4] Cover, T., & Hart, P. (1967). Nearest neighbor pattern classification. *IEEE Transactions on Information Theory*, 13(1), 21-27. + +[5] Zhang, H. (2004). The optimality of naive Bayes. *AA*, 1(2), 3. + +[6] MacQueen, J. (1967). 
Some methods for classification and analysis of multivariate observations. *Proceedings of the Fifth Berkeley Symposium on Mathematical Statistics and Probability*, 1(14), 281-297. + +[7] Bishop, C. M. (2006). *Pattern Recognition and Machine Learning*. Springer. + +[8] Jenks, G. F. (1967). The data model concept in statistical mapping. *International Yearbook of Cartography*, 7, 186-190. + +--- + +[← Back to Index](../index.md) diff --git a/docs/mathematics/differentiation.md b/docs/mathematics/differentiation.md new file mode 100644 index 00000000..ee13c545 --- /dev/null +++ b/docs/mathematics/differentiation.md @@ -0,0 +1,446 @@ +# Numerical Differentiation + +[← Previous: Numerical Integration](integration.md) | [Back to Index](../index.md) | [Next: Optimization →](optimization.md) + +Numerical differentiation is a fundamental technique in various scientific and engineering fields. Many optimization algorithms, such as gradient descent and Newton's method, rely on calculating the gradient of a function to find its minimum or maximum points. Some optimization methods, like quasi-Newton methods, use the Hessian matrix (the matrix of second-order derivatives) to determine the curvature of the function. Numerical differentiation provides a way to approximate both the gradient and the Hessian matrix efficiently. + +## Derivatives + +In ***Numerics***, the derivative is evaluated using the two-point (central difference) formula by default: + +```math +\frac{df}{dx} = \frac{f(x + h) - f(x - h)}{2h} +``` + +where $x$ is the input point and $h$ represents a small change in $x$. In ***Numerics***, the step size $h$ is automatically determined according to the magnitude of $x$: + +```math +\begin{equation} + h = + \begin{cases} + \mid x \mid \cdot \epsilon^\frac{1}{2} & x \neq 0\\ + \epsilon^\frac{1}{2} & x = 0\\ + \end{cases} +\end{equation} +``` + +where $\epsilon$ is double precision machine epsilon. The step size $h$ can also be user-defined. + +For example, consider the simple function: + +```math +f(x)=x^3 +``` + +Differentiating with respect to $x$ gives: + +```math +\frac{df}{dx} =3x^2 +``` + +Evaluating the function at $x=2$ yields a derivative equal to 12: + +```math +\frac{df}{dx} =3\cdot2^2 = 12 +``` + +Now, let's implement this in ***Numerics***. First, we need to reference _Numerics_ and the _Mathematics_ namespace: + +```cs +using Numerics.Mathematics; +``` + +Next, create the test function: + +```cs +/// +/// Test function: f(x) = x^3 +/// +public double FX(double x) +{ + return Math.Pow(x, 3); +} +``` + +And then compute the derivative using the _NumericalDerivative_ class and the _Derivative_ method, which uses the two-point (central difference) formula: + +```cs +double dFdx = NumericalDerivative.Derivative(FX, 2); // 11.999999949167176 +``` + +The accuracy of numerical differentiation depends on the smoothness of the function and the step size. For functions with discontinuities or sharp gradients, the two-point formula might not accurately capture the derivative at those points. If the function changes rapidly, a larger step size might miss important details. 
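+
+Before moving on, here is a minimal standalone sketch of the two-point formula and the automatic step-size rule above, applied to the same test function. This is illustrative only and is not the library's internal implementation; the local function name and the explicit use of machine epsilon are assumptions made for the example:
+
+```cs
+using System;
+
+// Two-point (central difference) formula with h = |x|·sqrt(eps), or sqrt(eps) when x = 0
+static double CentralDerivative(Func<double, double> f, double x)
+{
+    double sqrtEps = Math.Sqrt(Math.Pow(2, -52)); // sqrt of double-precision machine epsilon (2^-52)
+    double h = x != 0 ? Math.Abs(x) * sqrtEps : sqrtEps;
+    return (f(x + h) - f(x - h)) / (2 * h);
+}
+
+Func<double, double> fx = x => Math.Pow(x, 3);
+Console.WriteLine(CentralDerivative(fx, 2)); // ≈ 12, consistent with NumericalDerivative.Derivative(FX, 2)
+```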
+ +### Alternative Finite Difference Formulas + +The ***Numerics*** library provides additional finite difference methods: + +#### Forward Difference + +The forward difference approximation uses: + +```math +\frac{df}{dx} \approx \frac{f(x + h) - f(x)}{h} +``` + +```cs +double dFdx = NumericalDerivative.ForwardDifference(FX, 2); // 12.000001490104875 +``` + +This method is useful when you can only evaluate the function ahead of the point, but it's less accurate than the central difference method. + +#### Backward Difference + +The backward difference approximation uses: + +```math +\frac{df}{dx} \approx \frac{f(x) - f(x - h)}{h} +``` + +```cs +double dFdx = NumericalDerivative.BackwardDifference(FX, 2); // 11.999998508229537 +``` + +This method is useful when you can only evaluate the function behind the point. + +#### Central Difference + +The central difference method (which is also used by the `Derivative` method) provides better accuracy: + +```cs +double dFdx = NumericalDerivative.CentralDifference(FX, 2); // 11.999999949167176 +``` + +### Ridder's Method + +The ***Numerics*** library also provides Ridder's method for computing the numerical derivative, which can be more accurate than the two-point method in some cases. This method also outputs an estimate of the error in the derivative: + +```cs +double dFdx = NumericalDerivative.RiddersMethod(FX, 2, out var err); // 11.99999994392223 +Console.WriteLine($"Derivative: {dFdx}"); +Console.WriteLine($"Estimated error: {err}"); +``` + +Ridder's method uses Richardson extrapolation to refine the estimate by evaluating the derivative at multiple step sizes and extrapolating to zero step size. + +### Custom Step Size + +You can specify a custom step size if the automatic determination is not suitable for your problem: + +```cs +double h = 0.001; // Custom step size +double dFdx = NumericalDerivative.Derivative(FX, 2, h); +``` + +## Second Derivatives + +The ***Numerics*** library provides methods for computing second derivatives. The second derivative measures the rate of change of the first derivative, or the curvature of the function. + +### Central Second Derivative + +The central difference approximation for the second derivative is: + +```math +\frac{d^2f}{dx^2} \approx \frac{f(x + h) - 2f(x) + f(x - h)}{h^2} +``` + +```cs +double d2Fdx2 = NumericalDerivative.SecondDerivative(FX, 2); // 11.999997613071552 +``` + +For our test function $f(x) = x^3$, the second derivative is $\frac{d^2f}{dx^2} = 6x$, so at $x=2$, we expect 12. 
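+
+If you want to see the three-point formula itself at work, a quick standalone check (with an explicit, illustrative step size rather than the library's automatic choice) reproduces the analytic value $6x = 12$ at $x=2$:
+
+```cs
+using System;
+
+Func<double, double> fx = x => Math.Pow(x, 3);
+
+// f''(x) ≈ (f(x + h) - 2·f(x) + f(x - h)) / h²
+double x0 = 2.0, h = 1e-4;
+double d2 = (fx(x0 + h) - 2 * fx(x0) + fx(x0 - h)) / (h * h);
+
+Console.WriteLine(d2); // ≈ 12
+```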
+ +### Forward Second Derivative + +```cs +double d2Fdx2 = NumericalDerivative.SecondDerivativeForward(FX, 2); +``` + +### Backward Second Derivative + +```cs +double d2Fdx2 = NumericalDerivative.SecondDerivativeBackward(FX, 2); +``` + +## Gradient + +The gradient is a vector of first-order partial derivatives of a scalar-valued function: + +```math +\nabla f = \left(\frac{\partial f}{\partial x_1}, \frac{\partial f}{\partial x_2}, \ldots , \frac{\partial f}{\partial x_n}\right) +``` + +For example, consider a function with three variables: + +```math +f(x,y,z)=x^3+y^4+z^5 +``` + +Differentiating with respect to each variable gives: + +```math +\frac{\partial f}{\partial x} =3x^2 \quad \frac{\partial f}{\partial y} =4y^3 \quad \frac{\partial f}{\partial z} =5z^4 +``` + +Evaluating the function at $x=2$, $y=2$, and $z=2$ yields partial derivatives equal to $12$, $32$, and $80$, respectively: + +```math +\frac{\partial f}{\partial x} =3\cdot2^2=12 \quad \frac{\partial f}{\partial y} =4\cdot2^3=32 \quad \frac{\partial f}{\partial z} =5\cdot2^4=80 +``` + +The gradient is the vector of these first-order partial derivatives: + +```math +\nabla f = \{ 12, 32, 80\} +``` + +In ***Numerics***, the gradient is computed using the two-point formula described earlier. Let's create the test function: + +```cs +/// +/// Test function: f(x, y, z) = x^3 + y^4 + z^5 +/// +public double FXYZ(double[] x) +{ + return Math.Pow(x[0], 3) + Math.Pow(x[1], 4) + Math.Pow(x[2], 5); +} +``` + +And then compute the gradient: + +```cs +double[] gradient = NumericalDerivative.Gradient(FXYZ, new double[] {2, 2, 2}); +// {12.000000019411923, 32.000000014301264, 79.999999754774166} + +Console.WriteLine($"∂f/∂x = {gradient[0]:F6}"); +Console.WriteLine($"∂f/∂y = {gradient[1]:F6}"); +Console.WriteLine($"∂f/∂z = {gradient[2]:F6}"); +``` + +## Hessian + +The Hessian is a square matrix of second-order partial derivatives of a scalar-valued function: + +```math +\mathbf{H}_{i,j} = \frac{\partial^2 f}{\partial x_i\partial x_j} +``` + +Using the same 3-variable function as before $f(x,y,z)=x^3+y^4+z^5$, the Hessian matrix becomes: + +```math +\mathbf{H} = + \left[ {\begin{array}{ccc} + \frac{\partial^2 f}{\partial x\partial x} & \frac{\partial^2 f}{\partial x\partial y} & \frac{\partial^2 f}{\partial x\partial z} \\ + \frac{\partial^2 f}{\partial y\partial x} & \frac{\partial^2 f}{\partial y\partial y} & \frac{\partial^2 f}{\partial y\partial z} \\ + \frac{\partial^2 f}{\partial z\partial x} & \frac{\partial^2 f}{\partial z\partial y} & \frac{\partial^2 f}{\partial z\partial z} \\ + \end{array} } \right] +``` + +Since none of the variables interact with each other, the Hessian reduces to: + +```math +\mathbf{H} = + \left[ {\begin{array}{ccc} + \frac{\partial^2 f}{\partial x^2} & 0 & 0 \\ + 0 & \frac{\partial^2 f}{\partial y^2} & 0 \\ + 0 & 0 & \frac{\partial^2 f}{\partial z^2} \\ + \end{array} } \right] +``` + +Taking the second-order derivatives with respect to each variable gives: + +```math +\frac{\partial^2 f}{\partial x^2} =6x \quad \frac{\partial^2 f}{\partial y^2} =12y^2 \quad \frac{\partial^2 f}{\partial z^2} =20z^3 +``` + +Evaluating the function at $x=2$, $y=2$, and $z=2$ yields: + +```math +\frac{\partial^2 f}{\partial x^2} =6\cdot2=12 \quad \frac{\partial^2 f}{\partial y^2} =12\cdot2^2=48 \quad \frac{\partial^2 f}{\partial z^2} =20\cdot2^3=160 +``` + +Now, to compute the Hessian in ***Numerics***, simply do the following: + +```cs +double[,] hessian = NumericalDerivative.Hessian(FXYZ, new double[] {2, 2, 2}); +// [0,0] = 
12.000009765258449 +// [0,1] = 8.5443864603330376E-06 +// [0,2] = 0 +// [1,0] = 8.5443864603330376E-06 +// [1,1] = 48.000004883487954 +// [1,2] = 0 +// [2,0] = 0 +// [2,1] = 0 +// [2,2] = 159.99999349326262 + +Console.WriteLine("Hessian matrix:"); +for (int i = 0; i < 3; i++) +{ + for (int j = 0; j < 3; j++) + { + Console.Write($"{hessian[i, j],12:F6} "); + } + Console.WriteLine(); +} +``` + +The small off-diagonal elements (on the order of $10^{-6}$) are numerical errors and should be zero for this particular function. + +### Example: Function with Interacting Variables + +Now, consider another example where the function variables interact: + +```math +f(x,y)=x^3-2xy-y^6 +``` + +The Hessian matrix is: + +```math +\mathbf{H} = + \left[ {\begin{array}{cc} + \frac{\partial^2 f}{\partial x\partial x} & \frac{\partial^2 f}{\partial x \partial y} \\ + \frac{\partial^2 f}{\partial y\partial x} & \frac{\partial^2 f}{\partial y \partial y} \\ + \end{array} } \right] +``` + +Taking the second-order derivatives with respect to each variable gives: + +```math +\frac{\partial^2 f}{\partial x^2} =6x \quad \frac{\partial^2 f}{\partial x \partial y} = \frac{\partial^2 f}{\partial y \partial x} = -2 \quad \frac{\partial^2 f}{\partial y^2} = -30y^4 +``` + +Evaluating the function at $x=1$ and $y=2$ yields: + +```math +\frac{\partial^2 f}{\partial x^2} =6\cdot1=6 \quad \frac{\partial^2 f}{\partial x \partial y} = -2 \quad \frac{\partial^2 f}{\partial y^2} =-30\cdot2^4=-480 +``` + +Create the test function: + +```cs +/// +/// Test function: f(x, y) = x^3 - 2xy - y^6 +/// +public double FXY(double[] x) +{ + return Math.Pow(x[0], 3) - 2 * x[0] * x[1] - Math.Pow(x[1], 6); +} +``` + +Now, compute the Hessian using ***Numerics***: + +```cs +double[,] hessian = NumericalDerivative.Hessian(FXY, new double[] {1, 2}); +// [0,0] = 5.9999414101667655 +// [0,1] = -2.000001627543075 +// [1,0] = -2.000001627543075 +// [1,1] = -480.00004883487958 + +Console.WriteLine("Hessian matrix:"); +Console.WriteLine($"[{hessian[0,0],10:F4} {hessian[0,1],10:F4}]"); +Console.WriteLine($"[{hessian[1,0],10:F4} {hessian[1,1],10:F4}]"); +``` + +Note that the Hessian is symmetric, as expected from the equality of mixed partial derivatives (Schwarz's theorem), and the off-diagonal elements correctly capture the interaction between variables. + +## Jacobian + +The Jacobian matrix represents the first-order partial derivatives of a vector-valued function. 
For a function $\mathbf{f}: \mathbb{R}^n \rightarrow \mathbb{R}^m$, the Jacobian is an $m \times n$ matrix: + +```math +\mathbf{J}_{i,j} = \frac{\partial f_i}{\partial x_j} +``` + +The ***Numerics*** library provides two overloads for computing the Jacobian: + +```cs +// For a single output function f(x_i, x[]) +double[,] jacobian1 = NumericalDerivative.Jacobian( + (xi, x) => /* function of xi and x[] */, + xValues, + point +); + +// For a vector-valued function f(x[]) -> y[] +double[,] jacobian2 = NumericalDerivative.Jacobian( + x => /* returns double[] */, + point +); +``` + +Example of computing a Jacobian for a system of equations: + +```cs +// System: f1(x,y) = x^2 + y^2, f2(x,y) = xy +double[] F(double[] vars) +{ + double x = vars[0]; + double y = vars[1]; + return new double[] + { + x * x + y * y, // f1 + x * y // f2 + }; +} + +var point = new double[] { 2, 3 }; +double[,] jacobian = NumericalDerivative.Jacobian(F, point); + +// Jacobian at (2,3): +// [∂f1/∂x ∂f1/∂y] [2x 2y] [4 6] +// [∂f2/∂x ∂f2/∂y] = [ y x] = [3 2] +``` + +## Calculating Step Size + +The `CalculateStepSize` method computes an appropriate step size for numerical differentiation based on the magnitude of the point and the order of the derivative: + +```cs +double h = NumericalDerivative.CalculateStepSize(x: 2.0, order: 1); +// Returns approximately 1.49e-08 for first derivative + +double h2 = NumericalDerivative.CalculateStepSize(x: 2.0, order: 2); +// Returns approximately 3.45e-06 for second derivative +``` + +The step size is calculated as: + +```math +h = |x| \cdot \epsilon^{1/(1+\text{order})} +``` + +where $\epsilon$ is machine epsilon. For $x=0$, the formula simplifies to $h = \epsilon^{1/(1+\text{order})}$. + +## Best Practices + +1. **Use Central Differences**: When possible, use the central difference method (default `Derivative` method) as it provides better accuracy than forward or backward differences. + +2. **Ridder's Method for Critical Applications**: When you need both a derivative and an error estimate, use Ridder's method. + +3. **Automatic Step Sizing**: The default automatic step sizing works well for most problems. Only specify a custom step size if you have specific numerical issues. + +4. **Beware of Noise**: Numerical differentiation amplifies noise in function evaluations. If your function has numerical noise (e.g., from Monte Carlo simulations), consider smoothing or using a larger step size. + +5. **Check for Symmetry**: For Hessian matrices, check that the result is symmetric (within numerical tolerance). Significant asymmetry indicates numerical issues. + +6. **Scale Considerations**: For problems with variables at very different scales, consider normalizing variables before computing derivatives. + +## Accuracy Considerations + +The central difference formula has truncation error $O(h^2)$ and roundoff error $O(\epsilon/h)$, where $\epsilon$ is machine epsilon. The optimal step size balances these errors at approximately $h \approx \epsilon^{1/3}$ for first derivatives and $h \approx \epsilon^{1/4}$ for second derivatives. The automatic step sizing in ***Numerics*** uses $h \approx \epsilon^{1/2}$, which is a conservative choice that works well in practice. + +For the second derivative, the truncation error is $O(h^2)$ and roundoff error is $O(\epsilon/h^2)$, making it more sensitive to numerical noise than first derivatives. 
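+
+The truncation/roundoff trade-off described above is easy to observe empirically. The sketch below (illustrative only; it bypasses the automatic step sizing) sweeps the step size for the central difference of $f(x)=x^3$ at $x=2$ and prints the absolute error against the exact value of 12. The error first shrinks as $h$ decreases and then grows again once roundoff dominates:
+
+```cs
+using System;
+
+Func<double, double> fx = x => Math.Pow(x, 3);
+double x0 = 2.0, exact = 12.0;
+
+// Error ≈ O(h²) from truncation plus O(eps/h) from roundoff
+for (int k = 1; k <= 12; k++)
+{
+    double h = Math.Pow(10, -k);
+    double approx = (fx(x0 + h) - fx(x0 - h)) / (2 * h);
+    Console.WriteLine($"h = 1e-{k,2}   |error| = {Math.Abs(approx - exact):E2}");
+}
+```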
+ +--- + +## References + +The numerical differentiation methods implemented in ***Numerics*** are based on standard finite difference formulas well-documented in numerical analysis literature [[1]](#1). Ridder's method for derivative estimation with error bounds was introduced by Ridders [[2]](#2). + +[1] W. H. Press, S. A. Teukolsky, W. T. Vetterling and B. P. Flannery, *Numerical Recipes: The Art of Scientific Computing*, 3rd ed., Cambridge, UK: Cambridge University Press, 2007. + +[2] C. J. F. Ridders, "Accurate computation of F'(x) and F'(x) F''(x)," *Advances in Engineering Software*, vol. 4, no. 2, pp. 75-76, 1982. + +--- + +[← Previous: Numerical Integration](integration.md) | [Back to Index](../index.md) | [Next: Optimization →](optimization.md) diff --git a/docs/mathematics/integration.md b/docs/mathematics/integration.md new file mode 100644 index 00000000..19a82dca --- /dev/null +++ b/docs/mathematics/integration.md @@ -0,0 +1,608 @@ +# Numerical Integration + +[← Back to Index](../index.md) | [Next: Numerical Differentiation →](differentiation.md) + +Numerical integration, also known as numerical quadrature, is a fundamental technique for approximating definite integrals. It has wide-ranging applications in various scientific and engineering fields. For example, in statistics, the expected value of a random variable is calculated using an integral, and numerical integration can be employed to approximate this expected value. Many problems in engineering and physics cannot be solved analytically and must rely on numerical methods to approximate solutions. + +## Single Dimension Integration + +The ***Numerics*** library provides several methods for performing numerical integration on single dimensional integrands. Each algorithm computes an approximation to a definite integral of the form: + +```math +I = \int\limits_{a}^{b}f(x) \cdot dx +``` + +For the first example, let's consider a simple function with a single variable: + +```math +f(x)=x^3 +``` + +Integrating from $a=0$ to $b=1$ yields the exact solution: + +```math +\int\limits_{0}^{1}f(x) \cdot dx = \frac{1}{4}x^4 \biggr|_0^1 = \frac{1}{4} \cdot 1^4 - 0 = 0.25 +``` + +Definite integrals can be numerically solved using Riemann sums, such as the trapezoidal rule. This method works by approximating the region under the function $f(x)$ as a trapezoid and calculating its area: + +```math +I =\int\limits_{0}^{1}f(x) \cdot dx \approx \left(\frac{f(a) + f(b)}{2} \right)\cdot(b-a) +``` + +This approximation can be improved by partitioning (or binning) the integration interval $[a,b]$ and then applying the trapezoidal rule to each subinterval and summing the results: + +```math +I =\int\limits_{0}^{1}f(x) \cdot dx \approx \sum_{i=1}^{N} \left(\frac{f(x_{i-1}) + f(x_i)}{2} \right)\cdot(x_i-x_{i-1}) +``` + +Now, let's implement this in ***Numerics***. First, we need to reference the _Integration_ namespace: + +```cs +using Numerics.Mathematics.Integration; +``` + +Next, create the test function: + +```cs +/// +/// Test function: f(x) = x^3 +/// +public double FX(double x) +{ + return Math.Pow(x, 3); +} +``` + +### Trapezoidal Rule + +The _Integration_ class is a static class that contains the Midpoint Rule, Trapezoidal Rule, Simpson's Rule, and the 10-point Gauss-Legendre integration methods. Let's first compute the integral using the Trapezoidal Rule with 10 bins (or steps): + +```cs +double result = Integration.TrapezoidalRule(FX, 0, 1, 10); // 0.25249999999999995 +``` + +Increasing the number of steps will increase the accuracy. 
Let's compute it again using 1,000 steps: + +```cs +double result = Integration.TrapezoidalRule(FX, 0, 1, 1000); // 0.25000025000000053 +``` + +We can see that this is much more precise. + +Alternatively, the ***Numerics*** library provides a _TrapezoidalRule_ class that extends the basic static method and provides additional functionality for computing integration error estimates: + +```cs +var trap = new TrapezoidalRule(FX, 0, 1, intervals: 100); +trap.Integrate(); +double result = trap.Result; // 0.25024999999999995 +Console.WriteLine($"Function evaluations: {trap.FunctionEvaluations}"); +``` + +### Simpson's Rule + +Simpson's Rule provides more accurate approximations than the Trapezoidal Rule by using quadratic interpolation. The formula approximates the region under the function $f(x)$ as a weighted average of the trapezoidal and midpoint methods: + +```math +I =\int\limits_{a}^{b}f(x) \cdot dx \approx \left[f(a) + 4 \cdot f \left(\frac{a+b}{2} \right) + f(b) \right]\cdot \left(\frac{b-a}{6} \right) +``` + +Similar to the Trapezoidal Rule, the accuracy is improved by partitioning the integration interval. Using the static method: + +```cs +double result = Integration.SimpsonsRule(FX, 0, 1, 10); // 0.25 +``` + +Or using the class-based approach: + +```cs +var simpson = new SimpsonsRule(FX, 0, 1, intervals: 100); +simpson.Integrate(); +double result = simpson.Result; // 0.25 +``` + +**Error**: Simpson's Rule is fourth-order accurate, $O(h^4)$, making it significantly more accurate than the Trapezoidal Rule for smooth functions. + +### Gauss-Legendre Quadrature + +The Gauss-Legendre method uses optimal polynomial quadrature for smooth functions [[1]](#1). It evaluates the function at specific points (roots of Legendre polynomials) with corresponding weights to achieve high accuracy: + +```math +\int_{-1}^{1} f(x)\,dx \approx \sum_{i=1}^{n} w_i f(x_i) +``` + +where $x_i$ are roots of Legendre polynomials and $w_i$ are corresponding weights. The ***Numerics*** library provides a 10-point Gauss-Legendre method: + +```cs +double result = Integration.GaussLegendre(FX, 0, 1); // 0.25 +``` + +**Error**: The 10-point Gauss-Legendre method is exact for polynomials of degree 19 or less. + +### Midpoint Rule + +The Midpoint Rule evaluates the function at the midpoint of each interval: + +```cs +double result = Integration.Midpoint(FX, 0, 1, 100); // 0.24997500000000012 +``` + +## Adaptive Integration + +The challenge with static numerical integration methods, such as the trapezoidal rule mentioned above, is that the user must specify both the limits of integration and the number of integration bins. If the integrand function has subregions with high variance, this approach can lead to large approximation errors. Many real-world integrand functions have substantial weight concentrated in narrow subregions, resulting in wasted integration bins in areas that contribute little to the total weight. + +Adaptive integration, a more refined numerical integration method, adjusts subintervals within the integration bounds based on the behavior of the function. These methods concentrate subintervals in regions that contribute the most to the integral, overcoming the limitations of static approaches. + +The ***Numerics*** library provides three adaptive integration routines: the Adaptive Simpson's Rule, the Adaptive Gauss-Lobatto method, and the Adaptive Gauss-Kronrod method. 
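+
+Before looking at each routine, a short comparison shows why adaptivity matters. The sketch below (the peak function is chosen purely for illustration) integrates a narrow peak centered at $x=0.5$ over $[0,1]$, whose exact value is approximately $0.01\sqrt{\pi} \approx 0.0177$. A static trapezoidal rule with 10 bins lands only one node on the peak and returns roughly 0.1, while the Adaptive Simpson's Rule introduced below concentrates its subintervals around the peak and comes in close to the exact value:
+
+```cs
+using System;
+using Numerics.Mathematics.Integration;
+
+// Narrow peak at x = 0.5; exact integral over [0, 1] ≈ 0.01·√π ≈ 0.0177
+double Peak(double x) => Math.Exp(-Math.Pow((x - 0.5) / 0.01, 2));
+
+// Static rule with 10 bins: only the node at x = 0.5 is effectively nonzero, giving ≈ 0.1
+double coarse = Integration.TrapezoidalRule(Peak, 0, 1, 10);
+
+// Adaptive Simpson's Rule refines subintervals near the peak until the tolerance is met
+var asr = new AdaptiveSimpsonsRule(Peak, 0, 1);
+asr.Integrate();
+
+Console.WriteLine($"Trapezoidal (10 bins): {coarse:F6}");
+Console.WriteLine($"Adaptive Simpson's:    {asr.Result:F6}");
+```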
+ +### Adaptive Simpson's Rule + +The Adaptive Simpson's Rule (ASR) algorithm subdivides the integration interval recursively until a user-defined tolerance is achieved. In each subinterval, Simpson's Rule is used to approximate the region under the function. The criterion for determining when to stop subdividing an interval is: + +```math +\frac{1}{15} \cdot \left| S(a, m) + S(m, b) - S(a,b) \right| \leq \epsilon + \epsilon \cdot \left| S(a, b) \right| +``` + +where $[a,b]$ is the integration interval, $m = \frac{a+b}{2}$, $S(\cdot)$ represents Simpson's Rule evaluated at those intervals, and $\epsilon$ is the absolute and relative error tolerance for the interval. Each subinterval is recursively subdivided and evaluated until the specified tolerance is met. + +More details on the ASR method can be found in [[1]](#1). + +To use the ASR method, follow these steps: + +```cs +var asr = new AdaptiveSimpsonsRule(FX, 0, 1); +asr.Integrate(); +double result = asr.Result; // 0.25 +Console.WriteLine($"Function evaluations: {asr.FunctionEvaluations}"); +``` + +For this simple test function, the ASR method requires only 5 function evaluations to converge with an absolute and relative tolerance of $1 \times 10^{-8}$. It should be noted that the ASR method gives exact results for 3rd degree (or less) polynomials. + +You can customize the tolerance and iteration limits: + +```cs +var asr = new AdaptiveSimpsonsRule(FX, 0, 1); +asr.RelativeTolerance = 1e-10; +asr.AbsoluteTolerance = 1e-10; +asr.MaxIterations = 1000; +asr.Integrate(); +double result = asr.Result; +``` + +### Adaptive Gauss-Lobatto + +The Adaptive Gauss-Lobatto (AGL) method includes endpoints in quadrature nodes, making it useful for integrands with endpoint singularities [[2]](#2). Alternatively, we can use the AGL method as follows: + +```cs +var agl = new AdaptiveGaussLobatto(FX, 0, 1); +agl.Integrate(); +double result = agl.Result; // 0.24999999999999997 +Console.WriteLine($"Function evaluations: {agl.FunctionEvaluations}"); +``` + +The AGL method requires 18 function evaluations to converge given an absolute and relative tolerance of $1 \times 10^{-8}$. + +### Adaptive Gauss-Kronrod + +The Adaptive Gauss-Kronrod method pairs a 10-point Gauss rule with a 21-point Kronrod extension for error estimation [[2]](#2). This method efficiently reuses function evaluations to provide accurate error estimates: + +```cs +var gk = new AdaptiveGaussKronrod(FX, 0, 1); +gk.RelativeTolerance = 1e-12; +gk.Integrate(); +double result = gk.Result; +Console.WriteLine($"Estimated error: {gk.Status}"); +``` + +**Advantages**: Efficient error estimation, reuses function evaluations from the Gauss rule in the Kronrod extension. + +### Example: Computing the Mean of a Gamma Distribution + +For a more challenging test problem, let's compute the mean of a Gamma distribution with a scale of $\theta = 10$ and shape $\kappa = 5$. The true mean of the distribution is given by: + +```math +\mu = \theta \cdot \kappa = 50 +``` + +The probability density function (PDF) of the Gamma distribution is: + +```math +f(x) = \frac{1}{\Gamma(\kappa)\theta^{\kappa}}x^{\kappa-1}e^{-\frac{x}{\theta}} +``` + +The mean of a continuous probability distribution is computed as: + +```math +\mu = \mathbb{E} [X] = \int\limits_{-\infty}^{\infty} x \cdot f(x) \cdot dx +``` + +Now, let's implement this in ***Numerics***. 
First, we need to reference the _Integration_ and _Distributions_ namespaces: + +```cs +using Numerics.Mathematics.Integration; +using Numerics.Distributions; +``` + +Then, using the ASR method, follow these steps: + +```cs +// Create the Gamma distribution and set the integration limits +var gamma = new GammaDistribution(10, 5); +double a = gamma.InverseCDF(1E-16); // Lower limit based on a very small cumulative probability +double b = gamma.InverseCDF(1 - 1E-16); // Upper limit based on a near-1 cumulative probability + +// Define the integrand function +double I(double x) +{ + return x * gamma.PDF(x); +} + +// Perform the integration +var asr = new AdaptiveSimpsonsRule(I, a, b); +asr.Integrate(); +double result = asr.Result; // 50.000000004866415 +Console.WriteLine($"Function evaluations: {asr.FunctionEvaluations}"); +``` + +The ASR method requires 365 function evaluations to reach convergence. + +## Two-Dimensional Integration + +### Adaptive Simpson's 2D + +The ***Numerics*** library extends adaptive Simpson's rule to rectangular domains in two dimensions: + +```cs +using Numerics.Mathematics.Integration; + +// Integrate f(x,y) = exp(-(x² + y²)) over [-3,3] × [-3,3] +Func f2d = (x, y) => Math.Exp(-(x * x + y * y)); + +var simpson2d = new AdaptiveSimpsonsRule2D(f2d, -3, 3, -3, 3); +simpson2d.RelativeTolerance = 1e-8; +simpson2d.Integrate(); + +Console.WriteLine($"Result: {simpson2d.Result:F10}"); // Should be approximately π +Console.WriteLine($"Function evaluations: {simpson2d.FunctionEvaluations}"); +``` + +## Multidimensional Integration + +Multidimensional integration, also known as multiple or multivariate integration, involves evaluating integrals over functions of more than one variable. Instead of integrating over a single interval, as in one-dimensional integration, you integrate over a region in a multidimensional space. This is commonly used in fields like physics, engineering, and statistics where systems often depend on multiple variables. + +Solving multidimensional integrals is computationally demanding. If traditional, nonadaptive numerical integration techniques were used, the solution would require $K^D$ iterations, where $K$ is the number of integration steps (or bins) and $D$ is the number of dimensions. If there were 100 integration steps and 5 dimensions, the solution would need 10 billion iterations. + +To avoid these computation limitations, the ***Numerics*** library provides three multidimensional integration routines: Monte Carlo, Miser, and VEGAS. Each algorithm computes an approximation to a definite integral of the form: + +```math +I = \int_{\Omega} f(\mathbf{x}) \, d\mathbf{x} +``` + +where $\Omega$ is the integration domain in $D$-dimensional space. + +### Monte Carlo Integration + +Monte Carlo integration uses random sampling to approximate integrals. For high-dimensional integrals, Monte Carlo methods become essential [[3]](#3): + +```math +\int_\Omega f(\mathbf{x})\,d\mathbf{x} \approx V \cdot \frac{1}{N}\sum_{i=1}^{N}f(\mathbf{x}_i) +``` + +where $V$ is the volume of the integration domain, $N$ is the number of samples, and $\mathbf{x}_i$ are random points uniformly distributed in $\Omega$. + +Let's use a simple 2D test problem to compute $\pi$. 
Consider the function: + +```math +f(x, y) = +\begin{cases} +1 & \text{if } x^2 + y^2 \leq 1 \\ +0 & \text{otherwise} +\end{cases} +``` + +Integrating this function over the domain $[-1, 1] \times [-1, 1]$ gives the area of a unit circle, which equals $\pi$: + +```math +\int\limits_{-1}^{1}\int\limits_{-1}^{1}f(x,y) \, dy \, dx = \pi +``` + +Now, let's implement this in ***Numerics***: + +```cs +using Numerics.Mathematics.Integration; +using Numerics.Sampling; + +// Define the integrand function +double PI(double[] x) +{ + if (x[0] * x[0] + x[1] * x[1] <= 1) + return 1.0; + return 0.0; +} + +// Set integration bounds +var a = new double[] { -1, -1 }; +var b = new double[] { 1, 1 }; + +// Create and configure the Monte Carlo integrator +var mc = new MonteCarloIntegration(PI, 2, a, b); +mc.Random = new MersenneTwister(12345); // Set the random number generator for repeatability +mc.MaxIterations = 100000; +mc.Integrate(); + +double result = mc.Result; // 3.13824 +Console.WriteLine($"Result: {result:F6}"); +Console.WriteLine($"Function evaluations: {mc.FunctionEvaluations}"); +``` + +With 100,000 samples, we see that the result is close but still has a noticeable error. Now, let's run it again with the default setting, where the maximum iterations are $N=100,000,000$: + +```cs +var mc = new MonteCarloIntegration(PI, 2, a, b); +mc.Random = new MersenneTwister(12345); // Set the random number generator for repeatability +mc.Integrate(); +double result = mc.Result; // 3.1412028 +Console.WriteLine($"Result: {result:F8}"); +``` + +This result is much closer to the true value of $\pi$. + +Unlike traditional methods, the complexity of Monte Carlo integration grows slowly with the number of dimensions, making it particularly useful for high-dimensional problems. The Monte Carlo approach is simple to implement in higher dimensions and can handle irregular domains and complex integrands. However, it converges slowly; the error decreases as $O \left( \frac{1}{\sqrt{N}} \right)$, meaning to halve the error, you need to quadruple the number of samples. + +**Error**: $O(1/\sqrt{N})$ - independent of dimension. + +### MISER (Recursive Stratified Sampling) + +The Miser integration algorithm is a type of adaptive Monte Carlo method designed for efficient evaluation of multidimensional integrals. It is particularly well-suited for integrands that exhibit regions of high variance, as it allocates more samples to areas where the integrand contributes more to the total integral. The algorithm combines the flexibility of Monte Carlo integration with adaptive subdivision techniques to enhance accuracy and efficiency in complex, high-dimensional problems. + +Key Concepts of the Miser Algorithm: + +1. **Adaptive Subdivision:** Miser improves upon basic Monte Carlo integration by recursively subdividing the integration domain into smaller regions. The algorithm then allocates more samples to the subregions where the integrand has higher variance, focusing computational resources where they are most needed. + +2. **Variance-Based Sampling:** The Miser algorithm estimates the variance of the integrand in different subregions. Subregions with higher variance are given a greater proportion of the total samples. This reduces the error by refining the integral in the parts of the domain that contribute the most to the integral's value. + +For more details on the stratified sampling and the Miser algorithm, see [[2]](#2) and [[3]](#3). 
+ +Now, let's solve the $\pi$ test function using Miser with $N=100,000$ iterations: + +```cs +var miser = new Miser(PI, 2, a, b); +miser.Random = new MersenneTwister(12345); // Set the random number generator for repeatability +miser.MaxIterations = 100000; +miser.Integrate(); +double result = miser.Result; // 3.1420673978501474 +Console.WriteLine($"Result: {result:F10}"); +Console.WriteLine($"Function evaluations: {miser.FunctionEvaluations}"); +``` + +With the same number of samples, Miser produces a more accurate result with smaller variance than basic Monte Carlo integration. + +### VEGAS (Adaptive Importance Sampling) + +The VEGAS integration method is a Monte Carlo-based numerical integration technique designed for efficiently evaluating high-dimensional integrals, particularly when dealing with functions that have significant variability in certain regions of the integration space [[4]](#4) [[5]](#5). It is widely used in computational physics and other fields requiring the evaluation of complex integrals. + +Key Features of the VEGAS Algorithm: + +1. **Importance Sampling:** VEGAS employs importance sampling to focus the integration effort on regions where the integrand contributes most significantly to the integral. This helps to improve the accuracy of the integral estimate while reducing variance. + +2. **Adaptive Grid:** The algorithm adapts the sampling grid based on the characteristics of the integrand. It divides the integration domain into smaller subregions, and the sampling density is adjusted according to the estimated contribution of each region to the overall integral. + +3. **Iterative Approach:** VEGAS works in iterations, refining the sampling strategy with each pass. In the first iteration, a uniform grid is typically used. After evaluating the integrand, the method estimates the probability distribution of the function values, allowing the grid to be adjusted in subsequent iterations to better capture areas with higher contributions. + +For more details on the importance sampling and the VEGAS algorithm, see [[2]](#2) and [[3]](#3). + +Now, let's solve the $\pi$ test function using VEGAS. The VEGAS method requires the integrand function to take a point $\mathbf{x}$ and an importance sampling weight $w$ as inputs. For this example, we will reuse the previous test function without utilizing the weight value: + +```cs +var vegas = new Vegas((x, w) => { return PI(x); }, 2, a, b); +vegas.Random = new MersenneTwister(12345); // Set the random number generator for repeatability +vegas.Integrate(); +double result = vegas.Result; // 3.1418009008273735 +Console.WriteLine($"Result: {result:F10}"); +Console.WriteLine($"Function evaluations: {vegas.FunctionEvaluations}"); +``` + +In ***Numerics***, the VEGAS method iteratively adapts and refines the grid until convergence to a relative tolerance of $1 \times 10^{-3}$ is achieved. For this test problem, only 19,600 function evaluations are required to reach convergence. + +**Advantages**: Excellent for peaked integrands, provides convergence diagnostics through chi-squared statistics. + +### Example: Mean of Sum of Independent Normal Distributions + +For a more challenging test, let's compute the mean of the sum of independent Normal distributions. 
The multidimensional integrand function can be written as: + +```math +f(x_1, \cdots ,x_D) = \sum_{k=1}^{D} x_k \cdot \prod_{k=1}^{D} \phi (x_k | \mu_k, \sigma_k) +``` + +where $\phi(\cdot)$ is the PDF of the $k$-th Normal distribution with a mean $\mu_k$ and standard deviation $\sigma_k$. The exact solution for the mean of the sum of these random variables is: + +```math +E[X] = \sum_{k=1}^{D} \mu_k +``` + +For this test, we use five Normal distributions with means $\mu = [10, 30, 17, 99, 68]$ and standard deviations $\sigma = [2, 15, 5, 14, 7]$. Therefore, the exact solution is: + +```math +E[X] = 10+30+17+99+68=224 +``` + +Here's how we can implement this in the ***Numerics*** library: + +```cs +using Numerics.Mathematics.Integration; +using Numerics.Distributions; +using Numerics.Sampling; + +// Create the Normal distributions and set the integration limits +var mu = new double[] { 10, 30, 17, 99, 68 }; +var sigma = new double[] { 2, 15, 5, 14, 7 }; +var dists = new Normal[5]; +var min = new double[5]; +var max = new double[5]; +for (int i = 0; i < 5; i++) +{ + dists[i] = new Normal(mu[i], sigma[i]); + min[i] = dists[i].InverseCDF(1E-16); // Lower limit based on a very small cumulative probability + max[i] = dists[i].InverseCDF(1 - 1E-16); // Upper limit based on a near-1 cumulative probability +} + +// Define the integrand function +double SumOfNormals(double[] x, double w) +{ + double sum = 0; + double prod = 1; + for (int i = 0; i < mu.Length; i++) + { + sum += x[i]; + prod *= dists[i].PDF(x[i]); + } + return sum * prod; +} + +// Perform the integration +var vegas = new Vegas(SumOfNormals, 5, min, max); +vegas.Integrate(); +double result = vegas.Result; // 224.07455771892427 +Console.WriteLine($"Result: {result:F8}"); +Console.WriteLine($"Function evaluations: {vegas.FunctionEvaluations}"); +``` + +For this more complex test problem, 468,750 function evaluations are required to achieve convergence. + +## Probability Space Integration + +For risk analysis, it's often useful to integrate over probability distributions. The ***Numerics*** library allows you to compute expected values and failure probabilities by transforming the integration domain to probability space: + +```cs +using Numerics.Distributions; +using Numerics.Mathematics.Integration; +using Numerics.Sampling; + +// Expected value E[g(X)] where X ~ Normal(0,1) +var normal = new Normal(0, 1); + +double Expectation(double[] p) +{ + // Transform from [0,1] to real line via inverse CDF + double x = normal.InverseCDF(p[0]); + + // Function of interest: g(x) = x² + double gx = x * x; // E[X²] = 1 for standard normal + + return gx; // Weight is implicitly 1 in probability space +} + +var mc = new MonteCarloIntegration(Expectation, 1, new[] { 0.0 }, new[] { 1.0 }); +mc.MaxIterations = 100000; +mc.Integrate(); + +Console.WriteLine($"E[X²] = {mc.Result:F6}"); // Should be ≈ 1 +``` + +### Failure Probability Integration + +```cs +// P(g(X) < 0) where g is a limit state function +double FailureIndicator(double[] p) +{ + double x1 = new Normal(0, 1).InverseCDF(p[0]); + double x2 = new Normal(0, 1).InverseCDF(p[1]); + + // Limit state function + double g = 5 - x1 - x2; + + // Indicator function + return g < 0 ? 
1.0 : 0.0; +} + +var vegas = new Vegas((x, w) => FailureIndicator(x), 2, new[] { 0.0, 0.0 }, new[] { 1.0, 1.0 }); +vegas.MaxIterations = 50000; +vegas.Integrate(); + +Console.WriteLine($"Failure probability: {vegas.Result:E4}"); +``` + +## Choosing an Integration Method + +| Scenario | Recommended Method | Notes | +|----------|-------------------|-------| +| 1D, smooth function | Adaptive Gauss-Kronrod | Best accuracy with error estimates | +| 1D, unknown smoothness | Adaptive Simpson's | Reliable general-purpose method | +| 1D, endpoint singularity | Adaptive Gauss-Lobatto | Includes endpoints in quadrature | +| 1D, high accuracy needed | Adaptive Gauss-Kronrod | Tight tolerance, efficient reuse of evaluations | +| 1D, quick estimate | Static Simpson's or Gauss-Legendre | Fast but requires choosing number of points | +| 2D, rectangular domain | Adaptive Simpson's 2D | Direct extension of 1D method | +| 3D-6D, smooth | VEGAS | Adaptive importance sampling excels here | +| High-D (>6), any | VEGAS or Monte Carlo | Only practical methods for very high dimensions | +| Peaked integrand | VEGAS | Adaptive grid focuses on important regions | +| High variance regions | Miser | Stratified sampling concentrates effort | +| Probability integrals | Monte Carlo or VEGAS in [0,1] | Transform via inverse CDF | + +## Common Properties for Integrators + +All integration classes (except the static `Integration` methods) inherit from the `Integrator` base class and share these properties: + +### Input Properties +- `MinIterations`: Minimum number of iterations allowed (default = 1) +- `MaxIterations`: Maximum number of iterations allowed (default = 10,000,000) +- `MinFunctionEvaluations`: Minimum function evaluations allowed (default = 1) +- `MaxFunctionEvaluations`: Maximum function evaluations allowed (default = 10,000,000) +- `AbsoluteTolerance`: Desired absolute tolerance (default = 1E-8) +- `RelativeTolerance`: Desired relative tolerance (default = 1E-8) +- `ReportFailure`: Whether to throw exception on convergence failure (default = true) + +### Output Properties +- `Result`: The computed integral value +- `Iterations`: Number of iterations performed +- `FunctionEvaluations`: Number of function evaluations performed +- `Status`: Integration status (Success, Failure, MaxIterationsReached, etc.) + +Example of using these properties: + +```cs +var integrator = new AdaptiveSimpsonsRule(myFunction, 0, 1); +integrator.RelativeTolerance = 1e-12; +integrator.MaxIterations = 5000; +integrator.ReportFailure = false; // Don't throw exception on failure +integrator.Integrate(); + +if (integrator.Status == IntegrationStatus.Success) +{ + Console.WriteLine($"Result: {integrator.Result}"); + Console.WriteLine($"Function evaluations: {integrator.FunctionEvaluations}"); +} +else +{ + Console.WriteLine($"Integration failed: {integrator.Status}"); +} +``` + +## Additional VEGAS Features + +The VEGAS integrator includes a special method for rare event analysis: + +```cs +var vegas = new Vegas(myFunction, dimensions, min, max); +vegas.ConfigureForRareEvents(); // Optimizes settings for rare event detection +vegas.Integrate(); +``` + +This method adjusts the internal parameters to better handle integrands with very small contributions over most of the domain, which is common in reliability analysis and rare event simulation. + +--- + +## References + +[1] P. J. Davis and P. Rabinowitz, *Methods of Numerical Integration*, 2nd ed., Mineola, New York: Dover Publications, Inc., 2007. + +[2] W. H. Press, S. A. Teukolsky, W. T. 
Vetterling and B. P. Flannery, *Numerical Recipes: The Art of Scientific Computing*, 3rd ed., Cambridge, UK: Cambridge University Press, 2007. + +[3] A. Ciric, *A Guide to Monte Carlo & Quantum Monte Carlo Methods*, Createspace Independent Publishing Platform, 2016. + +[4] G. Lepage, "A New Algorithm for Adaptive Multidimensional Integration," *Journal of Computational Physics*, vol. 27, no. 1, pp. 192-203, 1978. + +[5] G. Lepage, "VEGAS: An Adaptive Multidimensional Integration Program," Cornell University, 1980. + +--- + +[← Back to Index](../index.md) | [Next: Numerical Differentiation →](differentiation.md) diff --git a/docs/mathematics/linear-algebra.md b/docs/mathematics/linear-algebra.md new file mode 100644 index 00000000..615844d5 --- /dev/null +++ b/docs/mathematics/linear-algebra.md @@ -0,0 +1,490 @@ +# Linear Algebra + +[← Back to Index](../index.md) | [Next: Special Functions →](special-functions.md) + +The ***Numerics*** library provides `Matrix` and `Vector` classes for linear algebra operations. These classes support common operations needed for numerical computing, optimization, and statistical analysis. + +## Matrix Class + +### Creating Matrices + +```cs +using Numerics.Mathematics.LinearAlgebra; + +// Create matrix with dimensions +var m1 = new Matrix(3, 4); // 3 rows, 4 columns + +// Create square matrix +var m2 = new Matrix(3); // 3x3 matrix + +// Create from 2D array +double[,] data = { + { 1, 2, 3 }, + { 4, 5, 6 }, + { 7, 8, 9 } +}; +var m3 = new Matrix(data); + +// Create from 1D array (column vector) +double[] columnData = { 1, 2, 3, 4 }; +var m4 = new Matrix(columnData); // 4x1 matrix + +// Create from Vector +var vec = new Vector(new[] { 1.0, 2.0, 3.0 }); +var m5 = new Matrix(vec); +``` + +### Special Matrices + +```cs +// Identity matrix +var I = Matrix.Identity(3); // 3x3 identity + +// Zero matrix +var zeros = new Matrix(3, 3); // Initialized to zeros by default + +// Diagonal matrix +double[] diag = { 1, 2, 3 }; +var D = Matrix.Diagonal(diag); + +Console.WriteLine("Identity Matrix:"); +Console.WriteLine(I.ToString()); +``` + +### Accessing Elements + +```cs +var m = new Matrix(new double[,] { + { 1, 2, 3 }, + { 4, 5, 6 } +}); + +// Get/set elements +double value = m[0, 1]; // Get element at row 0, column 1 → 2 +m[1, 2] = 10; // Set element + +// Properties +int rows = m.NumberOfRows; // 2 +int cols = m.NumberOfColumns; // 3 +bool isSquare = m.IsSquare; // false + +Console.WriteLine($"Matrix dimensions: {rows} x {cols}"); +``` + +### Matrix Operations + +#### Transpose + +```cs +var A = new Matrix(new double[,] { + { 1, 2, 3 }, + { 4, 5, 6 } +}); + +// Instance method +var AT = A.Transpose(); // 3x2 matrix + +// Static method +var AT2 = Matrix.Transpose(A); + +Console.WriteLine("A:"); +Console.WriteLine(A.ToString()); +Console.WriteLine("\nA^T:"); +Console.WriteLine(AT.ToString()); +``` + +#### Matrix Multiplication + +```cs +var A = new Matrix(new double[,] { + { 1, 2 }, + { 3, 4 } +}); + +var B = new Matrix(new double[,] { + { 5, 6 }, + { 7, 8 } +}); + +// Matrix-matrix multiplication +var C = A.Multiply(B); // A * B + +// Operator overload +var C2 = A * B; + +Console.WriteLine("A * B:"); +Console.WriteLine(C.ToString()); + +// Scalar multiplication +var D = A.Multiply(2.0); // or A * 2.0 + +Console.WriteLine("\n2 * A:"); +Console.WriteLine(D.ToString()); +``` + +#### Matrix-Vector Multiplication + +```cs +var A = new Matrix(new double[,] { + { 1, 2, 3 }, + { 4, 5, 6 } +}); + +var x = new Vector(new[] { 1.0, 2.0, 3.0 }); + +// Matrix-vector multiplication 
+var b = A.Multiply(x); // A * x + +Console.WriteLine("A * x:"); +Console.WriteLine(b.ToString()); +``` + +#### Matrix Addition and Subtraction + +```cs +var A = new Matrix(new double[,] { + { 1, 2 }, + { 3, 4 } +}); + +var B = new Matrix(new double[,] { + { 5, 6 }, + { 7, 8 } +}); + +// Addition +var C = A + B; + +// Subtraction +var D = A - B; + +Console.WriteLine("A + B:"); +Console.WriteLine(C.ToString()); + +Console.WriteLine("\nA - B:"); +Console.WriteLine(D.ToString()); +``` + +### Matrix Properties + +#### Determinant + +```cs +var A = new Matrix(new double[,] { + { 4, 3 }, + { 6, 3 } +}); + +double det = A.Determinant(); + +Console.WriteLine($"Determinant: {det:F2}"); + +// For larger matrices +var B = new Matrix(new double[,] { + { 1, 2, 3 }, + { 0, 1, 4 }, + { 5, 6, 0 } +}); + +double det2 = B.Determinant(); +Console.WriteLine($"Determinant of 3x3: {det2:F2}"); +``` + +#### Inverse + +```cs +var A = new Matrix(new double[,] { + { 4, 7 }, + { 2, 6 } +}); + +try +{ + var Ainv = A.Inverse(); + + Console.WriteLine("A:"); + Console.WriteLine(A.ToString()); + Console.WriteLine("\nA^-1:"); + Console.WriteLine(Ainv.ToString()); + + // Verify: A * A^-1 = I + var I = A * Ainv; + Console.WriteLine("\nA * A^-1 (should be I):"); + Console.WriteLine(I.ToString()); +} +catch (InvalidOperationException ex) +{ + Console.WriteLine($"Matrix is singular: {ex.Message}"); +} +``` + +### Row and Column Operations + +```cs +var A = new Matrix(new double[,] { + { 1, 2, 3 }, + { 4, 5, 6 }, + { 7, 8, 9 } +}); + +// Get row +Vector row1 = A.GetRow(1); // Second row: [4, 5, 6] + +// Get column +Vector col2 = A.GetColumn(2); // Third column: [3, 6, 9] + +// Set row +A.SetRow(0, new Vector(new[] { 10.0, 11.0, 12.0 })); + +// Set column +A.SetColumn(1, new Vector(new[] { 20.0, 21.0, 22.0 })); + +Console.WriteLine("Modified matrix:"); +Console.WriteLine(A.ToString()); +``` + +## Vector Class + +### Creating Vectors + +```cs +using Numerics.Mathematics.LinearAlgebra; + +// Create from array +var v1 = new Vector(new[] { 1.0, 2.0, 3.0 }); + +// Create with size +var v2 = new Vector(5); // Length 5, initialized to zeros + +// Copy constructor +var v3 = new Vector(v1); + +Console.WriteLine($"Vector v1: {v1.ToString()}"); +Console.WriteLine($"Length: {v1.Length}"); +``` + +### Vector Operations + +#### Dot Product + +```cs +var a = new Vector(new[] { 1.0, 2.0, 3.0 }); +var b = new Vector(new[] { 4.0, 5.0, 6.0 }); + +double dot = a.DotProduct(b); // 1*4 + 2*5 + 3*6 = 32 + +Console.WriteLine($"a · b = {dot}"); +``` + +#### Norm (Magnitude) + +```cs +var v = new Vector(new[] { 3.0, 4.0 }); + +double norm = v.Norm(); // √(3² + 4²) = 5 + +Console.WriteLine($"||v|| = {norm}"); + +// Unit vector +var u = v.Normalize(); // u = v / ||v|| + +Console.WriteLine($"Unit vector: {u.ToString()}"); +Console.WriteLine($"||u|| = {u.Norm():F10}"); // Should be 1.0 +``` + +#### Vector Addition and Scaling + +```cs +var v1 = new Vector(new[] { 1.0, 2.0, 3.0 }); +var v2 = new Vector(new[] { 4.0, 5.0, 6.0 }); + +// Addition +var v3 = v1 + v2; + +// Subtraction +var v4 = v2 - v1; + +// Scalar multiplication +var v5 = v1 * 2.0; // or 2.0 * v1 + +Console.WriteLine($"v1 + v2 = {v3.ToString()}"); +Console.WriteLine($"v2 - v1 = {v4.ToString()}"); +Console.WriteLine($"2 * v1 = {v5.ToString()}"); +``` + +### Accessing Elements + +```cs +var v = new Vector(new[] { 10.0, 20.0, 30.0, 40.0 }); + +// Indexing +double x = v[0]; // 10.0 +v[2] = 35.0; // Set third element + +Console.WriteLine($"Modified vector: {v.ToString()}"); + +// Convert to array 
+double[] array = v.ToArray(); +``` + +## Practical Examples + +### Example 1: Solving Linear Systems (Ax = b) + +```cs +// Solve: 2x + 3y = 8 +// 4x + y = 10 + +var A = new Matrix(new double[,] { + { 2, 3 }, + { 4, 1 } +}); + +var b = new Vector(new[] { 8.0, 10.0 }); + +// Solve using matrix inversion: x = A^-1 * b +var Ainv = A.Inverse(); +var x = Ainv.Multiply(b); + +Console.WriteLine("Solution to Ax = b:"); +Console.WriteLine($"x = {x[0]:F2}"); +Console.WriteLine($"y = {x[1]:F2}"); + +// Verify solution +var check = A.Multiply(x); +Console.WriteLine($"\nVerification A*x = {check.ToString()}"); +Console.WriteLine($"Expected b = {b.ToString()}"); +``` + +### Example 2: Least Squares Regression + +```cs +// Fit y = a + bx to data +double[] xData = { 1, 2, 3, 4, 5 }; +double[] yData = { 2.1, 3.9, 6.2, 8.1, 9.8 }; + +int n = xData.Length; + +// Build design matrix: X = [1, x] +var X = new Matrix(n, 2); +for (int i = 0; i < n; i++) +{ + X[i, 0] = 1.0; // Intercept column + X[i, 1] = xData[i]; // x column +} + +var y = new Vector(yData); + +// Normal equations: (X^T X) β = X^T y +var XTX = X.Transpose() * X; +var XTy = X.Transpose().Multiply(y); + +// Solve for coefficients +var beta = XTX.Inverse().Multiply(XTy); + +double intercept = beta[0]; +double slope = beta[1]; + +Console.WriteLine($"Linear regression: y = {intercept:F3} + {slope:F3}x"); + +// Predictions +Console.WriteLine("\nPredictions:"); +for (int i = 0; i < n; i++) +{ + double pred = intercept + slope * xData[i]; + Console.WriteLine($"x={xData[i]}: y_obs={yData[i]:F1}, y_pred={pred:F1}"); +} +``` + +### Example 3: Covariance Matrix + +```cs +// Compute covariance matrix of multivariate data +double[,] data = { + { 1, 2, 3 }, // Observation 1 + { 4, 5, 6 }, // Observation 2 + { 7, 8, 9 }, // Observation 3 + { 2, 3, 4 } // Observation 4 +}; + +int n = data.GetLength(0); // Number of observations +int p = data.GetLength(1); // Number of variables + +// Center data (subtract means) +double[] means = new double[p]; +for (int j = 0; j < p; j++) +{ + for (int i = 0; i < n; i++) + means[j] += data[i, j]; + means[j] /= n; +} + +var X = new Matrix(n, p); +for (int i = 0; i < n; i++) + for (int j = 0; j < p; j++) + X[i, j] = data[i, j] - means[j]; + +// Covariance matrix: Σ = (1/(n-1)) X^T X +var XTX = X.Transpose() * X; +var Sigma = XTX * (1.0 / (n - 1)); + +Console.WriteLine("Covariance Matrix:"); +Console.WriteLine(Sigma.ToString()); + +// Correlation matrix +var Corr = new Matrix(p, p); +for (int i = 0; i < p; i++) +{ + for (int j = 0; j < p; j++) + { + Corr[i, j] = Sigma[i, j] / Math.Sqrt(Sigma[i, i] * Sigma[j, j]); + } +} + +Console.WriteLine("\nCorrelation Matrix:"); +Console.WriteLine(Corr.ToString()); +``` + +### Example 4: Distance Calculations + +```cs +var point1 = new Vector(new[] { 1.0, 2.0, 3.0 }); +var point2 = new Vector(new[] { 4.0, 6.0, 8.0 }); + +// Euclidean distance +var diff = point2 - point1; +double distance = diff.Norm(); + +Console.WriteLine($"Distance between points: {distance:F3}"); + +// Manhattan distance +double manhattan = 0; +for (int i = 0; i < point1.Length; i++) +{ + manhattan += Math.Abs(point2[i] - point1[i]); +} + +Console.WriteLine($"Manhattan distance: {manhattan:F1}"); +``` + +## Performance Considerations + +- Matrix operations create new objects - use in-place methods when available +- For large matrices, consider specialized libraries for decompositions +- Inverse is computationally expensive - avoid when possible +- For solving Ax=b, specialized solvers are more efficient than computing A^-1 + 
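+As a sketch of the last point, a small dense system can be solved with Gaussian elimination and back substitution instead of forming `A.Inverse()`. The helper below is purely illustrative (plain C# on arrays, not a ***Numerics*** API) and reuses the system from Example 1:
+
+```cs
+// Minimal Gaussian elimination with partial pivoting (illustrative only).
+// Solves A x = b for a small dense system without computing A^-1.
+double[] SolveLinearSystem(double[,] A, double[] b)
+{
+    int n = b.Length;
+    // Work on copies so the inputs are not modified
+    double[,] M = (double[,])A.Clone();
+    double[] rhs = (double[])b.Clone();
+
+    for (int k = 0; k < n; k++)
+    {
+        // Partial pivoting: pick the largest remaining pivot in column k
+        int pivot = k;
+        for (int i = k + 1; i < n; i++)
+            if (Math.Abs(M[i, k]) > Math.Abs(M[pivot, k])) pivot = i;
+        if (pivot != k)
+        {
+            for (int j = 0; j < n; j++)
+                (M[k, j], M[pivot, j]) = (M[pivot, j], M[k, j]);
+            (rhs[k], rhs[pivot]) = (rhs[pivot], rhs[k]);
+        }
+
+        // Eliminate entries below the pivot
+        for (int i = k + 1; i < n; i++)
+        {
+            double factor = M[i, k] / M[k, k];
+            for (int j = k; j < n; j++)
+                M[i, j] -= factor * M[k, j];
+            rhs[i] -= factor * rhs[k];
+        }
+    }
+
+    // Back substitution
+    var x = new double[n];
+    for (int i = n - 1; i >= 0; i--)
+    {
+        double sum = rhs[i];
+        for (int j = i + 1; j < n; j++)
+            sum -= M[i, j] * x[j];
+        x[i] = sum / M[i, i];
+    }
+    return x;
+}
+
+// Same 2x2 system as Example 1: 2x + 3y = 8, 4x + y = 10
+var solution = SolveLinearSystem(new double[,] { { 2, 3 }, { 4, 1 } }, new[] { 8.0, 10.0 });
+Console.WriteLine($"x = {solution[0]:F2}, y = {solution[1]:F2}"); // x = 2.20, y = 1.20
+```
+
+Factorization-based solvers follow the same idea, with the added benefit that the factorization can be reused when solving against many right-hand sides.
+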
+## Common Operations Summary
+
+| Operation | Method | Complexity |
+|-----------|--------|------------|
+| Matrix multiplication | `A.Multiply(B)` or `A * B` | O(n³) |
+| Transpose | `A.Transpose()` | O(n²) |
+| Inverse | `A.Inverse()` | O(n³) |
+| Determinant | `A.Determinant()` | O(n³) |
+| Vector norm | `v.Norm()` | O(n) |
+| Dot product | `v.DotProduct(w)` | O(n) |
+
+---
+
+[← Back to Index](../index.md) | [Next: Special Functions →](special-functions.md)
diff --git a/docs/mathematics/ode-solvers.md b/docs/mathematics/ode-solvers.md
new file mode 100644
index 00000000..9a7b32a9
--- /dev/null
+++ b/docs/mathematics/ode-solvers.md
@@ -0,0 +1,357 @@
+# ODE Solvers
+
+[← Previous: Special Functions](special-functions.md) | [Back to Index](../index.md) | [Next: Hypothesis Tests →](../statistics/hypothesis-tests.md)
+
+The ***Numerics*** library provides Runge-Kutta methods for solving Ordinary Differential Equations (ODEs). These methods are essential for modeling dynamic systems, population dynamics, chemical reactions, and physical processes.
+
+## Overview
+
+An ordinary differential equation has the form:
+
+```math
+\frac{dy}{dt} = f(t, y)
+```
+
+Given the initial condition $y(t_0) = y_0$, we want to find $y(t)$ for $t > t_0$.
+
+The `RungeKutta` class provides several methods for numerical integration:
+- Second-order Runge-Kutta (RK2)
+- Fourth-order Runge-Kutta (RK4)
+- Runge-Kutta-Fehlberg (adaptive step size)
+- Cash-Karp (adaptive step size)
+
+## Second-Order Runge-Kutta
+
+A simple but less accurate method:
+
+```cs
+using Numerics.Mathematics.ODESolvers;
+
+// Solve: dy/dt = y, y(0) = 1
+// Analytical solution: y(t) = e^t
+
+Func<double, double, double> f = (t, y) => y;
+
+double y0 = 1.0;   // Initial condition
+double t0 = 0.0;   // Start time
+double t1 = 1.0;   // End time
+int steps = 100;   // Time steps
+
+double[] solution = RungeKutta.SecondOrder(f, y0, t0, t1, steps);
+
+// Check solution at t=1
+Console.WriteLine($"Numerical: y(1) = {solution[steps - 1]:F6}");
+Console.WriteLine($"Analytical: e^1 = {Math.E:F6}");
+Console.WriteLine($"Error: {Math.Abs(solution[steps - 1] - Math.E):E4}");
+```
+
+## Fourth-Order Runge-Kutta (RK4)
+
+The most commonly used method, offering a good balance of accuracy and efficiency:
+
+### Array Output (Multiple Time Steps)
+
+```cs
+using Numerics.Mathematics.ODESolvers;
+
+// Solve: dy/dt = -2t*y, y(0) = 1
+// Analytical solution: y(t) = e^(-t²)
+
+Func<double, double, double> f = (t, y) => -2.0 * t * y;
+
+double y0 = 1.0;
+double t0 = 0.0;
+double t1 = 2.0;
+int steps = 200;
+
+double[] solution = RungeKutta.FourthOrder(f, y0, t0, t1, steps);
+
+// Print solution at several points
+double dt = (t1 - t0) / (steps - 1);
+for (int i = 0; i < steps; i += 40)
+{
+    double t = t0 + i * dt;
+    double numerical = solution[i];
+    double analytical = Math.Exp(-t * t);
+
+    Console.WriteLine($"t={t:F2}: y_num={numerical:F6}, y_exact={analytical:F6}, " +
+                      $"error={Math.Abs(numerical - analytical):E4}");
+}
+```
+
+### Single Step
+
+Useful for step-by-step integration:
+
+```cs
+// Single RK4 step
+double t = 0.0;
+double y = 1.0;
+double dt = 0.1;
+
+double y_next = RungeKutta.FourthOrder(f, y, t, dt);
+
+Console.WriteLine($"y({t + dt:F1}) = {y_next:F6}");
+```
+
+## Adaptive Step Size Methods
+
+Adaptive methods automatically adjust the step size to maintain accuracy while minimizing computation.
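+
+Both adaptive schemes below advance the solution with an embedded pair of Runge-Kutta formulas of different order. The gap between the two estimates approximates the local truncation error, which is then used to rescale the step. As a rough sketch (the exact control law used by the library may differ), a typical update for an embedded pair whose lower-order member has order $p$ is:
+
+```math
+h_{new} \approx 0.9 \, h \left( \frac{\text{tol}}{\text{err}} \right)^{1/(p+1)}
+```
+
+so the step shrinks where the solution changes rapidly and grows where it is smooth.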
+
+### Runge-Kutta-Fehlberg (RKF45)
+
+Fifth-order accuracy with an embedded fourth-order error estimate:
+
+```cs
+using Numerics.Mathematics.ODESolvers;
+
+// Stiff ODE: dy/dt = -15y, y(0) = 1
+Func<double, double, double> f = (t, y) => -15.0 * y;
+
+double y0 = 1.0;
+double t0 = 0.0;
+double dt = 0.1;         // Initial step size
+double dtMin = 1e-6;     // Minimum step size
+double tolerance = 1e-6; // Error tolerance
+
+double y_final = RungeKutta.Fehlberg(f, y0, t0, dt, dtMin, tolerance);
+
+Console.WriteLine($"y({t0 + dt:F3}) = {y_final:F8}");
+Console.WriteLine($"Analytical: {Math.Exp(-15.0 * (t0 + dt)):F8}");
+```
+
+### Cash-Karp Method
+
+An alternative adaptive method with a different error estimate:
+
+```cs
+double y_final_ck = RungeKutta.CashKarp(f, y0, t0, dt, dtMin, tolerance);
+
+Console.WriteLine($"Cash-Karp: y({t0 + dt:F3}) = {y_final_ck:F8}");
+```
+
+**When to use adaptive methods:**
+- Stiff equations
+- Rapidly changing solutions
+- Need to guarantee error tolerance
+- Variable dynamics (slow then fast)
+
+## Practical Examples
+
+### Example 1: Exponential Decay (Radioactive Decay)
+
+```cs
+// Model: dN/dt = -λN, where λ is the decay constant
+// N(t) = N₀ e^(-λt)
+
+double lambda = 0.693;  // Decay constant (ln(2)), so the half-life ≈ 1 year
+double N0 = 1000.0;     // Initial amount
+
+Func<double, double, double> decay = (t, N) => -lambda * N;
+
+double[] N = RungeKutta.FourthOrder(decay, N0, 0, 5.0, 100);
+
+Console.WriteLine("Radioactive Decay:");
+Console.WriteLine("Time | Amount | Half-Lives");
+double dt = 5.0 / 99;
+for (int i = 0; i < N.Length; i += 20)
+{
+    double t = i * dt;
+    double halfLives = t / (Math.Log(2) / lambda);
+    Console.WriteLine($"{t,4:F2} | {N[i],6:F1} | {halfLives,10:F3}");
+}
+```
+
+### Example 2: Logistic Growth (Population Dynamics)
+
+```cs
+// Model: dP/dt = r*P*(1 - P/K)
+// Where: r = growth rate, K = carrying capacity
+
+double r = 0.5;       // Growth rate
+double K = 1000.0;    // Carrying capacity
+double P0 = 10.0;     // Initial population
+
+Func<double, double, double> logistic = (t, P) => r * P * (1.0 - P / K);
+
+double[] P = RungeKutta.FourthOrder(logistic, P0, 0, 20.0, 200);
+
+Console.WriteLine("Logistic Growth:");
+Console.WriteLine("Time | Population | % of Carrying Capacity");
+double dt = 20.0 / 199;
+for (int i = 0; i < P.Length; i += 40)
+{
+    double t = i * dt;
+    double percent = 100.0 * P[i] / K;
+    Console.WriteLine($"{t,4:F1} | {P[i],10:F1} | {percent,22:F1}%");
+}
+
+// Find time to reach 95% of carrying capacity
+for (int i = 0; i < P.Length; i++)
+{
+    if (P[i] >= 0.95 * K)
+    {
+        Console.WriteLine($"\nReaches 95% capacity at t = {i * dt:F2}");
+        break;
+    }
+}
+```
+
+### Example 3: Harmonic Oscillator
+
+For second-order ODEs, convert to a system of first-order ODEs:
+
+```cs
+// Second-order: d²y/dt² + ω²y = 0
+// Convert to system:
+//   dy₁/dt = y₂
+//   dy₂/dt = -ω²y₁
+// Where y₁ = position, y₂ = velocity
+
+double omega = 2.0 * Math.PI;  // Angular frequency (1 Hz)
+
+double y1_0 = 1.0;  // Initial position
+double y2_0 = 0.0;  // Initial velocity
+
+// The scalar RungeKutta helpers solve one equation at a time, so for a
+// coupled system we track both state variables manually with an RK4 step.
+int steps = 1000;
+double t0 = 0.0, t1 = 2.0;
+double dt = (t1 - t0) / (steps - 1);
+
+double[] y1 = new double[steps];  // Position
+double[] y2 = new double[steps];  // Velocity
+
+y1[0] = y1_0;
+y2[0] = y2_0;
+
+for (int i = 1; i < steps; i++)
+{
+    double t = t0 + (i - 1) * dt;
+
+    // RK4 for the coupled system
+    double k1_y1 = y2[i - 1];
+    double k1_y2 = -omega * omega * y1[i - 1];
+
+    double k2_y1 = y2[i - 1] + 0.5 * dt *
k1_y2; + double k2_y2 = -omega * omega * (y1[i - 1] + 0.5 * dt * k1_y1); + + double k3_y1 = y2[i - 1] + 0.5 * dt * k2_y2; + double k3_y2 = -omega * omega * (y1[i - 1] + 0.5 * dt * k2_y1); + + double k4_y1 = y2[i - 1] + dt * k3_y2; + double k4_y2 = -omega * omega * (y1[i - 1] + dt * k3_y1); + + y1[i] = y1[i - 1] + dt / 6.0 * (k1_y1 + 2 * k2_y1 + 2 * k3_y1 + k4_y1); + y2[i] = y2[i - 1] + dt / 6.0 * (k1_y2 + 2 * k2_y2 + 2 * k3_y2 + k4_y2); +} + +Console.WriteLine("Simple Harmonic Motion:"); +Console.WriteLine("Time | Position | Velocity"); +for (int i = 0; i < steps; i += 100) +{ + double t = t0 + i * dt; + Console.WriteLine($"{t,4:F2} | {y1[i],8:F4} | {y2[i],8:F4}"); +} +``` + +### Example 4: Predator-Prey (Lotka-Volterra) + +Classic ecology model: + +```cs +// dx/dt = αx - βxy (prey) +// dy/dt = δxy - γy (predator) + +double alpha = 1.0; // Prey growth rate +double beta = 0.1; // Predation rate +double delta = 0.075; // Predator growth from predation +double gamma = 1.5; // Predator death rate + +double x0 = 10.0; // Initial prey +double y0 = 5.0; // Initial predator + +int steps = 1000; +double dt = 0.01; +double t0 = 0.0; + +double[] x = new double[steps]; // Prey +double[] y = new double[steps]; // Predator + +x[0] = x0; +y[0] = y0; + +for (int i = 1; i < steps; i++) +{ + double t = t0 + (i - 1) * dt; + + // RK4 for Lotka-Volterra system + double k1_x = alpha * x[i - 1] - beta * x[i - 1] * y[i - 1]; + double k1_y = delta * x[i - 1] * y[i - 1] - gamma * y[i - 1]; + + double k2_x = alpha * (x[i - 1] + 0.5 * dt * k1_x) - + beta * (x[i - 1] + 0.5 * dt * k1_x) * (y[i - 1] + 0.5 * dt * k1_y); + double k2_y = delta * (x[i - 1] + 0.5 * dt * k1_x) * (y[i - 1] + 0.5 * dt * k1_y) - + gamma * (y[i - 1] + 0.5 * dt * k1_y); + + double k3_x = alpha * (x[i - 1] + 0.5 * dt * k2_x) - + beta * (x[i - 1] + 0.5 * dt * k2_x) * (y[i - 1] + 0.5 * dt * k2_y); + double k3_y = delta * (x[i - 1] + 0.5 * dt * k2_x) * (y[i - 1] + 0.5 * dt * k2_y) - + gamma * (y[i - 1] + 0.5 * dt * k2_y); + + double k4_x = alpha * (x[i - 1] + dt * k3_x) - + beta * (x[i - 1] + dt * k3_x) * (y[i - 1] + dt * k3_y); + double k4_y = delta * (x[i - 1] + dt * k3_x) * (y[i - 1] + dt * k3_y) - + gamma * (y[i - 1] + dt * k3_y); + + x[i] = x[i - 1] + dt / 6.0 * (k1_x + 2 * k2_x + 2 * k3_x + k4_x); + y[i] = y[i - 1] + dt / 6.0 * (k1_y + 2 * k2_y + 2 * k3_y + k4_y); +} + +Console.WriteLine("Predator-Prey Dynamics:"); +Console.WriteLine("Time | Prey | Predator"); +for (int i = 0; i < steps; i += 100) +{ + double t = t0 + i * dt; + Console.WriteLine($"{t,5:F2} | {x[i],4:F1} | {y[i],8:F1}"); +} +``` + +## Choosing a Method + +| Method | Order | When to Use | +|--------|-------|-------------| +| **Second-Order** | O(h²) | Simple problems, rough estimates | +| **Fourth-Order** | O(h⁴) | General purpose, good accuracy | +| **Fehlberg** | O(h⁵) | Adaptive control, stiff equations | +| **Cash-Karp** | O(h⁵) | Alternative adaptive method | + +**Step Size Guidelines:** +- Fixed step: Choose dt such that solution is smooth +- Adaptive: Set tolerance based on required accuracy +- Stiff equations: Use smaller steps or adaptive methods +- For stability: dt < 2/|λ_max| where λ_max is largest eigenvalue + +## Best Practices + +1. **Verify with known solutions** when possible +2. **Check convergence** by halving step size +3. **Use adaptive methods** for variable dynamics +4. **Monitor conservation** (energy, mass) if applicable +5. **Plot solutions** to detect instabilities +6. 
**Consider stability** for stiff equations + +## Limitations + +- Fixed-step methods require careful step size selection +- Stiff equations may require specialized solvers +- Systems require manual coupling of equations +- No built-in event detection +- For very high accuracy, consider specialized ODE libraries + +--- + +[← Previous: Special Functions](special-functions.md) | [Back to Index](../index.md) | [Next: Hypothesis Tests →](../statistics/hypothesis-tests.md) diff --git a/docs/mathematics/optimization.md b/docs/mathematics/optimization.md new file mode 100644 index 00000000..8849c688 --- /dev/null +++ b/docs/mathematics/optimization.md @@ -0,0 +1,592 @@ +# Optimization + +[← Previous: Numerical Differentiation](differentiation.md) | [Back to Index](../index.md) | [Next: Linear Algebra →](linear-algebra.md) + +Optimization is the process of finding the parameter set that minimizes (or maximizes) an objective function. The ***Numerics*** library provides a comprehensive suite of local and global optimization algorithms for both unconstrained and constrained problems. These methods are essential for parameter estimation, model calibration, machine learning, and engineering design optimization. + +## Overview + +| Method | Type | Best For | Requires Derivatives | +|--------|------|----------|---------------------| +| **Local Methods** | | | | +| BFGS | Local | Smooth, differentiable functions | No (numerical) | +| Nelder-Mead | Local | General purpose, robust | No | +| Powell | Local | Smooth functions without derivatives | No | +| Golden Section | Local | 1D problems | No | +| Brent Search | Local | 1D problems, smooth functions | No | +| Gradient Descent | Local | Large-scale, smooth problems | No (numerical) | +| ADAM | Local | Machine learning, stochastic | No (numerical) | +| **Global Methods** | | | | +| Differential Evolution | Global | Multimodal, robust | No | +| Particle Swarm | Global | Multimodal, fast convergence | No | +| Shuffled Complex Evolution | Global | Hydrological calibration | No | +| Simulated Annealing | Global | Discrete/continuous, multimodal | No | +| Multi-Start | Global | Combines local search with random starts | No | +| MLSL | Global | Smooth multimodal functions | Requires local solver | +| **Constrained** | | | | +| Augmented Lagrangian | Constrained | Equality and inequality constraints | No | + +## Problem Formulation + +An optimization problem seeks to find: + +```math +\min_{\mathbf{x}} f(\mathbf{x}) +``` + +subject to: + +```math +\mathbf{x}_L \leq \mathbf{x} \leq \mathbf{x}_U +``` + +where $f(\mathbf{x})$ is the objective function, $\mathbf{x}$ is the parameter vector, and $\mathbf{x}_L$, $\mathbf{x}_U$ are lower and upper bounds. + +For maximization problems, minimize $-f(\mathbf{x})$ or use the `Maximize()` method. + +## Common Interface + +All optimizers in ***Numerics*** inherit from the `Optimizer` base class and share a common interface. 
+ +### Input Properties +- `MaxIterations`: Maximum iterations allowed (default = 10,000) +- `MaxFunctionEvaluations`: Maximum function evaluations (default = int.MaxValue) +- `AbsoluteTolerance`: Absolute convergence tolerance (default = 1E-8) +- `RelativeTolerance`: Relative convergence tolerance (default = 1E-8) +- `ReportFailure`: Throw exception on failure (default = true) +- `RecordTraces`: Save optimization trace (default = true) +- `ComputeHessian`: Compute Hessian at solution (default = true) + +### Output Properties +- `BestParameterSet`: Optimal solution (Values, Fitness) +- `Iterations`: Number of iterations performed +- `FunctionEvaluations`: Number of function evaluations +- `Status`: Optimization status (Success, Failure, etc.) +- `ParameterSetTrace`: Full trace of parameter evaluations +- `Hessian`: Numerically differentiated Hessian matrix (if computed) + +### Methods +- `Minimize()`: Minimize the objective function +- `Maximize()`: Maximize the objective function (minimizes $-f(\mathbf{x})$) + +## Local Optimization + +Local optimization methods find the nearest local minimum from a starting point. They are fast and efficient for smooth, unimodal functions but may get trapped in local minima for multimodal problems. + +### BFGS (Broyden-Fletcher-Goldfarb-Shanno) + +BFGS is a quasi-Newton method that builds an approximation to the Hessian matrix using gradient information [[1]](#1). It's one of the most effective methods for smooth, unconstrained optimization. + +```cs +using Numerics.Mathematics.Optimization; + +// Rosenbrock function: f(x,y) = (1-x)² + 100(y-x²)² +double Rosenbrock(double[] x) +{ + return Math.Pow(1 - x[0], 2) + 100 * Math.Pow(x[1] - x[0] * x[0], 2); +} + +// Initial guess +var initial = new double[] { -1.2, 1.0 }; +var lower = new double[] { -2, -2 }; +var upper = new double[] { 2, 2 }; + +// Create and configure optimizer +var bfgs = new BFGS(Rosenbrock, 2, initial, lower, upper); +bfgs.RelativeTolerance = 1e-8; +bfgs.MaxIterations = 1000; + +// Minimize +bfgs.Minimize(); + +// Results +Console.WriteLine($"Optimal solution: x = {bfgs.BestParameterSet.Values[0]:F6}, y = {bfgs.BestParameterSet.Values[1]:F6}"); +Console.WriteLine($"Minimum value: {bfgs.BestParameterSet.Fitness:F10}"); +Console.WriteLine($"Iterations: {bfgs.Iterations}"); +Console.WriteLine($"Function evaluations: {bfgs.FunctionEvaluations}"); +Console.WriteLine($"Status: {bfgs.Status}"); +``` + +**Advantages**: Fast convergence, memory efficient, works well for most smooth problems. + +**Disadvantages**: Can fail on non-smooth functions, requires bounded search space. + +### Nelder-Mead Simplex + +The Nelder-Mead method is a direct search algorithm that uses a simplex (geometric figure with n+1 vertices in n dimensions) to search the parameter space [[2]](#2). It's robust and doesn't require derivatives. + +```cs +var nm = new NelderMead(Rosenbrock, 2, initial, lower, upper); +nm.RelativeTolerance = 1e-6; +nm.Minimize(); + +Console.WriteLine($"Optimal solution: [{nm.BestParameterSet.Values[0]:F6}, {nm.BestParameterSet.Values[1]:F6}]"); +Console.WriteLine($"Minimum value: {nm.BestParameterSet.Fitness:F10}"); +``` + +**Advantages**: Very robust, doesn't require derivatives, handles non-smooth functions. + +**Disadvantages**: Slower convergence than gradient-based methods, can stagnate. + +### Powell's Method + +Powell's method is a conjugate direction algorithm that doesn't require derivatives [[1]](#1). It performs successive line searches along conjugate directions. 
+
+```cs
+var powell = new Powell(Rosenbrock, 2, initial, lower, upper);
+powell.RelativeTolerance = 1e-8;
+powell.Minimize();
+
+Console.WriteLine($"Solution: [{powell.BestParameterSet.Values[0]:F6}, {powell.BestParameterSet.Values[1]:F6}]");
+```
+
+**Advantages**: No derivatives required, good for smooth functions.
+
+**Disadvantages**: Can be slow in high dimensions, sensitive to scaling.
+
+### Gradient Descent
+
+Simple gradient-based optimization with line search:
+
+```cs
+var gd = new GradientDescent(Rosenbrock, 2, initial, lower, upper);
+gd.LearningRate = 0.001; // Step size
+gd.Minimize();
+```
+
+### ADAM Optimizer
+
+Adaptive Moment Estimation, popular in machine learning applications [[3]](#3):
+
+```cs
+var adam = new ADAM(Rosenbrock, 2, initial, lower, upper);
+adam.LearningRate = 0.001;
+adam.Beta1 = 0.9;    // First moment decay
+adam.Beta2 = 0.999;  // Second moment decay
+adam.Minimize();
+```
+
+**Advantages**: Adaptive learning rates, works well for noisy objectives.
+
+**Best for**: Machine learning, stochastic optimization problems.
+
+### One-Dimensional Methods
+
+For single-parameter optimization, specialized 1D methods are more efficient:
+
+#### Golden Section Search
+
+Uses the golden ratio to bracket the minimum:
+
+```cs
+Func<double, double> f1d = x => Math.Pow(x - 2, 2) + 3;
+
+var golden = new GoldenSection(f1d, 1);
+golden.LowerBounds = new[] { 0.0 };
+golden.UpperBounds = new[] { 5.0 };
+golden.Minimize();
+
+Console.WriteLine($"Minimum at x = {golden.BestParameterSet.Values[0]:F6}");
+```
+
+#### Brent Search
+
+Combines golden section search with parabolic interpolation for faster convergence:
+
+```cs
+var brent = new BrentSearch(f1d, 1);
+brent.LowerBounds = new[] { 0.0 };
+brent.UpperBounds = new[] { 5.0 };
+brent.Minimize();
+```
+
+## Global Optimization
+
+Global optimization methods are designed to find the global minimum across the entire search space, avoiding local minima. They typically require more function evaluations but are more robust for multimodal problems.
+
+### Differential Evolution
+
+Differential Evolution (DE) is a population-based evolutionary algorithm that's very robust for continuous optimization [[4]](#4). It creates trial vectors by combining existing population members.
+
+```cs
+using Numerics.Mathematics.Optimization;
+
+// Rastrigin function (highly multimodal)
+double Rastrigin(double[] x)
+{
+    double sum = 10 * x.Length;
+    for (int i = 0; i < x.Length; i++)
+    {
+        sum += x[i] * x[i] - 10 * Math.Cos(2 * Math.PI * x[i]);
+    }
+    return sum;
+}
+
+var lower = new double[] { -5.12, -5.12 };
+var upper = new double[] { 5.12, 5.12 };
+
+var de = new DifferentialEvolution(Rastrigin, 2, lower, upper);
+de.PopulationSize = 50;
+de.CrossoverProbability = 0.9;
+de.DifferentialWeight = 0.8;
+de.MaxIterations = 1000;
+de.Minimize();
+
+Console.WriteLine($"Global minimum: [{de.BestParameterSet.Values[0]:F6}, {de.BestParameterSet.Values[1]:F6}]");
+Console.WriteLine($"Function value: {de.BestParameterSet.Fitness:F10}");
+```
+
+**Advantages**: Very robust, handles discontinuities, good for difficult problems.
+
+**Parameters**:
+- `PopulationSize`: Number of candidate solutions (default = 10 × dimensions)
+- `CrossoverProbability`: Probability of crossover (default = 0.9)
+- `DifferentialWeight`: Scaling factor for mutation (default = 0.8)
+
+### Particle Swarm Optimization
+
+PSO simulates social behavior of bird flocking or fish schooling [[5]](#5).
Particles move through the search space influenced by their own best position and the swarm's best position. + +```cs +var pso = new ParticleSwarm(Rastrigin, 2, lower, upper); +pso.PopulationSize = 40; +pso.InertiaWeight = 0.7; +pso.CognitiveWeight = 1.5; // Personal best influence +pso.SocialWeight = 1.5; // Global best influence +pso.Minimize(); + +Console.WriteLine($"Solution: [{pso.BestParameterSet.Values[0]:F6}, {pso.BestParameterSet.Values[1]:F6}]"); +``` + +**Advantages**: Fast, simple to implement, works well for continuous problems. + +**Parameters**: +- `PopulationSize`: Number of particles (default = 10 × dimensions) +- `InertiaWeight`: Momentum (default = 0.7) +- `CognitiveWeight`: Personal best attraction (default = 1.5) +- `SocialWeight`: Global best attraction (default = 1.5) + +### Shuffled Complex Evolution (SCE-UA) + +SCE-UA was specifically developed for calibrating hydrological models [[6]](#6). It combines complex shuffling with competitive evolution. + +```cs +var sce = new ShuffledComplexEvolution(Rastrigin, 2, lower, upper); +sce.NumberOfComplexes = 5; +sce.ComplexSize = 10; +sce.MaxIterations = 1000; +sce.Minimize(); + +Console.WriteLine($"Calibrated parameters: [{sce.BestParameterSet.Values[0]:F6}, {sce.BestParameterSet.Values[1]:F6}]"); +``` + +**Advantages**: Excellent for hydrological model calibration, balances exploration and exploitation. + +**Best for**: Calibrating watershed models, water resources applications. + +### Simulated Annealing + +SA mimics the physical process of annealing in metallurgy [[7]](#7). It accepts uphill moves with decreasing probability, allowing escape from local minima. + +```cs +var sa = new SimulatedAnnealing(Rastrigin, 2, lower, upper); +sa.InitialTemperature = 100.0; +sa.CoolingRate = 0.95; +sa.MaxIterations = 10000; +sa.Minimize(); + +Console.WriteLine($"Solution: [{sa.BestParameterSet.Values[0]:F6}, {sa.BestParameterSet.Values[1]:F6}]"); +``` + +**Advantages**: Can escape local minima, works for discrete and continuous problems. + +**Parameters**: +- `InitialTemperature`: Starting temperature (default = 100) +- `CoolingRate`: Temperature reduction factor (default = 0.95) + +### Multi-Start Optimization + +Combines local search with multiple random starting points: + +```cs +var ms = new MultiStart(Rastrigin, 2, lower, upper); +ms.LocalMethod = LocalMethod.BFGS; // Choose local optimizer +ms.NumberOfStarts = 20; +ms.Minimize(); + +Console.WriteLine($"Best solution: [{ms.BestParameterSet.Values[0]:F6}, {ms.BestParameterSet.Values[1]:F6}]"); +``` + +**Advantages**: Simple to implement, leverages fast local search. + +**Disadvantages**: May waste evaluations in same basin of attraction. + +### MLSL (Multi-Level Single Linkage) + +Clustering-based global optimization that avoids redundant local searches: + +```cs +var mlsl = new MLSL(Rastrigin, 2, lower, upper); +mlsl.LocalMethod = LocalMethod.BFGS; +mlsl.Minimize(); +``` + +**Advantages**: More efficient than multi-start, avoids redundant searches. + +## Constrained Optimization + +### Augmented Lagrangian + +The Augmented Lagrangian method handles equality and inequality constraints by adding penalty terms to the objective function [[8]](#8). 
+ +```cs +using Numerics.Mathematics.Optimization; + +// Objective: minimize x² + y² +double Objective(double[] x) +{ + return x[0] * x[0] + x[1] * x[1]; +} + +// Constraint: x + y >= 1 +var constraint = new Constraint( + x => x[0] + x[1] - 1, // g(x) >= 0 form + ConstraintType.GreaterThanOrEqualTo +); + +var lower = new double[] { -5, -5 }; +var upper = new double[] { 5, 5 }; +var initial = new double[] { 0, 0 }; + +var al = new AugmentedLagrange(Objective, 2, initial, lower, upper); +al.AddConstraint(constraint); +al.LocalMethod = LocalMethod.BFGS; // Local optimizer for subproblems +al.MaxIterations = 100; +al.Minimize(); + +Console.WriteLine($"Optimal solution: [{al.BestParameterSet.Values[0]:F6}, {al.BestParameterSet.Values[1]:F6}]"); +Console.WriteLine($"Constraint satisfied: {al.BestParameterSet.Values[0] + al.BestParameterSet.Values[1]:F6} >= 1"); +``` + +**Constraint Types**: +- `ConstraintType.EqualTo`: Equality constraint $g(\mathbf{x}) = 0$ +- `ConstraintType.LessThanOrEqualTo`: Inequality constraint $g(\mathbf{x}) \leq 0$ +- `ConstraintType.GreaterThanOrEqualTo`: Inequality constraint $g(\mathbf{x}) \geq 0$ + +**Example: Minimize subject to multiple constraints**: + +```cs +// Minimize f(x,y) = (x-3)² + (y-2)² +// Subject to: x + y <= 5 +// x >= 1 +// y >= 1 + +double ObjectiveFunc(double[] x) +{ + return Math.Pow(x[0] - 3, 2) + Math.Pow(x[1] - 2, 2); +} + +var c1 = new Constraint(x => 5 - x[0] - x[1], ConstraintType.GreaterThanOrEqualTo); +var c2 = new Constraint(x => x[0] - 1, ConstraintType.GreaterThanOrEqualTo); +var c3 = new Constraint(x => x[1] - 1, ConstraintType.GreaterThanOrEqualTo); + +var constrained = new AugmentedLagrange(ObjectiveFunc, 2, new[] { 2.0, 2.0 }, + new[] { 0.0, 0.0 }, new[] { 10.0, 10.0 }); +constrained.AddConstraint(c1); +constrained.AddConstraint(c2); +constrained.AddConstraint(c3); +constrained.Minimize(); + +Console.WriteLine($"Constrained optimum: [{constrained.BestParameterSet.Values[0]:F4}, " + + $"{constrained.BestParameterSet.Values[1]:F4}]"); +``` + +## Practical Example: Calibrating a Hydrological Model + +A complete example of using optimization to calibrate a watershed model: + +```cs +using Numerics.Mathematics.Optimization; +using Numerics.Data.Statistics; + +// Observed streamflow data +double[] observed = { 12.5, 15.3, 18.7, 16.2, 14.1, 11.8, 10.3 }; + +// Simple runoff model: Q = C × P^α where Q is flow, P is precipitation, C and α are parameters +double[] precipitation = { 10, 12, 15, 13, 11, 9, 8 }; + +// Objective function: minimize RMSE +double ObjectiveFunction(double[] parameters) +{ + double C = parameters[0]; + double alpha = parameters[1]; + + // Simulate streamflow + double[] simulated = new double[observed.Length]; + for (int i = 0; i < observed.Length; i++) + { + simulated[i] = C * Math.Pow(precipitation[i], alpha); + } + + // Compute RMSE + double rmse = Statistics.RMSE(observed, simulated); + return rmse; +} + +// Parameter bounds +var lower = new double[] { 0.1, 0.5 }; // C >= 0.1, α >= 0.5 +var upper = new double[] { 5.0, 3.0 }; // C <= 5.0, α <= 3.0 + +// Use SCE-UA (recommended for hydrological calibration) +var optimizer = new ShuffledComplexEvolution(ObjectiveFunction, 2, lower, upper); +optimizer.NumberOfComplexes = 5; +optimizer.MaxIterations = 1000; +optimizer.Minimize(); + +Console.WriteLine($"Calibrated parameters:"); +Console.WriteLine($" C = {optimizer.BestParameterSet.Values[0]:F4}"); +Console.WriteLine($" α = {optimizer.BestParameterSet.Values[1]:F4}"); +Console.WriteLine($" RMSE = 
{optimizer.BestParameterSet.Fitness:F4}"); +Console.WriteLine($" Function evaluations: {optimizer.FunctionEvaluations}"); + +// Verify calibration +double[] final_simulated = new double[observed.Length]; +for (int i = 0; i < observed.Length; i++) +{ + final_simulated[i] = optimizer.BestParameterSet.Values[0] * + Math.Pow(precipitation[i], optimizer.BestParameterSet.Values[1]); +} + +double nse = Statistics.NSE(observed, final_simulated); +Console.WriteLine($" NSE = {nse:F4}"); +``` + +## Choosing an Optimization Method + +| Problem Type | Recommended Method | Notes | +|-------------|-------------------|-------| +| Smooth, unimodal | BFGS or Powell | Fast convergence to local minimum | +| Non-smooth, unimodal | Nelder-Mead | Robust direct search | +| Multimodal, global search | Differential Evolution or SCE-UA | Thorough exploration | +| Quick global search | Particle Swarm | Faster but less thorough | +| Hydrological calibration | SCE-UA | Specifically designed for this | +| With constraints | Augmented Lagrangian | Handles equality and inequality constraints | +| 1D problem | Brent Search or Golden Section | Specialized efficient methods | +| Machine learning | ADAM | Adaptive learning rates | +| Unknown smoothness | Start with Nelder-Mead | Very robust | +| High dimensions (>20) | Differential Evolution or ADAM | Scale better than others | + +## Best Practices + +1. **Scale Variables**: Normalize parameters to similar ranges (e.g., [0,1] or [-1,1]) for better convergence. + +2. **Set Reasonable Bounds**: Tight bounds improve convergence but shouldn't exclude the optimum. + +3. **Multiple Runs**: For global optimization, run multiple times with different random seeds to ensure robustness. + +4. **Hybrid Approach**: Use global method followed by local refinement: + ```cs + // Global search + var de = new DifferentialEvolution(ObjectiveFunc, n, lower, upper); + de.Minimize(); + + // Local refinement + var bfgs = new BFGS(ObjectiveFunc, n, de.BestParameterSet.Values, lower, upper); + bfgs.Minimize(); + ``` + +5. **Monitor Convergence**: Check the `ParameterSetTrace` to diagnose convergence issues: + ```cs + foreach (var ps in optimizer.ParameterSetTrace) + { + Console.WriteLine($"Iteration {ps.Fitness}"); + } + ``` + +6. **Adjust Tolerances**: Tighter tolerances require more evaluations: + ```cs + optimizer.RelativeTolerance = 1e-10; // Very tight + optimizer.AbsoluteTolerance = 1e-10; + ``` + +7. **Population Size**: For global methods, larger populations explore better but cost more: + ```cs + de.PopulationSize = 20 * NumberOfParameters; // Rule of thumb + ``` + +## Understanding Results + +### Parameter Set Structure + +The `BestParameterSet` contains: +- `Values`: The optimal parameter vector +- `Fitness`: The objective function value at the optimum +- `Weight`: (Optional) Used internally by some algorithms + +### Hessian Matrix + +When `ComputeHessian = true`, the Hessian at the solution is computed numerically. 
This provides information about parameter sensitivity and uncertainty: + +```cs +optimizer.ComputeHessian = true; +optimizer.Minimize(); + +// Access Hessian +var H = optimizer.Hessian; + +// Parameter standard errors (approximation) +for (int i = 0; i < optimizer.NumberOfParameters; i++) +{ + double se = Math.Sqrt(Math.Abs(1.0 / H[i, i])); + Console.WriteLine($"Parameter {i} ± {se:F4}"); +} +``` + +### Optimization Status + +Check the `Status` property to verify success: + +```cs +if (optimizer.Status == OptimizationStatus.Success) +{ + Console.WriteLine("Optimization converged successfully"); +} +else if (optimizer.Status == OptimizationStatus.MaxIterationsReached) +{ + Console.WriteLine("Maximum iterations reached - may not have converged"); +} +``` + +## Performance Tips + +1. **Function Evaluation Cost**: Most optimization time is spent evaluating the objective function. Optimize your function first. + +2. **Parallel Evaluation**: For population-based methods (DE, PSO), evaluate population members in parallel if your function is thread-safe. + +3. **Warm Start**: If solving similar problems repeatedly, use the previous solution as initial guess. + +4. **Gradient Information**: If you can provide analytical gradients, BFGS will converge much faster (though the current implementation uses numerical gradients). + +--- + +## References + +[1] Nocedal, J., & Wright, S. J. (2006). *Numerical Optimization* (2nd ed.). Springer. + +[2] Nelder, J. A., & Mead, R. (1965). A simplex method for function minimization. *The Computer Journal*, 7(4), 308-313. + +[3] Kingma, D. P., & Ba, J. (2014). Adam: A method for stochastic optimization. *arXiv preprint arXiv:1412.6980*. + +[4] Storn, R., & Price, K. (1997). Differential evolution–a simple and efficient heuristic for global optimization over continuous spaces. *Journal of Global Optimization*, 11(4), 341-359. + +[5] Kennedy, J., & Eberhart, R. (1995). Particle swarm optimization. *Proceedings of IEEE International Conference on Neural Networks*, 4, 1942-1948. + +[6] Duan, Q., Sorooshian, S., & Gupta, V. (1992). Effective and efficient global optimization for conceptual rainfall-runoff models. *Water Resources Research*, 28(4), 1015-1031. + +[7] Kirkpatrick, S., Gelatt, C. D., & Vecchi, M. P. (1983). Optimization by simulated annealing. *Science*, 220(4598), 671-680. + +[8] Birgin, E. G., & Martínez, J. M. (2014). *Practical Augmented Lagrangian Methods for Constrained Optimization*. SIAM. + +--- + +[← Previous: Numerical Differentiation](differentiation.md) | [Back to Index](../index.md) | [Next: Linear Algebra →](linear-algebra.md) diff --git a/docs/mathematics/root-finding.md b/docs/mathematics/root-finding.md new file mode 100644 index 00000000..59b18c00 --- /dev/null +++ b/docs/mathematics/root-finding.md @@ -0,0 +1,473 @@ +# Root Finding + +[← Previous: Optimization](optimization.md) | [Back to Index](../index.md) | [Next: Linear Algebra →](linear-algebra.md) + +Root finding is the process of determining the values of $x$ for which a function $f(x) = 0$. These are also called zeros, roots, or solutions of the equation. Root finding is fundamental to many numerical methods and applications, including solving nonlinear equations, finding equilibrium points, and numerical integration of differential equations. 
+
+## Overview
+
+The ***Numerics*** library provides several robust algorithms for finding roots of univariate functions:
+
+| Method | Requires | Convergence | Best For |
+|--------|----------|-------------|----------|
+| **Bisection** | Bracketing interval | Linear (slow but sure) | Robust, guaranteed convergence |
+| **Secant** | Two initial points | Superlinear (~1.618) | When derivative unavailable |
+| **Newton-Raphson** | Derivative | Quadratic (very fast) | Smooth functions, good initial guess |
+| **Brent** | Bracketing interval | Superlinear | General purpose, best overall |
+
+## Problem Formulation
+
+Given a function $f: \mathbb{R} \rightarrow \mathbb{R}$, find $x^*$ such that:
+
+```math
+f(x^*) = 0
+```
+
+### Bracketing
+
+Some methods require a **bracketing interval** $[a, b]$ where $f(a)$ and $f(b)$ have opposite signs. By the Intermediate Value Theorem, if $f$ is continuous, there must be at least one root in the interval.
+
+## Bisection Method
+
+The bisection method is the simplest root-finding algorithm. It repeatedly bisects an interval and selects the subinterval in which the root must lie [[1]](#1).
+
+### Algorithm
+
+1. Start with interval $[a, b]$ where $f(a) \cdot f(b) < 0$
+2. Compute midpoint $c = (a + b) / 2$
+3. If $f(c)$ is close enough to zero, return $c$
+4. Otherwise, replace either $a$ or $b$ with $c$ based on the sign of $f(c)$
+5. Repeat until convergence
+
+### Usage
+
+```cs
+using Numerics.Mathematics;
+
+// Find root of f(x) = x² - 4 (roots at x = ±2)
+Func<double, double> f = x => x * x - 4;
+
+// Bisection requires a bracketing interval
+double root = Bisection.Solve(f,
+    firstGuess: 1.5,      // Optional hint for initial interval
+    lowerBound: 0,        // f(0) = -4 < 0
+    upperBound: 3,        // f(3) = 5 > 0
+    tolerance: 1e-8,
+    maxIterations: 1000,
+    reportFailure: true);
+
+Console.WriteLine($"Root: {root:F10}"); // 2.0000000000
+Console.WriteLine($"Verification: f({root}) = {f(root):E3}");
+```
+
+### Advantages and Disadvantages
+
+**Advantages:**
+- Always converges if the initial interval brackets a root
+- Very robust - works even for non-smooth functions
+- Simple to implement and understand
+- Guaranteed to find a root
+
+**Disadvantages:**
+- Slow convergence (linear, each iteration reduces the error by half)
+- Requires bracketing interval
+- Cannot find roots where function doesn't change sign (e.g., $f(x) = x^2$)
+
+**When to use:** When robustness is paramount, or for poorly behaved functions.
+
+## Brent's Method
+
+Brent's method combines the robustness of bisection with the speed of secant and inverse quadratic interpolation [[2]](#2). It's generally the best general-purpose root finder.
+
+### Algorithm
+
+The method maintains a bracketing interval and uses:
+- **Inverse quadratic interpolation** when three points are available
+- **Secant method** when two points are available
+- **Bisection** as a fallback to guarantee convergence
+
+### Usage
+
+```cs
+using Numerics.Mathematics;
+
+// Find root of f(x) = cos(x) - x (root around x ≈ 0.739)
+Func<double, double> f = x => Math.Cos(x) - x;
+
+double root = Brent.Solve(f,
+    lowerBound: 0,    // f(0) = 1 > 0
+    upperBound: 1,    // f(1) = -0.46 < 0
+    tolerance: 1e-10,
+    maxIterations: 1000);
+
+Console.WriteLine($"Root: {root:F12}"); // 0.739085133215
+Console.WriteLine($"Verification: f({root}) = {f(root):E3}");
+```
+
+### Example: Finding where two functions intersect
+
+To find where $f(x) = g(x)$, solve $h(x) = f(x) - g(x) = 0$:
+
+```cs
+Func<double, double> f = x => Math.Exp(-x);
+Func<double, double> g = x => x * x;
+
+// Find intersection: e^(-x) = x²
+Func<double, double> h = x => f(x) - g(x);
+
+double intersection = Brent.Solve(h, 0, 1);
+Console.WriteLine($"Intersection at x = {intersection:F6}");
+Console.WriteLine($"At this point: f({intersection}) = {f(intersection):F6}");
+Console.WriteLine($"At this point: g({intersection}) = {g(intersection):F6}");
+```
+
+### Advantages and Disadvantages
+
+**Advantages:**
+- Very fast convergence (superlinear)
+- Guaranteed to converge with a bracketing interval
+- Automatically switches between methods for optimal performance
+- Widely considered the best general-purpose method
+
+**Disadvantages:**
+- Requires bracketing interval
+- More complex than simpler methods
+
+**When to use:** Default choice for most root-finding problems.
+
+## Secant Method
+
+The secant method is similar to Newton-Raphson but approximates the derivative using finite differences, eliminating the need for an analytical derivative [[1]](#1).
+
+### Algorithm
+
+The secant formula is:
+
+```math
+x_{n+1} = x_n - f(x_n) \frac{x_n - x_{n-1}}{f(x_n) - f(x_{n-1})}
+```
+
+This approximates the derivative as:
+
+```math
+f'(x_n) \approx \frac{f(x_n) - f(x_{n-1})}{x_n - x_{n-1}}
+```
+
+### Usage
+
+```cs
+using Numerics.Mathematics;
+
+// Find root of f(x) = x³ - 2x - 5
+Func<double, double> f = x => x * x * x - 2 * x - 5;
+
+double root = Secant.Solve(f,
+    lowerBound: 2,    // First point
+    upperBound: 3,    // Second point
+    tolerance: 1e-8,
+    maxIterations: 1000);
+
+Console.WriteLine($"Root: {root:F10}"); // 2.0945514815
+Console.WriteLine($"Verification: f({root}) = {f(root):E3}");
+```
+
+### Advantages and Disadvantages
+
+**Advantages:**
+- Faster than bisection (superlinear convergence)
+- Doesn't require a derivative
+- Only needs one function evaluation per iteration (vs. two for Newton-Raphson)
+
+**Disadvantages:**
+- Not guaranteed to converge
+- May diverge for poor initial guesses
+- Slower than Newton-Raphson when a derivative is available
+
+**When to use:** When the derivative is unavailable or expensive to compute, and you have reasonable initial estimates.
+
+## Newton-Raphson Method
+
+Newton-Raphson uses the function's derivative to iteratively approach the root [[1]](#1). It has quadratic convergence, making it very fast when it works.
+
+### Algorithm
+
+The Newton-Raphson formula is:
+
+```math
+x_{n+1} = x_n - \frac{f(x_n)}{f'(x_n)}
+```
+
+Geometrically, this finds where the tangent line at $(x_n, f(x_n))$ crosses the x-axis.
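+
+To make the quadratic convergence concrete, the iteration can be carried out with a plain loop before turning to the library call. The snippet below is only an illustration (it does not use ***Numerics***); note how the error roughly squares from one step to the next:
+
+```cs
+// Hand-rolled Newton-Raphson iterations for f(x) = x² - 2, whose root is √2.
+Func<double, double> f = x => x * x - 2;
+Func<double, double> df = x => 2 * x;
+
+double xn = 1.0; // Initial guess
+for (int i = 1; i <= 5; i++)
+{
+    xn -= f(xn) / df(xn); // x_{n+1} = x_n - f(x_n) / f'(x_n)
+    Console.WriteLine($"Iteration {i}: x = {xn:F15}, |error| = {Math.Abs(xn - Math.Sqrt(2)):E2}");
+}
+```
+
+After four or five steps the iterate is accurate to machine precision; the `NewtonRaphson.Solve` call in the next section automates this loop with tolerance and iteration checks.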
+
+### Usage
+
+```cs
+using Numerics.Mathematics;
+
+// Find root of f(x) = x² - 2 (square root of 2)
+Func<double, double> f = x => x * x - 2;
+Func<double, double> df = x => 2 * x; // Derivative: f'(x) = 2x
+
+double root = NewtonRaphson.Solve(f, df,
+    firstGuess: 1.0,    // Initial guess
+    tolerance: 1e-12,
+    maxIterations: 100);
+
+Console.WriteLine($"√2 = {root:F12}"); // 1.414213562373
+Console.WriteLine($"Verification: {root}² = {root * root:F12}");
+```
+
+**Note:** If you don't have an analytical derivative, the ***Numerics*** library's `NumericalDerivative` class can compute it numerically, though this is less efficient:
+
+```cs
+using Numerics.Mathematics;
+
+Func<double, double> f = x => x * x - 2;
+Func<double, double> df = x => NumericalDerivative.Derivative(f, x);
+
+double root = NewtonRaphson.Solve(f, df, 1.0);
+```
+
+### Robust Newton-Raphson
+
+The library also provides a robust version that combines Newton-Raphson with bisection to guarantee convergence:
+
+```cs
+// Robust version requires a bracketing interval
+double root = NewtonRaphson.RobustSolve(f, df,
+    firstGuess: 1.0,
+    lowerBound: 0.5,
+    upperBound: 2.0,
+    tolerance: 1e-12);
+```
+
+This method uses Newton-Raphson when it's making good progress, but falls back to bisection if the iteration goes outside the brackets or converges too slowly.
+
+### Advantages and Disadvantages
+
+**Advantages:**
+- Very fast convergence (quadratic)
+- Few iterations needed (typically 4-6)
+- Can be extended to systems of equations
+
+**Disadvantages:**
+- Requires the derivative (analytical or numerical)
+- Not guaranteed to converge
+- Can fail for poor initial guesses
+- Can diverge or oscillate
+
+**When to use:** When you have a good initial guess and can provide the derivative, or use the robust version for guaranteed convergence.
+
+## Example: Solving for Implied Volatility
+
+A practical example from financial mathematics - solving the Black-Scholes equation for implied volatility:
+
+```cs
+using Numerics.Mathematics;
+
+// Black-Scholes call option price
+double BlackScholesCall(double S, double K, double T, double r, double sigma)
+{
+    double d1 = (Math.Log(S / K) + (r + 0.5 * sigma * sigma) * T) / (sigma * Math.Sqrt(T));
+    double d2 = d1 - sigma * Math.Sqrt(T);
+
+    // Standard normal CDF via the error function Erf (see the Special Functions documentation)
+    double N(double x) => 0.5 * (1.0 + Erf(x / Math.Sqrt(2)));
+
+    return S * N(d1) - K * Math.Exp(-r * T) * N(d2);
+}
+
+// Given a market price, solve for implied volatility
+double S = 100;              // Stock price
+double K = 100;              // Strike price
+double T = 1;                // Time to maturity (years)
+double r = 0.05;             // Risk-free rate
+double marketPrice = 12.34;  // Observed option price
+
+// Define function: difference between model and market price
+Func<double, double> f = sigma => BlackScholesCall(S, K, T, r, sigma) - marketPrice;
+
+// Solve for implied volatility using Brent
+double impliedVol = Brent.Solve(f, 0.01, 2.0);
+
+Console.WriteLine($"Implied volatility: {impliedVol:P2}");
+Console.WriteLine($"Verification: BS price = {BlackScholesCall(S, K, T, r, impliedVol):F2}");
+```
+
+## Finding Multiple Roots
+
+To find multiple roots, solve in different intervals:
+
+```cs
+using Numerics.Mathematics;
+
+// Function with multiple roots: f(x) = sin(x)
+// Roots at x = 0, ±π, ±2π, ...
+Func<double, double> f = x => Math.Sin(x);
+
+// Find roots in different intervals
+var roots = new List<double>();
+
+// Find the positive roots at π, 2π, and 3π
+for (int i = 1; i <= 3; i++)
+{
+    double a = i * Math.PI - 0.5;
+    double b = i * Math.PI + 0.5;
+
+    if (f(a) * f(b) < 0) // Bracket contains a root
+    {
+        double root = Brent.Solve(f, a, b);
+        roots.Add(root);
+    }
+}
+
+Console.WriteLine("Roots of sin(x):");
+foreach (var root in roots)
+{
+    Console.WriteLine($"  x = {root:F10} (≈ {root / Math.PI:F2}π)");
+}
+```
+
+## Example: Critical Points via Root Finding
+
+Find critical points of a function by solving $f'(x) = 0$:
+
+```cs
+using Numerics.Mathematics;
+
+// Find critical points of f(x) = x³ - 3x² + 2
+Func<double, double> f = x => x * x * x - 3 * x * x + 2;
+Func<double, double> df = x => 3 * x * x - 6 * x; // f'(x)
+
+// f'(x) = 3x² - 6x = 3x(x - 2)
+// Critical points at x = 0 and x = 2
+
+double cp1 = Brent.Solve(df, -1, 1); // Find critical point near 0
+double cp2 = Brent.Solve(df, 1, 3);  // Find critical point near 2
+
+Console.WriteLine($"Critical point 1: x = {cp1:F6}, f(x) = {f(cp1):F6}");
+Console.WriteLine($"Critical point 2: x = {cp2:F6}, f(x) = {f(cp2):F6}");
+
+// Determine if max or min using second derivative
+Func<double, double> d2f = x => 6 * x - 6; // f''(x)
+
+Console.WriteLine($"At x={cp1}: f''(x)={d2f(cp1):F1} → " +
+    (d2f(cp1) > 0 ? "Local minimum" : "Local maximum"));
+Console.WriteLine($"At x={cp2}: f''(x)={d2f(cp2):F1} → " +
+    (d2f(cp2) > 0 ? "Local minimum" : "Local maximum"));
+```
+
+## Choosing a Root Finding Method
+
+| Scenario | Recommended Method | Notes |
+|----------|-------------------|-------|
+| General purpose | Brent | Best balance of speed and robustness |
+| Have derivative | Newton-Raphson (robust version) | Fastest convergence |
+| No derivative | Secant or Brent | Brent more robust, Secant faster |
+| Difficult function | Bisection | Guaranteed convergence, but slow |
+| Need absolute certainty | Bisection or Brent | Both guarantee convergence |
+| Very smooth function | Newton-Raphson | Quadratic convergence |
+| Poor initial guess | Brent or Robust Newton-Raphson | Fall back to bisection when needed |
+
+## Convergence Criteria
+
+All root-finding methods in ***Numerics*** use combined criteria:
+
+1. **Function value tolerance**: $|f(x)| < \text{tolerance}$
+2. **Parameter tolerance**: $|x_{n+1} - x_n| < \text{tolerance}$
+
+Both must be satisfied for convergence. The default tolerance is $10^{-8}$.
+
+## Best Practices
+
+1. **Bracket First**: For robust methods (Bisection, Brent), ensure your initial interval brackets the root by checking that $f(a) \cdot f(b) < 0$.
+
+2. **Plot the Function**: Before root finding, plot the function to understand its behavior and identify approximate root locations.
+
+3. **Check Convergence**: Always verify the solution:
+   ```cs
+   double root = Brent.Solve(f, a, b);
+   Console.WriteLine($"f({root}) = {f(root):E3}"); // Should be near zero
+   ```
+
+4. **Handle No Root Cases**: Check if a root exists in your interval before calling the solver:
+   ```cs
+   if (f(a) * f(b) >= 0)
+   {
+       Console.WriteLine("Function doesn't change sign - root may not exist");
+   }
+   ```
+
+5. **Multiple Roots**: To find all roots, divide the search space and solve in each bracketing interval.
+
+6. **Scale Appropriately**: Normalize your function if it has extreme magnitudes to avoid numerical issues.
+
+7. **Set Reasonable Tolerances**: Tighter tolerances require more iterations:
+   ```cs
+   double root = Brent.Solve(f, a, b, tolerance: 1e-12);
+   ```
+
+8. 
**Use Robust Versions for Production**: When failure is not an option, use Brent or Robust Newton-Raphson. + +## Common Pitfalls + +1. **No Root in Interval**: Bisection and Brent will fail if $f(a)$ and $f(b)$ have the same sign. + +2. **Multiple Roots**: These methods find one root at a time. Multiple roots in the same interval may cause issues. + +3. **Flat Regions**: All methods struggle where $f'(x) \approx 0$ (Newton-Raphson especially). + +4. **Discontinuities**: Newton-Raphson and Secant may fail near discontinuities. Brent and Bisection are more robust. + +5. **Poor Initial Guess**: Newton-Raphson is sensitive to the starting point. Use Robust Newton-Raphson or Brent if initial guess quality is uncertain. + +## Error Handling + +```cs +using Numerics.Mathematics; + +try +{ + double root = Brent.Solve(f, a, b, + tolerance: 1e-10, + maxIterations: 1000, + reportFailure: true); // Throw exception on failure + + Console.WriteLine($"Root found: {root}"); +} +catch (Exception ex) +{ + Console.WriteLine($"Root finding failed: {ex.Message}"); +} + +// Or suppress exceptions +double root2 = Brent.Solve(f, a, b, reportFailure: false); +if (double.IsNaN(root2)) +{ + Console.WriteLine("Failed to converge"); +} +``` + +## Performance Comparison + +For the function $f(x) = x^3 - 2x - 5$ with root at $x \approx 2.0946$: + +| Method | Initial Values | Iterations | Function Evals | Time (relative) | +|--------|---------------|-----------|----------------|-----------------| +| Bisection | [2, 3] | 27 | 27 | 1.0× | +| Secant | 2, 3 | 6 | 6 | 0.22× | +| Newton-Raphson | 2 | 4 | 8 | 0.30× | +| Brent | [2, 3] | 5 | 8 | 0.30× | + +Newton-Raphson and Brent are typically fastest, while Bisection is slowest but most reliable. + +--- + +## References + +[1] Press, W. H., Teukolsky, S. A., Vetterling, W. T., & Flannery, B. P. (2007). *Numerical Recipes: The Art of Scientific Computing* (3rd ed.). Cambridge University Press. + +[2] Brent, R. P. (1973). *Algorithms for Minimization Without Derivatives*. Prentice-Hall, Englewood Cliffs, NJ. + +--- + +[← Previous: Optimization](optimization.md) | [Back to Index](../index.md) | [Next: Linear Algebra →](linear-algebra.md) diff --git a/docs/mathematics/special-functions.md b/docs/mathematics/special-functions.md new file mode 100644 index 00000000..6ca62ad1 --- /dev/null +++ b/docs/mathematics/special-functions.md @@ -0,0 +1,388 @@ +# Special Functions + +[← Previous: Linear Algebra](linear-algebra.md) | [Back to Index](../index.md) | [Next: ODE Solvers →](ode-solvers.md) + +The ***Numerics*** library provides essential special functions commonly used in statistical distributions, numerical analysis, and scientific computing. These include Gamma, Beta, Error functions, and combinatorial functions. + +## Gamma Function + +The Gamma function Γ(x) extends the factorial function to real and complex numbers: Γ(n) = (n-1)! for positive integers. + +### Basic Gamma Function + +```cs +using Numerics.Mathematics.SpecialFunctions; + +// Gamma function +double g1 = Gamma.Function(5.0); // Γ(5) = 4! 
= 24 +double g2 = Gamma.Function(0.5); // Γ(0.5) = √π ≈ 1.772 +double g3 = Gamma.Function(3.5); // Γ(3.5) ≈ 3.323 + +Console.WriteLine($"Γ(5) = {g1:F2}"); +Console.WriteLine($"Γ(0.5) = {g2:F6}"); +Console.WriteLine($"Γ(3.5) = {g3:F3}"); + +// Verify: Γ(n+1) = n·Γ(n) +double check = 3.5 * Gamma.Function(3.5); +Console.WriteLine($"4.5·Γ(3.5) = {check:F3}"); +Console.WriteLine($"Γ(4.5) = {Gamma.Function(4.5):F3}"); +``` + +### Log-Gamma Function + +For large arguments, use log-gamma to avoid overflow: + +```cs +// Regular gamma would overflow for large x +double x = 200.0; + +// Use log-gamma +double logGamma = Gamma.LogGamma(x); +Console.WriteLine($"ln(Γ(200)) = {logGamma:F2}"); + +// Gamma itself would be huge: exp(logGamma) +// Don't compute directly - work in log space +``` + +### Digamma and Trigamma + +Derivatives of the log-gamma function: + +```cs +// Digamma: ψ(x) = d/dx[ln(Γ(x))] = Γ'(x)/Γ(x) +double digamma = Gamma.Digamma(2.0); + +// Trigamma: ψ'(x) = d²/dx²[ln(Γ(x))] +double trigamma = Gamma.Trigamma(2.0); + +Console.WriteLine($"ψ(2) = {digamma:F6}"); +Console.WriteLine($"ψ'(2) = {trigamma:F6}"); + +// Applications: moment matching in Gamma distribution MLE +``` + +### Incomplete Gamma Functions + +Used in chi-squared and gamma distributions: + +```cs +// Lower incomplete gamma: γ(a,x) = ∫₀ˣ t^(a-1)e^(-t) dt +double lowerIncomplete = Gamma.LowerIncomplete(a: 2.0, x: 3.0); + +// Upper incomplete gamma: Γ(a,x) = ∫ₓ^∞ t^(a-1)e^(-t) dt +double upperIncomplete = Gamma.UpperIncomplete(a: 2.0, x: 3.0); + +// Verify: γ(a,x) + Γ(a,x) = Γ(a) +double sum = lowerIncomplete + upperIncomplete; +double gamma = Gamma.Function(2.0); + +Console.WriteLine($"Lower: {lowerIncomplete:F6}"); +Console.WriteLine($"Upper: {upperIncomplete:F6}"); +Console.WriteLine($"Sum: {sum:F6}, Γ(2): {gamma:F6}"); +``` + +### Regularized Incomplete Gamma + +Normalized version: P(a,x) = γ(a,x)/Γ(a) + +```cs +// Regularized incomplete gamma (CDF of Gamma distribution) +double P = Gamma.Incomplete(x: 3.0, alpha: 2.0); + +Console.WriteLine($"P(2, 3) = {P:F6}"); +Console.WriteLine("This equals the Gamma(2,1) CDF at x=3"); + +// Inverse: find x such that P(a,x) = p +double xInv = Gamma.InverseLowerIncomplete(a: 2.0, y: 0.9); +Console.WriteLine($"P(2, {xInv:F3}) = 0.9"); +``` + +## Beta Function + +The Beta function relates to the Gamma function: B(a,b) = Γ(a)Γ(b)/Γ(a+b) + +### Basic Beta Function + +```cs +using Numerics.Mathematics.SpecialFunctions; + +// Beta function +double beta = Beta.Function(a: 2.0, b: 3.0); + +// Verify relation to Gamma +double gamma_a = Gamma.Function(2.0); +double gamma_b = Gamma.Function(3.0); +double gamma_ab = Gamma.Function(5.0); +double betaCheck = gamma_a * gamma_b / gamma_ab; + +Console.WriteLine($"B(2,3) = {beta:F6}"); +Console.WriteLine($"Γ(2)Γ(3)/Γ(5) = {betaCheck:F6}"); +``` + +### Incomplete Beta Function + +Used in Beta distribution and Student's t-test: + +```cs +// Incomplete beta: Bₓ(a,b) = ∫₀ˣ t^(a-1)(1-t)^(b-1) dt +double incompleteBeta = Beta.Incomplete(a: 2.0, b: 3.0, x: 0.4); + +Console.WriteLine($"Bₓ(2,3,0.4) = {incompleteBeta:F6}"); +Console.WriteLine("This equals integral from 0 to 0.4"); + +// Regularized incomplete beta (CDF of Beta distribution) +double I = incompleteBeta / Beta.Function(2.0, 3.0); +Console.WriteLine($"I(2,3,0.4) = {I:F6}"); +``` + +### Inverse Incomplete Beta + +Quantile function for Beta distribution: + +```cs +// Find x such that I(a,b,x) = p +double x = Beta.IncompleteInverse(aa: 2.0, bb: 3.0, yy0: 0.5); + +Console.WriteLine($"50th percentile of 
Beta(2,3): {x:F4}"); + +// Verify +double check = Beta.Incomplete(2.0, 3.0, x) / Beta.Function(2.0, 3.0); +Console.WriteLine($"Verification: I(2,3,{x:F4}) = {check:F6}"); +``` + +## Error Function + +The error function is the integral of the Gaussian distribution: + +### Error Function and Complement + +```cs +using Numerics.Mathematics.SpecialFunctions; + +// Error function: erf(x) = (2/√π) ∫₀ˣ e^(-t²) dt +double erf = Erf.Function(1.0); + +// Complementary error function: erfc(x) = 1 - erf(x) +double erfc = Erf.Erfc(1.0); + +Console.WriteLine($"erf(1) = {erf:F6}"); +Console.WriteLine($"erfc(1) = {erfc:F6}"); +Console.WriteLine($"Sum = {erf + erfc:F6}"); // Should be 1.0 + +// Relation to normal distribution +// Φ(x) = 0.5[1 + erf(x/√2)] +double x = 1.0; +double phi = 0.5 * (1 + Erf.Function(x / Math.Sqrt(2))); +Console.WriteLine($"Φ(1) via erf = {phi:F6}"); +``` + +### Inverse Error Function + +```cs +// Inverse erf: find x such that erf(x) = y +double y = 0.5; +double xInv = Erf.InverseErf(y); + +Console.WriteLine($"erf⁻¹(0.5) = {xInv:F6}"); +Console.WriteLine($"Verification: erf({xInv:F6}) = {Erf.Function(xInv):F6}"); + +// Inverse erfc +double xInvC = Erf.InverseErfc(0.5); +Console.WriteLine($"erfc⁻¹(0.5) = {xInvC:F6}"); +``` + +## Factorial and Combinatorics + +### Factorial + +```cs +using Numerics.Mathematics.SpecialFunctions; + +// Integer factorial +double fact5 = Factorial.Function(5); // 5! = 120 +double fact10 = Factorial.Function(10); // 10! = 3,628,800 + +Console.WriteLine($"5! = {fact5:F0}"); +Console.WriteLine($"10! = {fact10:N0}"); + +// Log factorial for large numbers +double logFact100 = Factorial.LogFactorial(100); +Console.WriteLine($"ln(100!) = {logFact100:F2}"); + +// Too large for double: use log space +double fact100 = Math.Exp(logFact100); +Console.WriteLine($"100! ≈ {fact100:E2}"); +``` + +### Binomial Coefficients + +```cs +// Binomial coefficient: C(n,k) = n!/(k!(n-k)!) +double c_10_3 = Factorial.BinomialCoefficient(n: 10, k: 3); + +Console.WriteLine($"C(10,3) = {c_10_3:F0}"); // 120 + +// Verify: 10!/(3!·7!) 
= 3,628,800/(6·5040) = 120 + +// Pascal's triangle relation: C(n,k) = C(n-1,k-1) + C(n-1,k) +double check = Factorial.BinomialCoefficient(9, 2) + Factorial.BinomialCoefficient(9, 3); +Console.WriteLine($"C(9,2) + C(9,3) = {check:F0}"); +Console.WriteLine($"C(10,3) = {c_10_3:F0}"); +``` + +### Combinations + +```cs +// Generate all combinations of m items from n +int m = 3; // Choose 3 +int n = 5; // From 5 + +var combinations = Factorial.FindCombinations(m, n); + +Console.WriteLine($"All ways to choose {m} items from {n}:"); +foreach (var combo in combinations) +{ + Console.WriteLine($" [{string.Join(", ", combo)}]"); +} + +// Total count should equal C(5,3) = 10 +int count = combinations.Count(); +Console.WriteLine($"Total: {count} combinations"); +``` + +## Practical Applications + +### Example 1: Gamma Distribution Moments + +```cs +// Gamma distribution with shape α and rate β has: +// Mean = α/β +// Variance = α/β² + +double alpha = 5.0; +double beta = 2.0; + +// Can be computed using Gamma function +double mean = alpha / beta; +double variance = alpha / (beta * beta); + +Console.WriteLine($"Gamma({alpha}, {beta}) distribution:"); +Console.WriteLine($" Mean = {mean:F2}"); +Console.WriteLine($" Variance = {variance:F2}"); +Console.WriteLine($" Std Dev = {Math.Sqrt(variance):F2}"); + +// Factorial moment: E[X(X-1)...(X-k+1)] = Γ(α+k)/(βᵏΓ(α)) +int k = 2; +double factMoment = Gamma.Function(alpha + k) / (Math.Pow(beta, k) * Gamma.Function(alpha)); +Console.WriteLine($" E[X(X-1)] = {factMoment:F2}"); +``` + +### Example 2: Normal Distribution CDF + +```cs +// Compute Normal(0,1) CDF using error function +double x = 1.5; + +// Φ(x) = 0.5[1 + erf(x/√2)] +double cdf = 0.5 * (1.0 + Erf.Function(x / Math.Sqrt(2.0))); + +Console.WriteLine($"Φ({x}) = {cdf:F6}"); +Console.WriteLine("Compare with Normal distribution class for verification"); + +// Tail probability +double tail = 1.0 - cdf; +Console.WriteLine($"P(X > {x}) = {tail:F6}"); + +// Using erfc for better precision in tails +double tailAlt = 0.5 * Erf.Erfc(x / Math.Sqrt(2.0)); +Console.WriteLine($"P(X > {x}) via erfc = {tailAlt:F6}"); +``` + +### Example 3: Chi-Squared CDF + +```cs +// Chi-squared distribution with k degrees of freedom +// CDF = P(k/2, x/2) where P is regularized lower incomplete gamma + +int k = 5; // Degrees of freedom +double x = 8.0; + +// CDF at x +double cdf = Gamma.Incomplete(x: x / 2.0, alpha: k / 2.0); + +Console.WriteLine($"Chi-squared({k}) CDF at {x}:"); +Console.WriteLine($" P(X ≤ {x}) = {cdf:F6}"); + +// Critical value for α=0.05 +double alpha_level = 0.05; +double critical = 2.0 * Gamma.InverseLowerIncomplete(a: k / 2.0, y: 1.0 - alpha_level); +Console.WriteLine($" 95th percentile = {critical:F3}"); +``` + +### Example 4: Beta Distribution + +```cs +// Beta(a,b) distribution CDF using incomplete beta +double a = 2.0; +double b = 3.0; +double x = 0.4; + +// CDF = I(x;a,b) = Bₓ(a,b)/B(a,b) +double incompleteBeta = Beta.Incomplete(a, b, x); +double betaFunc = Beta.Function(a, b); +double cdf = incompleteBeta / betaFunc; + +Console.WriteLine($"Beta({a},{b}) CDF at x={x}:"); +Console.WriteLine($" P(X ≤ {x}) = {cdf:F6}"); + +// Median +double median = Beta.IncompleteInverse(a, b, 0.5); +Console.WriteLine($" Median = {median:F4}"); + +// 90% confidence interval +double lower = Beta.IncompleteInverse(a, b, 0.05); +double upper = Beta.IncompleteInverse(a, b, 0.95); +Console.WriteLine($" 90% CI: [{lower:F4}, {upper:F4}]"); +``` + +### Example 5: Stirling's Approximation + +```cs +// Stirling's approximation for large 
factorials +// ln(n!) ≈ n·ln(n) - n + 0.5·ln(2πn) + +int n = 50; + +double exactLogFact = Factorial.LogFactorial(n); +double stirling = n * Math.Log(n) - n + 0.5 * Math.Log(2 * Math.PI * n); + +Console.WriteLine($"ln({n}!):"); +Console.WriteLine($" Exact = {exactLogFact:F6}"); +Console.WriteLine($" Stirling = {stirling:F6}"); +Console.WriteLine($" Error = {Math.Abs(exactLogFact - stirling):F6}"); + +// Stirling is very accurate for large n +double relativeError = Math.Abs(exactLogFact - stirling) / exactLogFact; +Console.WriteLine($" Relative error = {relativeError:P4}"); +``` + +## Function Summary + +| Function | Purpose | Key Methods | +|----------|---------|-------------| +| **Gamma** | Factorial extension | `Function()`, `LogGamma()`, `Digamma()` | +| **Incomplete Gamma** | Chi-squared, Gamma CDF | `LowerIncomplete()`, `UpperIncomplete()` | +| **Beta** | Beta distribution | `Function()`, `Incomplete()` | +| **Error** | Normal distribution | `Function()`, `Erfc()`, `InverseErf()` | +| **Factorial** | Combinatorics | `Function()`, `BinomialCoefficient()` | + +## Implementation Notes + +- All functions use high-precision approximations +- Log-space variants prevent overflow for large arguments +- Inverse functions use Newton-Raphson iteration +- Special care for edge cases and numerical stability + +--- + +[← Previous: Linear Algebra](linear-algebra.md) | [Back to Index](../index.md) | [Next: ODE Solvers →](ode-solvers.md) diff --git a/docs/references.md b/docs/references.md new file mode 100644 index 00000000..1a586bbb --- /dev/null +++ b/docs/references.md @@ -0,0 +1,203 @@ +# References + +This is a consolidated bibliography of all references cited throughout the ***Numerics*** library documentation. + +--- + +## General Numerical Methods + +[1] Press, W. H., Teukolsky, S. A., Vetterling, W. T., & Flannery, B. P. (2007). *Numerical Recipes: The Art of Scientific Computing* (3rd ed.). Cambridge University Press. + +[2] Burden, R. L., & Faires, J. D. (2010). *Numerical Analysis* (9th ed.). Brooks/Cole. + +[3] Stoer, J., & Bulirsch, R. (2002). *Introduction to Numerical Analysis* (3rd ed.). Springer. + +--- + +## Probability and Statistics + +[4] Johnson, N. L., Kotz, S., & Balakrishnan, N. (1994). *Continuous Univariate Distributions* (2nd ed., Vols. 1-2). Wiley. + +[5] Hosking, J. R. M. (1990). L-moments: Analysis and estimation of distributions using linear combinations of order statistics. *Journal of the Royal Statistical Society: Series B*, 52(1), 105-124. + +[6] Hosking, J. R. M., & Wallis, J. R. (1997). *Regional Frequency Analysis: An Approach Based on L-Moments*. Cambridge University Press. + +[7] Coles, S. (2001). *An Introduction to Statistical Modeling of Extreme Values*. Springer. + +[8] Pawitan, Y. (2001). *In All Likelihood: Statistical Modelling and Inference Using Likelihood*. Oxford University Press. + +[9] Wilks, D. S. (2011). *Statistical Methods in the Atmospheric Sciences* (3rd ed.). Academic Press. + +--- + +## Hydrology and Water Resources + +[10] England, J. F., et al. (2019). Guidelines for Determining Flood Flow Frequency—Bulletin 17C. *U.S. Geological Survey Techniques and Methods*, Book 4, Chapter B5. + +[11] Stedinger, J. R., Vogel, R. M., & Foufoula-Georgiou, E. (1993). Frequency analysis of extreme events. In D. R. Maidment (Ed.), *Handbook of Hydrology* (Chapter 18). McGraw-Hill. + +[12] Helsel, D. R., Hirsch, R. M., Ryberg, K. R., Archfield, S. A., & Gilroy, E. J. (2020). *Statistical Methods in Water Resources*. U.S. 
Geological Survey Techniques and Methods, Book 4, Chapter A3. + +[13] Cunnane, C. (1978). Unbiased plotting positions—A review. *Journal of Hydrology*, 37(3-4), 205-222. + +[14] Cohn, T. A., England, J. F., Berenbrock, C. E., Mason, R. R., Stedinger, J. R., & Lamontagne, J. R. (2013). A generalized Grubbs-Beck test statistic for detecting multiple potentially influential low outliers in flood series. *Water Resources Research*, 49(8), 5047-5058. + +[15] Eckhardt, K. (2005). How to construct recursive digital filters for baseflow separation. *Hydrological Processes*, 19(2), 507-515. + +--- + +## Model Evaluation + +[16] Moriasi, D. N., Arnold, J. G., Van Liew, M. W., Bingner, R. L., Harmel, R. D., & Veith, T. L. (2007). Model evaluation guidelines for systematic quantification of accuracy in watershed simulations. *Transactions of the ASABE*, 50(3), 885-900. + +[17] Moriasi, D. N., Gitau, M. W., Pai, N., & Daggupati, P. (2015). Hydrologic and water quality models: Performance measures and evaluation criteria. *Transactions of the ASABE*, 58(6), 1763-1785. + +[18] Nash, J. E., & Sutcliffe, J. V. (1970). River flow forecasting through conceptual models part I—A discussion of principles. *Journal of Hydrology*, 10(3), 282-290. + +[19] Gupta, H. V., Kling, H., Yilmaz, K. K., & Martinez, G. F. (2009). Decomposition of the mean squared error and NSE performance criteria: Implications for improving hydrological modelling. *Journal of Hydrology*, 377(1-2), 80-91. + +[20] Legates, D. R., & McCabe, G. J. (1999). Evaluating the use of "goodness-of-fit" measures in hydrologic and hydroclimatic model validation. *Water Resources Research*, 35(1), 233-241. + +[21] Burnham, K. P., & Anderson, D. R. (2002). *Model Selection and Multimodel Inference: A Practical Information-Theoretic Approach* (2nd ed.). Springer. + +--- + +## Copulas and Multivariate Analysis + +[22] Nelsen, R. B. (2006). *An Introduction to Copulas* (2nd ed.). Springer. + +[23] Joe, H. (1997). *Multivariate Models and Dependence Concepts*. Chapman & Hall. + +[24] Genest, C., & Favre, A.-C. (2007). Everything you always wanted to know about copula modeling but were afraid to ask. *Journal of Hydrologic Engineering*, 12(4), 347-368. + +[25] Salvadori, G., De Michele, C., Kottegoda, N. T., & Rosso, R. (2007). *Extremes in Nature: An Approach Using Copulas*. Springer. + +[26] Salvadori, G., & De Michele, C. (2004). Frequency analysis via copulas: Theoretical aspects and applications to hydrological events. *Water Resources Research*, 40(12). + +--- + +## Numerical Integration + +[27] Piessens, R., de Doncker-Kapenga, E., Überhuber, C. W., & Kahaner, D. K. (1983). *QUADPACK: A Subroutine Package for Automatic Integration*. Springer. + +[28] Press, W. H., & Farrar, G. R. (1990). Recursive stratified sampling for multidimensional Monte Carlo integration. *Computers in Physics*, 4(2), 190-195. + +[29] Lepage, G. P. (1978). A new algorithm for adaptive multidimensional integration. *Journal of Computational Physics*, 27(2), 192-203. + +--- + +## Numerical Differentiation + +[30] Ridders, C. J. F. (1982). Accurate computation of F'(x) and F'(x)F''(x). *Advances in Engineering Software*, 4(2), 75-76. + +--- + +## Optimization + +[31] Nocedal, J., & Wright, S. J. (2006). *Numerical Optimization* (2nd ed.). Springer. + +[32] Nelder, J. A., & Mead, R. (1965). A simplex method for function minimization. *The Computer Journal*, 7(4), 308-313. + +[33] Storn, R., & Price, K. (1997). 
Differential evolution—A simple and efficient heuristic for global optimization over continuous spaces. *Journal of Global Optimization*, 11(4), 341-359. + +[34] Duan, Q., Sorooshian, S., & Gupta, V. K. (1994). Optimal use of the SCE-UA global optimization method for calibrating watershed models. *Journal of Hydrology*, 158(3-4), 265-284. + +[35] Kennedy, J., & Eberhart, R. (1995). Particle swarm optimization. *Proceedings of ICNN'95*, 4, 1942-1948. + +--- + +## Linear Algebra + +[36] Golub, G. H., & Van Loan, C. F. (2013). *Matrix Computations* (4th ed.). Johns Hopkins University Press. + +[37] Trefethen, L. N., & Bau, D. (1997). *Numerical Linear Algebra*. SIAM. + +--- + +## Root Finding + +[38] Brent, R. P. (1973). *Algorithms for Minimization without Derivatives*. Prentice-Hall. + +--- + +## Interpolation + +[39] Akima, H. (1970). A new method of interpolation and smooth curve fitting based on local procedures. *Journal of the ACM*, 17(4), 589-602. + +[40] de Boor, C. (2001). *A Practical Guide to Splines* (Rev. ed.). Springer. + +--- + +## Random Number Generation + +[41] Matsumoto, M., & Nishimura, T. (1998). Mersenne Twister: A 623-dimensionally equidistributed uniform pseudo-random number generator. *ACM Transactions on Modeling and Computer Simulation*, 8(1), 3-30. + +[42] Niederreiter, H. (1992). *Random Number Generation and Quasi-Monte Carlo Methods*. SIAM. + +[43] McKay, M. D., Beckman, R. J., & Conover, W. J. (1979). A comparison of three methods for selecting values of input variables in the analysis of output from a computer code. *Technometrics*, 21(2), 239-245. + +[44] Owen, A. B. (2003). Quasi-Monte Carlo sampling. In *Monte Carlo Ray Tracing: Siggraph 2003 Course 44*, 69-88. + +--- + +## MCMC and Bayesian Methods + +[45] Gelman, A., Carlin, J. B., Stern, H. S., Dunson, D. B., Vehtari, A., & Rubin, D. B. (2013). *Bayesian Data Analysis* (3rd ed.). CRC Press. + +[46] Robert, C. P., & Casella, G. (2004). *Monte Carlo Statistical Methods* (2nd ed.). Springer. + +[47] Haario, H., Saksman, E., & Tamminen, J. (2001). An adaptive Metropolis algorithm. *Bernoulli*, 7(2), 223-242. + +[48] ter Braak, C. J. F., & Vrugt, J. A. (2008). Differential Evolution Markov Chain with snooker updater and fewer chains. *Statistics and Computing*, 18(4), 435-446. + +[49] Neal, R. M. (2011). MCMC using Hamiltonian dynamics. In *Handbook of Markov Chain Monte Carlo* (pp. 113-162). CRC Press. + +[50] Vehtari, A., Gelman, A., Simpson, D., Carpenter, B., & Bürkner, P. C. (2021). Rank-normalization, folding, and localization: An improved R-hat for assessing convergence of MCMC. *Bayesian Analysis*, 16(2), 667-718. + +[51] Vrugt, J. A. (2016). Markov chain Monte Carlo simulation using the DREAM software package: Theory, concepts, and MATLAB implementation. *Environmental Modelling & Software*, 75, 273-316. + +--- + +## Uncertainty Analysis + +[52] Efron, B., & Tibshirani, R. J. (1993). *An Introduction to the Bootstrap*. Chapman & Hall. + +[53] Stedinger, J. R. (1983). Confidence intervals for design events. *Journal of Hydraulic Engineering*, 109(1), 13-27. + +[54] Hirsch, R. M., & Stedinger, J. R. (1987). Plotting positions for historical floods and their precision. *Water Resources Research*, 23(4), 715-727. + +--- + +## Special Distributions + +[55] Weibull, W. (1951). A statistical distribution function of wide applicability. *Journal of Applied Mechanics*, 18(3), 293-297. + +[56] Vose, D. (2008). *Risk Analysis: A Quantitative Guide* (3rd ed.). Wiley. + +[57] McLachlan, G., & Peel, D. (2000). 
*Finite Mixture Models*. Wiley. + +[58] Silverman, B. W. (1986). *Density Estimation for Statistics and Data Analysis*. Chapman & Hall. + +--- + +## Goodness-of-Fit Tests + +[59] D'Agostino, R. B., & Stephens, M. A. (1986). *Goodness-of-Fit Techniques*. Marcel Dekker. + +--- + +## Time Series + +[60] Box, G. E. P., Jenkins, G. M., Reinsel, G. C., & Ljung, G. M. (2015). *Time Series Analysis: Forecasting and Control* (5th ed.). Wiley. + +[61] Box, G. E. P., & Cox, D. R. (1964). An analysis of transformations. *Journal of the Royal Statistical Society: Series B*, 26(2), 211-252. + +--- + +## Data Sources + +[62] U.S. Geological Survey. *USGS Water Services*. https://waterservices.usgs.gov/ + +[63] Environment and Climate Change Canada. *Historical Hydrometric Data*. https://wateroffice.ec.gc.ca/ + +[64] Australian Bureau of Meteorology. *Water Data Online*. http://www.bom.gov.au/waterdata/ diff --git a/docs/sampling/convergence-diagnostics.md b/docs/sampling/convergence-diagnostics.md new file mode 100644 index 00000000..dad65a55 --- /dev/null +++ b/docs/sampling/convergence-diagnostics.md @@ -0,0 +1,515 @@ +# MCMC Convergence Diagnostics + +[← Previous: MCMC Methods](mcmc.md) | [Back to Index](../index.md) + +Convergence diagnostics assess whether MCMC samplers have reached their stationary distribution and provide reliable samples from the posterior. The ***Numerics*** library provides essential diagnostic tools including Gelman-Rubin statistic and Effective Sample Size. + +## Why Convergence Matters + +MCMC samplers: +1. Start from arbitrary initial values +2. Explore parameter space stochastically +3. Eventually converge to target distribution +4. Must discard "burn-in" samples + +**Key questions:** +- Have chains converged to stationary distribution? +- How many independent samples do we have? +- Is the warmup period sufficient? + +## Gelman-Rubin Statistic (R̂) + +The Gelman-Rubin diagnostic compares within-chain and between-chain variance [[1]](#1). Values near 1.0 indicate convergence. + +### Computing R̂ + +```cs +using Numerics.Sampling.MCMC; +using Numerics.Mathematics.Optimization; + +// Run sampler with multiple chains +var sampler = new ARWMH(priors, logLikelihood); +sampler.NumberOfChains = 4; +sampler.WarmupIterations = 2000; +sampler.Iterations = 5000; +sampler.Sample(); + +// Get chains +var chains = new List>(); +// Extract chains from sampler output +// (Implementation depends on sampler structure) + +// Compute Gelman-Rubin for each parameter +int warmup = sampler.WarmupIterations; +double[] rHat = MCMCDiagnostics.GelmanRubin(chains, warmup); + +Console.WriteLine("Gelman-Rubin Diagnostics (R̂):"); +for (int i = 0; i < rHat.Length; i++) +{ + Console.WriteLine($" Parameter {i}: R̂ = {rHat[i]:F4}"); + + if (rHat[i] < 1.1) + Console.WriteLine(" ✓ Converged"); + else if (rHat[i] < 1.2) + Console.WriteLine(" ⚠ Marginal - run longer"); + else + Console.WriteLine(" ✗ Not converged - investigate"); +} +``` + +### Interpretation + +| R̂ Value | Interpretation | Action | +|---------|---------------|--------| +| R̂ < 1.01 | Excellent convergence | Proceed | +| R̂ < 1.1 | Good convergence | Safe to use | +| 1.1 ≤ R̂ < 1.2 | Marginal | Run longer | +| R̂ ≥ 1.2 | Poor convergence | Investigate | + +**Formula:** +``` +R̂ = √(Var_total / W) + +Where: +- W = within-chain variance (average) +- B = between-chain variance +- Var_total = ((n-1)/n)W + (1/n)B +``` + +### Common Causes of High R̂ + +1. **Insufficient warmup** - Chains haven't reached stationarity +2. 
**Poor mixing** - Chains explore slowly +3. **Multimodal posterior** - Chains stuck in different modes +4. **Bad initialization** - Starting values too extreme +5. **Wrong sampler** - Algorithm not suited to problem + +## Effective Sample Size (ESS) + +ESS quantifies number of independent samples, accounting for autocorrelation [[2]](#2). + +### Computing ESS + +```cs +// For single parameter series +double[] samples = /* Extract parameter samples from chain */; + +double ess = MCMCDiagnostics.EffectiveSampleSize(samples); + +Console.WriteLine($"Effective Sample Size: {ess:F0}"); +Console.WriteLine($"Actual samples: {samples.Length}"); +Console.WriteLine($"Efficiency: {ess / samples.Length:P1}"); + +// Rule of thumb: ESS > 100 per parameter +if (ess < 100) + Console.WriteLine("⚠ Warning: Low ESS - run longer or thin more"); +``` + +### ESS Across All Parameters + +```cs +// Compute ESS for all parameters across chains +double[] essValues = MCMCDiagnostics.EffectiveSampleSize(chains, out double[][,] avgACF); + +Console.WriteLine("Effective Sample Size by Parameter:"); +for (int i = 0; i < essValues.Length; i++) +{ + Console.WriteLine($" θ{i}: ESS = {essValues[i]:F0}"); +} + +// Check minimum ESS +double minESS = essValues.Min(); +Console.WriteLine($"\nMinimum ESS: {minESS:F0}"); + +if (minESS > 400) + Console.WriteLine("✓ Excellent: ESS > 400"); +else if (minESS > 100) + Console.WriteLine("✓ Good: ESS > 100"); +else + Console.WriteLine("✗ Poor: ESS < 100 - need more samples"); +``` + +### Interpretation + +**Guidelines:** +- **ESS > 400**: Excellent - precise estimates +- **ESS > 100**: Good - adequate for most purposes +- **ESS < 100**: Poor - increase iterations or improve mixing + +**Autocorrelation impact:** +- High autocorrelation → Low ESS → Need more iterations +- Good mixing → High ESS → Efficient sampling + +### ESS Formula + +``` +ESS = N / (1 + 2·Σ ρ_k) + +Where: +- N = number of samples +- ρ_k = autocorrelation at lag k +- Sum until ρ_k becomes negligible +``` + +## Autocorrelation + +### Visualizing Autocorrelation + +```cs +// Compute autocorrelation function +// (avgACF from ESS calculation above) + +Console.WriteLine("Autocorrelation at selected lags:"); +Console.WriteLine("Lag | ACF"); +Console.WriteLine("-----|------"); + +for (int lag = 0; lag <= 20; lag += 5) +{ + // Access from avgACF[parameter][chain, lag] + double acf = avgACF[0][0, lag]; // Parameter 0, Chain 0 + Console.WriteLine($"{lag,4} | {acf,5:F3}"); +} + +// Ideal: ACF drops quickly to zero +// Problem: ACF remains high (slow decorrelation) +``` + +### Autocorrelation Guidelines + +| Lag-1 ACF | Interpretation | ESS Impact | +|-----------|---------------|------------| +| < 0.1 | Excellent mixing | ESS ≈ N | +| 0.1-0.3 | Good mixing | ESS ≈ 0.5N | +| 0.3-0.6 | Moderate | ESS ≈ 0.2N | +| > 0.6 | Poor mixing | ESS << 0.1N | + +## Minimum Sample Size + +Determine required sample size for desired precision: + +```cs +// For quantile estimation +double quantile = 0.99; // 100-year event +double tolerance = 0.01; // ±1% of true quantile +double probability = 0.95; // 95% confidence + +int minN = MCMCDiagnostics.MinimumSampleSize(quantile, tolerance, probability); + +Console.WriteLine($"Minimum sample size needed:"); +Console.WriteLine($" For {quantile:P1} quantile"); +Console.WriteLine($" With ±{tolerance:P1} tolerance"); +Console.WriteLine($" At {probability:P0} confidence"); +Console.WriteLine($" Need N ≥ {minN}"); + +// Adjust MCMC iterations accordingly +``` + +## Practical Diagnostics Workflow + +### Complete 
Convergence Check + +```cs +using Numerics.Sampling.MCMC; +using Numerics.Data.Statistics; + +// Step 1: Run sampler +var sampler = new ARWMH(priors, logLikelihood); +sampler.NumberOfChains = 4; +sampler.WarmupIterations = 2000; +sampler.Iterations = 5000; +sampler.ThinningInterval = 10; +sampler.Sample(); + +Console.WriteLine("MCMC Convergence Diagnostics"); +Console.WriteLine("=" + new string('=', 60)); + +// Step 2: Extract samples +var samples = sampler.ParameterSets; +int nParams = samples[0].Values.Length; + +Console.WriteLine($"\nSampling Summary:"); +Console.WriteLine($" Chains: {sampler.NumberOfChains}"); +Console.WriteLine($" Warmup: {sampler.WarmupIterations}"); +Console.WriteLine($" Iterations: {sampler.Iterations}"); +Console.WriteLine($" Thinning: {sampler.ThinningInterval}"); +Console.WriteLine($" Total samples: {samples.Length}"); + +// Step 3: Check Gelman-Rubin +Console.WriteLine($"\nGelman-Rubin Statistics:"); +var chains = ExtractChains(sampler); // Helper function +double[] rhat = MCMCDiagnostics.GelmanRubin(chains, sampler.WarmupIterations); + +bool converged = true; +for (int i = 0; i < nParams; i++) +{ + string status = rhat[i] < 1.1 ? "✓" : "✗"; + Console.WriteLine($" {status} θ{i}: R̂ = {rhat[i]:F4}"); + if (rhat[i] >= 1.1) converged = false; +} + +// Step 4: Check ESS +Console.WriteLine($"\nEffective Sample Size:"); +double[] ess = MCMCDiagnostics.EffectiveSampleSize(chains, out double[][,] acf); + +int minESS = (int)ess.Min(); +for (int i = 0; i < nParams; i++) +{ + double efficiency = ess[i] / samples.Length; + Console.WriteLine($" θ{i}: ESS = {ess[i]:F0} ({efficiency:P1} efficiency)"); +} + +// Step 5: Overall assessment +Console.WriteLine($"\nOverall Assessment:"); +if (converged && minESS > 100) + Console.WriteLine(" ✓ PASS: Chains converged, sufficient samples"); +else if (!converged) + Console.WriteLine(" ✗ FAIL: Chains not converged - run longer"); +else + Console.WriteLine(" ⚠ MARGINAL: Converged but low ESS - consider more iterations"); + +// Step 6: Recommendations +if (minESS < 100) +{ + int recommended = (int)(sampler.Iterations * 100.0 / minESS); + Console.WriteLine($"\nRecommendation: Increase iterations to ~{recommended}"); +} +``` + +## Visual Diagnostics + +While the library doesn't provide plotting, these are essential checks: + +### Trace Plots + +Plot parameter values vs. 
iteration: + +```cs +Console.WriteLine("Export trace data for plotting:"); +Console.WriteLine("Iteration | Parameter Values"); + +for (int i = 0; i < Math.Min(samples.Length, 100); i++) +{ + Console.Write($"{i,9} | "); + foreach (var val in samples[i].Values) + { + Console.Write($"{val,8:F3} "); + } + Console.WriteLine(); +} + +Console.WriteLine("\nGood traces: Stationary, well-mixed 'hairy caterpillar'"); +Console.WriteLine("Bad traces: Trending, stuck, or oscillating patterns"); +``` + +### Posterior Distributions + +```cs +// Export posterior samples for histogram +for (int param = 0; param < nParams; param++) +{ + var values = samples.Select(s => s.Values[param]).ToArray(); + + Console.WriteLine($"\nParameter {param} summary:"); + Console.WriteLine($" Mean: {values.Average():F4}"); + Console.WriteLine($" Median: {Statistics.Percentile(values.OrderBy(v => v).ToArray(), 50):F4}"); + Console.WriteLine($" SD: {Statistics.StandardDeviation(values):F4}"); + Console.WriteLine($" 95% CI: [{Statistics.Percentile(values.OrderBy(v => v).ToArray(), 2.5):F4}, " + + $"{Statistics.Percentile(values.OrderBy(v => v).ToArray(), 97.5):F4}]"); +} +``` + +## Troubleshooting Convergence Issues + +### Problem: High R̂ (> 1.1) + +**Diagnosis:** +```cs +if (rhat.Max() > 1.1) +{ + Console.WriteLine("Convergence issue detected"); + Console.WriteLine("Possible causes:"); + Console.WriteLine(" 1. Insufficient warmup"); + Console.WriteLine(" 2. Poor mixing"); + Console.WriteLine(" 3. Multimodal posterior"); + Console.WriteLine(" 4. Bad initialization"); +} +``` + +**Solutions:** +1. **Increase warmup** + ```cs + sampler.WarmupIterations = 5000; // Double it + ``` + +2. **Try different sampler** + ```cs + // Switch from RWMH to DEMCz for better mixing + var betterSampler = new DEMCz(priors, logLikelihood); + ``` + +3. **Check initialization** + ```cs + // Ensure initial values are reasonable + // Not too far from expected values + ``` + +### Problem: Low ESS (< 100) + +**Diagnosis:** +```cs +if (ess.Min() < 100) +{ + Console.WriteLine($"Low ESS detected: {ess.Min():F0}"); + Console.WriteLine("Chains are highly autocorrelated"); +} +``` + +**Solutions:** +1. **Increase iterations** + ```cs + int factor = (int)Math.Ceiling(100.0 / ess.Min()); + sampler.Iterations *= factor; + Console.WriteLine($"Suggested iterations: {sampler.Iterations}"); + ``` + +2. **Increase thinning** + ```cs + sampler.ThinningInterval = 20; // Keep every 20th sample + ``` + +3. **Improve sampler** + ```cs + // Use ARWMH instead of RWMH + // Use DEMCz for high dimensions + ``` + +### Problem: Multimodal Posterior + +**Diagnosis:** +```cs +// Check if chains explore different modes +foreach (int param in new[] { 0, 1, 2 }) +{ + var chainMeans = chains.Select(chain => + chain.Select(ps => ps.Values[param]).Average()).ToArray(); + + double meanRange = chainMeans.Max() - chainMeans.Min(); + double overallSD = Statistics.StandardDeviation( + samples.Select(s => s.Values[param]).ToArray()); + + if (meanRange > 2 * overallSD) + { + Console.WriteLine($"Parameter {param} may have multiple modes"); + Console.WriteLine(" Chain means differ substantially"); + } +} +``` + +**Solutions:** +1. **Use population-based sampler** + ```cs + var demcz = new DEMCz(priors, logLikelihood); + demcz.NumberOfChains = 10; // More chains + ``` + +2. **Check for model identification issues** + +3. **Consider transforming parameters** + +## Best Practices + +### 1. 
Always Run Multiple Chains + +```cs +// Minimum 4 chains +sampler.NumberOfChains = 4; + +// Benefits: +// - Can compute R̂ +// - Detect multimodality +// - More robust inference +``` + +### 2. Adequate Warmup + +```cs +// Rule of thumb: Warmup ≥ 50% of iterations +sampler.WarmupIterations = Math.Max(2000, sampler.Iterations / 2); +``` + +### 3. Check Diagnostics BEFORE Inference + +```cs +// Always check before using samples +bool ready = (rhat.Max() < 1.1) && (ess.Min() > 100); + +if (!ready) +{ + Console.WriteLine("⚠ WARNING: Do not use these samples!"); + Console.WriteLine("Run diagnostics and extend sampling"); + return; +} + +// Proceed with inference... +``` + +### 4. Document Settings + +```cs +Console.WriteLine("MCMC Configuration:"); +Console.WriteLine($" Sampler: {sampler.GetType().Name}"); +Console.WriteLine($" Chains: {sampler.NumberOfChains}"); +Console.WriteLine($" Warmup: {sampler.WarmupIterations}"); +Console.WriteLine($" Iterations: {sampler.Iterations}"); +Console.WriteLine($" Thinning: {sampler.ThinningInterval}"); +Console.WriteLine($" Total samples: {samples.Length}"); +Console.WriteLine($" R̂ range: [{rhat.Min():F3}, {rhat.Max():F3}]"); +Console.WriteLine($" ESS range: [{ess.Min():F0}, {ess.Max():F0}]"); +``` + +### 5. Iterative Improvement + +```cs +int iteration = 1; +while (rhat.Max() > 1.05 || ess.Min() < 200) +{ + Console.WriteLine($"\nIteration {iteration}: Extending sampling..."); + + sampler.Iterations += 5000; + sampler.Sample(); + + // Recompute diagnostics + chains = ExtractChains(sampler); + rhat = MCMCDiagnostics.GelmanRubin(chains, sampler.WarmupIterations); + ess = MCMCDiagnostics.EffectiveSampleSize(chains, out _); + + iteration++; + + if (iteration > 5) + { + Console.WriteLine("Convergence issues persist - check model/sampler"); + break; + } +} +``` + +## Summary + +| Diagnostic | Target | Action if Not Met | +|------------|--------|------------------| +| **R̂** | < 1.1 | Increase warmup, run longer | +| **ESS** | > 100 (per param) | Increase iterations, improve mixing | +| **Visual traces** | Stationary | Check initialization, try different sampler | +| **ACF** | Drops quickly | Increase thinning, better sampler | + +--- + +## References + +[1] Gelman, A., & Rubin, D. B. (1992). Inference from iterative simulation using multiple sequences. *Statistical Science*, 7(4), 457-472. + +[2] Gelman, A., Carlin, J. B., Stern, H. S., Dunson, D. B., Vehtari, A., & Rubin, D. B. (2013). *Bayesian Data Analysis* (3rd ed.). CRC Press. + +--- + +[← Previous: MCMC Methods](mcmc.md) | [Back to Index](../index.md) diff --git a/docs/sampling/mcmc.md b/docs/sampling/mcmc.md new file mode 100644 index 00000000..df78f065 --- /dev/null +++ b/docs/sampling/mcmc.md @@ -0,0 +1,632 @@ +# MCMC Sampling + +[← Previous: Goodness-of-Fit](../statistics/goodness-of-fit.md) | [Back to Index](../index.md) | [Next: Convergence Diagnostics →](convergence-diagnostics.md) + +Markov Chain Monte Carlo (MCMC) methods sample from complex posterior distributions that are difficult to sample directly. The ***Numerics*** library provides multiple MCMC samplers for Bayesian inference, uncertainty quantification, and parameter estimation with full posterior distributions [[1]](#1). 
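+Every sampler below targets the same quantity: the posterior distribution given by Bayes' theorem. Written in log form, which is how the log-likelihood function described later on this page is evaluated, the target is (up to an additive constant):
+
+```math
+\log p(\theta \mid \text{data}) = \log p(\text{data} \mid \theta) + \log p(\theta) + \text{const}
+```
+
+The samplers differ only in how they propose and accept moves through $\theta$-space while leaving this target invariant.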
+ +## Available MCMC Samplers + +| Sampler | Full Name | Best For | Key Features | +|---------|-----------|----------|--------------| +| **RWMH** | Random Walk Metropolis-Hastings | General purpose, small dimensions | Simple, robust baseline | +| **ARWMH** | Adaptive Random Walk M-H | Medium dimensions (2-20) | Self-tuning proposal | +| **DEMCz** | Differential Evolution MCMC | High dimensions, multimodal | Population-based, efficient | +| **DEMCzs** | DE-MCMC with snooker update | Very high dimensions | Enhanced DE-MCMC | +| **HMC** | Hamiltonian Monte Carlo | Smooth posteriors | Uses gradient information | +| **Gibbs** | Gibbs Sampler | Conditional distributions available | No rejections | + +## Common MCMC Interface + +All samplers inherit from `MCMCSampler` base class with common properties: + +```cs +// Configuration +int PRNGSeed // Random seed (default: 12345) +int InitialIterations // Initialization phase (default: 10) +int WarmupIterations // Burn-in period (default: 1750) +int Iterations // Main sampling (default: 3500) +int NumberOfChains // Parallel chains (default: 4) +int ThinningInterval // Keep every nth sample (default: 20) + +// Inputs +List PriorDistributions +LogLikelihood LogLikelihoodFunction + +// Outputs (after sampling) +ParameterSet[] ParameterSets // All samples +double[] LogLikelihoods // Log-likelihood values +double[] LogPosteriors // Log-posterior values +int[] SampleCount // Samples per chain +``` + +## Defining the Model + +### Step 1: Define Prior Distributions + +```cs +using Numerics.Distributions; +using Numerics.Sampling.MCMC; + +// Example: Linear regression y = a + b*x + ε +// Parameters: [a (intercept), b (slope), σ (noise)] + +var priors = new List +{ + new Normal(0, 10), // Intercept: N(0, 10) + new Normal(0, 10), // Slope: N(0, 10) + new Uniform(0.1, 5.0) // Noise std dev: Uniform(0.1, 5) +}; + +Console.WriteLine("Prior Distributions:"); +for (int i = 0; i < priors.Count; i++) +{ + Console.WriteLine($" θ{i}: {priors[i].DisplayName}"); +} +``` + +### Step 2: Define Log-Likelihood Function + +The log-likelihood function computes the log-probability of the data given parameters: + +```cs +// Observed data +double[] xData = { 1, 2, 3, 4, 5, 6, 7, 8, 9, 10 }; +double[] yData = { 2.5, 4.8, 6.2, 8.5, 10.1, 12.8, 14.2, 16.5, 18.1, 20.3 }; + +// Define log-likelihood function +LogLikelihood logLikelihood = (parameters) => +{ + double a = parameters[0]; // Intercept + double b = parameters[1]; // Slope + double sigma = parameters[2]; // Noise std dev + + // Prior log-likelihood + double logPrior = priors[0].LogPDF(a) + + priors[1].LogPDF(b) + + priors[2].LogPDF(sigma); + + // Data log-likelihood: y ~ N(a + b*x, σ²) + double logData = 0; + for (int i = 0; i < xData.Length; i++) + { + double predicted = a + b * xData[i]; + double residual = yData[i] - predicted; + logData += -0.5 * Math.Log(2 * Math.PI * sigma * sigma) - + 0.5 * residual * residual / (sigma * sigma); + } + + return logPrior + logData; +}; +``` + +**Important:** The log-likelihood function should return the sum of: +1. Log-prior probability: `Σ log(p(θᵢ))` +2. 
Log-data likelihood: `log(p(data|θ))` + +This gives the log-posterior: `log(p(θ|data)) ∝ log(p(θ)) + log(p(data|θ))` + +## Random Walk Metropolis-Hastings (RWMH) + +The simplest and most robust MCMC algorithm [[2]](#2): + +```cs +using Numerics.Sampling.MCMC; + +// Create sampler +var rwmh = new RWMH(priors, logLikelihood); + +// Configure sampling +rwmh.PRNGSeed = 12345; +rwmh.InitialIterations = 50; // 5x parameters +rwmh.WarmupIterations = 2000; // Burn-in +rwmh.Iterations = 5000; // Main sampling +rwmh.NumberOfChains = 4; // Parallel chains +rwmh.ThinningInterval = 10; // Keep every 10th sample + +// Run sampler +Console.WriteLine("Running RWMH sampler..."); +rwmh.Sample(); + +// Access results +var samples = rwmh.ParameterSets; +Console.WriteLine($"Generated {samples.Length} samples"); +Console.WriteLine($"Samples per chain: {string.Join(", ", rwmh.SampleCount)}"); + +// Posterior statistics +for (int i = 0; i < priors.Count; i++) +{ + var values = samples.Select(s => s.Values[i]).ToArray(); + double mean = values.Average(); + double std = Statistics.StandardDeviation(values); + double q025 = Statistics.Percentile(values.OrderBy(x => x).ToArray(), 2.5); + double q975 = Statistics.Percentile(values.OrderBy(x => x).ToArray(), 97.5); + + Console.WriteLine($"θ{i}: {mean:F3} ± {std:F3}, 95% CI: [{q025:F3}, {q975:F3}]"); +} +``` + +**When to use RWMH:** +- General purpose baseline +- Low-dimensional problems (< 10 parameters) +- When simplicity and robustness are priorities +- As a reference for comparing other samplers + +## Adaptive Random Walk M-H (ARWMH) + +ARWMH automatically tunes the proposal distribution during warmup [[3]](#3): + +```cs +var arwmh = new ARWMH(priors, logLikelihood); + +// Configuration +arwmh.PRNGSeed = 12345; +arwmh.WarmupIterations = 2000; // Adaptation happens here +arwmh.Iterations = 5000; +arwmh.NumberOfChains = 4; + +Console.WriteLine("Running Adaptive RWMH sampler..."); +arwmh.Sample(); + +var samples = arwmh.ParameterSets; +Console.WriteLine($"Generated {samples.Length} samples"); + +// ARWMH adapts proposal covariance to achieve ~23% acceptance rate +Console.WriteLine("ARWMH automatically tuned proposal during warmup"); +``` + +**When to use ARWMH:** +- Medium-dimensional problems (2-20 parameters) +- When you don't want to manually tune proposals +- Correlated parameters +- Default choice for most applications + +**Advantages:** +- No manual tuning required +- Adapts to parameter correlations +- Generally more efficient than fixed RWMH + +## Differential Evolution MCMC (DEMCz) + +Population-based sampler using differential evolution [[4]](#4): + +```cs +var demcz = new DEMCz(priors, logLikelihood); + +// Configuration +demcz.PRNGSeed = 12345; +demcz.NumberOfChains = 10; // More chains for population diversity +demcz.WarmupIterations = 2000; +demcz.Iterations = 5000; + +Console.WriteLine("Running DE-MCMC sampler..."); +demcz.Sample(); + +var samples = demcz.ParameterSets; +Console.WriteLine($"Generated {samples.Length} samples from {demcz.NumberOfChains} chains"); + +// DEMCz is particularly effective for multimodal posteriors +``` + +**When to use DEMCz:** +- High-dimensional problems (20+ parameters) +- Multimodal posteriors +- Complex posterior geometry +- When ARWMH struggles with convergence + +**Advantages:** +- Excellent for high dimensions +- Handles multimodal distributions +- Robust to initialization +- Self-tuning proposals from population + +### DEMCz with Snooker Update (DEMCzs) + +Enhanced version with improved mixing: + +```cs +var demczs = 
new DEMCzs(priors, logLikelihood); + +demczs.NumberOfChains = 12; // Even more chains recommended +demczs.WarmupIterations = 2000; +demczs.Iterations = 5000; + +Console.WriteLine("Running DE-MCMC with snooker update..."); +demczs.Sample(); + +// Snooker update provides better exploration in very high dimensions +``` + +## Hamiltonian Monte Carlo (HMC) + +Uses gradient information for efficient sampling [[5]](#5): + +```cs +var hmc = new HMC(priors, logLikelihood); + +// HMC-specific settings +hmc.NumberOfChains = 4; +hmc.WarmupIterations = 1000; // HMC converges faster +hmc.Iterations = 2000; + +Console.WriteLine("Running Hamiltonian Monte Carlo..."); +hmc.Sample(); + +// HMC produces high-quality samples with lower autocorrelation +var samples = hmc.ParameterSets; +Console.WriteLine($"Generated {samples.Length} high-quality samples"); +``` + +**When to use HMC:** +- Smooth, differentiable posteriors +- When gradient information is available +- Need for low autocorrelation +- Medium to high dimensions with smooth geometry + +**Advantages:** +- Very efficient (low autocorrelation) +- Explores parameter space quickly +- Excellent for smooth posteriors + +**Disadvantages:** +- Requires gradient computation +- Less robust to discontinuities +- More complex to tune + +## Gibbs Sampler + +Samples each parameter conditionally given others: + +```cs +var gibbs = new Gibbs(priors, logLikelihood); + +gibbs.NumberOfChains = 4; +gibbs.WarmupIterations = 1500; +gibbs.Iterations = 3500; + +Console.WriteLine("Running Gibbs sampler..."); +gibbs.Sample(); + +// Gibbs has no rejections - every proposal is accepted +Console.WriteLine("Gibbs sampler completed (no rejections)"); +``` + +**When to use Gibbs:** +- Conditional distributions available in closed form +- Conjugate prior-likelihood pairs +- Hierarchical models + +**Note:** Gibbs is most efficient when conditional distributions are easy to sample from. For general problems, ARWMH or DEMCz are often better choices. 
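+To make the "closed form conditional" idea concrete, here is a minimal conjugate sketch, not the library's `Gibbs` implementation: estimating a Normal mean with known variance under a Normal prior, where the conditional for the mean is itself Normal and can be drawn directly. The data values and hyperparameters are made up for illustration; only the `Normal`, `MersenneTwister`, and inverse-CDF calls shown elsewhere on this page are used:
+
+```cs
+using Numerics.Distributions;
+using Numerics.Sampling;
+
+// Model: y_i ~ N(mu, sigma²) with sigma known, prior mu ~ N(m0, s0²).
+// The conditional (posterior) for mu is Normal, so a Gibbs step is a direct draw.
+double sigma = 2.0;                        // known data standard deviation
+double m0 = 0.0, s0 = 10.0;                // prior mean and standard deviation
+double[] y = { 4.1, 5.3, 3.8, 4.9, 5.6 };  // illustrative data
+
+double n = y.Length;
+double ybar = y.Average();
+
+// Precision-weighted combination of prior and data
+double postVar = 1.0 / (1.0 / (s0 * s0) + n / (sigma * sigma));
+double postMean = postVar * (m0 / (s0 * s0) + n * ybar / (sigma * sigma));
+
+var rng = new MersenneTwister(12345);
+var conditional = new Normal(postMean, Math.Sqrt(postVar));
+
+// Direct draw from the conditional (no accept/reject step)
+double muDraw = conditional.InverseCDF(rng.NextDouble());
+
+Console.WriteLine($"Conditional for mu: N({postMean:F3}, {Math.Sqrt(postVar):F3}²)");
+Console.WriteLine($"One Gibbs draw of mu: {muDraw:F3}");
+```
+
+In a model with several parameters, one such draw per parameter, conditioning on the current values of the others, makes up a single Gibbs iteration.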
+ +## Complete Bayesian Inference Example + +### Example 1: Linear Regression with Full Uncertainty + +```cs +using Numerics.Distributions; +using Numerics.Sampling.MCMC; +using Numerics.Data.Statistics; + +// Generate synthetic data +double trueIntercept = 2.0; +double trueSlope = 1.8; +double trueNoise = 0.5; + +var random = new MersenneTwister(123); +int n = 20; +double[] x = Enumerable.Range(1, n).Select(i => (double)i).ToArray(); +double[] yTrue = x.Select(xi => trueIntercept + trueSlope * xi).ToArray(); +double[] y = yTrue.Select(yi => yi + new Normal(0, trueNoise).InverseCDF(random.NextDouble())).ToArray(); + +Console.WriteLine($"Generated {n} data points"); +Console.WriteLine($"True parameters: a={trueIntercept}, b={trueSlope}, σ={trueNoise}"); + +// Define priors +var priors = new List +{ + new Normal(0, 10), // Intercept + new Normal(0, 10), // Slope + new Uniform(0.1, 5.0) // Noise +}; + +// Define log-likelihood +LogLikelihood logLik = (theta) => +{ + double a = theta[0]; + double b = theta[1]; + double sigma = theta[2]; + + // Prior + double logPrior = priors[0].LogPDF(a) + priors[1].LogPDF(b) + priors[2].LogPDF(sigma); + + // Likelihood + double logData = 0; + for (int i = 0; i < n; i++) + { + double mu = a + b * x[i]; + logData += new Normal(mu, sigma).LogPDF(y[i]); + } + + return logPrior + logData; +}; + +// Run MCMC +var sampler = new ARWMH(priors, logLik); +sampler.WarmupIterations = 2000; +sampler.Iterations = 5000; +sampler.NumberOfChains = 4; +sampler.ThinningInterval = 5; + +Console.WriteLine("\nRunning MCMC..."); +sampler.Sample(); + +// Analyze results +var samples = sampler.ParameterSets; +Console.WriteLine($"\nPosterior Summary ({samples.Length} samples):"); +Console.WriteLine("Parameter | True | Post Mean | Post SD | 95% Credible Interval"); +Console.WriteLine("---------------------------------------------------------------"); + +string[] names = { "Intercept", "Slope", "Noise SD" }; +double[] trueVals = { trueIntercept, trueSlope, trueNoise }; + +for (int i = 0; i < 3; i++) +{ + var vals = samples.Select(s => s.Values[i]).OrderBy(v => v).ToArray(); + double mean = vals.Average(); + double std = Statistics.StandardDeviation(vals); + double lower = Statistics.Percentile(vals, 2.5); + double upper = Statistics.Percentile(vals, 97.5); + + Console.WriteLine($"{names[i],-9} | {trueVals[i],5:F2} | {mean,9:F3} | {std,7:F3} | [{lower:F3}, {upper:F3}]"); +} + +// Posterior predictive +Console.WriteLine("\nPosterior Predictive at x=15:"); +double xNew = 15; +var predictions = samples.Select(s => s.Values[0] + s.Values[1] * xNew).ToArray(); +double predMean = predictions.Average(); +double predSD = Statistics.StandardDeviation(predictions); +double predLower = Statistics.Percentile(predictions.OrderBy(p => p).ToArray(), 2.5); +double predUpper = Statistics.Percentile(predictions.OrderBy(p => p).ToArray(), 97.5); + +Console.WriteLine($"E[y|x={xNew}] = {predMean:F2} ± {predSD:F2}"); +Console.WriteLine($"95% Credible Interval: [{predLower:F2}, {predUpper:F2}]"); +``` + +### Example 2: Distribution Parameter Estimation + +```cs +// Observed data from unknown GEV distribution +double[] annualMaxima = { 12500, 15300, 11200, 18700, 14100, 16800, 13400, 17200, 10500, 19300 }; + +Console.WriteLine("Bayesian Estimation of GEV Parameters"); +Console.WriteLine("=" + new string('=', 50)); + +// Prior distributions for GEV parameters [ξ, α, κ] +var priors = new List +{ + new Normal(15000, 5000), // Location: N(15000, 5000) + new Uniform(100, 5000), // Scale: U(100, 5000) + new 
Uniform(-0.5, 0.5) // Shape: U(-0.5, 0.5) +}; + +// Log-likelihood +LogLikelihood logLik = (theta) => +{ + double xi = theta[0]; + double alpha = theta[1]; + double kappa = theta[2]; + + // Check parameter validity + if (alpha <= 0) return double.NegativeInfinity; + + // Prior + double logPrior = priors[0].LogPDF(xi) + priors[1].LogPDF(alpha) + priors[2].LogPDF(kappa); + + // Likelihood + var gev = new GeneralizedExtremeValue(xi, alpha, kappa); + if (!gev.ParametersValid) return double.NegativeInfinity; + + double logData = annualMaxima.Sum(x => gev.LogPDF(x)); + + return logPrior + logData; +}; + +// Sample posterior +var sampler = new ARWMH(priors, logLik); +sampler.WarmupIterations = 3000; +sampler.Iterations = 10000; +sampler.NumberOfChains = 4; +sampler.ThinningInterval = 10; + +Console.WriteLine("Sampling posterior distribution..."); +sampler.Sample(); + +var samples = sampler.ParameterSets; +Console.WriteLine($"Generated {samples.Length} posterior samples\n"); + +// Parameter estimates +string[] paramNames = { "Location (ξ)", "Scale (α)", "Shape (κ)" }; +for (int i = 0; i < 3; i++) +{ + var vals = samples.Select(s => s.Values[i]).OrderBy(v => v).ToArray(); + Console.WriteLine($"{paramNames[i]}:"); + Console.WriteLine($" Mean: {vals.Average():F2}"); + Console.WriteLine($" Median: {Statistics.Percentile(vals, 50):F2}"); + Console.WriteLine($" 95% CI: [{Statistics.Percentile(vals, 2.5):F2}, {Statistics.Percentile(vals, 97.5):F2}]"); +} + +// Posterior predictive quantiles +Console.WriteLine("\nPosterior Predictive 100-year Flood:"); +var q100 = samples.Select(s => +{ + var dist = new GeneralizedExtremeValue(s.Values[0], s.Values[1], s.Values[2]); + return dist.InverseCDF(0.99); +}).OrderBy(q => q).ToArray(); + +Console.WriteLine($" Mean: {q100.Average():F0} cfs"); +Console.WriteLine($" Median: {Statistics.Percentile(q100, 50):F0} cfs"); +Console.WriteLine($" 95% CI: [{Statistics.Percentile(q100, 2.5):F0}, {Statistics.Percentile(q100, 97.5):F0}] cfs"); +``` + +## Thinning + +Thinning reduces autocorrelation by keeping only every nth sample: + +```cs +// Without thinning +sampler.ThinningInterval = 1; // Keep all samples +sampler.Iterations = 10000; // Need many iterations + +// With thinning +sampler.ThinningInterval = 20; // Keep every 20th sample +sampler.Iterations = 10000; // Total iterations +// Effective samples = 10000 / 20 = 500 per chain +``` + +**Thinning trade-offs:** +- **Reduces autocorrelation** in final samples +- **Saves memory** for long runs +- **Doesn't improve efficiency** (better to run longer without thinning) +- **Rule of thumb:** Keep thinning interval ≤ autocorrelation length + +## Multiple Chains + +Running multiple chains helps assess convergence: + +```cs +sampler.NumberOfChains = 4; // Standard choice + +// Access chain-specific information +int[] samplesPerChain = sampler.SampleCount; + +Console.WriteLine("Samples per chain:"); +for (int i = 0; i < samplesPerChain.Length; i++) +{ + Console.WriteLine($" Chain {i + 1}: {samplesPerChain[i]} samples"); +} +``` + +**Benefits of multiple chains:** +1. Assess convergence via R-hat statistic +2. Detect multimodal posteriors +3. Parallelize computation +4. 
More robust inference + +## Warmup (Burn-in) + +Warmup iterations are discarded to allow chains to reach stationarity: + +```cs +sampler.InitialIterations = 50; // Quick initialization +sampler.WarmupIterations = 2000; // Burn-in / adaptation +sampler.Iterations = 5000; // Kept samples + +// Total iterations = Initial + Warmup + Main +// Only main iterations are kept +``` + +**Warmup guidelines:** +- **RWMH**: 2000-5000 iterations +- **ARWMH**: 2000-3000 (adapts during warmup) +- **DEMCz**: 1500-3000 (converges faster) +- **HMC**: 1000-2000 (efficient exploration) +- **Rule of thumb**: Warmup ≥ 50% of main iterations + +## Best Practices + +### 1. Always Check Convergence + +```cs +// Visual inspection of traces +// Check R-hat < 1.1 for all parameters +// Effective sample size > 100 per parameter +``` + +### 2. Run Multiple Chains + +```cs +// Minimum 4 chains for convergence assessment +sampler.NumberOfChains = 4; +``` + +### 3. Start with Enough Iterations + +```cs +// Conservative starting point +sampler.WarmupIterations = 2000; +sampler.Iterations = 5000; +// Can adjust based on convergence diagnostics +``` + +### 4. Choose Appropriate Sampler + +```cs +// Low dimensions (< 5): RWMH or ARWMH +// Medium (5-20): ARWMH (default choice) +// High (20+): DEMCz or DEMCzs +// Smooth posteriors: HMC +// Conjugate models: Gibbs +``` + +### 5. Informative Priors + +```cs +// Use reasonable prior distributions +// Too vague: var prior = new Uniform(-1e10, 1e10); // Bad! +// Better: var prior = new Normal(expectedValue, reasonableSD); +``` + +### 6. Validate Log-Likelihood + +```cs +// Test log-likelihood function before sampling +double[] testParams = { 2.0, 1.5, 0.5 }; +double logLik = logLikelihoodFunction(testParams); + +Console.WriteLine($"Test log-likelihood: {logLik}"); +if (double.IsNaN(logLik) || double.IsInfinity(logLik)) +{ + Console.WriteLine("Warning: Check log-likelihood function!"); +} +``` + +## Comparing Samplers + +```cs +var samplers = new (string Name, MCMCSampler Sampler)[] +{ + ("RWMH", new RWMH(priors, logLik)), + ("ARWMH", new ARWMH(priors, logLik)), + ("DEMCz", new DEMCz(priors, logLik)) +}; + +foreach (var (name, sampler) in samplers) +{ + sampler.WarmupIterations = 2000; + sampler.Iterations = 5000; + + var watch = System.Diagnostics.Stopwatch.StartNew(); + sampler.Sample(); + watch.Stop(); + + Console.WriteLine($"{name}: {sampler.ParameterSets.Length} samples in {watch.ElapsedMilliseconds}ms"); +} +``` + +--- + +## References + +[1] Gelman, A., Carlin, J. B., Stern, H. S., Dunson, D. B., Vehtari, A., & Rubin, D. B. (2013). *Bayesian Data Analysis* (3rd ed.). CRC Press. + +[2] Metropolis, N., Rosenbluth, A. W., Rosenbluth, M. N., Teller, A. H., & Teller, E. (1953). Equation of state calculations by fast computing machines. *The Journal of Chemical Physics*, 21(6), 1087-1092. + +[3] Haario, H., Saksman, E., & Tamminen, J. (2001). An adaptive Metropolis algorithm. *Bernoulli*, 7(2), 223-242. + +[4] ter Braak, C. J., & Vrugt, J. A. (2008). Differential evolution Markov chain with snooker updater and fewer chains. *Statistics and Computing*, 18(4), 435-446. + +[5] Neal, R. M. (2011). MCMC using Hamiltonian dynamics. *Handbook of Markov Chain Monte Carlo*, 2(11), 2. 
+ +--- + +[← Previous: Goodness-of-Fit](../statistics/goodness-of-fit.md) | [Back to Index](../index.md) | [Next: Convergence Diagnostics →](convergence-diagnostics.md) diff --git a/docs/sampling/random-generation.md b/docs/sampling/random-generation.md new file mode 100644 index 00000000..d9d02b4f --- /dev/null +++ b/docs/sampling/random-generation.md @@ -0,0 +1,440 @@ +# Random Number Generation + +[← Previous: Time Series](../data/time-series.md) | [Back to Index](../index.md) + +The ***Numerics*** library provides multiple random number generation methods for different applications. These include high-quality pseudo-random generators, quasi-random sequences, and advanced sampling techniques. + +## Pseudo-Random Number Generators + +### Mersenne Twister + +The Mersenne Twister is a high-quality pseudo-random number generator with excellent statistical properties [[1]](#1): + +```cs +using Numerics.Sampling; + +// Create with time-based seed +var rng = new MersenneTwister(); + +// Create with specific seed (for reproducibility) +var rng2 = new MersenneTwister(seed: 12345); + +// Generate random integers +int randomInt = rng.Next(); // [0, Int32.MaxValue) +int randomInRange = rng.Next(1, 100); // [1, 100) + +// Generate random doubles +double u1 = rng.NextDouble(); // [0.0, 1.0) +double u2 = rng.GenRandReal1(); // [0, 1] +double u3 = rng.GenRandReal2(); // [0, 1) +double u4 = rng.GenRandReal3(); // (0, 1) +double u5 = rng.GenRandRes53(); // [0, 1) with 53-bit resolution + +Console.WriteLine("Random numbers:"); +Console.WriteLine($"Integer: {randomInt}"); +Console.WriteLine($"Double [0,1): {u1:F6}"); +Console.WriteLine($"Double (0,1): {u4:F6}"); +``` + +**Properties:** +- Period: 2^19937 - 1 (extremely long) +- Excellent uniformity and independence +- Fast generation +- Reproducible with seeds + +### Using with Distributions + +```cs +using Numerics.Distributions; + +var rng = new MersenneTwister(12345); + +// Generate from distributions +var normal = new Normal(100, 15); +double[] samples = new double[1000]; + +for (int i = 0; i < samples.Length; i++) +{ + double u = rng.NextDouble(); + samples[i] = normal.InverseCDF(u); +} + +Console.WriteLine($"Generated {samples.Length} Normal samples"); +Console.WriteLine($"Mean: {samples.Average():F2}"); +Console.WriteLine($"Std Dev: {Statistics.StandardDeviation(samples):F2}"); +``` + +## Quasi-Random Sequences + +### Sobol Sequence + +Low-discrepancy quasi-random sequences for better coverage of parameter space [[2]](#2): + +```cs +using Numerics.Sampling; + +// Create 2D Sobol sequence +var sobol = new SobolSequence(dimension: 2); + +Console.WriteLine("First 10 Sobol points:"); +for (int i = 0; i < 10; i++) +{ + double[] point = sobol.NextDouble(); + Console.WriteLine($"Point {i}: ({point[0]:F4}, {point[1]:F4})"); +} + +// Skip to specific index +double[] pointAt100 = sobol.SkipTo(100); +Console.WriteLine($"\nPoint at index 100: ({pointAt100[0]:F4}, {pointAt100[1]:F4})"); +``` + +**Properties:** +- Low discrepancy (better coverage than pseudo-random) +- Deterministic sequence +- Excellent for integration and optimization +- Converges faster than Monte Carlo + +**When to use:** +- Numerical integration (better than Monte Carlo) +- Parameter space exploration +- Optimization initialization +- Sensitivity analysis + +### Sobol vs. 
Pseudo-Random
+
+```cs
+int n = 100;
+var random = new MersenneTwister(123);
+var sobol = new SobolSequence(2);
+
+// Generate pseudo-random points
+var pseudoRandom = new List<(double, double)>();
+for (int i = 0; i < n; i++)
+{
+    pseudoRandom.Add((random.NextDouble(), random.NextDouble()));
+}
+
+// Generate quasi-random points
+var quasiRandom = new List<(double, double)>();
+for (int i = 0; i < n; i++)
+{
+    double[] point = sobol.NextDouble();
+    quasiRandom.Add((point[0], point[1]));
+}
+
+Console.WriteLine("Pseudo-random: Points may cluster");
+Console.WriteLine("Quasi-random: Points evenly distributed");
+Console.WriteLine("\nFor integration, quasi-random typically converges faster");
+```
+
+## Latin Hypercube Sampling
+
+Stratified sampling for better parameter space coverage [[3]](#3):
+
+```cs
+using Numerics.Sampling;
+
+// Generate Latin Hypercube sample
+int sampleSize = 50;
+int dimensions = 3;
+int seed = 12345;
+
+// Random LHS
+double[,] lhsRandom = LatinHypercube.Random(sampleSize, dimensions, seed);
+
+// Median LHS (centered in strata)
+double[,] lhsMedian = LatinHypercube.Median(sampleSize, dimensions, seed);
+
+Console.WriteLine("Latin Hypercube Sample (Random):");
+Console.WriteLine("Sample | Dim 1 | Dim 2 | Dim 3");
+Console.WriteLine("-------|--------|--------|--------");
+
+for (int i = 0; i < Math.Min(10, sampleSize); i++)
+{
+    Console.WriteLine($"{i,6} | {lhsRandom[i, 0],6:F4} | {lhsRandom[i, 1],6:F4} | {lhsRandom[i, 2],6:F4}");
+}
+
+// Transform to actual distributions
+var normal = new Normal(100, 15);
+var lognormal = new LogNormal(4, 0.5);
+var uniform = new Uniform(0, 10);
+
+double[,] transformed = new double[sampleSize, 3];
+for (int i = 0; i < sampleSize; i++)
+{
+    transformed[i, 0] = normal.InverseCDF(lhsRandom[i, 0]);
+    transformed[i, 1] = lognormal.InverseCDF(lhsRandom[i, 1]);
+    transformed[i, 2] = uniform.InverseCDF(lhsRandom[i, 2]);
+}
+
+Console.WriteLine("\nTransformed to distributions:");
+Console.WriteLine("Sample | Normal | LogNormal | Uniform");
+for (int i = 0; i < Math.Min(5, sampleSize); i++)
+{
+    Console.WriteLine($"{i,6} | {transformed[i, 0],6:F1} | {transformed[i, 1],9:F2} | {transformed[i, 2],7:F2}");
+}
+```
+
+**Properties:**
+- Stratified sampling (one sample per stratum)
+- Better coverage than simple random sampling
+- Reduced variance in estimates
+- Efficient for small sample sizes
+
+**When to use:**
+- Monte Carlo simulation with limited budget
+- Sensitivity analysis
+- Calibration with expensive models
+- Risk assessment studies
+
+## Practical Examples
+
+### Example 1: Monte Carlo Integration
+
+```cs
+// Integrate f(x) = x² from 0 to 1 using different methods
+
+Func<double, double> f = x => x * x;
+int n = 1000;
+
+// Simple Monte Carlo (pseudo-random)
+var rng = new MersenneTwister(123);
+double mcSum = 0;
+for (int i = 0; i < n; i++)
+{
+    mcSum += f(rng.NextDouble());
+}
+double mcEstimate = mcSum / n; // Approximates ∫₀¹ x² dx = 1/3
+
+// Quasi-Monte Carlo (Sobol)
+var sobol = new SobolSequence(1);
+double qmcSum = 0;
+for (int i = 0; i < n; i++)
+{
+    qmcSum += f(sobol.NextDouble()[0]);
+}
+double qmcEstimate = qmcSum / n;
+
+double exact = 1.0 / 3.0;
+
+Console.WriteLine("Monte Carlo Integration of x² from 0 to 1:");
+Console.WriteLine($"Exact value: {exact:F6}");
+Console.WriteLine($"MC estimate: {mcEstimate:F6} (error: {Math.Abs(mcEstimate - exact):E4})");
+Console.WriteLine($"QMC estimate: {qmcEstimate:F6} (error: {Math.Abs(qmcEstimate - exact):E4})");
+Console.WriteLine("\nQMC typically has smaller error for same sample size");
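+
+// To see why: the pseudo-random estimate has standard error sqrt(Var[f(U)] / n).
+// For f(x) = x² with U ~ Uniform(0,1), Var[f(U)] = 1/5 - 1/9 = 4/45 ≈ 0.0889,
+// so the MC error shrinks like O(1/sqrt(n)), while the deterministic Sobol (QMC)
+// error typically decays closer to O(1/n) for smooth integrands.
+double mcStandardError = Math.Sqrt((4.0 / 45.0) / n);
+Console.WriteLine($"Theoretical MC standard error: {mcStandardError:E4}");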
+```
+
+### Example 2: Uncertainty Propagation
+
+```cs
+// Model: y = a*x + b*x² where a, b are uncertain
+
+var normal_a = new Normal(2.0, 0.3);
+var normal_b = new Normal(1.0, 0.1);
+double x = 5.0;
+
+// Monte Carlo with pseudo-random
+var rng = new MersenneTwister(123);
+int nSamples = 10000;
+
+double[] y_mc = new double[nSamples];
+for (int i = 0; i < nSamples; i++)
+{
+    double a = normal_a.InverseCDF(rng.NextDouble());
+    double b = normal_b.InverseCDF(rng.NextDouble());
+    y_mc[i] = a * x + b * x * x;
+}
+
+// Latin Hypercube Sampling
+var lhs = LatinHypercube.Random(nSamples, 2, seed: 123);
+
+double[] y_lhs = new double[nSamples];
+for (int i = 0; i < nSamples; i++)
+{
+    double a = normal_a.InverseCDF(lhs[i, 0]);
+    double b = normal_b.InverseCDF(lhs[i, 1]);
+    y_lhs[i] = a * x + b * x * x;
+}
+
+Console.WriteLine("Uncertainty Propagation:");
+Console.WriteLine($"MC - Mean: {y_mc.Average():F2}, Std: {Statistics.StandardDeviation(y_mc):F2}");
+Console.WriteLine($"LHS - Mean: {y_lhs.Average():F2}, Std: {Statistics.StandardDeviation(y_lhs):F2}");
+Console.WriteLine("\nLHS typically more stable with fewer samples");
+```
+
+### Example 3: Global Optimization Initialization
+
+```cs
+// Initialize population for global optimization
+
+int popSize = 20;
+int dimensions = 3;
+
+// Parameter bounds
+double[] lowerBounds = { -10, -5, 0 };
+double[] upperBounds = { 10, 5, 100 };
+
+// Generate initial population with LHS
+var lhs = LatinHypercube.Random(popSize, dimensions, seed: 123);
+
+// Scale to actual bounds
+double[,] population = new double[popSize, dimensions];
+for (int i = 0; i < popSize; i++)
+{
+    for (int d = 0; d < dimensions; d++)
+    {
+        population[i, d] = lowerBounds[d] + lhs[i, d] * (upperBounds[d] - lowerBounds[d]);
+    }
+}
+
+Console.WriteLine("Initial Population for Optimization:");
+Console.WriteLine("Individual | Param 1 | Param 2 | Param 3");
+Console.WriteLine("-----------|---------|---------|----------");
+
+for (int i = 0; i < Math.Min(10, popSize); i++)
+{
+    Console.WriteLine($"{i,10} | {population[i, 0],7:F2} | {population[i, 1],7:F2} | {population[i, 2],8:F2}");
+}
+
+Console.WriteLine("\nLHS ensures good coverage of parameter space");
+```
+
+### Example 4: Sensitivity Analysis
+
+```cs
+// Compute Sobol sensitivity indices using quasi-random sampling
+
+Func<double[], double> model = x =>
+    x[0] + 2 * x[1] + 3 * x[2] + 4 * x[1] * x[2];
+
+int n = 1000;
+int dim = 3;
+
+// Generate two independent LHS samples
+var A = LatinHypercube.Random(n, dim, seed: 123);
+var B = LatinHypercube.Random(n, dim, seed: 456);
+
+// Evaluate model
+double[] yA = new double[n];
+double[] yB = new double[n];
+
+for (int i = 0; i < n; i++)
+{
+    yA[i] = model(new[] { A[i, 0], A[i, 1], A[i, 2] });
+    yB[i] = model(new[] { B[i, 0], B[i, 1], B[i, 2] });
+}
+
+// First-order sensitivity indices
+double varY = Statistics.Variance(yA);
+
+Console.WriteLine("Sensitivity Analysis:");
+Console.WriteLine("Parameter | First-Order Index");
+Console.WriteLine("----------|------------------");
+
+for (int j = 0; j < dim; j++)
+{
+    double[] yABj = new double[n];
+
+    for (int i = 0; i < n; i++)
+    {
+        double[] x = new double[dim];
+        for (int k = 0; k < dim; k++)
+        {
+            x[k] = (k == j) ? 
B[i, k] : A[i, k]; + } + yABj[i] = model(x); + } + + double S_j = (yA.Zip(yABj, (ya, yabj) => ya * yabj).Average() - + yA.Average() * yA.Average()) / varY; + + Console.WriteLine($"Param {j + 1} | {S_j,17:F4}"); +} +``` + +## Choosing a Random Number Generator + +| Method | Use Case | Pros | Cons | +|--------|----------|------|------| +| **Mersenne Twister** | General purpose | Fast, long period | Clusters in high dimensions | +| **Sobol** | Integration, optimization | Low discrepancy, deterministic | Not random, dimension limit | +| **Latin Hypercube** | Small sample studies | Stratified, efficient | Requires planning | + +### Decision Guide + +**Use Mersenne Twister when:** +- Need standard random numbers +- Simulating stochastic processes +- Large sample sizes available +- Distribution sampling + +**Use Sobol when:** +- Numerical integration +- Parameter space exploration +- Deterministic sequence needed +- Integration convergence critical + +**Use Latin Hypercube when:** +- Limited computational budget +- Sensitivity analysis +- Need efficient stratification +- Small to medium samples (10-1000) + +## Reproducibility + +### Setting Seeds + +```cs +// Pseudo-random - use same seed for reproducibility +var rng1 = new MersenneTwister(12345); +var rng2 = new MersenneTwister(12345); + +// Generate same sequence +for (int i = 0; i < 5; i++) +{ + double r1 = rng1.NextDouble(); + double r2 = rng2.NextDouble(); + Console.WriteLine($"RNG1: {r1:F6}, RNG2: {r2:F6}, Same: {r1 == r2}"); +} + +// Quasi-random - deterministic by design +var sobol1 = new SobolSequence(2); +var sobol2 = new SobolSequence(2); + +for (int i = 0; i < 3; i++) +{ + var point1 = sobol1.NextDouble(); + var point2 = sobol2.NextDouble(); + Console.WriteLine($"Sobol sequences identical: {point1[0] == point2[0] && point1[1] == point2[1]}"); +} +``` + +## Best Practices + +1. **Always set seeds** for reproducible research +2. **Document RNG choices** in methods section +3. **Use appropriate method** for application +4. **Check sample size** requirements +5. **Validate distributions** with statistical tests +6. **Consider quasi-random** for integration +7. **Use LHS** for expensive models + +## Performance Considerations + +- **Mersenne Twister**: Very fast, suitable for large samples +- **Sobol**: Slightly slower, excellent for moderate samples +- **Latin Hypercube**: Overhead for stratification, best for small samples + +--- + +## References + +[1] Matsumoto, M., & Nishimura, T. (1998). Mersenne twister: a 623-dimensionally equidistributed uniform pseudo-random number generator. *ACM Transactions on Modeling and Computer Simulation*, 8(1), 3-30. + +[2] Sobol, I. M. (1967). On the distribution of points in a cube and the approximate evaluation of integrals. *USSR Computational Mathematics and Mathematical Physics*, 7(4), 86-112. + +[3] McKay, M. D., Beckman, R. J., & Conover, W. J. (1979). A comparison of three methods for selecting values of input variables in the analysis of output from a computer code. *Technometrics*, 21(2), 239-245. 
+ +--- + +[← Previous: Time Series](../data/time-series.md) | [Back to Index](../index.md) diff --git a/docs/statistics/descriptive.md b/docs/statistics/descriptive.md new file mode 100644 index 00000000..81cbe59b --- /dev/null +++ b/docs/statistics/descriptive.md @@ -0,0 +1,530 @@ +# Descriptive Statistics + +[← Back to Index](../index.md) | [Next: Goodness-of-Fit →](goodness-of-fit.md) + +The ***Numerics*** library provides comprehensive functions for computing descriptive statistics from data samples. The `Statistics` class contains static methods for all common statistical measures, sample moments, percentiles, and specialized analyses. + +## Basic Statistics + +### Central Tendency + +Measures of the center or typical value of a dataset: + +```cs +using Numerics.Data.Statistics; + +double[] data = { 10.5, 12.3, 11.8, 15.2, 13.7, 14.1, 16.8, 12.9, 11.2, 14.5 }; + +// Arithmetic mean +double mean = Statistics.Mean(data); // 13.3 + +// Geometric mean (for positive data) +double geomMean = Statistics.GeometricMean(data); // 13.13 + +// Harmonic mean +double harmMean = Statistics.HarmonicMean(data); // 12.96 + +Console.WriteLine($"Arithmetic mean: {mean:F2}"); +Console.WriteLine($"Geometric mean: {geomMean:F2}"); +Console.WriteLine($"Harmonic mean: {harmMean:F2}"); +``` + +**Note on Means:** +- **Arithmetic mean**: Best for symmetric data, $\bar{x} = \frac{1}{n}\sum x_i$ +- **Geometric mean**: For multiplicative data (growth rates, ratios), $\sqrt[n]{\prod x_i}$ +- **Harmonic mean**: For rates and ratios, $\frac{n}{\sum \frac{1}{x_i}}$ +- Relationship: $\text{Harmonic} \leq \text{Geometric} \leq \text{Arithmetic}$ + +### Dispersion + +Measures of spread or variability: + +```cs +double[] data = { 10.5, 12.3, 11.8, 15.2, 13.7, 14.1, 16.8, 12.9, 11.2, 14.5 }; + +// Sample variance (divides by n-1) +double variance = Statistics.Variance(data); // 4.08 + +// Population variance (divides by n) +double popVariance = Statistics.PopulationVariance(data); // 3.67 + +// Sample standard deviation +double stdDev = Statistics.StandardDeviation(data); // 2.02 + +// Population standard deviation +double popStdDev = Statistics.PopulationStandardDeviation(data); // 1.92 + +// Coefficient of variation (relative variability) +double cv = Statistics.CoefficientOfVariation(data); // 0.152 (15.2%) + +Console.WriteLine($"Variance: {variance:F2}"); +Console.WriteLine($"Std Dev: {stdDev:F2}"); +Console.WriteLine($"CV: {cv:P1}"); +``` + +### Efficient Combined Computations + +For better performance when needing multiple related statistics: + +```cs +// Compute mean and variance together (single pass through data) +var (mean, variance) = Statistics.MeanVariance(data); + +// Compute mean and standard deviation together +var (mean2, stdDev) = Statistics.MeanStandardDeviation(data); + +Console.WriteLine($"Mean: {mean:F2}, Variance: {variance:F2}"); +Console.WriteLine($"Mean: {mean2:F2}, Std Dev: {stdDev:F2}"); +``` + +### Range + +```cs +// Minimum and maximum +double min = Statistics.Minimum(data); // 10.5 +double max = Statistics.Maximum(data); // 16.8 +double range = max - min; // 6.3 + +// Sum +double sum = Statistics.Sum(data); // 133.0 + +Console.WriteLine($"Range: [{min:F1}, {max:F1}] with width {range:F1}"); +``` + +## Shape Statistics + +### Skewness + +Measures asymmetry of the distribution: + +```cs +double[] symmetric = { 1, 2, 3, 4, 5, 6, 7, 8, 9 }; +double[] rightSkewed = { 1, 1, 2, 2, 3, 5, 8, 13, 21 }; +double[] leftSkewed = { 21, 13, 8, 5, 3, 2, 2, 1, 1 }; + +double skew1 = 
Statistics.Skewness(symmetric); // ≈ 0 (symmetric) +double skew2 = Statistics.Skewness(rightSkewed); // > 0 (right-skewed) +double skew3 = Statistics.Skewness(leftSkewed); // < 0 (left-skewed) + +Console.WriteLine($"Symmetric data skewness: {skew1:F3}"); +Console.WriteLine($"Right-skewed data: {skew2:F3}"); +Console.WriteLine($"Left-skewed data: {skew3:F3}"); + +// Interpretation +if (Math.Abs(skew1) < 0.5) + Console.WriteLine("Approximately symmetric"); +else if (skew1 > 0) + Console.WriteLine("Right-skewed (tail extends right)"); +else + Console.WriteLine("Left-skewed (tail extends left)"); +``` + +### Kurtosis + +Measures tail heaviness (peakedness): + +```cs +double[] data = { 10.5, 12.3, 11.8, 15.2, 13.7, 14.1, 16.8, 12.9, 11.2, 14.5 }; + +// Excess kurtosis (subtract 3 from Pearson's kurtosis) +double kurtosis = Statistics.Kurtosis(data); + +Console.WriteLine($"Kurtosis: {kurtosis:F3}"); + +// Interpretation +if (kurtosis > 0) + Console.WriteLine("Leptokurtic - heavier tails than normal (excess kurtosis > 0)"); +else if (kurtosis < 0) + Console.WriteLine("Platykurtic - lighter tails than normal (excess kurtosis < 0)"); +else + Console.WriteLine("Mesokurtic - similar to normal distribution (excess kurtosis ≈ 0)"); +``` + +## Moments + +### Product Moments + +The first four product moments (mean, standard deviation, skewness, kurtosis): + +```cs +double[] data = { 10.5, 12.3, 11.8, 15.2, 13.7, 14.1, 16.8, 12.9, 11.2, 14.5 }; + +double[] moments = Statistics.ProductMoments(data); + +Console.WriteLine("Product Moments:"); +Console.WriteLine($" Mean (μ): {moments[0]:F2}"); +Console.WriteLine($" Standard Deviation (σ): {moments[1]:F2}"); +Console.WriteLine($" Skewness (γ₁): {moments[2]:F3}"); +Console.WriteLine($" Kurtosis (γ₂): {moments[3]:F3}"); +``` + +### Linear Moments (L-Moments) + +Linear moments are robust alternatives to product moments [[1]](#1): + +```cs +double[] data = { 10.5, 12.3, 11.8, 15.2, 13.7, 14.1, 16.8, 12.9, 11.2, 14.5 }; + +double[] lMoments = Statistics.LinearMoments(data); + +Console.WriteLine("L-Moments:"); +Console.WriteLine($" λ₁ (L-location): {lMoments[0]:F2}"); +Console.WriteLine($" λ₂ (L-scale): {lMoments[1]:F2}"); +Console.WriteLine($" τ₃ (L-skewness): {lMoments[2]:F4}"); +Console.WriteLine($" τ₄ (L-kurtosis): {lMoments[3]:F4}"); + +// L-moments are preferred for: +// - Small samples (n < 50) +// - Data with outliers +// - Hydrological data +// - Extreme value analysis +``` + +## Percentiles and Quantiles + +### Computing Percentiles + +```cs +double[] data = { 10.5, 12.3, 11.8, 15.2, 13.7, 14.1, 16.8, 12.9, 11.2, 14.5 }; + +// Single percentile (k as decimal: 0-100) +double median = Statistics.Percentile(data, 50); // 50th percentile +double p90 = Statistics.Percentile(data, 90); // 90th percentile +double p95 = Statistics.Percentile(data, 95); // 95th percentile + +Console.WriteLine($"Median (50th percentile): {median:F2}"); +Console.WriteLine($"90th percentile: {p90:F2}"); +Console.WriteLine($"95th percentile: {p95:F2}"); + +// Multiple percentiles at once (more efficient) +double[] percentiles = Statistics.Percentile(data, new double[] { 25, 50, 75, 90, 95 }); + +Console.WriteLine("\nPercentiles:"); +Console.WriteLine($" 25th: {percentiles[0]:F2}"); +Console.WriteLine($" 50th: {percentiles[1]:F2}"); +Console.WriteLine($" 75th: {percentiles[2]:F2}"); +Console.WriteLine($" 90th: {percentiles[3]:F2}"); +Console.WriteLine($" 95th: {percentiles[4]:F2}"); + +// Note: Can specify if data is already sorted for efficiency +bool isSorted = false; +double q25 = 
Statistics.Percentile(data, 25, dataIsSorted: isSorted); +``` + +### Five-Number Summary + +Box plot statistics (Tukey's five-number summary): + +```cs +double[] data = { 10.5, 12.3, 11.8, 15.2, 13.7, 14.1, 16.8, 12.9, 11.2, 14.5 }; + +double[] fiveNum = Statistics.FiveNumberSummary(data); + +Console.WriteLine("Five-Number Summary:"); +Console.WriteLine($" Minimum: {fiveNum[0]:F2}"); +Console.WriteLine($" Q1 (25th percentile): {fiveNum[1]:F2}"); +Console.WriteLine($" Median (50th percentile): {fiveNum[2]:F2}"); +Console.WriteLine($" Q3 (75th percentile): {fiveNum[3]:F2}"); +Console.WriteLine($" Maximum: {fiveNum[4]:F2}"); + +// Interquartile range +double iqr = fiveNum[3] - fiveNum[1]; +Console.WriteLine($" IQR: {iqr:F2}"); + +// Outlier bounds (Tukey's fences) +double lowerFence = fiveNum[1] - 1.5 * iqr; +double upperFence = fiveNum[3] + 1.5 * iqr; +Console.WriteLine($" Outlier bounds: [{lowerFence:F2}, {upperFence:F2}]"); +``` + +### Seven-Number Summary + +Extended summary including additional percentiles: + +```cs +double[] sevenNum = Statistics.SevenNumberSummary(data); + +Console.WriteLine("Seven-Number Summary:"); +Console.WriteLine($" Minimum: {sevenNum[0]:F2}"); +Console.WriteLine($" 10th percentile: {sevenNum[1]:F2}"); +Console.WriteLine($" Q1 (25th): {sevenNum[2]:F2}"); +Console.WriteLine($" Median (50th): {sevenNum[3]:F2}"); +Console.WriteLine($" Q3 (75th): {sevenNum[4]:F2}"); +Console.WriteLine($" 90th percentile: {sevenNum[5]:F2}"); +Console.WriteLine($" Maximum: {sevenNum[6]:F2}"); +``` + +## Covariance and Correlation + +### Covariance + +Measures how two variables vary together: + +```cs +double[] x = { 1, 2, 3, 4, 5 }; +double[] y = { 2, 4, 5, 4, 5 }; + +// Sample covariance +double cov = Statistics.Covariance(x, y); + +// Population covariance +double popCov = Statistics.PopulationCovariance(x, y); + +Console.WriteLine($"Sample covariance: {cov:F3}"); +Console.WriteLine($"Population covariance: {popCov:F3}"); + +// Interpretation +if (cov > 0) + Console.WriteLine("Positive association: as x increases, y tends to increase"); +else if (cov < 0) + Console.WriteLine("Negative association: as x increases, y tends to decrease"); +else + Console.WriteLine("No linear association"); +``` + +### Correlation + +For correlation coefficients, use the `Correlation` class: + +```cs +using Numerics.Data.Statistics; + +double[] x = { 1, 2, 3, 4, 5 }; +double[] y = { 2, 4, 5, 4, 5 }; + +// Pearson correlation coefficient +double pearson = Correlation.Pearson(x, y); + +// Spearman rank correlation +double spearman = Correlation.Spearman(x, y); + +// Kendall tau correlation +double kendall = Correlation.Kendall(x, y); + +Console.WriteLine($"Pearson r: {pearson:F3}"); +Console.WriteLine($"Spearman ρ: {spearman:F3}"); +Console.WriteLine($"Kendall τ: {kendall:F3}"); + +// Interpretation of Pearson r +if (Math.Abs(pearson) > 0.7) + Console.WriteLine("Strong correlation"); +else if (Math.Abs(pearson) > 0.3) + Console.WriteLine("Moderate correlation"); +else + Console.WriteLine("Weak correlation"); +``` + +## Ranking + +### Rank Statistics + +```cs +double[] data = { 5.2, 3.1, 7.8, 3.1, 9.2, 5.2 }; + +// Compute ranks (in-place, modifies array) +double[] dataCopy = (double[])data.Clone(); +double[] ranks = Statistics.RanksInPlace(dataCopy); + +Console.WriteLine("Value | Rank"); +for (int i = 0; i < data.Length; i++) +{ + Console.WriteLine($"{data[i],5:F1} | {ranks[i],4:F1}"); +} + +// Ranks with ties reported +double[] dataCopy2 = (double[])data.Clone(); +double[] ranks2 = 
Statistics.RanksInPlace(dataCopy2, out double[] ties);
+
+Console.WriteLine($"\nNumber of tied groups: {ties.Count(t => t > 1)}");
+```
+
+## Entropy
+
+Shannon entropy for continuous distributions:
+
+```cs
+using Numerics.Distributions;
+
+double[] sample = new Normal(0, 1).GenerateRandomValues(1000);
+
+// Estimate entropy using kernel density (fit the KDE once, then reuse it)
+var kde = new KernelDensity(sample, bandwidth: 0.5);
+Func<double, double> pdf = x => kde.PDF(x);
+
+double entropy = Statistics.Entropy(sample, pdf);
+
+Console.WriteLine($"Estimated entropy: {entropy:F3} nats");
+Console.WriteLine($"In bits: {entropy / Math.Log(2):F3}");
+
+// For Normal(0,1), theoretical entropy ≈ 1.42 nats
+```
+
+## Jackknife Resampling
+
+Leave-one-out resampling for standard error estimation:
+
+```cs
+double[] data = { 10.5, 12.3, 11.8, 15.2, 13.7, 14.1, 16.8, 12.9, 11.2, 14.5 };
+
+// Define a statistic function (e.g., median)
+Func<IList<double>, double> medianFunc = sample => Statistics.Percentile(sample.ToArray(), 50);
+
+// Jackknife standard error
+double jackknifeSE = Statistics.JackKnifeStandardError(data, medianFunc);
+
+Console.WriteLine($"Median: {medianFunc(data):F2}");
+Console.WriteLine($"Jackknife SE: {jackknifeSE:F3}");
+
+// Get all jackknife samples
+double[] jackknifeValues = Statistics.JackKnifeSample(data, medianFunc);
+
+Console.WriteLine($"Jackknife samples: {jackknifeValues.Length}");
+Console.WriteLine($"Mean of jackknife estimates: {jackknifeValues.Average():F2}");
+```
+
+## Practical Examples
+
+### Example 1: Complete Data Summary
+
+```cs
+using Numerics.Data.Statistics;
+
+double[] annualRainfall = { 850, 920, 780, 1050, 890, 950, 820, 1100, 870, 980 };
+
+Console.WriteLine("Annual Rainfall Analysis (mm)");
+Console.WriteLine("=" + new string('=', 50));
+
+// Central tendency
+Console.WriteLine("\nCentral Tendency:");
+Console.WriteLine($" Mean: {Statistics.Mean(annualRainfall):F1} mm");
+Console.WriteLine($" Median: {Statistics.Percentile(annualRainfall, 50):F1} mm");
+
+// Dispersion
+Console.WriteLine("\nDispersion:");
+Console.WriteLine($" Range: {Statistics.Minimum(annualRainfall):F0} - {Statistics.Maximum(annualRainfall):F0} mm");
+Console.WriteLine($" Std Dev: {Statistics.StandardDeviation(annualRainfall):F1} mm");
+Console.WriteLine($" CV: {Statistics.CoefficientOfVariation(annualRainfall):P1}");
+
+// Shape
+Console.WriteLine("\nShape:");
+Console.WriteLine($" Skewness: {Statistics.Skewness(annualRainfall):F3}");
+Console.WriteLine($" Kurtosis: {Statistics.Kurtosis(annualRainfall):F3}");
+
+// Percentiles
+var percentiles = Statistics.Percentile(annualRainfall, new double[] { 10, 25, 50, 75, 90 });
+Console.WriteLine("\nPercentiles:");
+Console.WriteLine($" 10th: {percentiles[0]:F1} mm");
+Console.WriteLine($" 25th: {percentiles[1]:F1} mm");
+Console.WriteLine($" 50th: {percentiles[2]:F1} mm");
+Console.WriteLine($" 75th: {percentiles[3]:F1} mm");
+Console.WriteLine($" 90th: {percentiles[4]:F1} mm");
+```
+
+### Example 2: Comparing Two Datasets
+
+```cs
+double[] before = { 85, 92, 78, 95, 88, 91, 82, 89 };
+double[] after = { 88, 95, 81, 98, 91, 94, 85, 92 };
+
+Console.WriteLine("Before vs After Treatment");
+Console.WriteLine("=" + new string('=', 50));
+
+Console.WriteLine($"\n{"Statistic",-20} | {"Before",10} | {"After",10} | {"Change",10}");
+Console.WriteLine(new string('-', 55));
+
+double meanBefore = Statistics.Mean(before);
+double meanAfter = Statistics.Mean(after);
+Console.WriteLine($"{"Mean",-20} | {meanBefore,10:F2} | {meanAfter,10:F2} | {meanAfter - meanBefore,10:F2}");
+
+double sdBefore = Statistics.StandardDeviation(before); +double sdAfter = Statistics.StandardDeviation(after); +Console.WriteLine($"{"Std Dev",-20} | {sdBefore,10:F2} | {sdAfter,10:F2} | {sdAfter - sdBefore,10:F2}"); + +double medBefore = Statistics.Percentile(before, 50); +double medAfter = Statistics.Percentile(after, 50); +Console.WriteLine($"{"Median",-20} | {medBefore,10:F2} | {medAfter,10:F2} | {medAfter - medBefore,10:F2}"); + +// Effect size (Cohen's d) +double pooledSD = Math.Sqrt((Statistics.Variance(before) + Statistics.Variance(after)) / 2); +double cohenD = (meanAfter - meanBefore) / pooledSD; +Console.WriteLine($"\nEffect size (Cohen's d): {cohenD:F3}"); +``` + +### Example 3: Time Series Summary + +```cs +double[] monthlyFlow = { 125, 135, 180, 220, 250, 280, 260, 230, 190, 150, 130, 120 }; +string[] months = { "Jan", "Feb", "Mar", "Apr", "May", "Jun", "Jul", "Aug", "Sep", "Oct", "Nov", "Dec" }; + +Console.WriteLine("Monthly Streamflow Summary (cfs)"); +Console.WriteLine("=" + new string('=', 50)); + +// Overall statistics +Console.WriteLine($"\nAnnual Statistics:"); +Console.WriteLine($" Mean: {Statistics.Mean(monthlyFlow):F0} cfs"); +Console.WriteLine($" Std Dev: {Statistics.StandardDeviation(monthlyFlow):F0} cfs"); +Console.WriteLine($" Min: {Statistics.Minimum(monthlyFlow):F0} cfs ({months[Array.IndexOf(monthlyFlow, Statistics.Minimum(monthlyFlow))]})"); +Console.WriteLine($" Max: {Statistics.Maximum(monthlyFlow):F0} cfs ({months[Array.IndexOf(monthlyFlow, Statistics.Maximum(monthlyFlow))]})"); + +// Seasonal means +double springMean = Statistics.Mean(new[] { monthlyFlow[2], monthlyFlow[3], monthlyFlow[4] }); +double summerMean = Statistics.Mean(new[] { monthlyFlow[5], monthlyFlow[6], monthlyFlow[7] }); +double fallMean = Statistics.Mean(new[] { monthlyFlow[8], monthlyFlow[9], monthlyFlow[10] }); +double winterMean = Statistics.Mean(new[] { monthlyFlow[11], monthlyFlow[0], monthlyFlow[1] }); + +Console.WriteLine($"\nSeasonal Means:"); +Console.WriteLine($" Spring (MAM): {springMean:F0} cfs"); +Console.WriteLine($" Summer (JJA): {summerMean:F0} cfs"); +Console.WriteLine($" Fall (SON): {fallMean:F0} cfs"); +Console.WriteLine($" Winter (DJF): {winterMean:F0} cfs"); +``` + +## Running Statistics + +For streaming data or very large datasets: + +```cs +using Numerics.Data.Statistics; + +var runningStats = new RunningStatistics(); + +// Add data points incrementally +double[] newData = { 10.5, 12.3, 11.8, 15.2, 13.7 }; + +foreach (var value in newData) +{ + runningStats.Push(value); +} + +Console.WriteLine($"Count: {runningStats.Count}"); +Console.WriteLine($"Mean: {runningStats.Mean:F2}"); +Console.WriteLine($"Variance: {runningStats.Variance:F2}"); +Console.WriteLine($"Std Dev: {runningStats.StandardDeviation:F2}"); +Console.WriteLine($"Skewness: {runningStats.Skewness:F3}"); +Console.WriteLine($"Kurtosis: {runningStats.Kurtosis:F3}"); +Console.WriteLine($"Min: {runningStats.Minimum:F2}"); +Console.WriteLine($"Max: {runningStats.Maximum:F2}"); +``` + +## Best Practices + +1. **Check for NaN and Inf**: All methods return NaN if data contains NaN or is empty +2. **Sort when needed**: Some methods require sorted data - check documentation +3. **Use appropriate sample size**: Small samples (n < 30) need careful interpretation +4. **Consider outliers**: L-moments are more robust than product moments +5. **Understand bias**: Sample statistics (n-1 denominator) vs population (n denominator) +6. **Use running statistics**: For streaming data or memory constraints +7. 
**Vectorize operations**: Use batch methods for multiple percentiles + +--- + +## References + +[1] Hosking, J. R. M. (1990). L-moments: Analysis and estimation of distributions using linear combinations of order statistics. *Journal of the Royal Statistical Society: Series B (Methodological)*, 52(1), 105-124. + +--- + +[← Back to Index](../index.md) | [Next: Goodness-of-Fit →](goodness-of-fit.md) diff --git a/docs/statistics/goodness-of-fit.md b/docs/statistics/goodness-of-fit.md new file mode 100644 index 00000000..bc713d18 --- /dev/null +++ b/docs/statistics/goodness-of-fit.md @@ -0,0 +1,596 @@ +# Goodness-of-Fit Metrics + +[← Previous: Descriptive Statistics](descriptive.md) | [Back to Index](../index.md) | [Next: MCMC Sampling →](../sampling/mcmc.md) + +Goodness-of-fit (GOF) metrics evaluate how well a statistical model fits observed data. The ***Numerics*** library provides comprehensive metrics for model selection, distribution fitting validation, and hydrological model evaluation. + +## Model Selection Criteria + +### Information Criteria + +Information criteria balance model fit with complexity, penalizing additional parameters [[1]](#1): + +```cs +using Numerics.Data.Statistics; +using Numerics.Distributions; + +double[] data = { 12500, 15300, 11200, 18700, 14100, 16800, 13400, 17200 }; + +// Fit a distribution +var gev = new GeneralizedExtremeValue(); +gev.Estimate(data, ParameterEstimationMethod.MethodOfLinearMoments); + +// Compute log-likelihood +double logLikelihood = 0; +foreach (var x in data) +{ + logLikelihood += gev.LogPDF(x); +} + +int n = data.Length; +int k = gev.NumberOfParameters; + +// Akaike Information Criterion +double aic = GoodnessOfFit.AIC(k, logLikelihood); + +// Corrected AIC (for small samples) +double aicc = GoodnessOfFit.AICc(n, k, logLikelihood); + +// Bayesian Information Criterion +double bic = GoodnessOfFit.BIC(n, k, logLikelihood); + +Console.WriteLine($"Model Selection Criteria:"); +Console.WriteLine($" AIC: {aic:F2} (lower is better)"); +Console.WriteLine($" AICc: {aicc:F2} (small sample correction)"); +Console.WriteLine($" BIC: {bic:F2} (stronger penalty for parameters)"); +``` + +**Formulas:** +``` +AIC = 2k - 2·ln(L) +AICc = AIC + 2k(k+1)/(n-k-1) +BIC = k·ln(n) - 2·ln(L) +``` + +Where: +- `k` = number of parameters +- `n` = sample size +- `L` = likelihood + +### Comparing Multiple Models + +```cs +double[] data = { 12500, 15300, 11200, 18700, 14100, 16800, 13400, 17200 }; + +// Fit multiple distributions +var models = new[] +{ + ("GEV", new GeneralizedExtremeValue()), + ("Gumbel", new Gumbel()), + ("LogNormal", new LogNormal()), + ("Normal", new Normal()) +}; + +var results = new List<(string Name, double AIC, double BIC, double LogLik)>(); + +foreach (var (name, dist) in models) +{ + dist.Estimate(data, ParameterEstimationMethod.MethodOfLinearMoments); + + double logLik = data.Sum(x => dist.LogPDF(x)); + double aic = GoodnessOfFit.AIC(dist.NumberOfParameters, logLik); + double bic = GoodnessOfFit.BIC(data.Length, dist.NumberOfParameters, logLik); + + results.Add((name, aic, bic, logLik)); +} + +// Sort by AIC (lower is better) +results.Sort((a, b) => a.AIC.CompareTo(b.AIC)); + +Console.WriteLine("Model Comparison:"); +Console.WriteLine("Model | Params | AIC | BIC | Log-Lik"); +Console.WriteLine("--------------------------------------------------------"); +foreach (var (name, aic, bic, logLik) in results) +{ + var dist = models.First(m => m.Item1 == name).Item2; + Console.WriteLine($"{name,-11} | {dist.NumberOfParameters,6} | {aic,7:F2} | 
{bic,7:F2} | {logLik,7:F2}");
+}
+
+Console.WriteLine($"\nBest model by AIC: {results[0].Name}");
+```
+
+### AIC Weights
+
+Compute relative model probabilities:
+
+```cs
+var aicValues = results.Select(r => r.AIC).ToList();
+double[] aicWeights = GoodnessOfFit.AICWeights(aicValues);
+
+Console.WriteLine("\nModel Weights (relative probabilities):");
+for (int i = 0; i < results.Count; i++)
+{
+    Console.WriteLine($"{results[i].Name,-11}: {aicWeights[i]:P1}");
+}
+```
+
+## Distribution Fit Metrics
+
+### Root Mean Square Error (RMSE)
+
+```cs
+double[] observed = { 12500, 15300, 11200, 18700, 14100, 16800, 13400, 17200 };
+
+var gev = new GeneralizedExtremeValue();
+gev.Estimate(observed, ParameterEstimationMethod.MethodOfLinearMoments);
+
+// RMSE using empirical plotting positions
+double rmse = GoodnessOfFit.RMSE(observed, gev);
+
+Console.WriteLine($"RMSE: {rmse:F2}");
+
+// With custom plotting positions
+var plottingPos = PlottingPositions.Weibull(observed.Length);
+double rmse2 = GoodnessOfFit.RMSE(observed, plottingPos, gev);
+
+// With parameter penalty
+double rmse3 = GoodnessOfFit.RMSE(observed, gev.InverseCDF(plottingPos).ToArray(), k: gev.NumberOfParameters);
+
+Console.WriteLine($"RMSE (Weibull plotting): {rmse2:F2}");
+Console.WriteLine($"RMSE (with penalty): {rmse3:F2}");
+```
+
+### RMSE Weights
+
+For model averaging:
+
+```cs
+var rmseValues = new List<double>();
+
+foreach (var (name, dist) in models)
+{
+    dist.Estimate(observed, ParameterEstimationMethod.MethodOfLinearMoments);
+    rmseValues.Add(GoodnessOfFit.RMSE(observed, dist));
+}
+
+double[] rmseWeights = GoodnessOfFit.RMSEWeights(rmseValues);
+
+Console.WriteLine("RMSE-based Weights:");
+for (int i = 0; i < models.Length; i++)
+{
+    Console.WriteLine($"{models[i].Item1,-11}: {rmseWeights[i]:P1}");
+}
+```
+
+### Mean Square Error (MSE) and Mean Absolute Error (MAE)
+
+```cs
+double[] observed = { 100, 105, 98, 110, 95 };
+double[] modeled = { 102, 104, 99, 108, 96 };
+
+double mse = GoodnessOfFit.MSE(observed, modeled);
+double mae = GoodnessOfFit.MAE(observed, modeled);
+double rmse = Math.Sqrt(mse);
+
+Console.WriteLine($"MSE: {mse:F2}");
+Console.WriteLine($"RMSE: {rmse:F2}");
+Console.WriteLine($"MAE: {mae:F2}");
+```
+
+## Hydrological Model Performance
+
+### Nash-Sutcliffe Efficiency (NSE)
+
+The NSE is widely used in hydrology [[2]](#2):
+
+```cs
+double[] observed = { 125, 135, 180, 220, 250, 280, 260, 230, 190, 150, 130, 120 };
+double[] modeled = { 120, 140, 175, 225, 245, 275, 265, 225, 195, 145, 135, 115 };
+
+double nse = GoodnessOfFit.NashSutcliffeEfficiency(observed, modeled);
+
+Console.WriteLine($"Nash-Sutcliffe Efficiency: {nse:F3}");
+
+// Interpretation
+if (nse >= 0.75)
+    Console.WriteLine("Very good performance");
+else if (nse >= 0.65)
+    Console.WriteLine("Good performance");
+else if (nse >= 0.50)
+    Console.WriteLine("Satisfactory performance");
+else if (nse >= 0.40)
+    Console.WriteLine("Acceptable performance");
+else
+    Console.WriteLine("Unsatisfactory performance");
+```
+
+**Formula:**
+```
+NSE = 1 - Σ(O - M)² / Σ(O - Ō)²
+```
+
+Range: (-∞, 1], where 1 is a perfect fit, 0 means the model is only as good as the observed mean, and values below 0 mean it performs worse than the mean.
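+
+As a quick cross-check of the formula, the sketch below evaluates the definition directly. It is illustrative only: the helper name `NseFromDefinition` is ours, not part of the library, and it should agree with `GoodnessOfFit.NashSutcliffeEfficiency` for the same inputs up to floating-point rounding.
+
+```cs
+using System;
+using System.Linq;
+
+// NSE = 1 - Σ(O - M)² / Σ(O - Ō)², written out straight from the definition.
+double NseFromDefinition(double[] observed, double[] modeled)
+{
+    double meanObs = observed.Average();
+    double ssError = 0.0; // Σ(O - M)²
+    double ssTotal = 0.0; // Σ(O - Ō)²
+    for (int i = 0; i < observed.Length; i++)
+    {
+        ssError += Math.Pow(observed[i] - modeled[i], 2);
+        ssTotal += Math.Pow(observed[i] - meanObs, 2);
+    }
+    return 1.0 - ssError / ssTotal;
+}
+
+// NseFromDefinition(observed, modeled) should reproduce the NSE reported above.
+```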
+ +### Log Nash-Sutcliffe Efficiency + +For better performance on low flows: + +```cs +double logNSE = GoodnessOfFit.LogNashSutcliffeEfficiency(observed, modeled); + +Console.WriteLine($"Log-NSE: {logNSE:F3}"); +Console.WriteLine("Log-NSE emphasizes low flow performance"); +``` + +### Kling-Gupta Efficiency (KGE) + +Decomposes error into correlation, bias, and variability [[3]](#3): + +```cs +double kge = GoodnessOfFit.KlingGuptaEfficiency(observed, modeled); + +Console.WriteLine($"Kling-Gupta Efficiency: {kge:F3}"); + +// Modified KGE (variability ratio based on CV) +double kgeMod = GoodnessOfFit.KlingGuptaEfficiencyMod(observed, modeled); + +Console.WriteLine($"Modified KGE: {kgeMod:F3}"); + +// KGE interpretation +if (kge >= 0.75) + Console.WriteLine("Good model performance"); +else if (kge >= 0.50) + Console.WriteLine("Intermediate performance"); +else + Console.WriteLine("Poor performance"); +``` + +**Formula:** +``` +KGE = 1 - √[(r-1)² + (β-1)² + (γ-1)²] +``` + +Where: +- `r` = correlation coefficient +- `β` = bias ratio (μ_modeled / μ_observed) +- `γ` = variability ratio (CV_modeled / CV_observed) + +### Percent Bias (PBIAS) + +```cs +double pbias = GoodnessOfFit.PBIAS(observed, modeled); + +Console.WriteLine($"Percent Bias: {pbias:F1}%"); + +// Interpretation +if (Math.Abs(pbias) < 10) + Console.WriteLine("Very good (bias < ±10%)"); +else if (Math.Abs(pbias) < 15) + Console.WriteLine("Good (bias < ±15%)"); +else if (Math.Abs(pbias) < 25) + Console.WriteLine("Satisfactory (bias < ±25%)"); +else + Console.WriteLine("Unsatisfactory (bias ≥ ±25%)"); + +if (pbias > 0) + Console.WriteLine("Model underestimates (positive bias)"); +else if (pbias < 0) + Console.WriteLine("Model overestimates (negative bias)"); +``` + +### RMSE-Observations Standard Deviation Ratio (RSR) + +```cs +double rsr = GoodnessOfFit.RSR(observed, modeled); + +Console.WriteLine($"RSR: {rsr:F3}"); + +// Performance ratings +if (rsr <= 0.50) + Console.WriteLine("Very good (RSR ≤ 0.50)"); +else if (rsr <= 0.60) + Console.WriteLine("Good (0.50 < RSR ≤ 0.60)"); +else if (rsr <= 0.70) + Console.WriteLine("Satisfactory (0.60 < RSR ≤ 0.70)"); +else + Console.WriteLine("Unsatisfactory (RSR > 0.70)"); +``` + +### R-Squared (Coefficient of Determination) + +```cs +double r2 = GoodnessOfFit.RSquared(observed, modeled); + +Console.WriteLine($"R²: {r2:F3}"); +Console.WriteLine($"Model explains {r2:P1} of variance"); +``` + +### Index of Agreement + +Willmott's index of agreement: + +```cs +double ioa = GoodnessOfFit.IndexOfAgreement(observed, modeled); +double ioaMod = GoodnessOfFit.ModifiedIndexOfAgreement(observed, modeled); +double ioaRefined = GoodnessOfFit.RefinedIndexOfAgreement(observed, modeled); + +Console.WriteLine($"Index of Agreement: {ioa:F3}"); +Console.WriteLine($"Modified IoA: {ioaMod:F3}"); +Console.WriteLine($"Refined IoA: {ioaRefined:F3}"); + +// Range: [0, 1] where 1 is perfect agreement +``` + +### Volumetric Efficiency + +For water balance assessment: + +```cs +double ve = GoodnessOfFit.VolumetricEfficiency(observed, modeled); + +Console.WriteLine($"Volumetric Efficiency: {ve:F3}"); + +// VE = 1 - |Σ(O - M)| / Σ(O) +// Perfect when VE = 1 (volumes match exactly) +``` + +## Error Metrics + +### Mean Absolute Percentage Error (MAPE) + +```cs +double[] observed = { 100, 105, 98, 110, 95, 102 }; +double[] modeled = { 102, 104, 99, 108, 96, 101 }; + +double mape = GoodnessOfFit.MAPE(observed, modeled); + +Console.WriteLine($"MAPE: {mape:F2}%"); + +// Symmetric MAPE (better for asymmetric errors) +double smape = 
GoodnessOfFit.sMAPE(observed, modeled); + +Console.WriteLine($"sMAPE: {smape:F2}%"); +``` + +## Distribution-Specific Tests + +### Kolmogorov-Smirnov Test + +Tests if data comes from a specified distribution: + +```cs +double[] data = { 12500, 15300, 11200, 18700, 14100, 16800, 13400, 17200 }; + +var gev = new GeneralizedExtremeValue(); +gev.Estimate(data, ParameterEstimationMethod.MethodOfLinearMoments); + +double ksStatistic = GoodnessOfFit.KolmogorovSmirnov(data, gev); + +Console.WriteLine($"Kolmogorov-Smirnov D: {ksStatistic:F4}"); +Console.WriteLine("Smaller D indicates better fit"); + +// Critical value at α=0.05 for n=8: approximately 0.457 +// If D < critical value, fail to reject null hypothesis (good fit) +``` + +### Anderson-Darling Test + +More sensitive to tail deviations: + +```cs +double adStatistic = GoodnessOfFit.AndersonDarling(data, gev); + +Console.WriteLine($"Anderson-Darling A²: {adStatistic:F4}"); +Console.WriteLine("Smaller A² indicates better fit"); +``` + +### Chi-Squared Test + +For discrete or binned data: + +```cs +double chiSq = GoodnessOfFit.ChiSquared(data, gev); + +Console.WriteLine($"Chi-Squared χ²: {chiSq:F4}"); +Console.WriteLine("Compare with χ² critical value from tables"); +``` + +## Classification Metrics + +For binary classification problems: + +```cs +double[] observed = { 1, 0, 1, 1, 0, 1, 0, 0, 1, 1 }; // 1 = positive, 0 = negative +double[] predicted = { 1, 0, 1, 1, 0, 0, 1, 0, 1, 0 }; + +double accuracy = GoodnessOfFit.Accuracy(observed, predicted); +double precision = GoodnessOfFit.Precision(observed, predicted); +double recall = GoodnessOfFit.Recall(observed, predicted); +double f1 = GoodnessOfFit.F1Score(observed, predicted); +double specificity = GoodnessOfFit.Specificity(observed, predicted); +double balancedAcc = GoodnessOfFit.BalancedAccuracy(observed, predicted); + +Console.WriteLine("Classification Metrics:"); +Console.WriteLine($" Accuracy: {accuracy:P1}"); +Console.WriteLine($" Precision: {precision:P1}"); +Console.WriteLine($" Recall (Sensitivity): {recall:P1}"); +Console.WriteLine($" F1 Score: {f1:F3}"); +Console.WriteLine($" Specificity: {specificity:P1}"); +Console.WriteLine($" Balanced Accuracy: {balancedAcc:P1}"); +``` + +## Practical Examples + +### Example 1: Complete Distribution Comparison + +```cs +using Numerics.Data.Statistics; +using Numerics.Distributions; + +double[] annualPeaks = { 12500, 15300, 11200, 18700, 14100, 16800, 13400, 17200, 10500, 19300 }; + +// Candidate distributions +var candidates = new (string Name, IUnivariateDistribution Dist)[] +{ + ("LP3", new LogPearsonTypeIII()), + ("GEV", new GeneralizedExtremeValue()), + ("Gumbel", new Gumbel()), + ("LogNormal", new LogNormal()) +}; + +Console.WriteLine("Distribution Comparison for Annual Peak Flows"); +Console.WriteLine("=" + new string('=', 70)); + +var results = new List<(string Name, double AIC, double BIC, double RMSE, double KS, double AD)>(); + +foreach (var (name, dist) in candidates) +{ + // Fit distribution + dist.Estimate(annualPeaks, ParameterEstimationMethod.MethodOfLinearMoments); + + // Compute metrics + double logLik = annualPeaks.Sum(x => dist.LogPDF(x)); + double aic = GoodnessOfFit.AIC(dist.NumberOfParameters, logLik); + double bic = GoodnessOfFit.BIC(annualPeaks.Length, dist.NumberOfParameters, logLik); + double rmse = GoodnessOfFit.RMSE(annualPeaks, dist); + double ks = GoodnessOfFit.KolmogorovSmirnov(annualPeaks, dist); + double ad = GoodnessOfFit.AndersonDarling(annualPeaks, dist); + + results.Add((name, aic, bic, rmse, ks, ad)); +} + 
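+// At this point every candidate has been fitted and scored. For each metric
+// collected here, smaller values indicate a better fit: AIC and BIC penalize
+// extra parameters, RMSE measures the error in the fitted quantiles, and the
+// K-S and A-D statistics measure the distance between the empirical and fitted
+// CDFs (A-D gives more weight to the tails).
+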
+// Display results +Console.WriteLine("\nDistribution | Params | AIC | BIC | RMSE | K-S | A-D"); +Console.WriteLine("-----------------------------------------------------------------------"); + +foreach (var r in results) +{ + var dist = candidates.First(c => c.Name == r.Name).Dist; + Console.WriteLine($"{r.Name,-12} | {dist.NumberOfParameters,6} | {r.AIC,7:F1} | {r.BIC,7:F1} | " + + $"{r.RMSE,6:F0} | {r.KS,6:F4} | {r.AD,6:F4}"); +} + +// Recommendations +var bestAIC = results.OrderBy(r => r.AIC).First(); +var bestBIC = results.OrderBy(r => r.BIC).First(); +var bestRMSE = results.OrderBy(r => r.RMSE).First(); + +Console.WriteLine($"\nBest by AIC: {bestAIC.Name}"); +Console.WriteLine($"Best by BIC: {bestBIC.Name}"); +Console.WriteLine($"Best by RMSE: {bestRMSE.Name}"); +``` + +### Example 2: Hydrological Model Evaluation + +```cs +double[] observedFlow = { 125, 135, 180, 220, 250, 280, 260, 230, 190, 150, 130, 120 }; +double[] modeledFlow = { 120, 140, 175, 225, 245, 275, 265, 225, 195, 145, 135, 115 }; + +Console.WriteLine("Hydrological Model Performance Evaluation"); +Console.WriteLine("=" + new string('=', 50)); + +// Compute all metrics +double nse = GoodnessOfFit.NashSutcliffeEfficiency(observedFlow, modeledFlow); +double logNSE = GoodnessOfFit.LogNashSutcliffeEfficiency(observedFlow, modeledFlow); +double kge = GoodnessOfFit.KlingGuptaEfficiency(observedFlow, modeledFlow); +double pbias = GoodnessOfFit.PBIAS(observedFlow, modeledFlow); +double rsr = GoodnessOfFit.RSR(observedFlow, modeledFlow); +double r2 = GoodnessOfFit.RSquared(observedFlow, modeledFlow); +double rmse = GoodnessOfFit.RMSE(observedFlow, modeledFlow); +double mae = GoodnessOfFit.MAE(observedFlow, modeledFlow); + +Console.WriteLine("\nPerformance Metrics:"); +Console.WriteLine($" NSE: {nse,6:F3} {GetNSERating(nse)}"); +Console.WriteLine($" Log-NSE: {logNSE,6:F3}"); +Console.WriteLine($" KGE: {kge,6:F3} {GetKGERating(kge)}"); +Console.WriteLine($" PBIAS: {pbias,6:F1}% {GetPBIASRating(pbias)}"); +Console.WriteLine($" RSR: {rsr,6:F3} {GetRSRRating(rsr)}"); +Console.WriteLine($" R²: {r2,6:F3}"); +Console.WriteLine($" RMSE: {rmse,6:F1} cfs"); +Console.WriteLine($" MAE: {mae,6:F1} cfs"); + +// Helper functions for ratings +string GetNSERating(double nse) +{ + if (nse >= 0.75) return "(Very Good)"; + if (nse >= 0.65) return "(Good)"; + if (nse >= 0.50) return "(Satisfactory)"; + if (nse >= 0.40) return "(Acceptable)"; + return "(Unsatisfactory)"; +} + +string GetKGERating(double kge) +{ + if (kge >= 0.75) return "(Good)"; + if (kge >= 0.50) return "(Intermediate)"; + return "(Poor)"; +} + +string GetPBIASRating(double pbias) +{ + var abs = Math.Abs(pbias); + if (abs < 10) return "(Very Good)"; + if (abs < 15) return "(Good)"; + if (abs < 25) return "(Satisfactory)"; + return "(Unsatisfactory)"; +} + +string GetRSRRating(double rsr) +{ + if (rsr <= 0.50) return "(Very Good)"; + if (rsr <= 0.60) return "(Good)"; + if (rsr <= 0.70) return "(Satisfactory)"; + return "(Unsatisfactory)"; +} +``` + +### Example 3: Time Series Model Selection + +```cs +double[] observed = { 100, 105, 98, 110, 95, 102, 108, 97, 103, 106 }; +double[] model1 = { 102, 104, 99, 108, 96, 101, 107, 98, 102, 105 }; +double[] model2 = { 101, 106, 97, 111, 94, 103, 109, 96, 104, 107 }; +double[] model3 = { 100, 105, 98, 110, 95, 102, 108, 97, 103, 106 }; + +var models = new[] { ("Model 1", model1), ("Model 2", model2), ("Model 3", model3) }; + +Console.WriteLine("Time Series Model Comparison"); +Console.WriteLine("=" + new string('=', 60)); 
+Console.WriteLine("Model | RMSE | MAE | MAPE | NSE | R²"); +Console.WriteLine("-------------------------------------------------------"); + +foreach (var (name, modeled) in models) +{ + double rmse = Math.Sqrt(GoodnessOfFit.MSE(observed, modeled)); + double mae = GoodnessOfFit.MAE(observed, modeled); + double mape = GoodnessOfFit.MAPE(observed, modeled); + double nse = GoodnessOfFit.NashSutcliffeEfficiency(observed, modeled); + double r2 = GoodnessOfFit.RSquared(observed, modeled); + + Console.WriteLine($"{name,-7} | {rmse,5:F2} | {mae,5:F2} | {mape,5:F2} | {nse,5:F3} | {r2,5:F3}"); +} +``` + +## Best Practices + +1. **Use multiple metrics** - No single metric captures all aspects of fit +2. **Information criteria** - Use AIC/BIC for model selection with different parameters +3. **Hydrological standards** - Follow Moriasi et al. [[2]](#2) criteria for hydrology +4. **Distribution tests** - Use K-S, A-D, or χ² for distribution validation +5. **Sample size** - Information criteria more reliable with n > 30 +6. **Outliers** - Consider robust metrics like MAE instead of RMSE +7. **Context matters** - Different applications prioritize different metrics + +--- + +## References + +[1] Akaike, H. (1974). A new look at the statistical model identification. *IEEE Transactions on Automatic Control*, 19(6), 716-723. + +[2] Moriasi, D. N., Arnold, J. G., Van Liew, M. W., Bingner, R. L., Harmel, R. D., & Veith, T. L. (2007). Model evaluation guidelines for systematic quantification of accuracy in watershed simulations. *Transactions of the ASABE*, 50(3), 885-900. + +[3] Gupta, H. V., Kling, H., Yilmaz, K. K., & Martinez, G. F. (2009). Decomposition of the mean squared error and NSE performance criteria: Implications for improving hydrological modelling. *Journal of Hydrology*, 377(1-2), 80-91. + +--- + +[← Previous: Descriptive Statistics](descriptive.md) | [Back to Index](../index.md) | [Next: MCMC Sampling →](../sampling/mcmc.md) diff --git a/docs/statistics/hypothesis-tests.md b/docs/statistics/hypothesis-tests.md new file mode 100644 index 00000000..bbe25622 --- /dev/null +++ b/docs/statistics/hypothesis-tests.md @@ -0,0 +1,454 @@ +# Hypothesis Tests + +[← Previous: Goodness-of-Fit](goodness-of-fit.md) | [Back to Index](../index.md) | [Next: Convergence Diagnostics →](../sampling/convergence-diagnostics.md) + +The ***Numerics*** library provides statistical hypothesis tests for comparing samples, testing distributions, and detecting trends. These tests are essential for data analysis, model validation, and quality control. 
+ +## t-Tests + +### One-Sample t-Test + +Test if sample mean differs from hypothesized value: + +```cs +using Numerics.Data.Statistics; + +double[] sample = { 12.5, 13.2, 11.8, 14.1, 12.9, 13.5, 12.2, 13.8 }; +double mu0 = 12.0; // Hypothesized mean + +// Compute t-statistic +double t = HypothesisTests.OneSampleTtest(sample, mu0); + +Console.WriteLine($"One-sample t-test:"); +Console.WriteLine($" t-statistic: {t:F3}"); +Console.WriteLine($" Sample mean: {Statistics.Mean(sample):F2}"); +Console.WriteLine($" Hypothesized mean: {mu0:F2}"); + +// Compare with critical value +int df = sample.Length - 1; +Console.WriteLine($" Degrees of freedom: {df}"); +Console.WriteLine(" If |t| > t_critical (e.g., 2.365 for α=0.05, df=7), reject H₀"); +``` + +**Hypotheses:** +- H₀: μ = μ₀ +- H₁: μ ≠ μ₀ (two-tailed) + +### Two-Sample t-Tests + +#### Equal Variance (Pooled) t-Test + +```cs +double[] sample1 = { 12.5, 13.2, 11.8, 14.1, 12.9 }; +double[] sample2 = { 15.3, 14.8, 15.9, 14.5, 15.1 }; + +// Test if means are equal (assuming equal variances) +double t = HypothesisTests.EqualVarianceTtest(sample1, sample2); + +Console.WriteLine($"Equal variance t-test:"); +Console.WriteLine($" t-statistic: {t:F3}"); +Console.WriteLine($" Sample 1 mean: {Statistics.Mean(sample1):F2}"); +Console.WriteLine($" Sample 2 mean: {Statistics.Mean(sample2):F2}"); + +int df = sample1.Length + sample2.Length - 2; +Console.WriteLine($" Degrees of freedom: {df}"); +``` + +**Hypotheses:** +- H₀: μ₁ = μ₂ +- H₁: μ₁ ≠ μ₂ + +#### Unequal Variance (Welch's) t-Test + +```cs +// Test if means are equal (not assuming equal variances) +double t_welch = HypothesisTests.UnequalVarianceTtest(sample1, sample2); + +Console.WriteLine($"\nWelch's t-test:"); +Console.WriteLine($" t-statistic: {t_welch:F3}"); +Console.WriteLine(" Use when variances appear different"); +``` + +#### Paired t-Test + +For before/after or matched pairs: + +```cs +double[] before = { 120, 135, 118, 142, 128 }; +double[] after = { 115, 130, 112, 138, 125 }; + +// Test if treatment had effect +double t_paired = HypothesisTests.PairedTtest(before, after); + +Console.WriteLine($"Paired t-test:"); +Console.WriteLine($" t-statistic: {t_paired:F3}"); +Console.WriteLine($" Mean difference: {Statistics.Mean(before) - Statistics.Mean(after):F2}"); + +// Differences +double[] diffs = new double[before.Length]; +for (int i = 0; i < before.Length; i++) + diffs[i] = before[i] - after[i]; + +Console.WriteLine($" SE of difference: {Statistics.StandardDeviation(diffs) / Math.Sqrt(diffs.Length):F2}"); +``` + +**Hypotheses:** +- H₀: μ_diff = 0 +- H₁: μ_diff ≠ 0 + +## F-Test + +Test equality of variances: + +```cs +double[] sample1 = { 10, 12, 14, 16, 18 }; +double[] sample2 = { 11, 13, 15, 17, 19, 21, 23 }; + +// Test if variances are equal +double f = HypothesisTests.Ftest(sample1, sample2); + +Console.WriteLine($"F-test for equal variances:"); +Console.WriteLine($" F-statistic: {f:F3}"); +Console.WriteLine($" Variance 1: {Statistics.Variance(sample1):F2}"); +Console.WriteLine($" Variance 2: {Statistics.Variance(sample2):F2}"); +Console.WriteLine($" df1 = {sample1.Length - 1}, df2 = {sample2.Length - 1}"); +``` + +**Hypotheses:** +- H₀: σ₁² = σ₂² +- H₁: σ₁² ≠ σ₂² + +### F-Test for Nested Models + +Compare restricted and full models: + +```cs +// Example: Testing if additional predictors improve model +double sseRestricted = 150.0; // SSE of restricted model +double sseFull = 120.0; // SSE of full model +int dfRestricted = 47; // n - k_restricted - 1 +int dfFull = 45; // n - k_full - 1 + 
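+// For reference, the standard nested-model F-statistic is
+//   F = ((SSE_restricted - SSE_full) / (df_restricted - df_full)) / (SSE_full / df_full),
+// with (df_restricted - df_full, df_full) degrees of freedom. For the numbers
+// above this gives ((150 - 120) / 2) / (120 / 45) ≈ 5.63 on (2, 45) df.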
+HypothesisTests.FtestModels(sseRestricted, sseFull, dfRestricted, dfFull, + out double fStat, out double pValue); + +Console.WriteLine($"F-test for model comparison:"); +Console.WriteLine($" F-statistic: {fStat:F3}"); +Console.WriteLine($" p-value: {pValue:F4}"); + +if (pValue < 0.05) + Console.WriteLine(" Reject H₀: Full model is significantly better"); +else + Console.WriteLine(" Fail to reject H₀: Models not significantly different"); +``` + +## Normality Tests + +### Jarque-Bera Test + +Tests if data follows normal distribution using skewness and kurtosis: + +```cs +double[] data = { 10.5, 12.3, 11.8, 15.2, 13.7, 14.1, 16.8, 12.9, 11.2, 14.5 }; + +// Test for normality +double jb = HypothesisTests.JarqueBeraTest(data); + +Console.WriteLine($"Jarque-Bera normality test:"); +Console.WriteLine($" JB statistic: {jb:F3}"); +Console.WriteLine($" Skewness: {Statistics.Skewness(data):F3}"); +Console.WriteLine($" Kurtosis: {Statistics.Kurtosis(data):F3}"); +Console.WriteLine(" Critical value (α=0.05): 5.99 (χ² with df=2)"); + +if (jb < 5.99) + Console.WriteLine(" Fail to reject H₀: Data appears normally distributed"); +else + Console.WriteLine(" Reject H₀: Data not normally distributed"); +``` + +**Hypotheses:** +- H₀: Data is normally distributed +- H₁: Data is not normal + +## Randomness Tests + +### Wald-Wolfowitz Runs Test + +Tests if sequence is random: + +```cs +double[] sequence = { 1, 0, 1, 1, 0, 1, 0, 0, 1, 1, 1, 0 }; + +double z = HypothesisTests.WaldWolfowitzTest(sequence); + +Console.WriteLine($"Wald-Wolfowitz runs test:"); +Console.WriteLine($" z-statistic: {z:F3}"); + +if (Math.Abs(z) > 1.96) + Console.WriteLine(" Reject H₀: Sequence is not random"); +else + Console.WriteLine(" Fail to reject H₀: Sequence appears random"); +``` + +### Ljung-Box Test + +Tests for autocorrelation in time series: + +```cs +double[] timeSeries = { 12.5, 13.2, 11.8, 14.1, 12.9, 13.5, 12.2, 13.8, 14.5, 13.1 }; +int lagMax = 5; // Test lags 1 through 5 + +double q = HypothesisTests.LjungBoxTest(timeSeries, lagMax); + +Console.WriteLine($"Ljung-Box test for autocorrelation:"); +Console.WriteLine($" Q-statistic: {q:F3}"); +Console.WriteLine($" Lags tested: {lagMax}"); +Console.WriteLine($" Critical value (α=0.05, df={lagMax}): ~11.07"); + +if (q > 11.07) + Console.WriteLine(" Reject H₀: Significant autocorrelation present"); +else + Console.WriteLine(" Fail to reject H₀: No significant autocorrelation"); +``` + +**Hypotheses:** +- H₀: No autocorrelation up to lag k +- H₁: Some autocorrelation exists + +## Non-Parametric Tests + +### Mann-Whitney U Test + +Non-parametric alternative to two-sample t-test: + +```cs +double[] group1 = { 12, 15, 18, 21, 24 }; +double[] group2 = { 10, 13, 16, 19, 22, 25 }; + +double u = HypothesisTests.MannWhitneyTest(group1, group2); + +Console.WriteLine($"Mann-Whitney U test:"); +Console.WriteLine($" U-statistic: {u:F3}"); +Console.WriteLine(" Tests if distributions are different (rank-based)"); +Console.WriteLine(" No assumption of normality required"); +``` + +**Hypotheses:** +- H₀: Distributions are equal +- H₁: Distributions differ + +## Trend Tests + +### Mann-Kendall Trend Test + +Detects monotonic trends in time series: + +```cs +double[] timeSeries = { 10, 12, 11, 15, 14, 18, 17, 21, 20, 24 }; + +double s = HypothesisTests.MannKendallTest(timeSeries); + +Console.WriteLine($"Mann-Kendall trend test:"); +Console.WriteLine($" S-statistic: {s:F3}"); + +if (s > 1.96) + Console.WriteLine(" Significant increasing trend detected"); +else if (s < -1.96) + Console.WriteLine(" 
Significant decreasing trend detected"); +else + Console.WriteLine(" No significant trend"); +``` + +**Hypotheses:** +- H₀: No monotonic trend +- H₁: Monotonic trend exists + +### Linear Trend Test + +Tests for linear relationship: + +```cs +double[] time = { 1, 2, 3, 4, 5, 6, 7, 8, 9, 10 }; +double[] values = { 10.5, 11.2, 12.8, 13.1, 14.5, 15.2, 16.1, 16.8, 17.5, 18.2 }; + +double t = HypothesisTests.LinearTrendTest(time, values); + +Console.WriteLine($"Linear trend test:"); +Console.WriteLine($" t-statistic: {t:F3}"); +Console.WriteLine(" Tests if slope is significantly different from zero"); + +if (Math.Abs(t) > 2.306) // Critical value for df=8, α=0.05 + Console.WriteLine(" Significant linear trend detected"); +else + Console.WriteLine(" No significant linear trend"); +``` + +## Unimodality Test + +Tests if distribution has single peak: + +```cs +double[] data = { 10, 11, 12, 13, 14, 15, 14, 13, 12, 11, 10 }; + +double u = HypothesisTests.UnimodalityTest(data); + +Console.WriteLine($"Unimodality test:"); +Console.WriteLine($" Test statistic: {u:F3}"); + +if (u < -1.96) + Console.WriteLine(" Reject H₀: Data is multimodal"); +else + Console.WriteLine(" Fail to reject H₀: Data appears unimodal"); +``` + +## Practical Examples + +### Example 1: Comparing Treatment Groups + +```cs +using Numerics.Data.Statistics; + +// Control and treatment groups +double[] control = { 120, 135, 118, 142, 128, 133, 125, 138 }; +double[] treatment = { 115, 125, 110, 130, 120, 128, 118, 130 }; + +Console.WriteLine("Treatment Comparison Study"); +Console.WriteLine("=" + new string('=', 50)); + +// Descriptive statistics +Console.WriteLine($"\nControl: mean={Statistics.Mean(control):F1}, " + + $"SD={Statistics.StandardDeviation(control):F1}"); +Console.WriteLine($"Treatment: mean={Statistics.Mean(treatment):F1}, " + + $"SD={Statistics.StandardDeviation(treatment):F1}"); + +// Test for equal variances +double f = HypothesisTests.Ftest(control, treatment); +Console.WriteLine($"\nF-test for equal variances: F={f:F3}"); + +// Choose appropriate t-test +double t; +if (f < 4.0) // Approximate F-critical value +{ + t = HypothesisTests.EqualVarianceTtest(control, treatment); + Console.WriteLine($"Equal variance t-test: t={t:F3}"); +} +else +{ + t = HypothesisTests.UnequalVarianceTtest(control, treatment); + Console.WriteLine($"Unequal variance t-test: t={t:F3}"); +} + +if (Math.Abs(t) > 2.145) // Approximate critical value + Console.WriteLine("Conclusion: Significant difference detected (p < 0.05)"); +else + Console.WriteLine("Conclusion: No significant difference (p ≥ 0.05)"); +``` + +### Example 2: Time Series Analysis + +```cs +double[] monthlyData = { 125, 130, 135, 132, 138, 145, 142, 148, 155, 152, 158, 165 }; + +Console.WriteLine("Time Series Analysis"); +Console.WriteLine("=" + new string('=', 50)); + +// Test for trend +double[] months = Enumerable.Range(1, monthlyData.Length).Select(i => (double)i).ToArray(); +double tTrend = HypothesisTests.LinearTrendTest(months, monthlyData); + +Console.WriteLine($"\nLinear trend test: t={tTrend:F3}"); +if (Math.Abs(tTrend) > 2.228) // Critical value for df=10 + Console.WriteLine("Significant trend detected"); + +// Test for autocorrelation +double q = HypothesisTests.LjungBoxTest(monthlyData, lagMax: 3); + +Console.WriteLine($"\nLjung-Box test (lag 3): Q={q:F3}"); +if (q > 7.815) // Chi-squared critical value + Console.WriteLine("Significant autocorrelation detected"); +else + Console.WriteLine("No significant autocorrelation"); + +// Mann-Kendall for monotonic trend 
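+// Unlike the linear-trend t-test above, Mann-Kendall is rank-based and does not
+// assume linearity or normally distributed residuals; as in the trend-test section
+// earlier on this page, the statistic is compared against the standard normal
+// critical value (±1.96 at α = 0.05).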
+double s = HypothesisTests.MannKendallTest(monthlyData); + +Console.WriteLine($"\nMann-Kendall test: S={s:F3}"); +if (s > 1.96) + Console.WriteLine("Significant increasing trend (non-parametric)"); +``` + +### Example 3: Quality Control + +```cs +// Historical process mean +double mu0 = 50.0; + +// New sample +double[] newSample = { 51.2, 52.1, 49.8, 51.5, 50.9, 52.3, 51.0, 50.5 }; + +Console.WriteLine("Quality Control Check"); +Console.WriteLine("=" + new string('=', 50)); + +// One-sample t-test +double t = HypothesisTests.OneSampleTtest(newSample, mu0); + +Console.WriteLine($"\nHistorical mean: {mu0:F1}"); +Console.WriteLine($"Current sample mean: {Statistics.Mean(newSample):F2}"); +Console.WriteLine($"t-statistic: {t:F3}"); +Console.WriteLine($"Critical value (two-tailed, α=0.05): ±2.365"); + +if (Math.Abs(t) > 2.365) +{ + Console.WriteLine("\nProcess has shifted significantly!"); + Console.WriteLine("Action: Investigate and adjust process"); +} +else +{ + Console.WriteLine("\nProcess remains in control"); +} +``` + +## Interpreting Results + +### p-values +- p < 0.01: Very strong evidence against H₀ +- p < 0.05: Strong evidence against H₀ +- p < 0.10: Weak evidence against H₀ +- p ≥ 0.10: Insufficient evidence to reject H₀ + +### Effect Size +Statistical significance ≠ practical significance. Consider: +- Cohen's d for t-tests: (mean difference) / pooled SD +- Small: d = 0.2, Medium: d = 0.5, Large: d = 0.8 + +### Power +- Probability of detecting true effect +- Influenced by sample size, effect size, α level +- Power ≥ 0.80 typically desired + +## Best Practices + +1. **Check assumptions** before parametric tests (normality, equal variance) +2. **Use non-parametric tests** when assumptions violated +3. **Report effect sizes** along with p-values +4. **Consider multiple testing** corrections if doing many tests +5. **Visualize data** before and after testing +6. **Understand context** - statistical vs practical significance + +## Test Selection Guide + +| Question | Test | +|----------|------| +| One sample mean vs. value | One-sample t-test | +| Two independent means | Two-sample t-test (or Mann-Whitney) | +| Two paired measurements | Paired t-test | +| Two variances | F-test | +| Normality | Jarque-Bera | +| Trend existence | Mann-Kendall | +| Linear relationship | Linear trend test | +| Autocorrelation | Ljung-Box | +| Randomness | Wald-Wolfowitz | + +--- + +[← Previous: Goodness-of-Fit](goodness-of-fit.md) | [Back to Index](../index.md) | [Next: Convergence Diagnostics →](../sampling/convergence-diagnostics.md) diff --git a/global.json b/global.json new file mode 100644 index 00000000..512142d2 --- /dev/null +++ b/global.json @@ -0,0 +1,6 @@ +{ + "sdk": { + "version": "10.0.100", + "rollForward": "latestFeature" + } +} From f6e6c5f10504120b3a138d1423525c7805e72276 Mon Sep 17 00:00:00 2001 From: Tiki Gonzalez Date: Tue, 30 Dec 2025 17:16:56 -0700 Subject: [PATCH 3/3] time series download fixes --- .../Time Series/Support/TimeSeriesDownload.cs | 32 +++++++++++-------- 1 file changed, 19 insertions(+), 13 deletions(-) diff --git a/Numerics/Data/Time Series/Support/TimeSeriesDownload.cs b/Numerics/Data/Time Series/Support/TimeSeriesDownload.cs index 223b60cf..dff08c99 100644 --- a/Numerics/Data/Time Series/Support/TimeSeriesDownload.cs +++ b/Numerics/Data/Time Series/Support/TimeSeriesDownload.cs @@ -173,11 +173,7 @@ public enum HeightUnit /// A downloaded time series. 
public static async Task FromGHCN(string siteNumber, TimeSeriesType timeSeriesType = TimeSeriesType.DailyPrecipitation, DepthUnit unit = DepthUnit.Inches) { - // Check internet connection - if (!await IsConnectedToInternet()) - { - throw new InvalidOperationException("No internet connection."); - } + // Check site number if (siteNumber.Length != 11) @@ -195,6 +191,12 @@ public static async Task FromGHCN(string siteNumber, TimeSeriesType DateTime? previousDate = null; string tempFilePath = Path.Combine(Path.GetTempPath(), $"{siteNumber}.dly"); + // Check internet connection + if (!await IsConnectedToInternet()) + { + throw new InvalidOperationException("No internet connection."); + } + try { @@ -353,11 +355,7 @@ private static string CreateURLForUSGSDownload(string siteNumber, TimeSeriesType /// The time series type. public static async Task<(TimeSeries TimeSeries, string RawText)> FromUSGS(string siteNumber, TimeSeriesType timeSeriesType = TimeSeriesType.DailyDischarge) { - // Check internet connection - if (!await IsConnectedToInternet()) - { - throw new InvalidOperationException("No internet connection."); - } + // Check site number if (siteNumber.Length != 8) @@ -371,6 +369,12 @@ private static string CreateURLForUSGSDownload(string siteNumber, TimeSeriesType throw new ArgumentException("The time series type cannot be daily precipitation or daily snow.", nameof(timeSeriesType)); } + // Check internet connection + if (!await IsConnectedToInternet()) + { + throw new InvalidOperationException("No internet connection."); + } + var timeSeries = new TimeSeries(); string textDownload = ""; @@ -742,9 +746,7 @@ public static async Task FromABOM( DateTime? startDate = null, DateTime? endDate = null) { - // Check connectivity - if (!await IsConnectedToInternet()) - throw new InvalidOperationException("No internet connection."); + // Validate station number (BOM station numbers are typically 6 digits) if (string.IsNullOrWhiteSpace(stationNumber) || stationNumber.Length < 6) @@ -754,6 +756,10 @@ public static async Task FromABOM( if (timeSeriesType != TimeSeriesType.DailyDischarge && timeSeriesType != TimeSeriesType.DailyStage) throw new ArgumentException("BOM API supports DailyDischarge or DailyStage only.", nameof(timeSeriesType)); + // Check connectivity + if (!await IsConnectedToInternet()) + throw new InvalidOperationException("No internet connection."); + // Set default dates DateTime sd = startDate ?? new DateTime(1800, 1, 1); DateTime ed = endDate ?? DateTime.Today;