Statistics Library Reference
Includes all of the Statistical Accumulators Library
extractor< tag::count > const
accumulator_basestd::size_t
voiddont_care
result_typedont_care
void
Archive &const unsigned int
dont_care
boost::accumulators::depends_on<>
tag::covariance< VariateTypeVariateTag >tag::weighted_covariance< VariateType, VariateTag >
tag::covariance< VariateTypeVariateTag >boost::accumulators::feature_of< tag::abstract_covariance >
tag::weighted_covariance< VariateTypeVariateTag >boost::accumulators::feature_of< tag::covariance< VariateType, VariateTag > >
extractor< tag::abstract_covariance > const
boost::accumulators::depends_on<>
typename tag<Left>::type
typename tag<Right>::type
boost::numeric::functional::outer_product_base< Left, Right, void >
LeftRightstd_vector_tagstd_vector_tagLeft
Right
ublas::matrix< typename functional::multiplies< typename Left::value_type, typename Right::value_type >::result_type >
result_typeLeft &Right &
void
functional::multiplies< Left, Right >
tag::densitytag::weighted_density
tag::weighted_densityboost::accumulators::feature_of< tag::density >
extractor< tag::density > const
boost::accumulators::depends_on< count, min, max >density_cache_sizedensity_num_binsboost::parameter::keyword< density_cache_size > consttag::density::cache_size named parameter tag::density::num_bins named parameter
boost::parameter::keyword< density_num_bins > const
tag::error_of< Feature >tag::error_of< typename as_feature< Feature >::type >
tag::error_of< Feature >tag::error_of< typename as_weighted_feature< Feature >::type >
immediate_meanboost::accumulators::depends_on< variance, count >
meanboost::accumulators::depends_on< lazy_variance, count >
tag::extended_p_squaretag::weighted_extended_p_square
tag::weighted_extended_p_squareboost::accumulators::feature_of< tag::extended_p_square >
extractor< tag::extended_p_square > const
boost::accumulators::depends_on< count >extended_p_square_probabilitiesaccumulators::impl::extended_p_square_impl< mpl::_1 >
boost::parameter::keyword< tag::probabilities > consttag::extended_p_square::probabilities named parameter
tag::extended_p_square_quantile(linear)tag::extended_p_square_quantile
tag::extended_p_square_quantile(quadratic)tag::extended_p_square_quantile_quadratic
tag::weighted_extended_p_square_quantile(linear)tag::weighted_extended_p_square_quantile
tag::weighted_extended_p_square_quantile(quadratic)tag::weighted_extended_p_square_quantile_quadratic
tag::extended_p_square_quantiletag::weighted_extended_p_square_quantile
tag::extended_p_square_quantile_quadratictag::weighted_extended_p_square_quantile_quadratic
tag::extended_p_square_quantileboost::accumulators::feature_of< tag::quantile >
tag::extended_p_square_quantile_quadraticboost::accumulators::feature_of< tag::quantile >
tag::weighted_extended_p_square_quantileboost::accumulators::feature_of< tag::extended_p_square_quantile >
tag::weighted_extended_p_square_quantile_quadraticboost::accumulators::feature_of< tag::extended_p_square_quantile_quadratic >
extractor< tag::extended_p_square_quantile > const
extractor< tag::extended_p_square_quantile_quadratic > const
extractor< tag::weighted_extended_p_square_quantile > const
extractor< tag::weighted_extended_p_square_quantile_quadratic > const
boost::accumulators::depends_on< extended_p_square >accumulators::impl::extended_p_square_quantile_impl< mpl::_1, unweighted, linear >
boost::accumulators::depends_on< extended_p_square >accumulators::impl::extended_p_square_quantile_impl< mpl::_1, unweighted, quadratic >
boost::accumulators::depends_on< weighted_extended_p_square >accumulators::impl::extended_p_square_quantile_impl< mpl::_1, weighted, linear >
boost::accumulators::depends_on< weighted_extended_p_square >accumulators::impl::extended_p_square_quantile_impl< mpl::_1, weighted, quadratic >
tag::kurtosistag::weighted_kurtosis
tag::weighted_kurtosisboost::accumulators::feature_of< tag::kurtosis >
extractor< tag::kurtosis > const
boost::accumulators::depends_on< mean, moment< 2 >, moment< 3 >, moment< 4 > >
extractor< tag::max > const
boost::accumulators::depends_on<>
tag::mean(immediate)tag::immediate_mean
tag::mean(lazy)tag::mean
tag::mean_of_variates< VariateTypeVariateTag >(immediate)tag::immediate_mean_of_variates< VariateType, VariateTag >
tag::mean_of_variates< VariateTypeVariateTag >(lazy)tag::mean_of_variates< VariateType, VariateTag >
tag::mean_of_weights(immediate)tag::immediate_mean_of_weights
tag::mean_of_weights(lazy)tag::mean_of_weights
tag::immediate_meantag::immediate_weighted_mean
tag::immediate_mean_of_variates< VariateTypeVariateTag >tag::immediate_weighted_mean_of_variates< VariateType, VariateTag >
tag::meantag::weighted_mean
tag::mean_of_variates< VariateTypeVariateTag >tag::weighted_mean_of_variates< VariateType, VariateTag >
tag::immediate_meanboost::accumulators::feature_of< tag::mean >
tag::immediate_mean_of_variates< VariateTypeVariateTag >boost::accumulators::feature_of< tag::mean_of_variates< VariateType, VariateTag > >
tag::immediate_mean_of_weightsboost::accumulators::feature_of< tag::mean_of_weights >
tag::immediate_weighted_meanboost::accumulators::feature_of< tag::immediate_mean >
tag::immediate_weighted_mean_of_variates< VariateTypeVariateTag >boost::accumulators::feature_of< tag::immediate_mean_of_variates< VariateType, VariateTag > >
tag::weighted_meanboost::accumulators::feature_of< tag::mean >
tag::weighted_mean_of_variates< VariateTypeVariateTag >boost::accumulators::feature_of< tag::mean_of_variates< VariateType, VariateTag > >
extractor< tag::mean > const
extractor< tag::mean_of_weights > const
boost::accumulators::depends_on< count >boost::accumulators::depends_on< count >mpl::true_
boost::accumulators::depends_on< count, sum >boost::accumulators::depends_on< count, sum_of_weights >mpl::true_
tag::median(with_density)tag::with_density_median
tag::median(with_p_square_cumulative_distribution)tag::with_p_square_cumulative_distribution_median
tag::median(with_p_square_quantile)tag::median
tag::mediantag::weighted_median
tag::with_density_mediantag::with_density_weighted_median
tag::with_p_square_cumulative_distribution_mediantag::with_p_square_cumulative_distribution_weighted_median
tag::weighted_medianboost::accumulators::feature_of< tag::median >
tag::with_density_medianboost::accumulators::feature_of< tag::median >
tag::with_density_weighted_medianboost::accumulators::feature_of< tag::with_density_median >
tag::with_p_square_cumulative_distribution_medianboost::accumulators::feature_of< tag::median >
tag::with_p_square_cumulative_distribution_weighted_medianboost::accumulators::feature_of< tag::with_p_square_cumulative_distribution_median >
extractor< tag::median > const
extractor< tag::with_density_median > const
extractor< tag::with_p_square_cumulative_distribution_median > const
boost::accumulators::depends_on< p_square_quantile_for_median >boost::accumulators::depends_on< count, density >boost::accumulators::depends_on< p_square_cumulative_distribution >
extractor< tag::min > const
boost::accumulators::depends_on<>
int
tag::moment< N >tag::weighted_moment< N >
int
tag::weighted_moment< N >boost::accumulators::feature_of< tag::moment< N > >
tag::p_square_cumulative_distributiontag::weighted_p_square_cumulative_distribution
tag::weighted_p_square_cumulative_distributionboost::accumulators::feature_of< tag::p_square_cumulative_distribution >
extractor< tag::p_square_cumulative_distribution > const
boost::accumulators::depends_on< count >p_square_cumulative_distribution_num_cells
tag::p_square_quantiletag::weighted_p_square_quantile
tag::weighted_p_square_quantileboost::accumulators::feature_of< tag::p_square_quantile >
extractor< tag::p_square_quantile > const
extractor< tag::p_square_quantile_for_median > const
boost::accumulators::depends_on< count >boost::accumulators::depends_on< count >
tag::peaks_over_threshold< LeftRight >(with_threshold_probability)tag::peaks_over_threshold_prob< LeftRight >
tag::peaks_over_threshold< LeftRight >(with_threshold_value)tag::peaks_over_threshold< LeftRight >
tag::peaks_over_threshold< LeftRight >tag::weighted_peaks_over_threshold< LeftRight >
tag::peaks_over_threshold_prob< LeftRight >tag::weighted_peaks_over_threshold_prob< LeftRight >
tag::peaks_over_threshold< LeftRight >boost::accumulators::feature_of< tag::abstract_peaks_over_threshold >
tag::peaks_over_threshold_prob< LeftRight >boost::accumulators::feature_of< tag::abstract_peaks_over_threshold >
tag::weighted_peaks_over_threshold< LeftRight >boost::accumulators::feature_of< tag::peaks_over_threshold< LeftRight > >
tag::weighted_peaks_over_threshold_prob< LeftRight >boost::accumulators::feature_of< tag::peaks_over_threshold_prob< LeftRight > >
extractor< tag::abstract_peaks_over_threshold > const
accumulator_basePeaks over Threshold Method for Quantile and Tail Mean Estimation. According to the theorem of Pickands-Balkema-de Haan, the distribution function $F_u(x)$ of the excesses $x$ over some sufficiently high threshold $u$ of a distribution function $F(x)$ may be approximated by a generalized Pareto distribution \[ G_{\xi,\beta}(x) = \left\{ \begin{array}{ll} \beta^{-1}\left(1+\frac{\xi x}{\beta}\right)^{-1/\xi-1} & \textrm{if }\xi\neq0\\ \beta^{-1}\exp\left(-\frac{x}{\beta}\right) & \textrm{if }\xi=0, \end{array} \right. \] with suitable parameters $\xi$ and $\beta$ that can be estimated, e.g., with the method of moments, cf. Hosking and Wallis (1987), \[ \begin{array}{lll} \hat{\xi} & = & \frac{1}{2}\left[1-\frac{(\hat{\mu}-u)^2}{\hat{\sigma}^2}\right]\\ \hat{\beta} & = & \frac{\hat{\mu}-u}{2}\left[\frac{(\hat{\mu}-u)^2}{\hat{\sigma}^2}+1\right], \end{array} \] $\hat{\mu}$ and $\hat{\sigma}^2$ being the empirical mean and variance of the samples over the threshold $u$. Equivalently, the distribution function $F_u(x-u)$ of the exceedances $x-u$ can be approximated by $G_{\xi,\beta}(x-u)=G_{\xi,\beta,u}(x)$. 
Since for $x\geq u$ the distribution function $F(x)$ can be written as \[ F(x) = [1 - \P(X \leq u)]F_u(x - u) + \P(X \leq u) \] and the probability $\P(X \leq u)$ can be approximated by the empirical distribution function $F_n(u)$ evaluated at $u$, an estimator of $F(x)$ is given by \[ \widehat{F}(x) = [1 - F_n(u)]G_{\xi,\beta,u}(x) + F_n(u). \] It can be shown that $\widehat{F}(x)$ is a generalized Pareto distribution $G_{\xi,\bar{\beta},\bar{u}}(x)$ with $\bar{\beta}=\beta[1-F_n(u)]^{\xi}$ and $\bar{u}=u-\bar{\beta}\left\{[1-F_n(u)]^{-\xi}-1\right\}/\xi$. By inverting $\widehat{F}(x)$, one obtains an estimator for the $\alpha$-quantile, \[ \hat{q}_{\alpha} = \bar{u} + \frac{\bar{\beta}}{\xi}\left[(1-\alpha)^{-\xi}-1\right], \] and similarly an estimator for the (coherent) tail mean, \[ \widehat{CTM}_{\alpha} = \hat{q}_{\alpha} - \frac{\bar{\beta}}{\xi-1}(1-\alpha)^{-\xi}, \] cf. McNeil and Frey (2000). Note that in case extreme values of the left tail are fitted, the distribution is mirrored with respect to the $y$ axis such that the left tail can be treated as a right tail. The computed fit parameters thus define the Pareto distribution that fits the mirrored left tail. When quantities like a quantile or a tail mean are computed using the fit parameters obtained from the mirrored data, the result is mirrored back, yielding the correct result. For further details, see J. R. M. Hosking and J. R. 
Wallis, Parameter and quantile estimation for the generalized Pareto distribution, Technometrics, Volume 29, 1987, p. 339-349; A. J. McNeil and R. Frey, Estimation of Tail-Related Risk Measures for Heteroscedastic Financial Time Series: an Extreme Value Approach, Journal of Empirical Finance, Volume 7, 2000, p. 271-300
numeric::functional::fdiv< Sample, std::size_t >::result_type
boost::tuple< float_type, float_type, float_type >
mpl::int_< is_same< LeftRight, left >::value ? -1 :1 >
void
Args const &
result_type
Args const &
void
Archive &const unsigned int
Args const &
boost::accumulators::depends_on<>
tag::pot_quantile< LeftRight >(with_threshold_probability)tag::pot_quantile_prob< LeftRight >
tag::pot_quantile< LeftRight >(with_threshold_value)tag::pot_quantile< LeftRight >
tag::weighted_pot_quantile< LeftRight >(with_threshold_probability)tag::weighted_pot_quantile_prob< LeftRight >
tag::weighted_pot_quantile< LeftRight >(with_threshold_value)tag::weighted_pot_quantile< LeftRight >
tag::pot_quantile< LeftRight >tag::weighted_pot_quantile< LeftRight >
tag::pot_quantile_prob< LeftRight >tag::weighted_pot_quantile_prob< LeftRight >
tag::pot_quantile< LeftRight >boost::accumulators::feature_of< tag::quantile >
tag::pot_quantile_prob< LeftRight >boost::accumulators::feature_of< tag::quantile >
tag::weighted_pot_quantile< LeftRight >boost::accumulators::feature_of< tag::pot_quantile< LeftRight > >
tag::weighted_pot_quantile_prob< LeftRight >boost::accumulators::feature_of< tag::pot_quantile_prob< LeftRight > >
tag::pot_tail_mean< LeftRight >(with_threshold_probability)tag::pot_tail_mean_prob< LeftRight >
tag::pot_tail_mean< LeftRight >(with_threshold_value)tag::pot_tail_mean< LeftRight >
tag::weighted_pot_tail_mean< LeftRight >(with_threshold_probability)tag::weighted_pot_tail_mean_prob< LeftRight >
tag::weighted_pot_tail_mean< LeftRight >(with_threshold_value)tag::weighted_pot_tail_mean< LeftRight >
tag::pot_tail_mean< LeftRight >tag::weighted_pot_tail_mean< LeftRight >
tag::pot_tail_mean_prob< LeftRight >tag::weighted_pot_tail_mean_prob< LeftRight >
tag::pot_tail_mean< LeftRight >boost::accumulators::feature_of< tag::tail_mean >
tag::pot_tail_mean_prob< LeftRight >boost::accumulators::feature_of< tag::tail_mean >
tag::weighted_pot_tail_mean< LeftRight >boost::accumulators::feature_of< tag::pot_tail_mean< LeftRight > >
tag::weighted_pot_tail_mean_prob< LeftRight >boost::accumulators::feature_of< tag::pot_tail_mean_prob< LeftRight > >
extractor< tag::rolling_count > const
boost::accumulators::depends_on< rolling_window_plus1 >boost::parameter::keyword< tag::rolling_window_size > consttag::rolling_window::window_size named parameter
tag::rolling_mean(immediate)tag::immediate_rolling_mean
tag::rolling_mean(lazy)tag::lazy_rolling_mean
tag::immediate_rolling_meanboost::accumulators::feature_of< tag::rolling_mean >
tag::lazy_rolling_meanboost::accumulators::feature_of< tag::rolling_mean >
extractor< tag::lazy_rolling_mean > const
extractor< tag::immediate_rolling_mean > const
extractor< tag::rolling_mean > const
accumulator_basenumeric::functional::fdiv< Sample, std::size_t >::result_type
void
Args const &
result_type
Args const &
void
Archive &const unsigned int
Args const &
accumulator_basenumeric::functional::fdiv< Sample, std::size_t, void, void >::result_type
result_type
Args const &
void
Archive &const unsigned int
dont_care
boost::accumulators::depends_on< rolling_window_plus1, rolling_count >boost::parameter::keyword< tag::rolling_window_size > consttag::rolling_window::window_size named parameter
boost::accumulators::depends_on< rolling_sum, rolling_count >boost::parameter::keyword< tag::rolling_window_size > consttag::rolling_window::window_size named parameter
boost::accumulators::tag::immediate_rolling_mean
accumulator_basenumeric::functional::fdiv< Sample, std::size_t, void, void >::result_type
N::value0
void
Args const &
result_type
Args const &
void
Archive &const unsigned int
Args const &
int
boost::accumulators::depends_on< rolling_window_plus1, rolling_count >boost::parameter::keyword< tag::rolling_window_size > consttag::rolling_window::window_size named parameter
extractor< tag::rolling_sum > const
boost::accumulators::depends_on< rolling_window_plus1 >boost::parameter::keyword< tag::rolling_window_size > consttag::rolling_window::window_size named parameter
tag::rolling_variance(immediate)tag::immediate_rolling_variance
tag::rolling_variance(lazy)tag::lazy_rolling_variance
tag::immediate_rolling_varianceboost::accumulators::feature_of< tag::rolling_variance >
tag::lazy_rolling_varianceboost::accumulators::feature_of< tag::rolling_variance >
extractor< tag::lazy_rolling_variance > const
extractor< tag::immediate_rolling_variance > const
extractor< tag::rolling_variance > const
accumulator_baseIterative calculation of the rolling variance. Iterative calculation of sample variance $\sigma_n^2$ is done as follows, see also http://en.wikipedia.org/wiki/Algorithms_for_calculating_variance. For a rolling window of size $N$, for the first $N$ samples, the variance is computed according to the formula \[ \sigma_n^2 = \frac{1}{n-1} \sum_{i = 1}^n (x_i - \mu_n)^2 = \frac{1}{n-1}M_{2,n}, \] where the sum of squares $M_{2,n}$ can be recursively computed as: \[ M_{2,n} = \sum_{i = 1}^n (x_i - \mu_n)^2 = M_{2,n-1} + (x_n - \mu_n)(x_n - \mu_{n-1}), \] and the estimate of the sample mean as: \[ \mu_n = \frac{1}{n} \sum_{i = 1}^n x_i = \mu_{n-1} + \frac{1}{n}(x_n - \mu_{n-1}). \] For further samples, when the rolling window is fully filled with data, one has to take into account that the oldest sample $x_{n-N}$ is dropped from the window. The sample variance over the window now becomes: \[ \sigma_n^2 = \frac{1}{N-1} \sum_{i = n-N+1}^n (x_i - \mu_n)^2 = \frac{1}{N-1}M_{2,n}, \] where the sum of squares $M_{2,n}$ now equals: \[ M_{2,n} = \sum_{i = n-N+1}^n (x_i - \mu_n)^2 = M_{2,n-1} + (x_n - \mu_n)(x_n - \mu_{n-1}) - (x_{n-N} - \mu_n)(x_{n-N} - \mu_{n-1}), \] and the estimated mean is: \[ \mu_n = \frac{1}{N} \sum_{i = n-N+1}^n x_i = \mu_{n-1} + \frac{1}{N}(x_n - x_{n-N}). 
\] Note that the sample variance is not defined for $n \leq 1$. numeric::functional::fdiv< Sample, std::size_t >::result_type
void
Args const &
result_type
Args const &
void
Archive &const unsigned int
Args const &
void
T &typename boost::enable_if< boost::is_arithmetic< T >, T >::type *0
void
T &typename boost::disable_if< boost::is_arithmetic< T >, T >::type *0
accumulator_baseImmediate (lazy) calculation of the rolling variance. Calculation of sample variance $\sigma_n^2$ is done as follows, see also http://en.wikipedia.org/wiki/Algorithms_for_calculating_variance. For a rolling window of size $N$, when $n \leq N$, the variance is computed according to the formula \[ \sigma_n^2 = \frac{1}{n-1} \sum_{i = 1}^n (x_i - \mu_n)^2. \] When $n > N$, the sample variance over the window becomes: \[ \sigma_n^2 = \frac{1}{N-1} \sum_{i = n-N+1}^n (x_i - \mu_n)^2. \] numeric::functional::fdiv< Sample, std::size_t, void, void >::result_type
result_type
Args const &
void
Archive &const unsigned int
dont_care
boost::accumulators::depends_on< rolling_window_plus1, rolling_count, immediate_rolling_mean >boost::parameter::keyword< tag::rolling_window_size > consttag::rolling_window::window_size named parameter
boost::accumulators::depends_on< rolling_count, rolling_mean, rolling_moment< 2 > >boost::parameter::keyword< tag::rolling_window_size > consttag::rolling_window::window_size named parameter
boost::accumulators::tag::immediate_rolling_variance
extractor< tag::rolling_window_plus1 > const
extractor< tag::rolling_window > const
bool
Args const &
boost::accumulators::depends_on< rolling_window_plus1 >boost::parameter::keyword< tag::rolling_window_size > consttag::rolling_window::size named parameter
boost::accumulators::depends_on<>rolling_window_sizeboost::parameter::keyword< tag::rolling_window_size > consttag::rolling_window::size named parameter
void
Archive &const circular_buffer< T > &const unsigned int
void
Archive &circular_buffer< T > &const unsigned int
void
Archive &circular_buffer< T > &const unsigned int
tag::skewnesstag::weighted_skewness
tag::weighted_skewnessboost::accumulators::feature_of< tag::skewness >
extractor< tag::skewness > const
boost::accumulators::depends_on< mean, moment< 2 >, moment< 3 > >
Contains the stats<> template.
tag::sumtag::weighted_sum
tag::sum_of_variates< VariateTypeVariateTag >boost::accumulators::feature_of< tag::abstract_sum_of_variates >
tag::weighted_sumboost::accumulators::feature_of< tag::sum >
extractor< tag::sum > const
extractor< tag::sum_of_weights > const
extractor< tag::abstract_sum_of_variates > const
boost::accumulators::depends_on<>boost::accumulators::depends_on<>boost::accumulators::depends_on<>mpl::true_
tag::sum(kahan)tag::sum_kahan
tag::sum_of_weights(kahan)tag::sum_of_weights_kahan
tag::sum_kahantag::weighted_sum_kahan
tag::sum_kahanboost::accumulators::feature_of< tag::sum >
tag::sum_of_variates_kahan< VariateTypeVariateTag >boost::accumulators::feature_of< tag::abstract_sum_of_variates >
tag::sum_of_weights_kahanboost::accumulators::feature_of< tag::sum_of_weights >
tag::weighted_sum_kahanboost::accumulators::feature_of< tag::sum >
extractor< tag::sum_kahan > const
extractor< tag::sum_of_weights_kahan > const
extractor< tag::abstract_sum_of_variates > const
boost::accumulators::depends_on<>boost::accumulators::depends_on<>mpl::true_
tag::tail< LeftRight >boost::accumulators::feature_of< tag::abstract_tail >
leftleft_tail_cache_size
rightright_tail_cache_size
extractor< tag::abstract_tail > const
boost::accumulators::depends_on<>
tag::non_coherent_tail_mean< LeftRight >tag::non_coherent_weighted_tail_mean< LeftRight >
tag::coherent_tail_mean< LeftRight >boost::accumulators::feature_of< tag::tail_mean >
tag::non_coherent_tail_mean< LeftRight >boost::accumulators::feature_of< tag::abstract_non_coherent_tail_mean >
tag::non_coherent_weighted_tail_mean< LeftRight >boost::accumulators::feature_of< tag::non_coherent_tail_mean< LeftRight > >
extractor< tag::abstract_non_coherent_tail_mean > const
extractor< tag::tail_mean > const
boost::accumulators::depends_on<>
tag::tail_quantile< LeftRight >tag::weighted_tail_quantile< LeftRight >
tag::tail_quantile< LeftRight >boost::accumulators::feature_of< tag::quantile >
tag::weighted_tail_quantile< LeftRight >boost::accumulators::feature_of< tag::tail_quantile< LeftRight > >
extractor< tag::quantile > const
tag::tail_variate< VariateTypeVariateTagLeftRight >boost::accumulators::feature_of< tag::abstract_tail_variate >
tag::tail_weights< LeftRight >tag::abstract_tail_weights
extractor< tag::abstract_tail_variate > const
extractor< tag::abstract_tail_weights > const
boost::accumulators::depends_on<>boost::accumulators::depends_on<>
tag::tail_variate_means< LeftRightVariateTypeVariateTag >(absolute)tag::absolute_tail_variate_means< LeftRight, VariateType, VariateTag >
tag::tail_variate_means< LeftRightVariateTypeVariateTag >(relative)tag::relative_tail_variate_means< LeftRight, VariateType, VariateTag >
tag::absolute_tail_variate_means< LeftRightVariateTypeVariateTag >tag::absolute_weighted_tail_variate_means< LeftRight, VariateType, VariateTag >
tag::relative_tail_variate_means< LeftRightVariateTypeVariateTag >tag::relative_weighted_tail_variate_means< LeftRight, VariateType, VariateTag >
tag::absolute_tail_variate_means< LeftRightVariateTypeVariateTag >boost::accumulators::feature_of< tag::abstract_absolute_tail_variate_means >
tag::absolute_weighted_tail_variate_means< LeftRightVariateTypeVariateTag >boost::accumulators::feature_of< tag::absolute_tail_variate_means< LeftRight, VariateType, VariateTag > >
tag::relative_tail_variate_means< LeftRightVariateTypeVariateTag >boost::accumulators::feature_of< tag::abstract_relative_tail_variate_means >
tag::relative_weighted_tail_variate_means< LeftRightVariateTypeVariateTag >boost::accumulators::feature_of< tag::relative_tail_variate_means< LeftRight, VariateType, VariateTag > >
extractor< tag::abstract_absolute_tail_variate_means > const
extractor< tag::abstract_relative_tail_variate_means > const
boost::accumulators::depends_on<>boost::accumulators::depends_on<>
tag::variance(immediate)tag::variance
tag::variance(lazy)tag::lazy_variance
tag::lazy_variancetag::lazy_weighted_variance
tag::variancetag::weighted_variance
tag::lazy_varianceboost::accumulators::feature_of< tag::variance >
tag::lazy_weighted_varianceboost::accumulators::feature_of< tag::lazy_variance >
tag::weighted_varianceboost::accumulators::feature_of< tag::variance >
extractor< tag::lazy_variance > const
extractor< tag::variance > const
boost::accumulators::depends_on< moment< 2 >, mean >boost::accumulators::depends_on< count, immediate_mean >
boost::parameter::keyword< tag ::covariate1 > const
boost::parameter::keyword< tag ::covariate2 > const
extractor< tag::abstract_covariance > const
extractor< tag::density > const
boost::accumulators::depends_on< count, sum_of_weights, min, max >density_cache_sizedensity_num_binsboost::parameter::keyword< density_cache_size > const
boost::parameter::keyword< density_num_bins > const
extractor< tag::weighted_extended_p_square > const
boost::accumulators::depends_on< count, sum_of_weights >extended_p_square_probabilitiesaccumulators::impl::weighted_extended_p_square_impl< mpl::_1, mpl::_2 >
extractor< tag::weighted_kurtosis > const
boost::accumulators::depends_on< weighted_mean, weighted_moment< 2 >, weighted_moment< 3 >, weighted_moment< 4 > >
tag::weighted_mean(immediate)tag::immediate_weighted_mean
tag::weighted_mean(lazy)tag::weighted_mean
tag::weighted_mean_of_variates< VariateTypeVariateTag >(immediate)tag::immediate_weighted_mean_of_variates< VariateType, VariateTag >
tag::weighted_mean_of_variates< VariateTypeVariateTag >(lazy)tag::weighted_mean_of_variates< VariateType, VariateTag >
extractor< tag::mean > const
boost::accumulators::depends_on< sum_of_weights >boost::accumulators::depends_on< sum_of_weights, weighted_sum >
tag::weighted_median(with_density)tag::with_density_weighted_median
tag::weighted_median(with_p_square_cumulative_distribution)tag::with_p_square_cumulative_distribution_weighted_median
tag::weighted_median(with_p_square_quantile)tag::weighted_median
extractor< tag::median > const
boost::accumulators::depends_on< weighted_p_square_quantile_for_median >boost::accumulators::depends_on< count, weighted_density >boost::accumulators::depends_on< weighted_p_square_cumulative_distribution >
extractor< tag::weighted_p_square_cumulative_distribution > const
boost::accumulators::depends_on< count, sum_of_weights >p_square_cumulative_distribution_num_cellsaccumulators::impl::weighted_p_square_cumulative_distribution_impl< mpl::_1, mpl::_2 >
extractor< tag::weighted_p_square_quantile > const
extractor< tag::weighted_p_square_quantile_for_median > const
boost::accumulators::depends_on< count, sum_of_weights >accumulators::impl::weighted_p_square_quantile_impl< mpl::_1, mpl::_2, regular >
boost::accumulators::depends_on< count, sum_of_weights >accumulators::impl::weighted_p_square_quantile_impl< mpl::_1, mpl::_2, for_median >
tag::weighted_peaks_over_threshold< LeftRight >(with_threshold_probability)tag::weighted_peaks_over_threshold_prob< LeftRight >
tag::weighted_peaks_over_threshold< LeftRight >(with_threshold_value)tag::weighted_peaks_over_threshold< LeftRight >
extractor< tag::abstract_peaks_over_threshold > const
extractor< tag::weighted_skewness > const
boost::accumulators::depends_on< weighted_mean, weighted_moment< 2 >, weighted_moment< 3 > >
tag::weighted_sum_of_variates< VariateTypeVariateTag >boost::accumulators::feature_of< tag::abstract_weighted_sum_of_variates >
extractor< tag::weighted_sum > const
extractor< tag::abstract_weighted_sum_of_variates > const
boost::accumulators::depends_on<>boost::accumulators::depends_on<>
tag::weighted_sum(kahan)tag::weighted_sum_kahan
tag::weighted_sum_of_variates_kahan< VariateTypeVariateTag >boost::accumulators::feature_of< tag::abstract_weighted_sum_of_variates >
extractor< tag::weighted_sum_kahan > const
extractor< tag::abstract_weighted_sum_of_variates > const
boost::accumulators::depends_on<>
boost::accumulators::depends_on<>
extractor< tag::abstract_non_coherent_tail_mean > const
extractor< tag::quantile > const
tag::weighted_tail_variate_means< LeftRightVariateTypeVariateTag >(absolute)tag::absolute_weighted_tail_variate_means< LeftRight, VariateType, VariateTag >
tag::weighted_tail_variate_means< LeftRightVariateTypeVariateTag >(relative)tag::relative_weighted_tail_variate_means< LeftRight, VariateType, VariateTag >
extractor< tag::abstract_absolute_tail_variate_means > const
extractor< tag::abstract_relative_tail_variate_means > const
functional::multiplies< T, double const >
tag::weighted_variance(immediate)tag::weighted_variance
tag::weighted_variance(lazy)tag::lazy_weighted_variance
extractor< tag::lazy_weighted_variance > const
extractor< tag::weighted_variance > const
boost::accumulators::depends_on< weighted_moment< 2 >, weighted_mean >boost::accumulators::depends_on< count, immediate_weighted_mean >
...
mpl::vector< Stat1, Stat2,... >An MPL sequence of statistics.
...
extractor< tag::quantile > const
extractor< tag::tail_mean > const
accumulator_baseEstimation of the coherent tail mean based on order statistics (for both left and right tails). The coherent tail mean $\widehat{CTM}_{n,\alpha}(X)$ is equal to the non-coherent tail mean $\widehat{NCTM}_{n,\alpha}(X)$ plus a correction term that ensures coherence in case of non-continuous distributions. \[ \widehat{CTM}_{n,\alpha}^{\mathrm{right}}(X) = \widehat{NCTM}_{n,\alpha}^{\mathrm{right}}(X) + \frac{1}{\lceil n(1-\alpha)\rceil}\hat{q}_{n,\alpha}(X)\left(1 - \alpha - \frac{1}{n}\lceil n(1-\alpha)\rceil \right) \] \[ \widehat{CTM}_{n,\alpha}^{\mathrm{left}}(X) = \widehat{NCTM}_{n,\alpha}^{\mathrm{left}}(X) + \frac{1}{\lceil n\alpha\rceil}\hat{q}_{n,\alpha}(X)\left(\alpha - \frac{1}{n}\lceil n\alpha\rceil \right) \]
result_type
Args const &
void
Archive &const unsigned int
dont_care
accumulator_baseCovariance Estimator. An iterative Monte Carlo estimator for the covariance $\mathrm{Cov}(X,X')$, where $X$ is a sample and $X'$ is a variate, is given by: \[ \hat{c}_n = \frac{n-1}{n} \hat{c}_{n-1} + \frac{1}{n-1}(X_n - \hat{\mu}_n)(X_n' - \hat{\mu}_n'),\quad n\ge2,\quad\hat{c}_1 = 0, \] $\hat{\mu}_n$ and $\hat{\mu}_n'$ being the means of the samples and variates.
void
Args const &
result_typedont_care
void
Archive &const unsigned int
Args const &
accumulator_baseHistogram density estimator. The histogram density estimator returns a histogram of the sample distribution. The positions and sizes of the bins are determined using a specifiable number of cached samples (cache_size). The range between the minimum and the maximum of the cached samples is subdivided into a specifiable number of bins (num_bins) of same size. Additionally, an under- and an overflow bin is added to capture future under- and overflow samples. Once the bins are determined, the cached samples and all subsequent samples are added to the correct bins. At the end, a range of std::pair is returned, where each pair contains the position of the bin (lower bound) and the samples count (normalized with the total number of samples).
void
Args const &
result_type
Args const &
The number of samples must meet or exceed the cache size
void
Archive &const unsigned int
Args const &
accumulator_base
result_type
Args const &
dont_care
accumulator_baseMultiple quantile estimation with the extended $P^2$$P^2$ algorithm. Extended $P^2$$P^2$ algorithm for estimation of several quantiles without storing samples. Assume that $m$$m$ quantiles $\xi_{p_1}, \ldots, \xi_{p_m}$$\xi_{p_1}, \ldots, \xi_{p_m}$ are to be estimated. Instead of storing the whole sample cumulative distribution, the algorithm maintains only $m+2$$m+2$ principal markers and $m+1$$m+1$ middle markers, whose positions are updated with each sample and whose heights are adjusted (if necessary) using a piecewise-parabolic formula. The heights of the principal markers are the current estimates of the quantiles and are returned as an iterator range.For further details, seeK. E. E. Raatikainen, Simultaneous estimation of several quantiles, Simulation, Volume 49, Number 4 (October), 1986, p. 159-164.The extended $ P^2 $$ P^2 $ algorithm generalizes the $ P^2 $$ P^2 $ algorithm ofR. Jain and I. Chlamtac, The P^2 algorithm for dynamic calculation of quantiles and histograms without storing observations, Communications of the ACM, Volume 28 (October), Number 10, 1985, p. 1076-1085.
void
Args const &
result_typedont_care
void
Archive &const unsigned int
Args const &
accumulator_baseQuantile estimation using the extended $P^2$$P^2$ algorithm for weighted and unweighted samples. Uses the quantile estimates calculated by the extended $P^2$$P^2$ algorithm to compute intermediate quantile estimates by means of quadratic interpolation.
result_type
Args const &
void
Archive &const unsigned int
Args const &
accumulator_base
void
Args const &
result_typedont_care
void
Archive &const unsigned int
Args const &
accumulator_base
void
Args const &
result_typedont_care
void
Archive &const unsigned int
Args const &
accumulator_baseKurtosis estimation. The kurtosis of a sample distribution is defined as the ratio of the 4th central moment and the square of the 2nd central moment (the variance) of the samples, minus 3. The term $ -3 $$ -3 $ is added in order to ensure that the normal distribution has zero kurtosis. The kurtosis can also be expressed by the simple moments:\[ \hat{g}_2 = \frac {\widehat{m}_n^{(4)}-4\widehat{m}_n^{(3)}\hat{\mu}_n+6\widehat{m}_n^{(2)}\hat{\mu}_n^2-3\hat{\mu}_n^4} {\left(\widehat{m}_n^{(2)} - \hat{\mu}_n^{2}\right)^2} - 3, \]\[ \hat{g}_2 = \frac {\widehat{m}_n^{(4)}-4\widehat{m}_n^{(3)}\hat{\mu}_n+6\widehat{m}_n^{(2)}\hat{\mu}_n^2-3\hat{\mu}_n^4} {\left(\widehat{m}_n^{(2)} - \hat{\mu}_n^{2}\right)^2} - 3, \]where $ \widehat{m}_n^{(i)} $$ \widehat{m}_n^{(i)} $ are the $ i $$ i $-th moment and $ \hat{\mu}_n $$ \hat{\mu}_n $ the mean (first moment) of the $ n $$ n $ samples.
result_type
Args const &
void
Archive &const unsigned int
dont_care
accumulator_baseLazy calculation of variance. Default sample variance implementation based on the second moment $ M_n^{(2)} $$ M_n^{(2)} $ moment<2>, mean and count. \[ \sigma_n^2 = M_n^{(2)} - \mu_n^2. \]\[ \sigma_n^2 = M_n^{(2)} - \mu_n^2. \] where \[ \mu_n = \frac{1}{n} \sum_{i = 1}^n x_i. \]\[ \mu_n = \frac{1}{n} \sum_{i = 1}^n x_i. \] is the estimate of the sample mean and $n$$n$ is the number of samples.
result_type
Args const &
void
Archive &const unsigned int
dont_care
accumulator_baseLazy calculation of variance of weighted samples. The default implementation of the variance of weighted samples is based on the second moment $\widehat{m}_n^{(2)}$$\widehat{m}_n^{(2)}$ (weighted_moment<2>) and the mean $ \hat{\mu}_n$$ \hat{\mu}_n$ (weighted_mean): \[ \hat{\sigma}_n^2 = \widehat{m}_n^{(2)}-\hat{\mu}_n^2, \]\[ \hat{\sigma}_n^2 = \widehat{m}_n^{(2)}-\hat{\mu}_n^2, \] where $n$$n$ is the number of samples.
result_type
Args const &
dont_care
accumulator_base
void
Args const &
result_typedont_care
void
Archive &const unsigned int
Args const &
accumulator_base
result_type
Args const &
void
Archive &const unsigned int
dont_care
accumulator_baseMedian estimation based on the $P^2$$P^2$ quantile estimator. The $P^2$$P^2$ algorithm is invoked with a quantile probability of 0.5.
result_type
Args const &
void
Archive &const unsigned int
dont_care
accumulator_base
void
Args const &
result_typedont_care
void
Archive &const unsigned int
Args const &
accumulator_base
N::value0
void
Args const &
result_type
Args const &
void
Archive &const unsigned int
Args const &
accumulator_baseEstimation of the (non-coherent) tail mean based on order statistics (for both left and right tails) An estimation of the non-coherent tail mean $\widehat{NCTM}_{n,\alpha}(X)$$\widehat{NCTM}_{n,\alpha}(X)$ is given by the mean of the $\lceil n\alpha\rceil$$\lceil n\alpha\rceil$ smallest samples (left tail) or the mean of the $\lceil n(1-\alpha)\rceil$$\lceil n(1-\alpha)\rceil$ largest samples (right tail), $n$$n$ being the total number of samples and $\alpha$$\alpha$ the quantile level:\[ \widehat{NCTM}_{n,\alpha}^{\mathrm{right}}(X) = \frac{1}{\lceil n(1-\alpha)\rceil} \sum_{i=\lceil \alpha n \rceil}^n X_{i:n} \]\[ \widehat{NCTM}_{n,\alpha}^{\mathrm{right}}(X) = \frac{1}{\lceil n(1-\alpha)\rceil} \sum_{i=\lceil \alpha n \rceil}^n X_{i:n} \]\[ \widehat{NCTM}_{n,\alpha}^{\mathrm{left}}(X) = \frac{1}{\lceil n\alpha\rceil} \sum_{i=1}^{\lceil \alpha n \rceil} X_{i:n} \]\[ \widehat{NCTM}_{n,\alpha}^{\mathrm{left}}(X) = \frac{1}{\lceil n\alpha\rceil} \sum_{i=1}^{\lceil \alpha n \rceil} X_{i:n} \]It thus requires the caching of at least the $\lceil n\alpha\rceil$$\lceil n\alpha\rceil$ smallest or the $\lceil n(1-\alpha)\rceil$$\lceil n(1-\alpha)\rceil$ largest samples.
result_type
Args const &
void
Archive &const unsigned int
dont_care
accumulator_baseEstimation of the (non-coherent) weighted tail mean based on order statistics (for both left and right tails) An estimation of the non-coherent, weighted tail mean $\widehat{NCTM}_{n,\alpha}(X)$$\widehat{NCTM}_{n,\alpha}(X)$ is given by the weighted mean of the\[ \lambda = \inf\left\{ l \left| \frac{1}{\bar{w}_n}\sum_{i=1}^{l} w_i \geq \alpha \right. \right\} \]\[ \lambda = \inf\left\{ l \left| \frac{1}{\bar{w}_n}\sum_{i=1}^{l} w_i \geq \alpha \right. \right\} \]smallest samples (left tail) or the weighted mean of the\[ n + 1 - \rho = n + 1 - \sup\left\{ r \left| \frac{1}{\bar{w}_n}\sum_{i=r}^{n} w_i \geq (1 - \alpha) \right. \right\} \]\[ n + 1 - \rho = n + 1 - \sup\left\{ r \left| \frac{1}{\bar{w}_n}\sum_{i=r}^{n} w_i \geq (1 - \alpha) \right. \right\} \]largest samples (right tail) above a quantile $\hat{q}_{\alpha}$$\hat{q}_{\alpha}$ of level $\alpha$$\alpha$, $n$$n$ being the total number of sample and $\bar{w}_n$$\bar{w}_n$ the sum of all $n$$n$ weights:\[ \widehat{NCTM}_{n,\alpha}^{\mathrm{left}}(X) = \frac{\sum_{i=1}^{\lambda} w_i X_{i:n}}{\sum_{i=1}^{\lambda} w_i}, \]\[ \widehat{NCTM}_{n,\alpha}^{\mathrm{left}}(X) = \frac{\sum_{i=1}^{\lambda} w_i X_{i:n}}{\sum_{i=1}^{\lambda} w_i}, \]\[ \widehat{NCTM}_{n,\alpha}^{\mathrm{right}}(X) = \frac{\sum_{i=\rho}^n w_i X_{i:n}}{\sum_{i=\rho}^n w_i}. \]\[ \widehat{NCTM}_{n,\alpha}^{\mathrm{right}}(X) = \frac{\sum_{i=\rho}^n w_i X_{i:n}}{\sum_{i=\rho}^n w_i}. \]
result_type
Args const &
dont_care
accumulator_baseHistogram calculation of the cumulative distribution with the $P^2$$P^2$ algorithm. A histogram of the sample cumulative distribution is computed dynamically without storing samples based on the $ P^2 $$ P^2 $ algorithm. The returned histogram has a specifiable amount (num_cells) equiprobable (and not equal-sized) cells.For further details, seeR. Jain and I. Chlamtac, The P^2 algorithm for dynamic calculation of quantiles and histograms without storing observations, Communications of the ACM, Volume 28 (October), Number 10, 1985, p. 1076-1085.
void
Args const &
result_type
Args const &
void
Archive &const unsigned int
Args const &
accumulator_baseSingle quantile estimation with the $P^2$$P^2$ algorithm. The $P^2$$P^2$ algorithm estimates a quantile dynamically without storing samples. Instead of storing the whole sample cumulative distribution, only five points (markers) are stored. The heights of these markers are the minimum and the maximum of the samples and the current estimates of the $(p/2)$$(p/2)$-, $p$$p$- and $(1+p)/2$$(1+p)/2$-quantiles. Their positions are equal to the number of samples that are smaller or equal to the markers. Each time a new sample is recorded, the positions of the markers are updated and if necessary their heights are adjusted using a piecewise-parabolic formula.For further details, seeR. Jain and I. Chlamtac, The P^2 algorithm for dynamic calculation of quantiles and histograms without storing observations, Communications of the ACM, Volume 28 (October), Number 10, 1985, p. 1076-1085.
void
Args const &
result_typedont_care
void
Archive &const unsigned int
Args const &
accumulator_basePeaks over Threshold Method for Quantile and Tail Mean Estimation. See Also:peaks_over_threshold_impl
voiddont_care
result_type
Args const &
void
Archive &const unsigned int
Args const &
accumulator_baseQuantile Estimation based on Peaks over Threshold Method (for both left and right tails) Computes an estimate \[ \hat{q}_{\alpha} = \bar{u} + \frac{\bar{\beta}}{\xi}\left[(1-\alpha)^{-\xi}-1\right] \]\[ \hat{q}_{\alpha} = \bar{u} + \frac{\bar{\beta}}{\xi}\left[(1-\alpha)^{-\xi}-1\right] \] for a right or left extreme quantile, $\bar{u}$$\bar{u}$, $\bar{\beta}$$\bar{\beta}$ and $\xi$$\xi$ being the parameters of the generalized Pareto distribution that approximates the right tail of the distribution (or the mirrored left tail, in case the left tail is used). In the latter case, the result is mirrored back, yielding the correct result.
result_type
Args const &
void
Archive &const unsigned int
dont_care
accumulator_baseEstimation of the (coherent) tail mean based on the peaks over threshold method (for both left and right tails) Computes an estimate for the (coherent) tail mean \[ \widehat{CTM}_{\alpha} = \hat{q}_{\alpha} - \frac{\bar{\beta}}{\xi-1}(1-\alpha)^{-\xi}, \]\[ \widehat{CTM}_{\alpha} = \hat{q}_{\alpha} - \frac{\bar{\beta}}{\xi-1}(1-\alpha)^{-\xi}, \] where $\bar{u}$$\bar{u}$, $\bar{\beta}$$\bar{\beta}$ and $\xi$$\xi$ are the parameters of the generalized Pareto distribution that approximates the right tail of the distribution (or the mirrored left tail, in case the left tail is used). In the latter case, the result is mirrored back, yielding the correct result.
result_type
Args const &
void
Archive &const unsigned int
dont_care
accumulator_base
result_type
Args const &
void
Archive &const unsigned int
dont_care
accumulator_base
void
Args const &
result_type
Args const &
void
Archive &const unsigned int
Args const &
accumulator_base
result_type
Args const &
void
Archive &const unsigned int
dont_care
accumulator_base
void
Args const &
bool
result_typedont_care
void
Archive &const unsigned int
Args const &
rolling_window_plus1_impl const &
rolling_window_plus1_impl &rolling_window_plus1_impl const &
accumulator_baseSkewness estimation. The skewness of a sample distribution is defined as the ratio of the 3rd central moment and the $ 3/2 $$ 3/2 $-th power of the 2nd central moment (the variance) of the samples. The skewness can also be expressed by the simple moments:\[ \hat{g}_1 = \frac {\widehat{m}_n^{(3)}-3\widehat{m}_n^{(2)}\hat{\mu}_n+2\hat{\mu}_n^3} {\left(\widehat{m}_n^{(2)} - \hat{\mu}_n^{2}\right)^{3/2}} \]\[ \hat{g}_1 = \frac {\widehat{m}_n^{(3)}-3\widehat{m}_n^{(2)}\hat{\mu}_n+2\hat{\mu}_n^3} {\left(\widehat{m}_n^{(2)} - \hat{\mu}_n^{2}\right)^{3/2}} \]where $ \widehat{m}_n^{(i)} $$ \widehat{m}_n^{(i)} $ are the $ i $$ i $-th moment and $ \hat{\mu}_n $$ \hat{\mu}_n $ the mean (first moment) of the $ n $$ n $ samples.
result_type
Args const &
void
Archive &const unsigned int
dont_care
accumulator_base
void
Args const &
result_typedont_care
void
Archive &const unsigned int
Args const &
accumulator_base
void
Args const &
result_typedont_care
void
Archive &const unsigned int
Args const &Kahan summation algorithm. The Kahan summation algorithm reduces the numerical error obtained with standard sequential sum.
accumulator_base
(mpl::or_< is_same< LeftRight, right >, is_same< LeftRight, left > >)
void
Args const &
result_typedont_care
void
Archive &const unsigned int
Args const &
tail_impl const &
void
Args const &std::size_t
accumulator_baseTail quantile estimation based on order statistics (for both left and right tails) The estimation of a tail quantile $\hat{q}$$\hat{q}$ with level $\alpha$$\alpha$ based on order statistics requires the caching of at least the $\lceil n\alpha\rceil$$\lceil n\alpha\rceil$ smallest or the $\lceil n(1-\alpha)\rceil$$\lceil n(1-\alpha)\rceil$ largest samples, $n$$n$ being the total number of samples. The largest of the $\lceil n\alpha\rceil$$\lceil n\alpha\rceil$ smallest samples or the smallest of the $\lceil n(1-\alpha)\rceil$$\lceil n(1-\alpha)\rceil$ largest samples provides an estimate for the quantile:\[ \hat{q}_{n,\alpha} = X_{\lceil \alpha n \rceil:n} \]\[ \hat{q}_{n,\alpha} = X_{\lceil \alpha n \rceil:n} \]
result_type
Args const &
void
Archive &const unsigned int
dont_care
accumulator_base
void
Args const &std::size_t
result_type
Args const &
Args const &
result_type
TailRng const &
void
Archive &const unsigned int
accumulator_baseEstimation of the absolute and relative tail variate means (for both left and right tails) For all $j$$j$-th variates associated to the $\lceil n(1-\alpha)\rceil$$\lceil n(1-\alpha)\rceil$ largest samples (or the $\lceil n(1-\alpha)\rceil$$\lceil n(1-\alpha)\rceil$ smallest samples in case of the left tail), the absolute tail means $\widehat{ATM}_{n,\alpha}(X, j)$$\widehat{ATM}_{n,\alpha}(X, j)$ are computed and returned as an iterator range. Alternatively, the relative tail means $\widehat{RTM}_{n,\alpha}(X, j)$$\widehat{RTM}_{n,\alpha}(X, j)$ are returned, which are the absolute tail means normalized with the (non-coherent) sample tail mean $\widehat{NCTM}_{n,\alpha}(X)$$\widehat{NCTM}_{n,\alpha}(X)$.\[ \widehat{ATM}_{n,\alpha}^{\mathrm{right}}(X, j) = \frac{1}{\lceil n(1-\alpha) \rceil} \sum_{i=\lceil \alpha n \rceil}^n \xi_{j,i} \]\[ \widehat{ATM}_{n,\alpha}^{\mathrm{right}}(X, j) = \frac{1}{\lceil n(1-\alpha) \rceil} \sum_{i=\lceil \alpha n \rceil}^n \xi_{j,i} \]\[ \widehat{ATM}_{n,\alpha}^{\mathrm{left}}(X, j) = \frac{1}{\lceil n\alpha \rceil} \sum_{i=1}^{\lceil n\alpha \rceil} \xi_{j,i} \]\[ \widehat{ATM}_{n,\alpha}^{\mathrm{left}}(X, j) = \frac{1}{\lceil n\alpha \rceil} \sum_{i=1}^{\lceil n\alpha \rceil} \xi_{j,i} \]\[ \widehat{RTM}_{n,\alpha}^{\mathrm{right}}(X, j) = \frac{\sum_{i=\lceil n\alpha \rceil}^n \xi_{j,i}} {\lceil n(1-\alpha)\rceil\widehat{NCTM}_{n,\alpha}^{\mathrm{right}}(X)} \]\[ \widehat{RTM}_{n,\alpha}^{\mathrm{right}}(X, j) = \frac{\sum_{i=\lceil n\alpha \rceil}^n \xi_{j,i}} {\lceil n(1-\alpha)\rceil\widehat{NCTM}_{n,\alpha}^{\mathrm{right}}(X)} \]\[ \widehat{RTM}_{n,\alpha}^{\mathrm{left}}(X, j) = \frac{\sum_{i=1}^{\lceil n\alpha \rceil} \xi_{j,i}} {\lceil n\alpha\rceil\widehat{NCTM}_{n,\alpha}^{\mathrm{left}}(X)} \]\[ \widehat{RTM}_{n,\alpha}^{\mathrm{left}}(X, j) = \frac{\sum_{i=1}^{\lceil n\alpha \rceil} \xi_{j,i}} {\lceil n\alpha\rceil\widehat{NCTM}_{n,\alpha}^{\mathrm{left}}(X)} \]
result_type
Args const &
void
Archive &const unsigned int
dont_care
accumulator_baseIterative calculation of variance. Iterative calculation of sample variance $\sigma_n^2$$\sigma_n^2$ according to the formula \[ \sigma_n^2 = \frac{1}{n} \sum_{i = 1}^n (x_i - \mu_n)^2 = \frac{n-1}{n} \sigma_{n-1}^2 + \frac{1}{n-1}(x_n - \mu_n)^2. \]\[ \sigma_n^2 = \frac{1}{n} \sum_{i = 1}^n (x_i - \mu_n)^2 = \frac{n-1}{n} \sigma_{n-1}^2 + \frac{1}{n-1}(x_n - \mu_n)^2. \] where \[ \mu_n = \frac{1}{n} \sum_{i = 1}^n x_i. \]\[ \mu_n = \frac{1}{n} \sum_{i = 1}^n x_i. \] is the estimate of the sample mean and $n$$n$ is the number of samples.Note that the sample variance is not defined for $n <= 1$$n <= 1$.A simplification can be obtained by the approximate recursion \[ \sigma_n^2 \approx \frac{n-1}{n} \sigma_{n-1}^2 + \frac{1}{n}(x_n - \mu_n)^2. \]\[ \sigma_n^2 \approx \frac{n-1}{n} \sigma_{n-1}^2 + \frac{1}{n}(x_n - \mu_n)^2. \] because the difference \[ \left(\frac{1}{n-1} - \frac{1}{n}\right)(x_n - \mu_n)^2 = \frac{1}{n(n-1)}(x_n - \mu_n)^2. \]\[ \left(\frac{1}{n-1} - \frac{1}{n}\right)(x_n - \mu_n)^2 = \frac{1}{n(n-1)}(x_n - \mu_n)^2. \] converges to zero as $n \rightarrow \infty$$n \rightarrow \infty$. However, for small $ n $$ n $ the difference can be non-negligible.
void
Args const &
result_typedont_care
void
Archive &const unsigned int
Args const &
accumulator_baseWeighted Covariance Estimator. An iterative Monte Carlo estimator for the weighted covariance $\mathrm{Cov}(X,X')$$\mathrm{Cov}(X,X')$, where $X$$X$ is a sample and $X'$$X'$ a variate, is given by:\[ \hat{c}_n = \frac{\bar{w}_n-w_n}{\bar{w}_n} \hat{c}_{n-1} + \frac{w_n}{\bar{w}_n-w_n}(X_n - \hat{\mu}_n)(X_n' - \hat{\mu}_n'), \quad n\ge2,\quad\hat{c}_1 = 0, \]\[ \hat{c}_n = \frac{\bar{w}_n-w_n}{\bar{w}_n} \hat{c}_{n-1} + \frac{w_n}{\bar{w}_n-w_n}(X_n - \hat{\mu}_n)(X_n' - \hat{\mu}_n'), \quad n\ge2,\quad\hat{c}_1 = 0, \]$\hat{\mu}_n$$\hat{\mu}_n$ and $\hat{\mu}_n'$$\hat{\mu}_n'$ being the weighted means of the samples and variates and $\bar{w}_n$$\bar{w}_n$ the sum of the $n$$n$ first weights $w_i$$w_i$.
void
Args const &
result_typedont_care
void
Archive &const unsigned int
Args const &
accumulator_baseHistogram density estimator for weighted samples. The histogram density estimator returns a histogram of the sample distribution. The positions and sizes of the bins are determined using a specifiable number of cached samples (cache_size). The range between the minimum and the maximum of the cached samples is subdivided into a specifiable number of bins (num_bins) of same size. Additionally, an under- and an overflow bin is added to capture future under- and overflow samples. Once the bins are determined, the cached samples and all subsequent samples are added to the correct bins. At the end, a range of std::pair is returned, where each pair contains the position of the bin (lower bound) and the sum of the weights (normalized with the sum of all weights).
void
Args const &
result_type
Args const &
void
Archive &const unsigned int
Args const &
accumulator_baseMultiple quantile estimation with the extended $P^2$$P^2$ algorithm for weighted samples. This version of the extended $P^2$$P^2$ algorithm extends the extended $P^2$$P^2$ algorithm to support weighted samples. The extended $P^2$$P^2$ algorithm dynamically estimates several quantiles without storing samples. Assume that $m$$m$ quantiles $\xi_{p_1}, \ldots, \xi_{p_m}$$\xi_{p_1}, \ldots, \xi_{p_m}$ are to be estimated. Instead of storing the whole sample cumulative distribution, the algorithm maintains only $m+2$$m+2$ principal markers and $m+1$$m+1$ middle markers, whose positions are updated with each sample and whose heights are adjusted (if necessary) using a piecewise-parabolic formula. The heights of the principal markers are the current estimates of the quantiles and are returned as an iterator range.For further details, seeK. E. E. Raatikainen, Simultaneous estimation of several quantiles, Simulation, Volume 49, Number 4 (October), 1986, p. 159-164.The extended $ P^2 $$ P^2 $ algorithm generalizes the $ P^2 $$ P^2 $ algorithm ofR. Jain and I. Chlamtac, The P^2 algorithm for dynamic calculation of quantiles and histograms without storing observations, Communications of the ACM, Volume 28 (October), Number 10, 1985, p. 1076-1085.
void
Args const &
result_typedont_care
void
Archive &const unsigned int
Args const &
accumulator_baseKurtosis estimation for weighted samples. The kurtosis of a sample distribution is defined as the ratio of the 4th central moment and the square of the 2nd central moment (the variance) of the samples, minus 3. The term $ -3 $$ -3 $ is added in order to ensure that the normal distribution has zero kurtosis. The kurtosis can also be expressed by the simple moments:\[ \hat{g}_2 = \frac {\widehat{m}_n^{(4)}-4\widehat{m}_n^{(3)}\hat{\mu}_n+6\widehat{m}_n^{(2)}\hat{\mu}_n^2-3\hat{\mu}_n^4} {\left(\widehat{m}_n^{(2)} - \hat{\mu}_n^{2}\right)^2} - 3, \]\[ \hat{g}_2 = \frac {\widehat{m}_n^{(4)}-4\widehat{m}_n^{(3)}\hat{\mu}_n+6\widehat{m}_n^{(2)}\hat{\mu}_n^2-3\hat{\mu}_n^4} {\left(\widehat{m}_n^{(2)} - \hat{\mu}_n^{2}\right)^2} - 3, \]where $ \widehat{m}_n^{(i)} $$ \widehat{m}_n^{(i)} $ are the $ i $$ i $-th moment and $ \hat{\mu}_n $$ \hat{\mu}_n $ the mean (first moment) of the $ n $$ n $ samples.The kurtosis estimator for weighted samples is formally identical to the estimator for unweighted samples, except that the weighted counterparts of all measures it depends on are to be taken.
result_type
Args const &
dont_care
accumulator_base
result_type
Args const &
dont_care
accumulator_baseMedian estimation for weighted samples based on the $P^2$$P^2$ quantile estimator. The $P^2$$P^2$ algorithm for weighted samples is invoked with a quantile probability of 0.5.
result_type
Args const &
dont_care
accumulator_base
N::value0
void
Args const &
result_type
Args const &
void
Archive &const unsigned int
Args const &
accumulator_baseHistogram calculation of the cumulative distribution with the $P^2$$P^2$ algorithm for weighted samples. A histogram of the sample cumulative distribution is computed dynamically without storing samples based on the $ P^2 $$ P^2 $ algorithm for weighted samples. The returned histogram has a specifiable amount (num_cells) equiprobable (and not equal-sized) cells.Note that applying importance sampling results in regions to be more and other regions to be less accurately estimated than without importance sampling, i.e., with unweighted samples.For further details, seeR. Jain and I. Chlamtac, The P^2 algorithm for dynamic calculation of quantiles and histograms without storing observations, Communications of the ACM, Volume 28 (October), Number 10, 1985, p. 1076-1085.
void
Args const &
result_type
Args const &
void
Archive &const unsigned int
Args const &
accumulator_baseSingle quantile estimation with the $P^2$$P^2$ algorithm for weighted samples. This version of the $P^2$$P^2$ algorithm extends the $P^2$$P^2$ algorithm to support weighted samples. The $P^2$$P^2$ algorithm estimates a quantile dynamically without storing samples. Instead of storing the whole sample cumulative distribution, only five points (markers) are stored. The heights of these markers are the minimum and the maximum of the samples and the current estimates of the $(p/2)$$(p/2)$-, $p$$p$ - and $(1+p)/2$$(1+p)/2$ -quantiles. Their positions are equal to the number of samples that are smaller or equal to the markers. Each time a new sample is added, the positions of the markers are updated and if necessary their heights are adjusted using a piecewise-parabolic formula.For further details, seeR. Jain and I. Chlamtac, The P^2 algorithm for dynamic calculation of quantiles and histograms without storing observations, Communications of the ACM, Volume 28 (October), Number 10, 1985, p. 1076-1085.
void
Args const &
result_typedont_care
void
Archive &const unsigned int
Args const &
accumulator_baseWeighted Peaks over Threshold Method for Weighted Quantile and Weighted Tail Mean Estimation. See Also:peaks_over_threshold_impl
void
Args const &
result_type
Args const &
void
Archive &const unsigned int
Args const &
accumulator_basePeaks over Threshold Method for Quantile and Tail Mean Estimation. See Also:weighted_peaks_over_threshold_impl
voiddont_care
result_type
Args const &
Args const &
accumulator_baseSkewness estimation for weighted samples. The skewness of a sample distribution is defined as the ratio of the 3rd central moment and the $ 3/2 $$ 3/2 $-th power of the 2nd central moment (the variance) of the samples. The skewness can also be expressed by the simple moments:\[ \hat{g}_1 = \frac {\widehat{m}_n^{(3)}-3\widehat{m}_n^{(2)}\hat{\mu}_n+2\hat{\mu}_n^3} {\left(\widehat{m}_n^{(2)} - \hat{\mu}_n^{2}\right)^{3/2}} \]\[ \hat{g}_1 = \frac {\widehat{m}_n^{(3)}-3\widehat{m}_n^{(2)}\hat{\mu}_n+2\hat{\mu}_n^3} {\left(\widehat{m}_n^{(2)} - \hat{\mu}_n^{2}\right)^{3/2}} \]where $ \widehat{m}_n^{(i)} $$ \widehat{m}_n^{(i)} $ are the $ i $$ i $-th moment and $ \hat{\mu}_n $$ \hat{\mu}_n $ the mean (first moment) of the $ n $$ n $ samples.The skewness estimator for weighted samples is formally identical to the estimator for unweighted samples, except that the weighted counterparts of all measures it depends on are to be taken.
result_type
Args const &
dont_care
accumulator_base
void
Args const &
result_typedont_care
void
Archive &const unsigned int
Args const &
accumulator_base
void
Args const &
result_typedont_care
void
Archive &const unsigned int
Args const &
accumulator_baseTail quantile estimation based on order statistics of weighted samples (for both left and right tails) An estimator $\hat{q}$$\hat{q}$ of tail quantiles with level $\alpha$$\alpha$ based on order statistics $X_{1:n} \leq X_{2:n} \leq\dots\leq X_{n:n}$$X_{1:n} \leq X_{2:n} \leq\dots\leq X_{n:n}$ of weighted samples are given by $X_{\lambda:n}$$X_{\lambda:n}$ (left tail) and $X_{\rho:n}$$X_{\rho:n}$ (right tail), where\[ \lambda = \inf\left\{ l \left| \frac{1}{\bar{w}_n}\sum_{i=1}^{l} w_i \geq \alpha \right. \right\} \]\[ \lambda = \inf\left\{ l \left| \frac{1}{\bar{w}_n}\sum_{i=1}^{l} w_i \geq \alpha \right. \right\} \]and\[ \rho = \sup\left\{ r \left| \frac{1}{\bar{w}_n}\sum_{i=r}^{n} w_i \geq (1 - \alpha) \right. \right\}, \]\[ \rho = \sup\left\{ r \left| \frac{1}{\bar{w}_n}\sum_{i=r}^{n} w_i \geq (1 - \alpha) \right. \right\}, \]$n$$n$ being the number of samples and $\bar{w}_n$$\bar{w}_n$ the sum of all weights.
result_type
Args const &
dont_care
accumulator_baseEstimation of the absolute and relative weighted tail variate means (for both left and right tails) For all $j$$j$-th variates associated to the\[ \lambda = \inf\left\{ l \left| \frac{1}{\bar{w}_n}\sum_{i=1}^{l} w_i \geq \alpha \right. \right\} \]\[ \lambda = \inf\left\{ l \left| \frac{1}{\bar{w}_n}\sum_{i=1}^{l} w_i \geq \alpha \right. \right\} \]smallest samples (left tail) or the weighted mean of the\[ n + 1 - \rho = n + 1 - \sup\left\{ r \left| \frac{1}{\bar{w}_n}\sum_{i=r}^{n} w_i \geq (1 - \alpha) \right. \right\} \]\[ n + 1 - \rho = n + 1 - \sup\left\{ r \left| \frac{1}{\bar{w}_n}\sum_{i=r}^{n} w_i \geq (1 - \alpha) \right. \right\} \]largest samples (right tail), the absolute weighted tail means $\widehat{ATM}_{n,\alpha}(X, j)$$\widehat{ATM}_{n,\alpha}(X, j)$ are computed and returned as an iterator range. Alternatively, the relative weighted tail means $\widehat{RTM}_{n,\alpha}(X, j)$$\widehat{RTM}_{n,\alpha}(X, j)$ are returned, which are the absolute weighted tail means normalized with the weighted (non-coherent) sample tail mean $\widehat{NCTM}_{n,\alpha}(X)$$\widehat{NCTM}_{n,\alpha}(X)$.\[ \widehat{ATM}_{n,\alpha}^{\mathrm{right}}(X, j) = \frac{1}{\sum_{i=\rho}^n w_i} \sum_{i=\rho}^n w_i \xi_{j,i} \]\[ \widehat{ATM}_{n,\alpha}^{\mathrm{right}}(X, j) = \frac{1}{\sum_{i=\rho}^n w_i} \sum_{i=\rho}^n w_i \xi_{j,i} \]\[ \widehat{ATM}_{n,\alpha}^{\mathrm{left}}(X, j) = \frac{1}{\sum_{i=1}^{\lambda}} \sum_{i=1}^{\lambda} w_i \xi_{j,i} \]\[ \widehat{ATM}_{n,\alpha}^{\mathrm{left}}(X, j) = \frac{1}{\sum_{i=1}^{\lambda}} \sum_{i=1}^{\lambda} w_i \xi_{j,i} \]\[ \widehat{RTM}_{n,\alpha}^{\mathrm{right}}(X, j) = \frac{\sum_{i=\rho}^n w_i \xi_{j,i}} {\sum_{i=\rho}^n w_i \widehat{NCTM}_{n,\alpha}^{\mathrm{right}}(X)} \]\[ \widehat{RTM}_{n,\alpha}^{\mathrm{right}}(X, j) = \frac{\sum_{i=\rho}^n w_i \xi_{j,i}} {\sum_{i=\rho}^n w_i \widehat{NCTM}_{n,\alpha}^{\mathrm{right}}(X)} \]\[ \widehat{RTM}_{n,\alpha}^{\mathrm{left}}(X, j) = 
\frac{\sum_{i=1}^{\lambda} w_i \xi_{j,i}} {\sum_{i=1}^{\lambda} w_i \widehat{NCTM}_{n,\alpha}^{\mathrm{left}}(X)} \]\[ \widehat{RTM}_{n,\alpha}^{\mathrm{left}}(X, j) = \frac{\sum_{i=1}^{\lambda} w_i \xi_{j,i}} {\sum_{i=1}^{\lambda} w_i \widehat{NCTM}_{n,\alpha}^{\mathrm{left}}(X)} \]
result_type
Args const &
void
Archive &const unsigned int
dont_care
accumulator_baseIterative calculation of variance of weighted samples. Iterative calculation of variance of weighted samples: \[ \hat{\sigma}_n^2 = \frac{\bar{w}_n - w_n}{\bar{w}_n}\hat{\sigma}_{n - 1}^2 + \frac{w_n}{\bar{w}_n - w_n}\left(X_n - \hat{\mu}_n\right)^2 ,\quad n\ge2,\quad\hat{\sigma}_0^2 = 0. \]\[ \hat{\sigma}_n^2 = \frac{\bar{w}_n - w_n}{\bar{w}_n}\hat{\sigma}_{n - 1}^2 + \frac{w_n}{\bar{w}_n - w_n}\left(X_n - \hat{\mu}_n\right)^2 ,\quad n\ge2,\quad\hat{\sigma}_0^2 = 0. \] where $\bar{w}_n$$\bar{w}_n$ is the sum of the $n$$n$ weights $w_i$$w_i$ and $\hat{\mu}_n$$\hat{\mu}_n$ the estimate of the mean of the weighted samples. Note that the sample variance is not defined for $n <= 1$$n <= 1$.
void
Args const &
result_typedont_care
void
Archive &const unsigned int
Args const &
accumulator_baseMedian estimation based on the density estimator. The algorithm determines the bin in which the $0.5*cnt$$0.5*cnt$-th sample lies, $cnt$$cnt$ being the total number of samples. It returns the approximate horizontal position of this sample, based on a linear interpolation inside the bin.
voiddont_care
result_type
Args const &
void
Archive &const unsigned int
Args const &
accumulator_baseMedian estimation for weighted samples based on the density estimator. The algorithm determines the bin in which the $0.5*cnt$$0.5*cnt$-th sample lies, $cnt$$cnt$ being the total number of samples. It returns the approximate horizontal position of this sample, based on a linear interpolation inside the bin.
voiddont_care
result_type
Args const &
void
Archive &const unsigned int
Args const &
accumulator_baseMedian estimation based on the $P^2$$P^2$ cumulative distribution estimator. The algorithm determines the first (leftmost) bin with a height exceeding 0.5. It returns the approximate horizontal position of where the cumulative distribution equals 0.5, based on a linear interpolation inside the bin.
voiddont_care
result_type
Args const &
void
Archive &const unsigned int
dont_care
accumulator_baseMedian estimation for weighted samples based on the $P^2$$P^2$ cumulative distribution estimator. The algorithm determines the first (leftmost) bin with a height exceeding 0.5. It returns the approximate horizontal position of where the cumulative distribution equals 0.5, based on a linear interpolation inside the bin.
voiddont_care
result_type
Args const &
void
Archive &const unsigned int
dont_care
boost::accumulators::depends_on< count, non_coherent_tail_mean< LeftRight >, tail_variate< VariateType, VariateTag, LeftRight > >
boost::accumulators::depends_on< non_coherent_weighted_tail_mean< LeftRight >, tail_variate< VariateType, VariateTag, LeftRight >, tail_weights< LeftRight > >
boost::accumulators::depends_on< count, quantile, non_coherent_tail_mean< LeftRight > >
boost::accumulators::depends_on< count, mean, mean_of_variates< VariateType, VariateTag > >
boost::accumulators::depends_on< Feature >
boost::accumulators::depends_on< count >
boost::accumulators::depends_on< sum_of_weights >
boost::accumulators::depends_on< count, sum_of_variates< VariateType, VariateTag > >
int
boost::accumulators::depends_on< count >
boost::accumulators::depends_on< count, tail< LeftRight > >
boost::accumulators::depends_on< sum_of_weights, tail_weights< LeftRight > >
boost::accumulators::depends_on< count >pot_threshold_value
boost::accumulators::depends_on< count, tail< LeftRight > >pot_threshold_probability
boost::accumulators::depends_on< peaks_over_threshold< LeftRight > >
boost::accumulators::depends_on< peaks_over_threshold_prob< LeftRight > >
boost::accumulators::depends_on< peaks_over_threshold< LeftRight >, pot_quantile< LeftRight > >
boost::accumulators::depends_on< peaks_over_threshold_prob< LeftRight >, pot_quantile_prob< LeftRight > >boost::accumulators::depends_on<>mpl::print< class ____MISSING_SPECIFIC_QUANTILE_FEATURE_IN_ACCUMULATOR_SET____ >
boost::accumulators::depends_on< count, non_coherent_tail_mean< LeftRight >, tail_variate< VariateType, VariateTag, LeftRight > >
boost::accumulators::depends_on< non_coherent_weighted_tail_mean< LeftRight >, tail_variate< VariateType, VariateTag, LeftRight >, tail_weights< LeftRight > >
boost::accumulators::depends_on<>
boost::accumulators::depends_on<>
boost::accumulators::depends_on<>boost::accumulators::tail_cache_size_named_arg< LeftRight >boost::accumulators::depends_on<>mpl::print< class ____MISSING_SPECIFIC_TAIL_MEAN_FEATURE_IN_ACCUMULATOR_SET____ >
boost::accumulators::depends_on< count, tail< LeftRight > >
boost::accumulators::depends_on< tail< LeftRight > >
boost::accumulators::depends_on< tail< LeftRight > >
boost::accumulators::depends_on< count, sum_of_weights, weighted_mean, weighted_mean_of_variates< VariateType, VariateTag > >
boost::accumulators::depends_on< sum_of_weights, weighted_sum_of_variates< VariateType, VariateTag > >
int
boost::accumulators::depends_on< count, sum_of_weights >
boost::accumulators::depends_on< sum_of_weights >pot_threshold_value
boost::accumulators::depends_on< sum_of_weights, tail_weights< LeftRight > >pot_threshold_probability
boost::accumulators::depends_on< weighted_peaks_over_threshold< LeftRight > >
boost::accumulators::depends_on< weighted_peaks_over_threshold_prob< LeftRight > >
boost::accumulators::depends_on< weighted_peaks_over_threshold< LeftRight >, weighted_pot_quantile< LeftRight > >
boost::accumulators::depends_on< weighted_peaks_over_threshold_prob< LeftRight >, weighted_pot_quantile_prob< LeftRight > >
boost::accumulators::depends_on<>
boost::accumulators::depends_on< sum_of_weights, tail_weights< LeftRight > >