diff --git a/doc/api/confint.rst b/doc/api/confint.rst
index 9ba8102d..85cdc47b 100644
--- a/doc/api/confint.rst
+++ b/doc/api/confint.rst
@@ -3,13 +3,15 @@
 Confidence Interval
 ==============================================
 
-.. function:: confint(test::HypothesisTest, alpha=0.05; tail=:both)
+.. function:: confint(test::HypothesisTest, alpha=alpha(test); tail=tail(test))
 
-    Compute a confidence interval C with coverage 1-``alpha``.
+    Compute a confidence interval C with coverage 1-``alpha``
+    (``alpha=0.05`` is the default for all tests, unless ``alpha`` is overridden when the test
+    object is created).
 
-    If ``tail`` is ``:both`` (default), then a two-sided confidence
+    If ``tail`` is ``:both``, then a two-sided confidence
     interval is returned. If ``tail`` is ``:left`` or
-    ``:right``, then a one-sided confidence interval is returned
+    ``:right``, then a one-sided confidence interval is returned.
 
 .. note::
    Most of the implemented confidence intervals are *strongly consistent*, that is, the
diff --git a/doc/api/pvalue.rst b/doc/api/pvalue.rst
index 00e30618..db53e896 100644
--- a/doc/api/pvalue.rst
+++ b/doc/api/pvalue.rst
@@ -3,11 +3,11 @@
 p-value
 ==============================================
 
-.. function:: pvalue(test::HypothesisTest; tail=:both)
+.. function:: pvalue(test::HypothesisTest; tail=tail(test))
 
     Compute the p-value for a given significance test.
 
-    If ``tail`` is ``:both`` (default), then the p-value for the
+    If ``tail`` is ``:both``, then the p-value for the
     two-sided test is returned. If ``tail`` is ``:left`` or
     ``:right``, then a one-sided test is performed.
diff --git a/doc/parametric/test_t.rst b/doc/parametric/test_t.rst
index ba96b6db..a9b21d08 100644
--- a/doc/parametric/test_t.rst
+++ b/doc/parametric/test_t.rst
@@ -1,16 +1,19 @@
 T-test
 =============================================
 
-.. function:: OneSampleTTest(v::AbstractVector{T<:Real}, mu0::Real=0)
+.. function:: OneSampleTTest(v::AbstractVector{T<:Real}, mu0::Real=0; tail::Symbol=:both, alpha::Real=0.05)
 
     Perform a one sample t-test of the null hypothesis that the
     data in vector ``v`` comes from a distribution with mean ``mu0``
     against the alternative hypothesis that the distribution does not
     have mean ``mu0``.
 
+    ``tail`` and ``alpha`` specify the defaults when calling
+    :ref:`pvalue` and :ref:`confint`.
+
     Implements: :ref:`pvalue`, :ref:`confint`
 
-.. function:: OneSampleTTest(xbar::Real, stdev::Real, n::Int, mu0::Real=0)
+.. function:: OneSampleTTest(xbar::Real, stdev::Real, n::Int, mu0::Real=0; tail::Symbol=:both, alpha::Real=0.05)
 
     Perform a one sample t-test of the null hypothesis that ``n``
     values with mean ``xbar`` and sample standard deviation
@@ -18,9 +21,12 @@ T-test
     the alternative hypothesis that the distribution does not have
     mean ``mu0``.
 
+    ``tail`` and ``alpha`` specify the defaults when calling
+    :ref:`pvalue` and :ref:`confint`.
+
     Implements: :ref:`pvalue`, :ref:`confint`
 
-.. function:: OneSampleTTest(x::AbstractVector{T<:Real}, y::AbstractVector{T<:Real}, mu0::Real=0)
+.. function:: OneSampleTTest(x::AbstractVector{T<:Real}, y::AbstractVector{T<:Real}, mu0::Real=0; tail::Symbol=:both, alpha::Real=0.05)
 
     Perform a paired sample t-test of the null hypothesis that the
     differences between pairs of values in vectors ``x`` and
@@ -28,18 +34,24 @@ T-test
     alternative hypothesis that the distribution does not have mean
     ``mu0``.
 
+    ``tail`` and ``alpha`` specify the defaults when calling
+    :ref:`pvalue` and :ref:`confint`.
+
     Implements: :ref:`pvalue`, :ref:`confint`
 
-.. function:: EqualVarianceTTest(x::AbstractVector{T<:Real}, y::AbstractVector{T<:Real})
+.. function:: EqualVarianceTTest(x::AbstractVector{T<:Real}, y::AbstractVector{T<:Real}; tail::Symbol=:both, alpha::Real=0.05)
 
     Perform a two-sample t-test of the null hypothesis that ``x``
     and ``y`` come from a distributions with the same mean and equal
     variances against the alternative hypothesis that the
     distributions have different means and but equal variances.
 
+    ``tail`` and ``alpha`` specify the defaults when calling
+    :ref:`pvalue` and :ref:`confint`.
+
     Implements: :ref:`pvalue`, :ref:`confint`
 
-.. function:: UnequalVarianceTTest(x::AbstractVector{T<:Real}, y::AbstractVector{T<:Real})
+.. function:: UnequalVarianceTTest(x::AbstractVector{T<:Real}, y::AbstractVector{T<:Real}; tail::Symbol=:both, alpha::Real=0.05)
 
     Perform an unequal variance two-sample t-test of the null
     hypothesis that ``x`` and ``y`` come from a distributions with
@@ -55,4 +67,7 @@ T-test
         \nu_{\chi'} \approx \frac{\left(\sum_{i=1}^n k_i s_i^2\right)^2}
        {\sum_{i=1}^n \frac{(k_i s_i^2)^2}{\nu_i}}
 
+    ``tail`` and ``alpha`` specify the defaults when calling
+    :ref:`pvalue` and :ref:`confint`.
+
     Implements: :ref:`pvalue`, :ref:`confint`
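A minimal usage sketch of the keyword API documented above, assuming the constructors and defaults introduced by this patch (the numeric values are the ones asserted in the updated test/t.jl):

    using HypothesisTests

    t = OneSampleTTest(-5:10, tail=:left, alpha=0.05)  # store the test's own tail/alpha defaults
    tail(t)                 # :left
    pvalue(t)               # left-tailed by default now, ≈ 0.9735 for this data
    pvalue(t, tail=:both)   # the two-sided p-value is still available on request, ≈ 0.0530
    confint(t)              # one-sided interval of the form (-Inf, upper), following tail(t)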
diff --git a/src/HypothesisTests.jl b/src/HypothesisTests.jl
index 670b961a..d75f9573 100644
--- a/src/HypothesisTests.jl
+++ b/src/HypothesisTests.jl
@@ -32,7 +32,7 @@ using Rmath: pwilcox, psignrank
 import StatsBase.confint
 
-export testname, pvalue, confint
+export testname, pvalue, confint, tail, alpha
 
 @compat abstract type HypothesisTest end
 
 check_same_length(x::AbstractVector, y::AbstractVector) = if length(x) != length(y)
@@ -72,32 +72,38 @@ end
 function Base.show{T<:HypothesisTest}(io::IO, test::T)
     println(io, testname(test))
     println(io, repeat("-", length(testname(test))))
+
+    # utilities for pretty-printing
+    conf_string = string(floor((1 - alpha(test)) * 100, 6)) # limit to 6 decimals in %
+    format_detail(label::String, value::Any, len::Int) = # len is max length of label
+        " " * label * " "^max(len - length(label), 0) * string(value)
 
     # population details
-    has_ci = applicable(StatsBase.confint, test)
+    has_ci = applicable(confint, test)
     (param_name, param_under_h0, param_estimate) = population_param_of_interest(test)
     println(io, "Population details:")
-    println(io, "    parameter of interest: $param_name")
-    println(io, "    value under h_0: $param_under_h0")
-    println(io, "    point estimate: $param_estimate")
+    println(io, format_detail("parameter of interest:", param_name, 32))
+    println(io, format_detail("value under h_0:", param_under_h0, 32))
+    println(io, format_detail("point estimate:", param_estimate, 32))
     if has_ci
-        println(io, "    95% confidence interval: $(StatsBase.confint(test))")
+        println(io, format_detail(conf_string*"% confidence interval:", confint(test), 32))
     end
     println(io)
 
     # test summary
-    p = pvalue(test)
-    outcome = if p > 0.05 "fail to reject" else "reject" end
-    tail = default_tail(test)
-    println(io, "Test summary:")
-    println(io, "    outcome with 95% confidence: $outcome h_0")
-    if tail == :both
-        println(io, "    two-sided p-value: $p")
-    elseif tail == :left || tail == :right
-        println(io, "    one-sided p-value: $p")
+    testtail = tail(test)
+    p = pvalue(test) # obeys value of tail(test) if applicable
+    outcome = p > alpha(test) ? "fail to reject" : "reject"
+    if testtail == :both
+        plabel = "two-sided p-value:"
+    elseif testtail == :left || testtail == :right
+        plabel = "one-sided p-value ($(string(testtail)) tail):"
     else
-        println(io, "    p-value: $p")
+        plabel = "p-value:"
     end
+    println(io, "Test summary:")
+    println(io, format_detail("outcome with "*conf_string*"% confidence:", outcome*" h_0", 36))
+    println(io, format_detail(plabel, p, 36))
     println(io)
 
     # further details
@@ -108,8 +114,9 @@ end
 # parameter of interest: name, value under h0, point estimate
 population_param_of_interest{T<:HypothesisTest}(test::T) = ("not implemented yet", NaN, NaN)
 
-# is the test one- or two-sided
-default_tail(test::HypothesisTest) = :undefined
+# is the test one- or two-sided?
+tail(test::HypothesisTest) = :undefined # overloaded for defaults or field access
+alpha(test::HypothesisTest) = 0.05
 
 function show_params{T<:HypothesisTest}(io::IO, test::T, ident="")
     fieldidx = find(Bool[t<:Number for t in T.types])
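The format_detail helper above is defined inline inside Base.show; restated standalone (type annotations dropped) to illustrate the padding behaviour it relies on:

    # pad the label so every printed value starts at the same column
    format_detail(label, value, len) = " " * label * " "^max(len - length(label), 0) * string(value)

    format_detail("point estimate:", 0.42, 32)
    format_detail("95.0% confidence interval:", (0.1, 0.7), 32)
    # shorter labels receive more padding, so the two values line up

conf_string is derived from alpha(test), so the confidence-interval label only reads "95.0%" when the stored alpha is 0.05.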
"fail to reject" : "reject" + if testtail == :both + plabel ="two-sided p-value:" + elseif testtail == :left || testtail == :right + plabel = "one-sided p-value ($(string(testtail)) tail):" else - println(io, " p-value: $p") + plabel = "p-value:" end + println(io, "Test summary:") + println(io, format_detail("outcome with "*conf_string*"% confidence:", outcome*" h_0", 36)) + println(io, format_detail(plabel, p, 36)) println(io) # further details @@ -108,8 +114,9 @@ end # parameter of interest: name, value under h0, point estimate population_param_of_interest{T<:HypothesisTest}(test::T) = ("not implemented yet", NaN, NaN) -# is the test one- or two-sided -default_tail(test::HypothesisTest) = :undefined +# is the test one- or two-sided? +tail(test::HypothesisTest) = :undefined # overloaded for defaults or field access +alpha(test::HypothesisTest) = 0.05 function show_params{T<:HypothesisTest}(io::IO, test::T, ident="") fieldidx = find(Bool[t<:Number for t in T.types]) diff --git a/src/anderson_darling.jl b/src/anderson_darling.jl index 14bda655..33e89f75 100644 --- a/src/anderson_darling.jl +++ b/src/anderson_darling.jl @@ -35,7 +35,7 @@ function OneSampleADTest{T<:Real}(x::AbstractVector{T}, d::UnivariateDistributio end testname(::OneSampleADTest) = "One sample Anderson-Darling test" -default_tail(test::OneSampleADTest) = :right +tail(test::OneSampleADTest) = :right function show_params(io::IO, x::OneSampleADTest, ident="") println(io, ident, "number of observations: $(x.n)") @@ -78,7 +78,7 @@ function KSampleADTest{T<:Real}(xs::AbstractVector{T}...; modified=true) end testname(::KSampleADTest) = "k-sample Anderson-Darling test" -default_tail(test::KSampleADTest) = :right +tail(test::KSampleADTest) = :right function show_params(io::IO, x::KSampleADTest, ident="") println(io, ident, "number of samples: $(x.k)") diff --git a/src/augmented_dickey_fuller.jl b/src/augmented_dickey_fuller.jl index 3282964e..8e568e00 100644 --- a/src/augmented_dickey_fuller.jl +++ b/src/augmented_dickey_fuller.jl @@ -207,6 +207,7 @@ end testname(::ADFTest) = "Augmented Dickey-Fuller unit root test" population_param_of_interest(x::ADFTest) = ("coefficient on lagged non-differenced variable", 0, x.coef) +tail(test::ADFTest) = :left function show_params(io::IO, x::ADFTest, ident) println(io, ident, "sample size in regression: ", x.n) diff --git a/src/binomial.jl b/src/binomial.jl index 1cac9669..cb8cf3c6 100644 --- a/src/binomial.jl +++ b/src/binomial.jl @@ -46,7 +46,7 @@ Returns the string value. E.g. 
"Binomial test", "Sign Test" """ testname(::BinomialTest) = "Binomial test" population_param_of_interest(x::BinomialTest) = ("Probability of success", x.p, x.x/x.n) # parameter of interest: name, value under h0, point estimate -default_tail(test::BinomialTest) = :both +tail(test::BinomialTest) = :both function show_params(io::IO, x::BinomialTest, ident="") println(io, ident, "number of observations: $(x.n)") @@ -157,7 +157,7 @@ SignTest{T<:Real, S<:Real}(x::AbstractVector{T}, y::AbstractVector{S}) = SignTes testname(::SignTest) = "Sign Test" population_param_of_interest(x::SignTest) = ("Median", x.median, median(x.data)) # parameter of interest: name, value under h0, point estimate -default_tail(test::SignTest) = :both +tail(test::SignTest) = :both function show_params(io::IO, x::SignTest, ident="") text1 = "number of observations:" diff --git a/src/box_test.jl b/src/box_test.jl index c3f43380..7dbbe3a2 100644 --- a/src/box_test.jl +++ b/src/box_test.jl @@ -62,7 +62,7 @@ end testname(::BoxPierceTest) = "Box-Pierce autocorrelation test" population_param_of_interest(x::BoxPierceTest) = ("autocorrelations up to lag k", "all zero", NaN) -default_tail(test::BoxPierceTest) = :right +tail(test::BoxPierceTest) = :right function show_params(io::IO, x::BoxPierceTest, ident) println(io, ident, "number of observations: ", x.n) @@ -111,7 +111,7 @@ end testname(::LjungBoxTest) = "Ljung-Box autocorrelation test" population_param_of_interest(x::LjungBoxTest) = ("autocorrelations up to lag k", "all zero", NaN) -default_tail(test::LjungBoxTest) = :right +tail(test::LjungBoxTest) = :right function show_params(io::IO, x::LjungBoxTest, ident) println(io, ident, "number of observations: ", x.n) diff --git a/src/breusch_godfrey.jl b/src/breusch_godfrey.jl index 9d0d2411..7dec9d00 100644 --- a/src/breusch_godfrey.jl +++ b/src/breusch_godfrey.jl @@ -68,7 +68,7 @@ end testname(::BreuschGodfreyTest) = "Breusch-Godfrey autocorrelation test" population_param_of_interest(x::BreuschGodfreyTest) = ("coefficients on lagged residuals up to lag p", "all zero", NaN) -default_tail(test::BreuschGodfreyTest) = :right +tail(test::BreuschGodfreyTest) = :right function show_params(io::IO, x::BreuschGodfreyTest, ident) println(io, ident, "number of observations: ", x.n) diff --git a/src/circular.jl b/src/circular.jl index 51cbdbcf..555297c2 100644 --- a/src/circular.jl +++ b/src/circular.jl @@ -53,7 +53,7 @@ end testname(::RayleighTest) = "Rayleigh test" population_param_of_interest(x::RayleighTest) = ("Mean resultant length", 0, x.Rbar) # parameter of interest: name, value under h0, point estimate -default_tail(test::RayleighTest) = :both +tail(test::RayleighTest) = :both function show_params(io::IO, x::RayleighTest, ident="") println(io, ident, "number of observations: $(x.n)") @@ -99,7 +99,7 @@ FisherTLinearAssociation{S <: Real, T <: Real}(theta::Vector{S}, testname(::FisherTLinearAssociation) = "T-linear test of circular-circular association" population_param_of_interest(x::FisherTLinearAssociation) = ("Circular correlation coefficient", 0, x.rho_t) # parameter of interest: name, value under h0, point estimate -default_tail(test::FisherTLinearAssociation) = :both +tail(test::FisherTLinearAssociation) = :both function show_params(io::IO, x::FisherTLinearAssociation, ident="") println(io, ident, "number of observations: [$(length(x.theta)),$(length(x.phi))]") @@ -212,7 +212,7 @@ end testname(::JammalamadakaCircularCorrelation) = "Jammalamadaka circular correlation" 
population_param_of_interest(x::JammalamadakaCircularCorrelation) = ("Circular-circular correlation coefficient", 0, x.r) # parameter of interest: name, value under h0, point estimate -default_tail(test::JammalamadakaCircularCorrelation) = :both +tail(test::JammalamadakaCircularCorrelation) = :both function show_params(io::IO, x::JammalamadakaCircularCorrelation, ident="") println(io, ident, "test statistic: $(x.Z)") diff --git a/src/deprecated.jl b/src/deprecated.jl index a44bc037..117ee3f5 100644 --- a/src/deprecated.jl +++ b/src/deprecated.jl @@ -1,3 +1,4 @@ using Base: @deprecate @deprecate ci(args...) confint(args...) +@deprecate default_tail(test::HypothesisTest) tail(test) diff --git a/src/fisher.jl b/src/fisher.jl index 86b76e25..d98e40bb 100644 --- a/src/fisher.jl +++ b/src/fisher.jl @@ -45,7 +45,7 @@ end testname(::FisherExactTest) = "Fisher's exact test" population_param_of_interest(x::FisherExactTest) = ("Odds ratio", 1.0, x.ω) # parameter of interest: name, value under h0, point estimate -default_tail(test::FisherExactTest) = :both +tail(test::FisherExactTest) = :both # The sizing argument to print_matrix was removed during the 0.5 dev period if VERSION < v"0.5.0-dev+1936" diff --git a/src/jarque_bera.jl b/src/jarque_bera.jl index c55abfe8..c92e9694 100644 --- a/src/jarque_bera.jl +++ b/src/jarque_bera.jl @@ -77,7 +77,7 @@ end testname(::JarqueBeraTest) = "Jarque-Bera normality test" population_param_of_interest(x::JarqueBeraTest) = ("skewness and kurtosis", "0 and 3", "$(x.skew) and $(x.kurt)") -default_tail(test::JarqueBeraTest) = :right +tail(test::JarqueBeraTest) = :right function show_params(io::IO, x::JarqueBeraTest, ident) println(io, ident, "number of observations: ", x.n) diff --git a/src/kolmogorov_smirnov.jl b/src/kolmogorov_smirnov.jl index 8c0472be..7ddf3de1 100644 --- a/src/kolmogorov_smirnov.jl +++ b/src/kolmogorov_smirnov.jl @@ -31,7 +31,7 @@ export @compat abstract type ExactKSTest <: KSTest end population_param_of_interest(x::KSTest) = ("Supremum of CDF differences", 0.0, x.δ) # parameter of interest: name, value under h0, point estimate -default_tail(test::KSTest) = :both +tail(test::KSTest) = :both ## ONE SAMPLE KS-TEST diff --git a/src/kruskal_wallis.jl b/src/kruskal_wallis.jl index 677e0013..f4f6f182 100644 --- a/src/kruskal_wallis.jl +++ b/src/kruskal_wallis.jl @@ -43,7 +43,7 @@ end testname(::KruskalWallisTest) = "Kruskal-Wallis rank sum test (chi-square approximation)" population_param_of_interest(x::KruskalWallisTest) = ("Location parameters", "all equal", NaN) # parameter of interest: name, value under h0, point estimate -default_tail(test::KruskalWallisTest) = :right +tail(test::KruskalWallisTest) = :right function show_params(io::IO, x::KruskalWallisTest, ident) println(io, ident, "number of observation in each group: ", x.n_i) diff --git a/src/mann_whitney.jl b/src/mann_whitney.jl index 78557525..0653f852 100644 --- a/src/mann_whitney.jl +++ b/src/mann_whitney.jl @@ -66,7 +66,7 @@ ExactMannWhitneyUTest{S<:Real,T<:Real}(x::AbstractVector{S}, y::AbstractVector{T testname(::ExactMannWhitneyUTest) = "Exact Mann-Whitney U test" population_param_of_interest(x::ExactMannWhitneyUTest) = ("Location parameter (pseudomedian)", 0, x.median) # parameter of interest: name, value under h0, point estimate -default_tail(test::ExactMannWhitneyUTest) = :both +tail(test::ExactMannWhitneyUTest) = :both function show_params(io::IO, x::ExactMannWhitneyUTest, ident) println(io, ident, "number of observations in each group: ", [x.nx, x.ny]) @@ -153,7 +153,7 @@ 
ApproximateMannWhitneyUTest{S<:Real,T<:Real}(x::AbstractVector{S}, y::AbstractVe testname(::ApproximateMannWhitneyUTest) = "Approximate Mann-Whitney U test" population_param_of_interest(x::ApproximateMannWhitneyUTest) = ("Location parameter (pseudomedian)", 0, x.median) # parameter of interest: name, value under h0, point estimate -default_tail(test::ApproximateMannWhitneyUTest) = :both +tail(test::ApproximateMannWhitneyUTest) = :both function show_params(io::IO, x::ApproximateMannWhitneyUTest, ident) println(io, ident, "number of observations in each group: ", [x.nx, x.ny]) diff --git a/src/power_divergence.jl b/src/power_divergence.jl index ab09fd7f..5c28f80e 100644 --- a/src/power_divergence.jl +++ b/src/power_divergence.jl @@ -40,7 +40,7 @@ end # parameter of interest: name, value under h0, point estimate population_param_of_interest(x::PowerDivergenceTest) = ("Multinomial Probabilities", x.theta0, x.thetahat) -default_tail(test::PowerDivergenceTest) = :right +tail(test::PowerDivergenceTest) = :right pvalue(x::PowerDivergenceTest; tail=:right) = pvalue(Chisq(x.df),x.stat; tail=tail) diff --git a/src/t.jl b/src/t.jl index 313ccbd7..9bcc7432 100644 --- a/src/t.jl +++ b/src/t.jl @@ -28,18 +28,18 @@ export OneSampleTTest, TwoSampleTTest, EqualVarianceTTest, @compat abstract type TTest <: HypothesisTest end @compat abstract type TwoSampleTTest <: TTest end -pvalue(x::TTest; tail=:both) = pvalue(TDist(x.df), x.t; tail=tail) - -default_tail(test::TTest) = :both +pvalue(x::TTest; tail=x.tail) = pvalue(TDist(x.df), x.t; tail=tail) +tail(x::TTest) = x.tail # defaults set by constructors +alpha(x::TTest) = x.alpha # confidence interval by inversion -function StatsBase.confint(x::TTest, alpha::Float64=0.05; tail=:both) +function StatsBase.confint(x::TTest, alpha::Float64=x.alpha; tail::Symbol=x.tail) check_alpha(alpha) if tail == :left - (-Inf, StatsBase.confint(x, alpha*2)[2]) + (-Inf, StatsBase.confint(x, alpha*2, tail=:both)[2]) # tail=:both blocks infinite recursion elseif tail == :right - (StatsBase.confint(x, alpha*2)[1], Inf) + (StatsBase.confint(x, alpha*2, tail=:both)[1], Inf) elseif tail == :both q = quantile(TDist(x.df), 1-alpha/2) (x.xbar-q*x.stderr, x.xbar+q*x.stderr) @@ -58,6 +58,8 @@ immutable OneSampleTTest <: TTest stderr::Real # empirical standard error t::Real # t-statistic μ0::Real # mean under h_0 + tail::Symbol # :left, :right, or :both + alpha::Real # alpha value end testname(::OneSampleTTest) = "One sample t-test" @@ -70,19 +72,21 @@ function show_params(io::IO, x::OneSampleTTest, ident="") println(io, ident, "empirical standard error: $(x.stderr)") end -function OneSampleTTest(xbar::Real, stddev::Real, n::Int, μ0::Real=0) +function OneSampleTTest(xbar::Real, stddev::Real, n::Int, μ0::Real=0; tail::Symbol=:both, alpha::Real=0.05) stderr = stddev/sqrt(n) t = (xbar-μ0)/stderr df = n-1 - OneSampleTTest(n, xbar, df, stderr, t, μ0) + OneSampleTTest(n, xbar, df, stderr, t, μ0, tail, alpha) end -OneSampleTTest{T<:Real}(v::AbstractVector{T}, μ0::Real=0) = OneSampleTTest(mean(v), std(v), length(v), μ0) +OneSampleTTest{T<:Real}(v::AbstractVector{T}, μ0::Real=0; tail::Symbol=:both, alpha::Real=0.05) = + OneSampleTTest(mean(v), std(v), length(v), μ0, tail=tail, alpha=alpha) -function OneSampleTTest{T<:Real, S<:Real}(x::AbstractVector{T}, y::AbstractVector{S}, μ0::Real=0) +function OneSampleTTest{T<:Real, S<:Real}(x::AbstractVector{T}, y::AbstractVector{S}, μ0::Real=0; + tail::Symbol=:both, alpha::Real=0.05) check_same_length(x, y) - OneSampleTTest(x - y, μ0) + OneSampleTTest(x - y, μ0, 
tail=tail, alpha=alpha) end @@ -96,6 +100,8 @@ immutable EqualVarianceTTest <: TwoSampleTTest stderr::Real # empirical standard error t::Real # t-statistic μ0::Real # mean difference under h_0 + tail::Symbol # :left, :right, or :both + alpha::Real # alpha value end function show_params(io::IO, x::TwoSampleTTest, ident="") @@ -108,14 +114,15 @@ end testname(::EqualVarianceTTest) = "Two sample t-test (equal variance)" population_param_of_interest(x::TwoSampleTTest) = ("Mean difference", x.μ0, x.xbar) # parameter of interest: name, value under h0, point estimate -function EqualVarianceTTest{T<:Real,S<:Real}(x::AbstractVector{T}, y::AbstractVector{S}, μ0::Real=0) +function EqualVarianceTTest{T<:Real,S<:Real}(x::AbstractVector{T}, y::AbstractVector{S}, μ0::Real=0; + tail::Symbol=:both, alpha::Real=0.05) nx, ny = length(x), length(y) xbar = mean(x) - mean(y) stddev = sqrt(((nx - 1) * var(x) + (ny - 1) * var(y)) / (nx + ny - 2)) stderr = stddev * sqrt(1/nx + 1/ny) t = (xbar - μ0) / stderr df = nx + ny - 2 - EqualVarianceTTest(nx, ny, xbar, df, stderr, t, μ0) + EqualVarianceTTest(nx, ny, xbar, df, stderr, t, μ0, tail, alpha) end @@ -129,16 +136,19 @@ immutable UnequalVarianceTTest <: TwoSampleTTest stderr::Real # empirical standard error t::Real # t-statistic μ0::Real # mean under h_0 + tail::Symbol # :left, :right, or :both + alpha::Real # alpha value end testname(::UnequalVarianceTTest) = "Two sample t-test (unequal variance)" -function UnequalVarianceTTest{T<:Real,S<:Real}(x::AbstractVector{T}, y::AbstractVector{S}, μ0::Real=0) +function UnequalVarianceTTest{T<:Real,S<:Real}(x::AbstractVector{T}, y::AbstractVector{S}, μ0::Real=0; + tail::Symbol=:both, alpha::Real=0.05) nx, ny = length(x), length(y) xbar = mean(x)-mean(y) varx, vary = var(x), var(y) stderr = sqrt(varx/nx + vary/ny) t = (xbar-μ0)/stderr df = (varx / nx + vary / ny)^2 / ((varx / nx)^2 / (nx - 1) + (vary / ny)^2 / (ny - 1)) - UnequalVarianceTTest(nx, ny, xbar, df, stderr, t, μ0) + UnequalVarianceTTest(nx, ny, xbar, df, stderr, t, μ0, tail, alpha) end diff --git a/src/wilcoxon.jl b/src/wilcoxon.jl index c1cfe6a2..e5e4f51f 100644 --- a/src/wilcoxon.jl +++ b/src/wilcoxon.jl @@ -69,7 +69,7 @@ ExactSignedRankTest{S<:Real,T<:Real}(x::AbstractVector{S}, y::AbstractVector{T}) testname(::ExactSignedRankTest) = "Exact Wilcoxon signed rank test" population_param_of_interest(x::ExactSignedRankTest) = ("Location parameter (pseudomedian)", 0, x.median) # parameter of interest: name, value under h0, point estimate -default_tail(test::ExactSignedRankTest) = :both +tail(test::ExactSignedRankTest) = :both function show_params(io::IO, x::ExactSignedRankTest, ident) println(io, ident, "number of observations: ", x.n) @@ -160,7 +160,7 @@ ApproximateSignedRankTest{S<:Real,T<:Real}(x::AbstractVector{S}, y::AbstractVect testname(::ApproximateSignedRankTest) = "Approximate Wilcoxon signed rank test" population_param_of_interest(x::ApproximateSignedRankTest) = ("Location parameter (pseudomedian)", 0, x.median) # parameter of interest: name, value under h0, point estimate -default_tail(test::ApproximateSignedRankTest) = :both +tail(test::ApproximateSignedRankTest) = :both function show_params(io::IO, x::ApproximateSignedRankTest, ident) println(io, ident, "number of observations: ", x.n) diff --git a/src/z.jl b/src/z.jl index 86828f5c..f921f611 100644 --- a/src/z.jl +++ b/src/z.jl @@ -30,7 +30,7 @@ export OneSampleZTest, TwoSampleZTest, EqualVarianceZTest, pvalue(x::ZTest; tail=:both) = pvalue(Normal(0.0, 1.0), x.z; tail=tail) -default_tail(test::ZTest) = 
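The one-sided branches in the t-test confint above invert the two-sided interval: a one-sided interval with coverage 1-alpha reuses the two-sided interval computed at 2*alpha and replaces one endpoint with ±Inf. A generic sketch of that relationship (hypothetical helper, not part of the package API):

    # one-sided 1-alpha interval from a function producing two-sided intervals
    function onesided_ci(two_sided::Function, alpha::Real, tail::Symbol)
        lo, hi = two_sided(2 * alpha)   # two-sided interval at doubled alpha
        tail == :left  ? (-Inf, hi) :
        tail == :right ? (lo, Inf)  :
                         two_sided(alpha)
    end

Passing tail=:both explicitly in the recursive confint call matters now that the default tail is x.tail: for a test built with tail=:left, the old call confint(x, alpha*2) would pick up :left again and recurse without end.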
diff --git a/test/anderson_darling.jl b/test/anderson_darling.jl
index e48dbe10..b8966cd7 100644
--- a/test/anderson_darling.jl
+++ b/test/anderson_darling.jl
@@ -1,5 +1,5 @@
 using HypothesisTests, Distributions, Base.Test
-using HypothesisTests: default_tail
+using HypothesisTests: tail
 
 # One sample test
 n = 1000
@@ -9,7 +9,7 @@ x = rand(Normal(), n)
 t = OneSampleADTest(x, Normal())
 @test isapprox(t.A², 0.2013, atol=0.1^4)
 @test isapprox(pvalue(t), 0.8811, atol=0.1^4)
-@test default_tail(t) == :right
+@test tail(t) == :right
 
 x = rand(DoubleExponential(), n)
 t = OneSampleADTest(x, Normal())
@@ -36,7 +36,7 @@ t = KSampleADTest(samples...)
 @test isapprox(t.A²k, 8.3926, atol=0.1^4)
 @test isapprox(t.σ, 1.2038, atol=0.1^4)
 @test isapprox(pvalue(t), 0.0020, atol=0.1^4)
-@test default_tail(t) == :right
+@test tail(t) == :right
 
 t = KSampleADTest(samples..., modified = false)
 @test isapprox(t.A²k, 8.3559, atol=0.1^4)
diff --git a/test/binomial.jl b/test/binomial.jl
index 9f80631e..562b1862 100644
--- a/test/binomial.jl
+++ b/test/binomial.jl
@@ -1,11 +1,11 @@
 using HypothesisTests, Base.Test
-using HypothesisTests: default_tail
+using HypothesisTests: tail
 
 t = BinomialTest(26, 78)
 @test pvalue(t) ≈ 0.004334880883507431
 @test pvalue(t, tail=:left) ≈ 0.002167440441753716
 @test pvalue(t, tail=:right) ≈ 0.9989844298129187
-@test default_tail(t) == :both
+@test tail(t) == :both
 @test_ci_approx confint(t) (0.23058523962930383, 0.4491666887959782)
 @test_ci_approx confint(t, tail=:left) (0.0, 0.4313047758370174)
 @test_ci_approx confint(t, tail=:right) (0.2451709633730693, 1.0)
@@ -58,7 +58,7 @@ x = [55, 58, 61, 61, 62, 62, 62, 63, 63, 64, 66, 68, 68, 69, 69, 69, 70, 71, 72,
 @test pvalue(SignTest(x, 70)) ≈ 0.004425048828125003
 @test pvalue(SignTest(x, 70), tail=:left) ≈ 0.0022125244140625013
 @test pvalue(SignTest(x, 70), tail=:right) ≈ 0.9996356964111328
-@test default_tail(SignTest(x)) == :both
+@test tail(SignTest(x)) == :both
 @test_ci_approx confint(SignTest(x, 70)) (62, 69)
 @test_ci_approx confint(SignTest(x, 70), 0.0002) (61, 71)
 show(IOBuffer(), SignTest(x, 70))
diff --git a/test/box_test.jl b/test/box_test.jl
index 364ab1ee..34401952 100644
--- a/test/box_test.jl
+++ b/test/box_test.jl
@@ -1,5 +1,5 @@
 using HypothesisTests, Base.Test
-using HypothesisTests: default_tail
+using HypothesisTests: tail
 
 sim_data_h0=[
   0.297287984535462;0.382395967790608;-0.597634476728231;-0.0104452446373756;
@@ -36,7 +36,7 @@ t = HypothesisTests.BoxPierceTest(sim_data_h0,2,1)
 @test t.dof == 1
 @test t.Q ≈ 1.233942980734545
 @test pvalue(t) ≈ 0.2666415904008932
-@test default_tail(t) == :right
+@test tail(t) == :right
 show(IOBuffer(), t)
 
 t = HypothesisTests.LjungBoxTest(sim_data_h0,5,2)
@@ -46,7 +46,7 @@ t = HypothesisTests.LjungBoxTest(sim_data_h0,5,2)
 @test t.dof == 2
 @test t.Q ≈ 3.2090126519163626
 @test pvalue(t) ≈ 0.36050846449240337
-@test default_tail(t) == :right
+@test tail(t) == :right
 show(IOBuffer(), t)
 
 sim_data_h1 = [
diff --git a/test/breusch_godfrey.jl b/test/breusch_godfrey.jl
index b439524f..e32c049e 100644
--- a/test/breusch_godfrey.jl
+++ b/test/breusch_godfrey.jl
@@ -1,5 +1,5 @@
 using HypothesisTests, Base.Test
-using HypothesisTests: default_tail
+using HypothesisTests: tail
 
 # data simulated under H_1
 data_h1 = [
@@ -114,7 +114,7 @@ t = BreuschGodfreyTest(data_h1[:,2:end],res_vec,4)
 @test t.lag == 4
 @test t.BG ≈ 31.39810637185552
 @test pvalue(t) ≈ 2.5390992557054064e-6
-@test default_tail(t) == :right
+@test tail(t) == :right
 show(IOBuffer(), t)
 
 t = BreuschGodfreyTest(data_h1[:,2:end],res_vec,2,false)
diff --git a/test/circular.jl b/test/circular.jl
index 3eb1b515..1c189485 100644
--- a/test/circular.jl
+++ b/test/circular.jl
@@ -1,5 +1,5 @@
 using HypothesisTests, Base.Test
-using HypothesisTests: default_tail
+using HypothesisTests: tail
 
 # Fisher, 1995 example 4.11
 @test abs(pvalue(RayleighTest(0.2370, 60)) - 0.034) <= 0.001
@@ -13,7 +13,7 @@ t = RayleighTest(
      285, 292, 305, 315, 325, 328, 329, 343, 354, 359] *pi/180)
 
 @test abs(pvalue(t) - 0.20) <= 0.01
-@test default_tail(t) == :both
+@test tail(t) == :both
 show(IOBuffer(), t)
 
 # Fisher, 1995 example 6.8
@@ -28,12 +28,12 @@ wind_direction_12pm =
 t = FisherTLinearAssociation(wind_direction_6am, wind_direction_12pm)
 @test abs(t.rho_t- 0.191) < 0.001
 @test abs(pvalue(t) - 0.01) < 0.01
-@test default_tail(t) == :both
+@test tail(t) == :both
 show(IOBuffer(), t)
 
 # Jammaladak, 2001 example 8.1
 t = JammalamadakaCircularCorrelation(wind_direction_6am, wind_direction_12pm)
 @test abs(t.r - 0.2704648) < 1e-7
 @test abs(pvalue(t) - 0.2247383) < 1e-7
-@test default_tail(t) == :both
+@test tail(t) == :both
 show(IOBuffer(), t)
diff --git a/test/fisher.jl b/test/fisher.jl
index 9652a9b1..2799cabd 100644
--- a/test/fisher.jl
+++ b/test/fisher.jl
@@ -1,5 +1,5 @@
 using HypothesisTests, Base.Test
-using HypothesisTests: default_tail
+using HypothesisTests: tail
 
 t = HypothesisTests.FisherExactTest(1, 1, 1, 1)
 @test t.ω ≈ 1.0
@@ -7,7 +7,7 @@ t = HypothesisTests.FisherExactTest(1, 1, 1, 1)
 @test pvalue(t; tail=:right) ≈ 0.8333333333333337
 @test pvalue(t; method=:central) ≈ 1.0
 @test pvalue(t; method=:minlike) ≈ 1.0
-@test default_tail(t) == :both
+@test tail(t) == :both
 @test_ci_approx confint(t; tail=:left) (0.0, 76.24918299781056)
 @test_ci_approx confint(t; tail=:right) (0.013114894621608135, Inf)
 @test_ci_approx confint(t; method=:central) (0.006400016357911029, 156.2496006379585)
diff --git a/test/jarque_bera.jl b/test/jarque_bera.jl
index df208c91..99a13073 100644
--- a/test/jarque_bera.jl
+++ b/test/jarque_bera.jl
@@ -1,5 +1,5 @@
 using HypothesisTests, Base.Test
-using HypothesisTests: default_tail
+using HypothesisTests: tail
 
 sim_data_h0 = 1.0 .+ [
   0.2972879845354616, 0.3823959677906078, -0.5976344767282311, -0.01044524463737564,
@@ -37,7 +37,7 @@ t = JarqueBeraTest(sim_data_h0)
 @test t.skew ≈ -0.020527653857777352
 @test t.kurt ≈ 2.5117242352057993
 @test pvalue(t) ≈ 0.6003695680393418
-@test default_tail(t) == :right
+@test tail(t) == :right
 show(IOBuffer(), t)
 
 sim_data_h1 = [
@@ -54,5 +54,5 @@ t = JarqueBeraTest(sim_data_h1)
 @test t.skew ≈ 0.11785113019775637
 @test t.kurt ≈ 1.0138888888888888
 @test pvalue(t) ≈ 0.00020338498134114293
-@test default_tail(t) == :right
+@test tail(t) == :right
 show(IOBuffer(), t)
diff --git a/test/kolmogorov_smirnov.jl b/test/kolmogorov_smirnov.jl
index 66fb1170..754b0842 100644
--- a/test/kolmogorov_smirnov.jl
+++ b/test/kolmogorov_smirnov.jl
@@ -14,7 +14,7 @@ t = ApproximateOneSampleKSTest(x, Uniform())
 @test pvalue(t) ≈ 0.6777349664784745
 @test pvalue(t; tail=:left) ≈ 0.849573771973747
 @test pvalue(t; tail=:right) ≈ 0.3545875485608989
-@test default_tail(t) == :both
+@test tail(t) == :both
 show(IOBuffer(), t)
 
 t = ApproximateTwoSampleKSTest(x, [(0:24)/25...])
@@ -24,7 +24,7 @@ t = ApproximateTwoSampleKSTest(x, [(0:24)/25...])
 @test pvalue(t) ≈ 0.993764859699076
 @test pvalue(t; tail=:left) ≈ 0.8521437889662113
 @test pvalue(t; tail=:right) ≈ 0.697676326071031
-@test default_tail(t) == :both
+@test tail(t) == :both
 show(IOBuffer(), t)
 
 t = ExactOneSampleKSTest(x, Uniform())
@@ -34,7 +34,7 @@ t = ExactOneSampleKSTest(x, Uniform())
 @test pvalue(t) ≈ 0.6263437768244742
 @test pvalue(t; tail=:left) ≈ 0.8195705417998183
 @test pvalue(t; tail=:right) ≈ 0.32350648882777194
-@test default_tail(t) == :both
+@test tail(t) == :both
 show(IOBuffer(), t)
 
 ## check fit to normal distribution
diff --git a/test/kruskal_wallis.jl b/test/kruskal_wallis.jl
index b23ecfbe..9e1d2512 100644
--- a/test/kruskal_wallis.jl
+++ b/test/kruskal_wallis.jl
@@ -1,5 +1,5 @@
 using HypothesisTests, Base.Test
-using HypothesisTests: default_tail
+using HypothesisTests: tail
 
 # www.uni-siegen.de/phil/sozialwissenschaften/soziologie/mitarbeiter/ludwig-mayerhofer/statistik/statistik_downloads/statistik_ii_7.pdf
 u5 = [620, 5350, 7220]
@@ -14,7 +14,7 @@ t = HypothesisTests.KruskalWallisTest(u5, u250, u2500, more)
 @test t.H ≈ 1.5803174603174597
 @test t.tie_adjustment == 1
 @test pvalue(t) ≈ 0.6638608922384397
-@test default_tail(t) == :right
+@test tail(t) == :right
 show(IOBuffer(), t)
 
 # http://www.brightstat.com/index.php?option=com_content&task=view&id=41&Itemid=1&limit=1&limitstart=2
diff --git a/test/mann_whitney.jl b/test/mann_whitney.jl
index fae53656..adfda4a0 100644
--- a/test/mann_whitney.jl
+++ b/test/mann_whitney.jl
@@ -1,12 +1,12 @@
 using HypothesisTests, Base.Test
-using HypothesisTests: default_tail
+using HypothesisTests: tail
 
 # Basic exact test
 @test abs(pvalue(ExactMannWhitneyUTest([1:10;], [2.1:2:21;])) - 0.0232) <= 1e-4
 @test abs(pvalue(ExactMannWhitneyUTest([2.1:2:21;], [1:10;])) - 0.0232) <= 1e-4
 @test abs(pvalue(ExactMannWhitneyUTest([1.5:10:100;], [2.1:2:21;])) - 0.0068) <= 1e-4
 @test abs(pvalue(ExactMannWhitneyUTest([2.1:2:21;], [1.5:10:100;])) - 0.0068) <= 1e-4
-@test default_tail(ExactMannWhitneyUTest([1:10;], [2.1:2:21;])) == :both
+@test tail(ExactMannWhitneyUTest([1:10;], [2.1:2:21;])) == :both
 show(IOBuffer(), ExactMannWhitneyUTest([1:10;], [2.1:2:21;]))
 
 # Exact with ties
@@ -28,7 +28,7 @@ show(IOBuffer(), ExactMannWhitneyUTest([1:10;], [2:2:24;]))
 @test abs(pvalue(ApproximateMannWhitneyUTest([2.1:2:21;], [1:10;])) - 0.0257) <= 1e-4
 @test abs(pvalue(ApproximateMannWhitneyUTest([1.5:10:100;], [2.1:2:21;])) - 0.0091) <= 1e-4
 @test abs(pvalue(ApproximateMannWhitneyUTest([2.1:2:21;], [1.5:10:100;])) - 0.0091) <= 1e-4
-@test default_tail(ApproximateMannWhitneyUTest([1:10;], [2.1:2:21;])) == :both
+@test tail(ApproximateMannWhitneyUTest([1:10;], [2.1:2:21;])) == :both
 show(IOBuffer(), ApproximateMannWhitneyUTest([1:10;], [2.1:2:21;]))
 
 # Approximate with ties
diff --git a/test/power_divergence.jl b/test/power_divergence.jl
index 87f1687f..9b8249e2 100644
--- a/test/power_divergence.jl
+++ b/test/power_divergence.jl
@@ -1,6 +1,6 @@
 using HypothesisTests, Base.Test
 using StatsBase
-using HypothesisTests: default_tail
+using HypothesisTests: tail
 
 #Example 1 in R
 #Agresti (2007) p. 39
@@ -26,7 +26,7 @@ for i = 1:length(c)
 end
 
 @test pvalue(m) ≈ 2.9535891832117357e-7
-@test default_tail(m) == :right
+@test tail(m) == :right
 @test m.stat ≈ 30.070149095754687
 @test m.df ≈ 2
 @test m.n ≈ 2757
diff --git a/test/t.jl b/test/t.jl
index efc73583..8d2ecf53 100644
--- a/test/t.jl
+++ b/test/t.jl
@@ -1,5 +1,7 @@
 using HypothesisTests, Base.Test
-using HypothesisTests: default_tail
+using HypothesisTests: tail
+
+iobuffer = IOBuffer() # define once to ease temporary redirection to STDOUT
 
 ## ONE SAMPLE T-TEST
 
@@ -10,8 +12,22 @@ tst = OneSampleTTest(-5:10)
 @test abs(pvalue(tst) - 0.0530) <= 1e-4
 @test abs(pvalue(tst; tail=:left) - 0.9735) <= 1e-4
 @test abs(pvalue(tst; tail=:right) - 0.0265) <= 1e-4
-@test default_tail(tst) == :both
-show(IOBuffer(), tst)
+@test tail(tst) == :both
+show(iobuffer, tst)
+
+tst = OneSampleTTest(-5:10, tail=:left) # specify the tail for the test
+@test abs(pvalue(tst; tail=:both) - 0.0530) <= 1e-4
+@test abs(pvalue(tst) - 0.9735) <= 1e-4 # now tail=:left is the default in pvalue(tst)
+@test abs(pvalue(tst; tail=:right) - 0.0265) <= 1e-4
+@test tail(tst) == :left
+show(iobuffer, tst)
+
+tst = OneSampleTTest(-5:10, tail=:right) # specify the tail for the test
+@test abs(pvalue(tst; tail=:both) - 0.0530) <= 1e-4
+@test abs(pvalue(tst; tail=:left) - 0.9735) <= 1e-4
+@test abs(pvalue(tst) - 0.0265) <= 1e-4 # now tail=:right is the default in pvalue(tst)
+@test tail(tst) == :right
+show(iobuffer, tst)
 
 tst = OneSampleTTest(mean(-5:10), std(-5:10), 16)
 @test abs(pvalue(tst) - 0.0530) <= 1e-4
@@ -24,7 +40,7 @@ c = confint(tst; tail=:left)
 c = confint(tst; tail=:right)
 @test abs(c[1] - 0.4135) .<= 1e-4
 @test c[2] == Inf
-show(IOBuffer(), tst)
+show(iobuffer, tst)
 
 tst = OneSampleTTest(-10:5)
 @test abs(pvalue(tst) - 0.0530) <= 1e-4
@@ -33,7 +49,17 @@ tst = OneSampleTTest(-10:5)
 @test all(abs.([confint(tst)...] - [-5.0369, 0.0369]) .<= 1e-4)
 @test abs.(confint(tst; tail=:left)[2] - (-0.4135)) .<= 1e-4
 @test abs.(confint(tst; tail=:right)[1] - (-4.5865)) .<= 1e-4
-show(IOBuffer(), tst)
+show(iobuffer, tst)
+
+tst = OneSampleTTest(-10:5, tail=:left)
+@test abs.(confint(tst)[2] - (-0.4135)) .<= 1e-4
+@test abs.(confint(tst; tail=:right)[1] - (-4.5865)) .<= 1e-4
+show(iobuffer, tst)
+
+tst = OneSampleTTest(-10:5, tail=:right)
+@test abs.(confint(tst; tail=:left)[2] - (-0.4135)) .<= 1e-4
+@test abs.(confint(tst)[1] - (-4.5865)) .<= 1e-4
+show(iobuffer, tst)
 
 # Paired samples
 @test abs(pvalue(OneSampleTTest([1, 1, 2, 1, 0], [0, 1, 1, 1, 0])) - 0.1778) <= 1e-4
@@ -50,13 +76,25 @@ tst = EqualVarianceTTest(a1, a2)
 @test abs(tst.t - 1.959) <= 1e-3
 @test abs(pvalue(tst) - 0.078) <= 1e-3
 @test all(abs.([confint(tst)...] - [-0.0131, 0.2031]) .<= 1e-4)
-@test default_tail(tst) == :both
-show(IOBuffer(), tst)
+@test tail(tst) == :both
+show(iobuffer, tst)
+
+tst = EqualVarianceTTest(a1, a2, tail=:left)
+@test abs(pvalue(tst) - 0.960) <= 1e-3
+@test abs(pvalue(tst, tail=:both) - 0.078) <= 1e-3
+@test tail(tst) == :left
+show(iobuffer, tst)
 
 tst = UnequalVarianceTTest(a1, a2)
 @test abs(tst.df - 7.03) <= 0.01
 @test abs(tst.t - 1.959) <= 1e-3
 @test abs(pvalue(tst) - 0.091) <= 1e-3
 @test all(abs.([confint(tst)...] - [-0.0196, 0.2096]) .<= 1e-4)
-@test default_tail(tst) == :both
-show(IOBuffer(), tst)
+@test tail(tst) == :both
+show(iobuffer, tst)
+
+tst = UnequalVarianceTTest(a1, a2, tail=:right)
+@test abs(pvalue(tst) - 0.045) <= 1e-3
+@test abs(pvalue(tst, tail=:both) - 0.091) <= 1e-3
+@test tail(tst) == :right
+show(iobuffer, tst)
diff --git a/test/wilcoxon.jl b/test/wilcoxon.jl
index 953ea1f7..9dfeabe9 100644
--- a/test/wilcoxon.jl
+++ b/test/wilcoxon.jl
@@ -1,12 +1,12 @@
 using HypothesisTests, Base.Test
-using HypothesisTests: default_tail
+using HypothesisTests: tail
 
 # Basic exact test
 @test abs(pvalue(ExactSignedRankTest([1:10;], [2:2:20;])) - 0.0020) <= 1e-4
 @test abs(pvalue(ExactSignedRankTest([2:2:20;], [1:10;])) - 0.0020) <= 1e-4
 @test abs(pvalue(ExactSignedRankTest([1:10;], [2:2:16; -1; 1])) - 0.4316) <= 1e-4
 @test abs(pvalue(ExactSignedRankTest([2:2:16; -1; 1], [1:10;])) - 0.4316) <= 1e-4
-@test default_tail(ExactSignedRankTest([1:10;], [2:2:20;])) == :both
+@test tail(ExactSignedRankTest([1:10;], [2:2:20;])) == :both
 show(IOBuffer(), ExactSignedRankTest([1:10;], [2:2:20;]))
 
 # Exact with ties
@@ -22,7 +22,7 @@ show(IOBuffer(), ExactSignedRankTest([1:10;], [1:10;]))
 @test abs(pvalue(ApproximateSignedRankTest([2:2:20;], [1:10;])) - 0.005922) <= 1e-6
 @test abs(pvalue(ApproximateSignedRankTest([1:10;], [2:2:16; -1; 1])) - 0.4148) <= 1e-4
 @test abs(pvalue(ApproximateSignedRankTest([2:2:16; -1; 1], [1:10;])) - 0.4148) <= 1e-4
-@test default_tail(ApproximateSignedRankTest([1:10;], [2:2:20;])) == :both
+@test tail(ApproximateSignedRankTest([1:10;], [2:2:20;])) == :both
 show(IOBuffer(), ApproximateSignedRankTest([1:10;], [2:2:20;]))
 
 # Approximate with ties
@@ -36,7 +36,7 @@ show(IOBuffer(), ApproximateSignedRankTest([1:10;], [1:10;]))
 # # Tests for automatic selection
 @test abs(pvalue(SignedRankTest([1:10;], [2:2:20;])) - 0.0020) <= 1e-4
 @test abs(pvalue(SignedRankTest([1:10;], [2:11;])) - 0.0020) <= 1e-4
-@test default_tail(SignedRankTest([1:10;], [2:2:20;])) == :both
+@test tail(SignedRankTest([1:10;], [2:2:20;])) == :both
 show(IOBuffer(), SignedRankTest([1:10;], [2:2:20;]))
 
 # One Sample tests
diff --git a/test/z.jl b/test/z.jl
index d83be510..d9c2b653 100644
--- a/test/z.jl
+++ b/test/z.jl
@@ -1,6 +1,6 @@
 using HypothesisTests, Base.Test
 using Distributions
-using HypothesisTests: default_tail
+using HypothesisTests: tail
 
 # This is always the null in our tests.
 null = Normal(0.0, 1.0)
@@ -20,7 +20,7 @@ tst = OneSampleZTest(x)
 @test pvalue(tst) ≈ 2 * min(cdf(null, z), ccdf(null, z))
 @test pvalue(tst; tail=:left) ≈ cdf(null, z)
 @test pvalue(tst; tail=:right) ≈ ccdf(null, z)
-@test default_tail(tst) == :both
+@test tail(tst) == :both
 show(IOBuffer(), tst)
 
 tst = OneSampleZTest(m, s, n)
@@ -83,7 +83,7 @@ z = xbar / se
 @test pvalue(tst) ≈ 2 * min(cdf(null, z), ccdf(null, z))
 @test pvalue(tst; tail=:left) ≈ cdf(null, z)
 @test pvalue(tst; tail=:right) ≈ ccdf(null, z)
-@test default_tail(tst) == :both
+@test tail(tst) == :both
 @test confint(tst)[1] ≈ xbar + quantile(null, 0.05 / 2) * se
 @test confint(tst)[2] ≈ xbar + cquantile(null, 0.05 / 2) * se
 show(IOBuffer(), tst)
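A short sketch of how the stored alpha flows through confint and the new show output (illustrative; in this patch only the t-tests accept the alpha and tail keywords):

    t = OneSampleTTest(-5:10, alpha=0.01)
    alpha(t)        # 0.01
    confint(t)      # a 99% interval, because confint's default alpha is now alpha(t)
    show(STDOUT, t) # summary header reports a "99.0% confidence interval" instead of the hard-coded 95%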