Hinweis: Die Gliederung zu den Befehlen und Ergebnissen in R orientiert sich an dem Inhaltsverzeichnis der 17. Auflage der ‘Angewandten Statistik’. Nähere Hinweise zu den verwendeten Formeln sowie Erklärungen zu den Beispielen sind in dem Buch (Ebook) nachzulesen!

zur Druckversion

Hinweis: Die thematische Gliederung zu den aufgeführten Befehlen und Beispielen orientiert sich an dem Inhaltsverzeichnis der 17. Auflage der ‘Angewandten Statistik’. Nähere Hinweise zu den verwendeten Formeln sowie Erklärungen zu den Beispielen sind in dem Buch (Ebook) nachzulesen!

Aktuelle R-Umgebung zu den Beispielen

The R Project for Statistical Computing

sessionInfo()
## R version 3.6.3 (2020-02-29)
## Platform: x86_64-w64-mingw32/x64 (64-bit)
## Running under: Windows 10 x64 (build 18363)
## 
## Matrix products: default
## 
## locale:
## [1] LC_COLLATE=German_Germany.1252  LC_CTYPE=German_Germany.1252   
## [3] LC_MONETARY=German_Germany.1252 LC_NUMERIC=C                   
## [5] LC_TIME=German_Germany.1252    
## 
## attached base packages:
## [1] stats     graphics  grDevices utils     datasets  methods   base     
## 
## loaded via a namespace (and not attached):
##  [1] compiler_3.6.3  magrittr_1.5    tools_3.6.3     htmltools_0.4.0
##  [5] yaml_2.2.1      Rcpp_1.0.4      stringi_1.4.6   rmarkdown_2.1  
##  [9] knitr_1.28      stringr_1.4.0   xfun_0.12       digest_0.6.25  
## [13] rlang_0.4.5     evaluate_0.14

Download von Datensätzen, die in den folgenden Beispielen verwendet werden:

Schweissraten : sweat

7.1 Der statistische Test

7.1.5 Wie oft wird eine wahre Nullhypothese abgelehnt?

# 7.1.5: probability of rejecting a true null hypothesis at least r times
# in n tests (negative binomial model, per-test alpha = 0.01)
num.rejections <- 1:5
alpha.single   <- 0.01
num.tests      <- c(22, 83, 154, 231, 310)
P <- round(pnbinom(num.tests - num.rejections, num.rejections, alpha.single), 2)
P
## [1] 0.2 0.2 0.2 0.2 0.2

7.1.6 Einstichproben-Gauss-Test

# One-sample Gauss (z) test: mu.0 under H0, known sigma, sample size n
alpha <- 0.05 
mu.0  <- 20
sigma <- 10
n     <- 25

# (a)/(b): one-sided critical limits; (c1)/(c2): two-sided critical limits
l.a    <- mu.0 - qnorm(1-alpha, mean=0, sd=1)*(sigma/sqrt(n)); l.a
## [1] 16.71029
l.b    <- mu.0 + qnorm(1-alpha, mean=0, sd=1)*(sigma/sqrt(n)); l.b
## [1] 23.28971
l.c1   <- mu.0 - qnorm(1-alpha/2, mean=0, sd=1)*(sigma/sqrt(n)); l.c1
## [1] 16.08007
l.c2   <- mu.0 + qnorm(1-alpha/2, mean=0, sd=1)*(sigma/sqrt(n)); l.c2
## [1] 23.91993

# p-values for an observed mean m = 16: lower tail, upper tail, two-sided
m      <- 16
p.a    <- pnorm(m, mean=mu.0, sd=sigma/sqrt(n)); p.a 
## [1] 0.02275013
p.b    <- 1 - pnorm(m, mean=mu.0, sd=sigma/sqrt(n)); p.b 
## [1] 0.9772499
p.c    <- 2 * pnorm(m, mean=mu.0, sd=sigma/sqrt(n)); p.c
## [1] 0.04550026

7.1.7 Powerfunktion

# Power of the one-sample t test over a grid of true differences,
# plotted against the standardized effect diff/stdev for n = 10 and n = 20.
alpha <- 0.05
nro1  <- 10; nro2 <- 20;
diff  <- seq(-4, +4, by=0.01)
stdev <- 2

result1 <- power.t.test(n = nro1, delta = diff, sd = stdev, 
    sig.level = alpha, type = "one.sample", alternative = "two.sided")
result2 <- power.t.test(n = nro2, delta = diff, sd = stdev, 
    sig.level = alpha, type = "one.sample", alternative = "two.sided")
    
par(mfcol=c(1,1), lwd=2, font.axis=2, bty="l", ps=13)  
plot(diff/stdev, result1$power, type = "l", xlim=c(-1.6, +1.6), 
        ylim=c(0,1), axes = FALSE,
     xlab = expression(paste("Differenz  ",mu - mu[0],
     "  in Einheiten von  ",sigma[0],"  für  ",alpha," = 0.05")),   # fixed mojibake "f?r"
     ylab = expression(paste("Power  (1 - ",beta," )")))
axis(2, pos=c(-1.7,0),las=1)    ; axis(2, pos=c(0,0), las=1)
axis(2, pos=c(+1.7,0), las=1)   ; axis(1, pos=c(0,0))
lines(diff/stdev, result2$power); abline(v=1, lty=2)          
text(-0.4,0.7,"n = 20")
text(-0.8,0.4,"n = 10")


# Power of the one-sample t test for effect size delta/sd = 1 at n = 10
power.t.test(n = 10, delta = 5, sd = 5, sig.level = 0.05,
                       type = "one.sample", alternative = "two.sided")
## 
##      One-sample t test power calculation 
## 
##               n = 10
##           delta = 5
##              sd = 5
##       sig.level = 0.05
##           power = 0.8030962
##     alternative = two.sided

# Same effect size with n = 20: the power increases
power.t.test(n = 20, delta = 5, sd = 5, sig.level = 0.05,
                       type = "one.sample", alternative = "two.sided")
## 
##      One-sample t test power calculation 
## 
##               n = 20
##           delta = 5
##              sd = 5
##       sig.level = 0.05
##           power = 0.9885913
##     alternative = two.sided

7.1.8 Operationscharakteristik

n <- 46                                                         # sample size
accept <- 1                                                     # acceptance number
                                                                # (renamed from `c`, which shadowed base::c)
N <- 1000                                                       # lot (batch) size

                                         # probability of acceptance vs defect rate
p <- seq(0, 0.1, by=0.005)    
P <- pbinom(accept, n, p)

pbinom(1, 46, 0.0077)                                           #  AQL for alpha = 0.05             
## [1] 0.9509134
pbinom(1, 46, 0.0819)                                           #  RQL for beta  = 0.10             
## [1] 0.1001856

# OC curve: acceptance probability P vs defect rate p, AQL/RQL annotated
par(mfrow=c(1,1), lwd=2, font.axis=2, bty="n", ps=14)
plot(p, P, type="b", xlim=c(0, 0.1), ylim=c(0, 1),
     xlab="p - Anteil defekt (Qualität)",
     ylab="P - Wahrscheinlichkeit für Akzeptanz")

# reference lines at P = 0.95 (producer's risk) and P = 0.10 (consumer's risk)
abline(h=0.1, lty=1, col="grey")
abline(h=0.95, lty=1, col="grey")

abline(v=0.0075, lty=2, col="grey")
abline(v=0.0825, lty=2, col="grey")

text(0.012, 0.99, expression(alpha), cex=1.5)
text(0.035, 0.99, "(Produzenten-Risiko)")

text(0.012, 0.15, expression(beta), cex=1.5)
text(0.035, 0.15, "(Konsumenten-Risiko)")

text(0.015, 0.00, "AQL = 0.0077", cex=1.2)
text(0.085, 0.00, "RQL = 0.0819", cex=1.2)

# Average outgoing quality: accepted lots pass p defects on through
AOQ <- P * p * (N-n) / N

par(mfrow=c(1,1), lwd=2, font.axis=2, bty="n", ps=14, cex.axis=1) 
plot(p, AOQ, type="b", xlim=c(0, 0.1), ylim=c(0, 0.02),
     xlab="p - Anteil defekt (Qualität)",                   # fixed mojibake "Qualit?t"
     ylab="mittlerer Durchschlupf (AOQ)")
abline(h=0.0174, lty=1)
abline(v=0.035, lty=2)
text(0.06, 0.0185, "AOQL=0.0174")


# Average total inspection: rejected lots are screened completely
ATI <- n + (1-P) * (N-n)    
plot(p, ATI, type="b", xlim=c(0, 0.1), ylim=c(0, 1000),
     xlab="p - Anteil defekt (Qualität)",
     ylab="mittlerer Prüfaufwand (ATI)")     

7.2 Tests der Verteilung (Anpassungstests)

7.2.2 Überprüfung des 3. und 4. Moments

# Skewness and kurtosis computed from the raw central moments
obs <- c(rep(30, 16), 50, 70, 90, 110)
n   <- length(obs)
dev <- obs - mean(obs)

sqrt(n) * sum(dev^3) / sum(dev^2)^1.5                             # skewness
## [1] 2.146625

n * sum(dev^4) / sum(dev^2)^2                                     # kurtosis
## [1] 6.248

Funktionen skewness() und kurtosis() in library(e1071)

# skewness() and kurtosis() from the e1071 package
# (kurtosis() returns the excess kurtosis, hence the +3 below)
library(e1071)
x <- c(rep(30, 16), 50, 70, 90, 110)
skewness(x)
## [1] 1.987658
kurtosis(x)+3
## [1] 5.63882

Symmetrie-Vorzeichentest:

# Sign test of symmetry: count observations below vs above the mean
x <- c(3, 5, 2, 6, 7, 7, 4, 2, 6, 12, 15, 18)
sort(x)
##  [1]  2  2  3  4  5  6  6  7  7 12 15 18
n <- length(x)
below <- sum(x < mean(x))         # A-
above <- n - below                # A+
chi.stat <- (below - above)^2 / n
chi.stat
## [1] 3
qchisq(0.10, 1, lower.tail = FALSE)
## [1] 2.705543

7.2.3 Der Quantile-Quantile Plot

Beispiel Nüchternblutzucker und Cholesterin:

# Example data: fasting blood sugar (nblz) and cholesterol (chol), n = 40 each
nblz <- c(90,  74,  94,  79, 100,  87,  87,  84,  78,  94,
          73,  99,  85,  83,  70,  84,  91,  99,  85,  89,  
          80,  89,  81,  95,  89,  94,  77,  87,  89,  86,  
          94, 110,  92,  92,  93,  94,  87,  90, 107,  74)

chol <- c(195, 205, 245, 190, 260, 190, 340, 195, 285, 380, 
          220, 240, 235, 215, 190, 275, 205, 290, 200, 210,
          220, 265, 235, 200, 350, 220, 450, 230, 185, 295,
          380, 200, 485, 210, 185, 210, 395, 290, 190, 210)     
                                                          
# Normal Q-Q plots with reference lines for both variables
par(mfrow=c(1,2), lwd=2, font.axis=2, bty="n", ps=11)    
qqnorm(nblz, ylab="Nüchternblutzucker [mg/dl]", xlab="Normalverteilung", 
       main=" ", pch=19, cex=0.9)
qqline(nblz, col = "black", lwd=2)
          
qqnorm(chol, ylab="Cholesterin [mg/dl]", xlab="Normalverteilung", 
             ylim=c(150, 500), main= " ", pch=19, cex=0.9)
qqline(chol, col = "black", lwd=2)

7.2.4 Box-Cox Transformation

bcplot <- function(x, lambda) {
  # Box-Cox normality profile: for each candidate lambda, transform x and
  # compute the normal probability-plot correlation; report the lambda with
  # the highest correlation (i.e. closest to normality).
  #
  # x      : positive numeric sample
  # lambda : numeric vector of candidate transformation exponents
  # returns: list(cor = correlations, l.i = lambda grid, lambda = best lambda)
  v <- vapply(lambda, function(l) {
    y <- if (l == 0) log(x) else (x^l - 1) / l   # lambda = 0 -> log transform
    np <- qqnorm(y, plot.it = FALSE)
    cor(np$x, np$y)
  }, numeric(1))
  # which.max returns the first maximum, so "lambda" is always a scalar
  # (the original which(v == max(v)) could return a vector on exact ties)
  best <- which.max(v)
  list("cor" = v, "l.i" = lambda, "lambda" = lambda[best])
}
                              
                                                         
# Choose the lambda maximizing the Q-Q correlation (uses bcplot() from above)
x <- c(20, 22, 24, 21, 19, 30, 40, 23, 24, 25, 19)
l <- seq(-5, +2, by=0.1)
lp <- bcplot(x, l); lp$lambda
## [1] -3

# Q-Q plot before, correlation profile, and Q-Q plot after transformation
par(mfrow=c(1,3), lwd=2, font=2, font.axis=2, bty="l", ps=12)
qqnorm(x, main="vor Transformation",xlab="Q-Normal",ylab="Q-Daten")
qqline(x)
plot(lp$l.i, lp$cor, main="Box-Cox Plot", xlab="Lambda", 
            ylab="Korrelationkoeffizient")
xtrn <- (x^lp$lambda -1)/lp$lambda
qqnorm(xtrn, main="nach Transformation",xlab="Q-Normal", 
            ylab="Q-transf. Daten")
qqline(xtrn)

7.2.5 Der Chiquadrat-Anpassungstest

# Chi-square goodness-of-fit test for a fair die (uniform expected counts)
observed <- c(7, 16, 8, 17, 3, 9)
expected <- rep(sum(observed) / 6, 6)

stat <- sum((observed - expected)^2 / expected)
stat
qchisq(0.95, 5)
## [1] 14.8
## [1] 11.0705

Beispiel Nüchternblutzucker und Cholesterin:

# Histograms with fitted normal curves, scaled to expected class counts
# (density * class width * sample size)
par(mfrow=c(1,2), lwd=2, font.axis=2, bty="n", ps=11)    
breaks <- seq(60, 120, by=8);  breite <- 8
hist(nblz, breaks, plot=T, col="grey",
     main=" ", xlab="Blutzucker [mg/dl]", ylab="Anzahl",xlim=c(60,120))
x      <- seq(60, 120, by=2) 
lines(x, dnorm(x, mean=mean(nblz), 
              sd=sd(nblz)) * breite * length(nblz), col="black", lwd=2)

breaks <- seq(180, 500, by=40); breite <- 40
hist(chol, breaks, plot=T, col="grey", 
     main=" ", xlab="Cholesterin [mg/dl]", ylab="Anzahl",xlim=c(180, 500))
x      <- seq(180, 500, by=40) 
lines(x, dnorm(x, mean=mean(chol), 
              sd=sd(chol)) * breite * length(chol), col="black", lwd=2)

Funktion pearson.test() in library(nortest)

library(nortest)

# Chi-square goodness-of-fit test for normality with 8 classes
pearson.test(nblz, n.classes=8, adjust=TRUE)
## 
##  Pearson chi-square normality test
## 
## data:  nblz
## P = 7.6, p-value = 0.1797

pearson.test(chol, n.classes=8, adjust=TRUE)
## 
##  Pearson chi-square normality test
## 
## data:  chol
## P = 21.6, p-value = 0.0006237

7.2.6 Kolmogoroff-Smirnoff Anpassungstest

Beispiel Nüchternblutzucker und Cholesterin:

# Empirical CDF (step function) vs fitted normal CDF for both variables
par(mfrow=c(1,2), lwd=2, font.axis=2, bty="n", ps=11)
n <- length(nblz)
p <- cumsum(rep(1,n)/n)
plot(sort(nblz), p, type="s", xlab="Nüchternblutzucker [mg/dl]", ylab="F(x)")  # fixed mojibake
x <- seq(60, 120, by =2)
y <- pnorm(x, mean=mean(nblz), sd=sd(nblz), log=FALSE)
lines(x, y, col="black")

n <- length(chol)
p <- cumsum(rep(1,n)/n)
plot(sort(chol), p, type="s", xlab="Cholesterin [mg/dl]", ylab="F(x)")
x <- seq(180, 500, by =10)
y <- pnorm(x, mean=mean(chol), sd=sd(chol), log=FALSE)
lines(x, y, col="black")

Funktionen ks.test() (Basispaket stats) und lillie.test() in library(nortest)

library(nortest)

# KS test against a normal whose mean/sd are estimated from the same data
ks.test(nblz, "pnorm", mean(nblz), sd(nblz))
## 
##  One-sample Kolmogorov-Smirnov test
## 
## data:  nblz
## D = 0.10063, p-value = 0.8127
## alternative hypothesis: two-sided

ks.test(chol, "pnorm", mean(chol), sd(chol))
## 
##  One-sample Kolmogorov-Smirnov test
## 
## data:  chol
## D = 0.19969, p-value = 0.08232
## alternative hypothesis: two-sided

# Lilliefors variant of the KS test for the case of estimated parameters
lillie.test(nblz)
## 
##  Lilliefors (Kolmogorov-Smirnov) normality test
## 
## data:  nblz
## D = 0.10063, p-value = 0.3897

lillie.test(chol)
## 
##  Lilliefors (Kolmogorov-Smirnov) normality test
## 
## data:  chol
## D = 0.19969, p-value = 0.0003435

7.2.7 Shapiro-Wilk Test

Funktion shapiro.test() im Basispaket stats

library(nortest)

# shapiro.test() is provided by the base package stats
shapiro.test(nblz)
## 
##  Shapiro-Wilk normality test
## 
## data:  nblz
## W = 0.98006, p-value = 0.6918

shapiro.test(chol)
## 
##  Shapiro-Wilk normality test
## 
## data:  chol
## W = 0.80629, p-value = 9.187e-06

7.2.8 Anderson-Darling Anpassungstest

Funktion ad.test() in library(nortest)

library(nortest)

# Anderson-Darling test for normality
ad.test(nblz)
## 
##  Anderson-Darling normality test
## 
## data:  nblz
## A = 0.30513, p-value = 0.5525

ad.test(chol)
## 
##  Anderson-Darling normality test
## 
## data:  chol
## A = 2.761, p-value = 4.39e-07

7.2.9 Ausreißerproblem

par(mfrow = c(1, 1), lwd = 2, font.axis = 2, bty = "n", ps = 11)
x <- c(3.2, 5.1, 5.0, 5.3, 5.1, 5.3, 4.8, 5.7, 4.5, 5.4,
       4.6, 4.8, 6.1, 5.5, 4.3, 4.8, 7.4, 8.3)
# draw the box plot and keep the result to inspect the flagged outliers
bp <- boxplot(x, range = 1.5, col = "grey", las = 1, ylim = c(3, 9))
bp$out

## [1] 3.2 7.4 8.3

Äußere Grenzen:

# outer fences: lower/upper quartile -/+ 1.5 * IQR
Q <- quantile(x, p = c(0.25, 0.5, 0.75))
Q1 <- Q[1]
Q3 <- Q[3]
cat("Grenzen:", Q1 - IQR(x) * 1.5, "-", Q3 + IQR(x) * 1.5, "\n")
## Grenzen: 3.7875 - 6.4875

Ausreißer nach Hampel Kriterium:

x <- c(3.2, 5.1, 5.0, 5.3, 5.1, 5.3, 4.8, 5.7, 4.5, 5.4,
       4.6, 4.8, 6.1, 5.5, 4.3, 4.8, 7.4, 8.3)
# Hampel rule: flag values farther than 5.2 raw MADs from the median
center <- median(x)
spread <- mad(x, constant = 1)

outlier <- abs(x - center) > 5.2 * spread
x[outlier]
## [1] 3.2 7.4 8.3

7.2.9.1 Grubbs-Test

# Grubbs test for a single outlier, computed by hand
x <- c(3, 4, 4, 5, 6, 6, 7, 8, 9, 10, 10, 
            11, 13, 15, 16, 17, 19, 19, 20, 50)
n <- length(x); m.x <- mean(x); s.x <- sd(x)
alpha <- 0.05
t.q <- qt(alpha/(2*n), n-2)                  # renamed from `t` (shadowed base::t)
G.hat  <- max(abs(x-m.x))/s.x; G.hat                         # test statistic
## [1] 3.610448
G.crit <- ((n-1)/sqrt(n)) * sqrt(t.q^2 / (n-2+t.q^2)); G.crit  # critical value
## [1] 2.708246

Funktion grubbs.test() in library(outliers)

library(outliers)

# Grubbs test via the outliers package (compare with the manual computation)
grubbs.test(x)
## 
##  Grubbs test for one outlier
## 
## data:  x
## G = 3.61045, U = 0.27782, p-value = 2.113e-05
## alternative hypothesis: highest value 50 is an outlier

7.2.9.2 Q-Test nach Dixon

Funktion dixon.test() in library(outliers)

# Dixon Q test for outliers (outliers package)
x <- c(3, 4, 4, 5, 6, 6, 7, 8, 9, 10, 10, 
            11, 13, 15, 16, 17, 19, 19, 20, 50)

library(outliers)

dixon.test(x)
## 
##  Dixon test for outliers
## 
## data:  x
## Q = 0.67391, p-value < 2.2e-16
## alternative hypothesis: highest value 50 is an outlier

7.3 Einstichprobenverfahren

7.3.1 Hypothesen zu Wahrscheinlichkeiten

7.3.1.1 Binomialtest

# Binomial distribution with n = 30, p = 0.7: density (left), CDF (right)
par(mfrow=c(1,2), lwd=2, font.axis=2, bty="n", ps=11)
n  <- 30
p  <- 0.7
x  <- 0:n
fx <- dbinom(x, n, p)    
plot(x, fx, type="h", ylim=c(0, 0.2), xlim=c(0,n), ylab="f(x)", 
     xlab=" ", lty=3, col="grey")
points(x, fx, pch=19, cex=0.8, col="black")

Fx <- pbinom(x, n, p) 
# extend the step function past n so the plot shows F(x) = 1 on the right
x1 <- c(x, n+1, n+2)
Fx <- c(Fx, 1, 1)
plot(x1, Fx, type="s", ylim=c(0,1), xlim=c(0,n+2), ylab="F(x)", xlab=" ")
lines(c(0,0),c(0,Fx[1]))

                                                           
pbinom(25, 30, 0.7, lower.tail=FALSE)                           # binomial test: P(X > 25)
## [1] 0.03015494

# same one-sided test via the exact binomial test
binom.test(26, 30, p=0.7, alternative="greater")
## 
##  Exact binomial test
## 
## data:  26 and 30
## number of successes = 26, number of trials = 30, p-value = 0.03015
## alternative hypothesis: true probability of success is greater than 0.7
## 95 percent confidence interval:
##  0.7203848 1.0000000
## sample estimates:
## probability of success 
##              0.8666667
qbinom(0.95, 30, 0.7)                                           # critical value at alpha = 0.05
## [1] 25

Beispiel reguläre Münze:

# Exact two-sided test: 15 heads in 20 tosses against a fair coin (p = 0.5)
binom.test(15, 20, p=0.5, alternative="two.sided")      
## 
##  Exact binomial test
## 
## data:  15 and 20
## number of successes = 15, number of trials = 20, p-value = 0.04139
## alternative hypothesis: true probability of success is not equal to 0.5
## 95 percent confidence interval:
##  0.5089541 0.9134285
## sample estimates:
## probability of success 
##                   0.75

Zweiseitige Hypothesen:

# Two-sided p-value for a symmetric null (p0 = 0.5): add both tails
n <- 20; x <- 15; p0 <- 0.5
pbinom(n - x, n, p0) + pbinom(x - 1, n, p0, lower.tail = FALSE)
## [1] 0.04138947

# Asymmetric null (p0 = 0.2): sum the probabilities of all outcomes
# that are no more likely than the observed one
n <- 20; x <- 15; p0 <- 0.2
probs <- dbinom(0:n, n, p0)
P <- sum(probs[probs <= dbinom(x, n, p0)])
P
## [1] 1.802764e-07

7.3.1.2 Approximation durch die Normalverteilung

# Normal approximation to the binomial test with continuity correction
# and finite-population correction (N-n)/(N-1)
N <- 2000; n <- 400; x <- 184; p0 <- 0.40; p <- x/n  # example: dealer (Haendler)
z <- (abs(p-p0) - 1/(2*n)) / sqrt(((p0*(1-p0))/n)*((N-n)/(N-1))); z
## [1] 2.680888
pnorm(z, lower.tail=F)
## [1] 0.003671356

binom.test(184, 400, p=0.40, alternative="greater")           # exact test ...
## 
##  Exact binomial test
## 
## data:  184 and 400
## number of successes = 184, number of trials = 400, p-value = 0.008542
## alternative hypothesis: true probability of success is greater than 0.4
## 95 percent confidence interval:
##  0.4180829 1.0000000
## sample estimates:
## probability of success 
##                   0.46

7.3.1.3 Fallzahlabschätzung

# Sample sizes for detecting p0 -> p, one-sided alpha = 0.05, power = 0.80
alpha <- 0.05
beta  <- 0.20
p0 <- c(0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8)
p  <- c(0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8, 0.9)

z.sum <- qnorm(1 - alpha) + qnorm(1 - beta)
n.req <- ceiling(z.sum^2 * (p0 * (1 - p0) + p * (1 - p)) / (p - p0)^2)
n.req
## [1] 155 229 279 303 303 279 229 155

# Equivalent computation via power.prop.test() (two-sample formulation)
power.prop.test(n=NULL, p1=0.1, p2=0.2, sig.level=0.05, 
            power=0.80, alternative="one.sided")
## 
##      Two-sample comparison of proportions power calculation 
## 
##               n = 156.6054
##              p1 = 0.1
##              p2 = 0.2
##       sig.level = 0.05
##           power = 0.8
##     alternative = one.sided
## 
## NOTE: n is number in *each* group
# Sample size n so that an event with probability prob is observed at least
# once with confidence conf: n = log(1 - conf) / log(1 - prob)
prob <- seq(0.01, 0.10, by=0.004)
conf <- c(0.80, 0.90, 0.95)
# vectorized outer product replaces the original nested for-loops
ntab <- outer(conf, prob, function(cf, pr) log10(1 - cf) / log10(1 - pr))
rownames(ntab) <- c("80%","90%","95%")
colnames(ntab) <- prob
as.data.frame(ceiling(ntab)[,1:10])

# Required sample size vs event probability for 80/90/95% confidence
par(mfrow=c(1,1), lwd=2, bty="l")
plot(ntab[1,], prob, type="b", las=1, lty=1, pch=1, xlim=c(10, 120),
     yaxp=c(0.01, 0.20, 19), xaxp=c(10, 120, 11),
     ylab="Wahrscheinlichkeit", xlab="Fallzahl")
lines(ntab[2,], prob, type="b", lty=1, pch=2)
lines(ntab[3,], prob, type="b", lty=1, pch=5)
abline(h=seq(20,300,20), col="grey", lty=2)
abline(h=seq(0.01, 0.10, 0.01), lty=2, col="grey")
legend("topright", pch = c(1,2,5), bty="n",
       legend=c("80% Power","90% Power","95% Power"), cex=1.2)

7.3.1.4 Likelihood-Quotienten-Test

# Likelihood-ratio test for a binomial proportion (H0: p = 1/6)
n <- 60; x <- 4; p0 <- 1/6
expected <- n * p0
minus2ll <- 2 * (x * log(x / expected) + (n - x) * log((n - x) / (n - expected)))
minus2ll
## [1] 5.362487

pchisq(minus2ll, df = 1, lower.tail = FALSE)   # asymptotic p-value
## [1] 0.02057441

qchisq(0.95, df = 1)                           # critical value at alpha = 0.05
## [1] 3.841459

binom.test(c(x, n-x), p = p0, alternative="less")              # exact test ...
## 
##  Exact binomial test
## 
## data:  c(x, n - x)
## number of successes = 4, number of trials = 60, p-value = 0.02019
## alternative hypothesis: true probability of success is less than 0.1666667
## 95 percent confidence interval:
##  0.0000000 0.1460972
## sample estimates:
## probability of success 
##             0.06666667

7.3.2 Hypothesen zu Erwartungswerten mit Bezug auf den Mittelwert

7.3.2.1 Einstichproben t-Test

# One-sample t test by hand: sample mean 9 vs mu0 = 10, s = 2, n = 25
m <- 9; s <- 2; n <- 25
t.hat <- abs(m - 10) / (s / sqrt(n))
t.hat
## [1] 2.5

t.krit <- qt(0.975, df = n - 1)    # two-sided critical value, alpha = 0.05
t.krit
## [1] 2.063899

Beispiel erhöhter Blutdruck:

mue        <- 85                                               # mean blood pressure
standardab <- 9                                                # standard deviation
n          <- 11                                               # number of cases
sem        <- standardab / sqrt(n)

mue.0      <- 80                                               # expected value under H0
tquant     <- qt(0.95, n-1); tquant                            # quantile of test distribution
## [1] 1.812461
mkrit      <- mue.0 + tquant*sem;   mkrit                      # critical value
## [1] 84.9183
p.val <- pt((mue-mue.0)/sem, n-1, lower.tail=FALSE)                                            
mue.a      <- 88                                               # expected value under HA

# grid and densities of the sampling distributions under H0 and HA
x.i        <- seq(mue.0 - 3*sem, mue.a + 3*sem, by=0.1)
f0.i       <- dnorm(x.i, mean=mue.0, sd=sem)
fa.i       <- dnorm(x.i, mean=mue.a, sd=sem)

# Left panel: null distribution with the rejection region (alpha) shaded
par(mfrow=c(1,2), lwd=2, font.axis=2, bty="n", ps=11)
plot(x.i, f0.i, col="black", xlim=c(70, 100), ylim=c(0,0.15), yaxs="r",
     type="l", xlab="mittlerer DBP (mmHg)", ylab="f(x)")
abline(v=mkrit, col="black")
text(77, 0.151, "Annahmebereich", col="black", cex=0.8)
xval  <- seq(mkrit, mue.a + 3*sem, by=0.01)
xarea <- c(mkrit, xval); yarea <- c(0, dnorm(xval, mean=mue.0, sd=sem)) 
polygon(xarea, yarea, col="black", density=15, angle=135)
text(91, 0.02, expression(alpha == 0.05), cex=1)

# Right panel: null and alternative distributions, beta region shaded
plot(x.i, f0.i, col="black", xlim=c(70, 100), ylim=c(0,0.15), yaxs="r",
     type="l", xlab="mittlerer DBP (mmHg)", ylab="f(x)")
abline(v=mkrit, col="black")
text(77, 0.151, "Annahmebereich", col="black", cex=0.8)
xval  <- seq(mkrit, mue.a + 3*sem, by=0.01)
xarea <- c(mkrit, xval); yarea <- c(0, dnorm(xval, mean=mue.0, sd=sem)) 
polygon(xarea, yarea, col="black", density=15, angle=135)
lines(x.i, fa.i, col="black")
text(93, 0.151, "Ablehnungsbereich", col="black", cex=0.8)
xval  <- seq(mue.0 - 3*sem, mkrit, by=0.01)
xarea <- c(xval, mkrit); yarea <- c(dnorm(xval, mean=mue.a, sd=sem), 0) 
polygon(xarea, yarea, col="black", density=15)
text(79,0.03, expression(beta == 0.14), cex=1)

Abschätzung der Power:

tstat <- (86 - mue.0)/sem;   tstat                           # test statistic
## [1] 2.211083
                                      
pt(tstat, n-1, lower.tail=F)                                 # p-value for 'greater'
## [1] 0.02573309
                                                
beta <- pt((mkrit - mue.a)/sem, n-1, lower.tail=T)           # type II error beta (NOT the power)
beta
## [1] 0.1412941

1-beta;                                                      # power = 1 - beta
## [1] 0.8587059

Beispiel Körpertemperatur:

# One-sided one-sample t test: is the mean body temperature above 37 degrees?
temp <- c(36.8, 37.2, 37.5, 37.0, 36.9, 37.4, 37.9, 38.0)

tt <- t.test(temp, mu = 37, alternative = "greater")
tt
## 
##  One Sample t-test
## 
## data:  temp
## t = 2.1355, df = 7, p-value = 0.03505
## alternative hypothesis: true mean is greater than 37
## 95 percent confidence interval:
##  37.03807      Inf
## sample estimates:
## mean of x 
##   37.3375

7.3.2.2 Fallzahl und Power

# Iterative sample-size estimation for the one-sample t test
d <- 15; s <- 30
effekt <- d / s                      # standardized effect size

alpha <- 0.05; beta <- 0.20
# step 1: normal approximation
n.1 <- ceiling((qnorm(1 - alpha) + qnorm(1 - beta))^2 / effekt^2)
n.1
## [1] 25
# steps 2-3: refine with t quantiles until n stabilizes
n.2 <- ceiling((qt(1 - alpha, n.1 - 1) + qt(1 - beta, n.1 - 1))^2 / effekt^2)
n.2
## [1] 27
n.3 <- ceiling((qt(1 - alpha, n.2 - 1) + qt(1 - beta, n.2 - 1))^2 / effekt^2)
n.3
## [1] 27

                                                           # function power.t.test()
power.t.test(delta=15, sd=30, sig.level=0.05, power=0.80, 
             type="one.sample",  alternative="one.sided")
## 
##      One-sample t test power calculation 
## 
##               n = 26.13751
##           delta = 15
##              sd = 30
##       sig.level = 0.05
##           power = 0.8
##     alternative = one.sided
# power as a function of sample size for n = 10..50
par(mfrow=c(1,1), lwd=2, font.axis=2, bty="n", ps=11)
power <- power.t.test(n=10:50, delta=15, sd=30, 
             sig.level=0.05, power=NULL, type="one.sample", 
             alternative="one.sided")$power

plot(10:50, power, xlim=c(10,50), ylim=c(0.30, 1.0), type="b", las=1,
     xlab="Anzahl der Fälle", ylab="Teststärke (Power)"); grid()