#Part 1
#Attempt to replicate with n=500 per group, mu=19 (church), 20 (nochurch),
#sigma=5 (both groups)

library(dplyr)   #needed below for sample_n()

d<-read.csv(file.choose())

t.test(mot_apol ~ group, data=d)   #Replication
tapply(d$mot_apol,d$group,mean)
tapply(d$mot_apol,d$group,sd)

#Simulate a new dataset with these group means and SDs (n = 500 per group)
grp<-rep(c("church","nochurch"),each=500)   #group labels for the simulated data
mta<-c(rnorm(500,mean=19.29,sd=4.1),
       rnorm(500,mean=20.12,sd=4.1))
t.test(mta~grp)   #probably significant as well

#Replication for approx n = 50 per group
d2<-sample_n(d,100)
table(d2$group)
t.test(mot_apol ~ group, data=d2)

#Run these lines over and over again
#to get a sense of the replicability
#(an automated version is sketched at the end of this script)
#What does replicate pretty well? (CI width?)
#Did the effect replicate?

#Part 2
#Let's see if flexible stopping rules actually affect the probability
#of rejecting the null hypothesis

#Run the first four lines first; this is your "initial data"
#Note that we are sampling both male and female data from a standard
#normal distribution, so the population means are equal (i.e., Ho is true)
dep<-rnorm(6)
sex<-rep(c("m","f"),c(3,3))
dat<-data.frame(dep,sex)
t.test(dep ~ sex, data=dat)

#Add one case at a time, and check for significance each time
#Highlight the following three lines, and repeatedly click on "Run" or
#press ctrl-enter
#Stop if the p-value goes below .10 (our alpha level) or you get tired
dat[nrow(dat)+1,1]<-rnorm(1)
dat[nrow(dat),2]<-sample(c("m","f"),1)
t.test(dep ~ sex, data=dat)

#Did you find that the effect ever became statistically significant?
#(A Monte Carlo version of this procedure is sketched at the end of this script)
#Now imagine that there actually is an effect, even if it's small
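
#--- Added sketch for Part 1: automate "run these lines over and over" ---
#A minimal sketch, assuming d has been loaded above (with columns mot_apol
#and group) and dplyr is attached for sample_n(). The number of repetitions
#(reps = 1000) and the .05 cutoff are illustrative choices, not part of the
#original exercise.
reps<-1000
sim<-replicate(reps, {
  d2<-sample_n(d,100)                        #fresh subsample each run
  tt<-t.test(mot_apol ~ group, data=d2)      #same test as above
  c(p=tt$p.value, ci_width=diff(tt$conf.int))
})
mean(sim["p",] < .05)        #proportion of runs significant at .05
summary(sim["ci_width",])    #how much the CI width varies across runs
#The CI width should be fairly stable across runs, while the p-value
#(and hence "did it replicate?") bounces around much more.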
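
#--- Added sketch for Part 2: Monte Carlo version of the optional-stopping demo ---
#A minimal sketch, assuming Ho is true (all cases drawn from a standard
#normal). Each simulated "researcher" starts with 3 cases per group, adds one
#case at a time, checks the p-value after each addition, and stops as soon as
#p < .10 or the sample reaches max_n total cases. The names one_study and
#stop_rate, and the max_n = 100 cutoff, are illustrative assumptions.
one_study<-function(max_n=100, alpha=.10) {
  dep<-rnorm(6)
  sex<-rep(c("m","f"),c(3,3))
  while (length(dep) < max_n) {
    p<-t.test(dep ~ sex)$p.value
    if (p < alpha) return(TRUE)            #stopped early: "significant"
    dep<-c(dep, rnorm(1))                  #add one more case...
    sex<-c(sex, sample(c("m","f"),1))      #...with a random group label
  }
  t.test(dep ~ sex)$p.value < alpha        #final check at max_n
}
stop_rate<-mean(replicate(1000, one_study()))
stop_rate   #typically well above the nominal .10, even though Ho is true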