@article{Hu_2020,
  title = {Good {Me} {Bad} {Me}: {Does} {Valence} {Influence} {Self}-{Prioritization} {During} {Perceptual} {Decision}-{Making}?},
  volume = {6},
  number = {1},
  pages = {20},
  journal = {Collabra: Psychology},
  author = {Hu, Chuan-Peng and Lan, Yuxuan and Macrae, C. Neil and Sui, Jie},
  year = {2020},
  doi = {10.1525/collabra.301},
  url = {https://doi.org/10.1525/collabra.301},
  language = {en},
  copyright = {All rights reserved},
}
@Manual{R-papaja,
  title = {{papaja}: {Create} {APA} manuscripts with {R Markdown}},
  author = {Frederik Aust and Marius Barth},
  url = {https://github.com/crsh/papaja},
}
@article{schoenbrodt_sequential_2017,
  title = {Sequential hypothesis testing with {Bayes} factors: {Efficiently} testing mean differences},
  shorttitle = {Sequential hypothesis testing with {Bayes} factors},
  volume = {22},
  number = {2},
  pages = {322--339},
  journal = {Psychological Methods},
  issn = {1939-1463},
  publisher = {American Psychological Association},
  author = {Sch{\"o}nbrodt, Felix D. and Wagenmakers, Eric-Jan and Zehetleitner, Michael and Perugini, Marco},
  year = {2017},
  doi = {10.1037/met0000061},
  abstract = {Unplanned optional stopping rules have been criticized for inflating Type I error rates under the null hypothesis significance testing (NHST) paradigm. Despite these criticisms, this research practice is not uncommon, probably because it appeals to researcher’s intuition to collect more data to push an indecisive result into a decisive region. In this contribution, we investigate the properties of a procedure for Bayesian hypothesis testing that allows optional stopping with unlimited multiple testing, even after each participant. In this procedure, which we call Sequential Bayes Factors (SBFs), Bayes factors are computed until an a priori defined level of evidence is reached. This allows flexible sampling plans and is not dependent upon correct effect size guesses in an a priori power analysis. We investigated the long-term rate of misleading evidence, the average expected sample sizes, and the biasedness of effect size estimates when an SBF design is applied to a test of mean differences between 2 groups. Compared with optimal NHST, the SBF design typically needs 50\% to 70\% smaller samples to reach a conclusion about the presence of an effect, while having the same or lower long-term rate of wrong inference.},
  keywords = {Hypothesis Testing, Null Hypothesis Testing, Statistical Probability, Type I Errors, Mean},
}