Script C: Experiment 2

In [1]:
library(osfr)
library(tidyverse)
── Attaching core tidyverse packages ──────────────────────── tidyverse 2.0.0 ──
✔ dplyr     1.1.4     ✔ readr     2.1.5
✔ forcats   1.0.0     ✔ stringr   1.5.1
✔ ggplot2   3.5.1     ✔ tibble    3.2.1
✔ lubridate 1.9.3     ✔ tidyr     1.3.1
✔ purrr     1.0.2     
── Conflicts ────────────────────────────────────────── tidyverse_conflicts() ──
✖ dplyr::filter() masks stats::filter()
✖ dplyr::lag()    masks stats::lag()
ℹ Use the conflicted package (<http://conflicted.r-lib.org/>) to force all conflicts to become errors
library(knitr)
library(gt)
library(gtExtras)
library(rstatix)

Attaching package: 'rstatix'

The following object is masked from 'package:stats':

    filter
library(ggpubr)
library(here)
here() starts at /Users/rp3650/Library/CloudStorage/GoogleDrive-robpetrosino@gmail.com/My Drive/Academics/projects/morphology/morphological-decomposition/sub-projects/frequency-effects/frequency-effect_masked-priming
library(predictionInterval)
library(lme4)
Loading required package: Matrix

Attaching package: 'Matrix'

The following objects are masked from 'package:tidyr':

    expand, pack, unpack
library(car)
Loading required package: carData

Attaching package: 'car'

The following object is masked from 'package:dplyr':

    recode

The following object is masked from 'package:purrr':

    some
library(emmeans)
Welcome to emmeans.
Caution: You lose important information if you filter this package's results.
See '? untidy'

Data analysis

In [2]:

# load the raw data dataframe
exp2_data_folder <- "data/experiment2"
exp2_rawdata_filename <- "experiment_2_preprocessed_data.csv"

## 02. check if the rawdata file exists. if not, download it from OSF.
if (!file.exists(here(exp2_data_folder, exp2_rawdata_filename))) {
  osf_retrieve_file("k3gpc") |> 
    osf_download(path = here(exp2_data_folder),
                 conflicts = "overwrite") 
}

## 03. read the data into R.
exp2_rawdata <- here(exp2_data_folder, exp2_rawdata_filename) |>
  read.csv(na.strings = c("", "NA")) %>%
  mutate(primeTime = primeDuration - maskDuration) %>% # calculating the actual SOA
  rename(list = "Group_Nr")

exp2_info <- list()
exp2_info$intended_prime_duration <- 33
exp2_info$prime_dur_lb <- 25
exp2_info$prime_dur <- 50
exp2_info$prime_dur_ub <- 60
exp2_info$rt_lb <- 200
exp2_info$rt_ub <- 1800
exp2_info$freq_conditions <- c("high", "low", "non-word")
exp2_info$n_recruited <- exp2_rawdata$Rec_Session_Id |>
  unique() |>
  length()

exp2_rawdata.sub <- exp2_rawdata %>%
  filter(!is.na(TimeMeasure_Mean) & !is.na(primeDuration) & !is.na(responseError))

exp2_subj.error <- exp2_rawdata.sub %>% group_by(Crowdsourcing_SubjId) %>%
  summarise(mean.error = mean(responseError))
            
exp2_info$summary <- with(
  transform(exp2_rawdata.sub,
            RT_inrange = ifelse(RT >= exp2_info$rt_lb & RT <= exp2_info$rt_ub, 1, 0),
            Prime_inrange = ifelse((primeDuration - maskDuration) >= exp2_info$prime_dur_lb &
                                     (primeDuration - maskDuration) <= exp2_info$prime_dur_ub, 1, 0)),
  {
    data.frame(
      aggregate(Start_Time ~ Rec_Session_Id + Crowdsourcing_SubjId, data = exp2_rawdata.sub, unique),
      aggregate(End_Time_Local ~ Rec_Session_Id + Crowdsourcing_SubjId, data = exp2_rawdata.sub, unique),
      aggregate(cbind(list, SelectedGender, SelectedAge) ~ Rec_Session_Id + Crowdsourcing_SubjId, data = exp2_rawdata.sub, unique),
      aggregate(cbind(responseError, RT_inrange, Prime_inrange) ~ Rec_Session_Id + Crowdsourcing_SubjId, mean, data = exp2_rawdata.sub)
    )
  }
)

exp2_info$summary <- exp2_info$summary[, -grep("Rec_Session_Id.|Crowdsourcing_SubjId.", colnames(exp2_info$summary))] # remove all extra aggregating columns (subj ID)

exp2_info$summary$Duration <- interval(ymd_hms(exp2_info$summary$Start_Time),
                                       ymd_hms(exp2_info$summary$End_Time_Local)) |>
  lapply(function(interval_value) {interval_value / dminutes(1)}) |>
  unlist()
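
The nested aggregate() calls above produce one summary row per subject. As a readability aid, the same per-subject summary can be written in dplyr; the sketch below is not part of the original pipeline and assumes the timestamp and demographic columns are constant within a subject.

# sketch only: dplyr rendering of the per-subject summary built with aggregate() above
exp2_info_summary_alt <- exp2_rawdata.sub %>%
  mutate(RT_inrange = as.integer(RT >= exp2_info$rt_lb & RT <= exp2_info$rt_ub),
         Prime_inrange = as.integer(primeTime >= exp2_info$prime_dur_lb &
                                    primeTime <= exp2_info$prime_dur_ub)) %>%
  group_by(Rec_Session_Id, Crowdsourcing_SubjId) %>%
  summarise(across(c(Start_Time, End_Time_Local, list, SelectedGender, SelectedAge), first),
            across(c(responseError, RT_inrange, Prime_inrange), mean),
            .groups = "drop")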

Step 1: subject and item performance

In [3]:

exp2_step1_goodsubj <- exp2_info$summary |>
  subset(responseError <= .3) 

exp2_step1_subj_remain <- exp2_step1_goodsubj |> nrow()

exp2_step1_item.err <- exp2_rawdata.sub %>% group_by(condition_rec, target_rec) %>%
  summarise(word.percent=mean(responseError)*100) %>% 
  filter(word.percent > 30)
`summarise()` has grouped output by 'condition_rec'. You can override using the
`.groups` argument.

exp2_subj_filter_1 <- exp2_step1_goodsubj$Rec_Session_Id
exp2_item_filter_1 <- exp2_step1_item.err$target_rec

exp2_data_step1 <- exp2_rawdata.sub |>
  subset(condition_rec %in% exp2_info$freq_conditions) |>
  subset(Rec_Session_Id %in% exp2_subj_filter_1 & 
         !target_rec %in% exp2_item_filter_1)

Step 2: prime durations

In [4]:

exp2_summary.primeTime <- exp2_rawdata.sub %>% 
  summarise(meanPrimeTime = round(mean(primeTime), 2), 
            sdPrimeTime = round(sd(primeTime), 2))

exp2_primeTimeRangeSummary <- exp2_rawdata.sub %>% 
  group_by(primeTime) %>%
  mutate(range = ifelse(primeTime < exp2_info$prime_dur_lb, "below", 
                        ifelse(primeTime > exp2_info$prime_dur_ub, "above",
                               "in range"))) %>% 
  group_by(range) %>% tally() %>% ungroup() %>%
  mutate(range.percent = round((n*100)/nrow(exp2_rawdata.sub),2))

exp2_data_step2 <- exp2_data_step1  |>
  subset(primeTime >= exp2_info$prime_dur_lb & primeTime <= exp2_info$prime_dur_ub)

exp2_step2_subj_remain <- exp2_data_step2$Rec_Session_Id |>
  unique() |>
  length()

exp2_step2_trials_remain <- nrow(exp2_data_step2)

Step 3: RT distribution

In [5]:

# RT outliers 
exp2_data_step3 <- exp2_data_step2 |> 
  subset(RT >= exp2_info$rt_lb & RT <= exp2_info$rt_ub)

exp2_step3_subj_remain <- exp2_data_step3$Rec_Session_Id |>
  unique() |>
  length()

exp2_step3_trials_remain <- nrow(exp2_data_step3)

# error trial removal

exp2_data_step3b <- exp2_data_step3  |>
  subset(responseError == 0)

exp2_step3b_subj_remain <- exp2_data_step3b$Rec_Session_Id |>
  unique() |>
  length()

exp2_step3b_trials_remain <- nrow(exp2_data_step3b)

# remove subjects with fewer than 7 trials in at least one condition*primetype combination (half of the total number of items per combination)
rt_data_labels <- c("Rec_Session_Id", "condition_rec", "primetype_rec", "RT")

exp2_subj_filter_2 <- exp2_data_step3b[, rt_data_labels] |>
  aggregate(RT ~ ., FUN = length, drop = FALSE) |>
  subset(RT < 7, select = Rec_Session_Id) |>
  unique() |>
  unlist()
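
### (sketch, not part of the original pipeline) the aggregate() call above is roughly
### equivalent to counting trials per subject within each observed condition x primetype
### cell and flagging subjects with any cell below 7 trials; subjects missing a cell
### entirely are caught by the completeness check right below
exp2_subj_filter_2_alt <- exp2_data_step3b %>%
  count(Rec_Session_Id, condition_rec, primetype_rec) %>%
  filter(n < 7) %>%
  distinct(Rec_Session_Id) %>%
  pull(Rec_Session_Id)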

### we also want to make sure that all subjects have all condition x primetype combinations; subjects who lost all trials of a given combination along the way are removed
exp2_subj_filter_conditions <- 
  exp2_data_step3b %>%
  group_by(Rec_Session_Id) %>% 
  distinct(condition_rec, primetype_rec) %>% 
  tally() %>% filter(n != 6) %>% pull(Rec_Session_Id)

exp2_data_final <- exp2_data_step3b |>
  subset(!(Rec_Session_Id %in% exp2_subj_filter_2) & !(Rec_Session_Id %in% exp2_subj_filter_conditions)) %>%
  mutate(condition_rec = as.factor(condition_rec), primetype_rec=as.factor(primetype_rec))

exp2_final_subj_remain <- exp2_data_final$Rec_Session_Id |>
  unique() |> 
  length()
  
exp2_final_trials_remain <- nrow(exp2_data_final)
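
As a quick sanity check (a sketch, not in the original script), one can verify that every remaining subject contributes all six condition x primetype cells, each with at least 7 correct trials:

# sanity-check sketch: expect all_six_cells and all_at_least_7 to both be TRUE
exp2_data_final %>%
  count(Rec_Session_Id, condition_rec, primetype_rec) %>%
  group_by(Rec_Session_Id) %>%
  summarise(n_cells = n(), min_trials = min(n), .groups = "drop") %>%
  summarise(all_six_cells = all(n_cells == 6), all_at_least_7 = all(min_trials >= 7))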

Results

In [6]:

# error rates averages
### recompute the subject filters for the error-rate analysis; crucially, the trial counts here are computed on the dataset *before* the error-trial removal step
exp2_subj_filter_2_with.errors <- exp2_data_step3[, rt_data_labels] |>
  aggregate(RT ~ ., FUN = length, drop = FALSE) |>
  subset(RT < 7, select = Rec_Session_Id) |>
  unique() |>
  unlist()
# this step just makes sure that the same subjects will be removed from both datasets
exp2_subj_filter_2_with.errors <- union(exp2_subj_filter_2_with.errors, exp2_subj_filter_2)

### again, make sure that all subjects have all condition x primetype combinations; subjects who lost all trials of a given combination are removed
exp2_subj_filter_conditions_with.errors <- 
  exp2_data_step3 %>%
  group_by(Rec_Session_Id) %>% 
  distinct(condition_rec, primetype_rec) %>% 
  tally() %>% filter(n != 6) %>% pull(Rec_Session_Id)

exp2_data_final_with.errors <- exp2_data_step3 |>
  subset(!(Rec_Session_Id %in% exp2_subj_filter_2_with.errors) & 
           !(Rec_Session_Id %in% exp2_subj_filter_conditions_with.errors)) 

exp2_error.rates <- exp2_data_final_with.errors %>%
  mutate(primetype_rec = factor(primetype_rec, levels=c("unrelated", "related")),
         condition_rec = factor(condition_rec, levels=c("high", "low", "non-word"))) %>%
  group_by(condition_rec, primetype_rec, Rec_Session_Id) %>%
  summarise(error.percent=mean(responseError)*100)
`summarise()` has grouped output by 'condition_rec', 'primetype_rec'. You can
override using the `.groups` argument.

# RT averages
exp2_rt.avg_subj <- exp2_data_final %>% 
  group_by(Rec_Session_Id, condition_rec, primetype_rec) %>%
  summarise(meanRT = mean(RT))
`summarise()` has grouped output by 'Rec_Session_Id', 'condition_rec'. You can
override using the `.groups` argument.

exp2_rt_cor <- exp2_data_final %>% 
  group_by(Rec_Session_Id, condition_rec, primetype_rec) %>%
  dplyr::summarise(meanRT=mean(RT)) %>%
  select(Rec_Session_Id, condition_rec, primetype_rec, meanRT) %>% 
  pivot_wider(names_from='primetype_rec', values_from=c('meanRT')) %>%
  group_by(condition_rec) %>%
  summarise(cor=cor(unrelated, related))
`summarise()` has grouped output by 'Rec_Session_Id', 'condition_rec'. You can
override using the `.groups` argument.

# RT + error averages (by subject)
exp2_avgs_subj <- merge(exp2_rt.avg_subj, exp2_error.rates, by=c("Rec_Session_Id", "condition_rec", "primetype_rec"))

exp2_mop.err_across <- exp2_avgs_subj %>%
  group_by(condition_rec, primetype_rec) %>%
  summarise(gd.mean=mean(meanRT, na.rm=T), sd=sd(meanRT, na.rm=T), mean.error=mean(error.percent)) %>%
  pivot_wider(id_cols="condition_rec", names_from=primetype_rec, values_from=c(gd.mean, sd, mean.error)) %>%
  left_join(exp2_rt_cor, by="condition_rec")
`summarise()` has grouped output by 'condition_rec'. You can override using the
`.groups` argument.

#### ERROR PRIMING CALCULATIONS ####
# for error rates we only run the statistical tests; no numerical priming calculations are reported

### 2x2 ANOVA
exp2_errors_anova <- exp2_error.rates |>
  transform(Rec_Session_Id = factor(Rec_Session_Id)) |>
  subset(condition_rec %in% c("high", "low")) |>
  aov(error.percent ~ condition_rec * primetype_rec + Error(Rec_Session_Id/(condition_rec * primetype_rec)), data = _)

exp2_error_stats_main <- exp2_error.rates %>%
  mutate(primetype_rec = fct_relevel(primetype_rec, "unrelated", "related")) %>%
  group_by(condition_rec) %>%
  t_test(error.percent ~ primetype_rec, paired=T, detailed=T) %>% select(-3:-7) %>%
  rename(factor = "condition_rec", t='statistic')

### 2x2 GLMER
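# sum-to-zero contrasts, so that the Type III Wald chi-square tests reported by Anova() below are interpretable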
options(contrasts = c("contr.sum","contr.poly"))
exp2_error_2x2glmer <- glmer(responseError ~ condition_rec * primetype_rec + (1|Rec_Session_Id) + (1|target_rec), 
                             data = subset(exp2_data_final_with.errors, condition_rec %in% c("high", "low")), 
                             family = binomial, control=glmerControl(optimizer = "bobyqa", optCtrl=list(maxfun=1e6)))
Anova(exp2_error_2x2glmer, type = 3)
Analysis of Deviance Table (Type III Wald chisquare tests)

Response: responseError
                                Chisq Df Pr(>Chisq)    
(Intercept)                 1948.9866  1  < 2.2e-16 ***
condition_rec                  8.6626  1   0.003248 ** 
primetype_rec                344.7867  1  < 2.2e-16 ***
condition_rec:primetype_rec    7.1806  1   0.007370 ** 
---
Signif. codes:  0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1

### 2x3 GLMER
exp2_error_2x3glmer <- glmer(responseError ~ condition_rec * primetype_rec + (1|Rec_Session_Id) + (1|target_rec), 
                             data = exp2_data_final_with.errors, 
                             family = binomial, control=glmerControl(optimizer = "bobyqa", optCtrl=list(maxfun=1e6)))

exp2_error_emmeans_glmm <- emmeans(exp2_error_2x3glmer, pairwise ~ primetype_rec|condition_rec)

### MAIN EFFECTS
#### t-test
exp2_errors_stats_main <- exp2_error.rates %>% 
  group_by(condition_rec) %>%
  t_test(error.percent ~ primetype_rec, paired=T)

#### RT PRIMING CALCULATIONS ####

### MAIN EFFECTS

### 2x2 ANOVA
exp2_mop_anova <- exp2_rt.avg_subj |>
  transform(Rec_Session_Id = factor(Rec_Session_Id)) |>
  subset(condition_rec %in% c("high", "low")) |>
  aov(meanRT ~ condition_rec * primetype_rec + Error(Rec_Session_Id/(condition_rec * primetype_rec)), data = _)

### 2x2 GLMER 
options(contrasts = c("contr.sum", "contr.poly"))

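# the Gamma family with an identity link models the raw (untransformed) RTs while accommodating their positive skew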
exp2_mop_2x2glmer <- glmer(RT ~ condition_rec * primetype_rec + (1|Crowdsourcing_SubjId) + (1|target_rec), 
                           data = subset(exp2_data_final, condition_rec %in% c('high', 'low')), 
                           family = Gamma(link="identity"), control=glmerControl(optimizer = "bobyqa", optCtrl=list(maxfun=1e6)))

Anova(exp2_mop_2x2glmer)
Analysis of Deviance Table (Type II Wald chisquare tests)

Response: RT
                               Chisq Df Pr(>Chisq)    
condition_rec                293.889  1  < 2.2e-16 ***
primetype_rec               2674.529  1  < 2.2e-16 ***
condition_rec:primetype_rec   50.823  1  1.011e-12 ***
---
Signif. codes:  0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1

### 2x3 GLMER
exp2_mop_2x3glmer <- glmer(RT ~ condition_rec * primetype_rec + (1|Crowdsourcing_SubjId) + (1|target_rec), 
                        data = exp2_data_final %>% mutate(primetype_rec = factor(primetype_rec, levels=c("unrelated", "related"))),
                        family = Gamma(link="identity"), control=glmerControl(optimizer = "bobyqa", optCtrl=list(maxfun=1e6)))

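# the emmeans chain below reshapes the output into one row per condition, with the unrelated and related
# estimated marginal means (plus SEs and 95% CIs) side by side and their pairwise difference labeled MOP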
exp2_emmeans_glmm <- emmeans(exp2_mop_2x3glmer, pairwise ~ primetype_rec|condition_rec) |> rbind() |> as_tibble() %>%
  select(-contrast, -df) %>% 
  pivot_wider(names_from=primetype_rec, values_from = c(emmean, SE, asymp.LCL, asymp.UCL)) %>%
  mutate(across(contains("emmean"), \(x) round(x)), 
         across(contains("asymp"), \(x) round(x)),
         across(contains("SE"), \(x) round(x, 2))) %>%
  relocate(c(emmean_unrelated, SE_unrelated, asymp.LCL_unrelated, asymp.UCL_unrelated), .before=emmean_related) %>%
    unite("CI_unrelated", asymp.LCL_unrelated:asymp.UCL_unrelated, sep=" ") %>%
  relocate(c(emmean_related, SE_related, asymp.LCL_related, asymp.UCL_related), .before=emmean_related) %>%
    unite("CI_related", asymp.LCL_related:asymp.UCL_related, sep=" ") %>%
    unite("CI_MOP", asymp.LCL_.:asymp.UCL_., sep=" ") %>%
  rename(MOP = "emmean_.", SE_MOP = "SE_.")

exp2_contrasts_glmm <- emmeans(exp2_mop_2x3glmer, pairwise ~ primetype_rec|condition_rec)$contrasts |> rbind() |> as_tibble() %>%
  select(condition_rec, z.ratio:p.value) %>% mutate(across(p.value, \(x) signif(x, 3)))

exp2_estimates_glmm <- exp2_emmeans_glmm %>%
  left_join(exp2_contrasts_glmm, by='condition_rec') %>%
  mutate(across(contains("CI"), \(x) paste0("[", x, "]"))) %>%
  rename(factor = 'condition_rec')

##### PRIMING CALCULATIONS
#### by subject
exp2_mop_subj <- exp2_rt.avg_subj %>% 
  pivot_wider(names_from=primetype_rec, values_from=meanRT) %>%
  mutate(priming = unrelated-related)

#### descriptive stats
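# ES below is the within-subject standardized effect size: the mean priming effect divided by the SD of the by-subject priming scores (Cohen's d_z)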
exp2_gdavg_mop <- exp2_mop_subj %>%
  group_by(condition_rec) %>%
  summarise(MOP = mean(priming), se = sd(priming)/sqrt(n()), ci=(qt(0.975, n()-1)*se),
            sd=sd(priming), ES=round(MOP/sd, 2))

#### summary table
exp2_gdavg_mop_summary <- exp2_gdavg_mop %>%
  left_join(., exp2_mop.err_across, by='condition_rec') %>%
  mutate(across(c(2:5, 7:12), round), across(c(13), \(x) round(x, 2))) %>%
  mutate(ci.lb = paste0("[", MOP-ci), ci.ub = paste0(MOP+ci, "]")) %>%
  unite("CI", ci.lb:ci.ub, sep = " ") %>% select(-ci, -se) %>% 
  rename(factor = "condition_rec") %>%
  relocate(gd.mean_related:cor, .before=MOP) %>% 
  relocate(gd.mean_related, .after=gd.mean_unrelated) %>%
  relocate(CI, .after=MOP)

#### t-test
exp2_rt_stats_main <- exp2_rt.avg_subj %>%
  mutate(primetype_rec = fct_relevel(primetype_rec, "unrelated", "related")) %>%
  group_by(condition_rec) %>%
  t_test(meanRT ~ primetype_rec, paired=T) %>% select(-2:-6) %>%
  rename(factor = "condition_rec", t='statistic')

### INTERACTION EFFECT

#### descriptive stats
exp2_gdavg_fae <- exp2_mop_subj %>%
  select(-unrelated, -related) %>%
  pivot_wider(names_from="condition_rec", values_from="priming") %>%
  mutate(interaction = low-high) %>% ungroup() %>%
  summarise(factor = "frequency:primetype", 
            sd=sd(interaction), se = sd/sqrt(n()), ci = (qt(0.975, n()-1)*se)) %>%
  mutate(across(c(2:4), round))

##### summary table
exp2_gdavg_fae_summary <- exp2_mop_subj %>% 
  select(-unrelated, -related) %>% ungroup() %>%
  pivot_wider(names_from="condition_rec", values_from="priming") %>%
  summarise(mean_high = round(mean(high)), mean_low = round(mean(low)), cor=round(cor(high, low), 3)) %>%
  bind_cols(., exp2_gdavg_fae) %>%
  mutate(MOP = mean_low-mean_high, ES = round(MOP/sd, 2), ci.lb = paste0("[", MOP-ci), ci.ub = paste0(MOP+ci, "]")) %>%
  unite("CI", ci.lb:ci.ub, sep = " ") %>% select(-ci, -se) %>% 
  relocate(CI, .after=MOP) %>%
  relocate(factor, .before=mean_high)
  
#### t-test
exp2_rt_stats_interaction <- exp2_mop_subj %>% ungroup() %>%
  filter(condition_rec != 'non-word') %>%
  mutate(condition_rec = fct_relevel(condition_rec, "low", "high")) %>%
  t_test(priming ~ condition_rec, paired=TRUE) %>% 
  #mutate_if(is.numeric, list(~as.character(signif(., 3)))) %>%
  select(-1, -4:-5) %>%
  unite("factor", group1:group2) %>%
  rename(t='statistic') %>%
  mutate(factor = "frequency:primetype")

## estimates from the GLMM model
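## the first row of the pairwise-by-pairwise interaction contrast is (unrelated - related) in the high condition
## minus (unrelated - related) in the low condition; the sign flip below re-expresses it as the low - high
## priming difference (i.e., the frequency attenuation effect)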
exp2_emmeans_glmm_fae <- confint(contrast(emmeans(exp2_mop_2x3glmer, ~ primetype_rec*condition_rec),
                                          interaction = c("pairwise", "pairwise"), adjust = 'bonferroni')) %>%
  slice(1) %>%
  select(estimate:SE, asymp.LCL, asymp.UCL) %>%
  mutate(across(c(1, 3:4), \(x) x*-1),
         across(c(1:4), \(x) round(x, 2))) %>%
  mutate(factor = "frequency*primetype") %>%
  rename(MOP = "estimate", SE_MOP = "SE") %>%
  unite("CI_MOP", asymp.UCL:asymp.LCL, sep = " ") %>%
  relocate(CI_MOP, .after = SE_MOP) %>%
  relocate(factor, .before = MOP) %>%
  mutate(CI_MOP = paste0("[", CI_MOP, "]"))

exp2_z_glmm_fae <- contrast(emmeans(exp2_mop_2x3glmer, ~ primetype_rec*condition_rec), interaction = c("pairwise", "pairwise"), adjust='bonferroni') |>
  rbind() |> as_tibble() %>% slice(1) %>% select(z.ratio:p.value) %>% 
  mutate(across(c(1), \(x) x*-1)) %>%
  mutate(factor = "frequency*primetype") %>% relocate(factor, .before=z.ratio) 

exp2_estimates_glmm_fae <- left_join(exp2_emmeans_glmm_fae, exp2_z_glmm_fae, by='factor')

Stats summary

In [8]:

exp2_summary.results_mop <- merge(exp2_gdavg_mop_summary, exp2_rt_stats_main, by='factor')
exp2_summary.results_fae <- merge(exp2_gdavg_fae_summary, exp2_rt_stats_interaction, by='factor') |>
  select(-mean_high, -mean_low)

exp2_summary.results <- bind_rows(exp2_summary.results_mop, exp2_summary.results_fae)
  
exp2_summary.results %>%
  mutate(t = round(t, 2)) %>%
  relocate(c("sd_unrelated", "mean.error_unrelated"), .before=gd.mean_related) %>%
  gt() %>%
  cols_label(
    CI = "95% CI",
    contains("mean") ~ "mean",
    contains("sd") ~ "SD", 
    contains("error") ~ "Error (%)"
  ) %>%
  tab_spanner(
    label = "unrelated RT",
    columns = c(2:4)
  ) %>%
  tab_spanner(
    label = "repetition RT",
    columns = c(5:7)
  ) %>%
  tab_spanner(
    label = 'priming effects',
    columns = c(9:12)
  ) %>%
  tab_spanner(
    label = md("_t_-test"),
    columns = c(13:15)
  ) %>%
  cols_label(
    sd = md("SD~p~")
  ) %>%
  cols_label(
    t = md("_t_"),
    p = md("_p_"),
  ) %>%
   sub_missing(
    missing_text = " "
  )
Experiment 2. Summary of the word priming results within the ANOVA model. Legend. MOP: magnitude of priming.

                     |    unrelated RT     |    repetition RT    |       |       priming effects      |        t-test
factor               | mean  SD  Error (%) | mean  SD  Error (%) |  cor  | MOP  95% CI   SDp    ES    |  t      df    p
high                 | 574   83   3        | 548   83   2        | 0.850 |  26  [24 28]  46    0.56   |  24.60  1923  2.27e-116
low                  | 605   89   6        | 570   89   3        | 0.840 |  35  [33 37]  51    0.69   |  30.19  1923  3.51e-164
non-word             | 629   108  4        | 633   110  5        | 0.900 |  -4  [-6 -2]  50   -0.08   |  -3.53  1923  4.32e-04
frequency:primetype  |                     |                     | 0.024 |   9  [6 12]   68    0.13   |   6.07  1923  1.55e-09
In [10]:

exp2_summary.results_glmm <- bind_rows(exp2_estimates_glmm, exp2_estimates_glmm_fae)
  
exp2_summary.results_glmm %>%
  mutate(z.ratio = round(z.ratio, 2),
         p.value = "<.0001") %>%
  gt() %>%
  cols_label(
    contains("CI") ~ "95% CI",
    contains("emmean") ~ "mean",
    contains("SE") ~ "SE"
  ) %>%
  tab_spanner(
    label = "unrelated RT",
    columns = c(2:4)
  ) %>%
  tab_spanner(
    label = "repetition RT",
    columns = c(5:7)
  ) %>%
  tab_spanner(
    label = 'priming effects',
    columns = c(8:10)
  ) %>%
   sub_missing(
    missing_text = " "
  )
Experiment 2. Summary of the word priming results based on the 2x3 GLMM model, with the same fixed and random effects as the 2x2 GLMM model, but including the non-word condition. Legend. MOP: magnitude of priming.

                     |      unrelated RT      |      repetition RT     |       priming effects       |         |
factor               | mean   SE    95% CI    | mean   SE    95% CI    | MOP     SE    95% CI        | z.ratio | p.value
high                 | 605    0.95  [603 608] | 579    0.83  [577 581] |  26.00  0.80  [24 28]       |   33.03 | <.0001
low                  | 633    0.70  [631 635] | 598    0.77  [596 600] |  35.00  0.82  [32 37]       |   42.41 | <.0001
non-word             | 650    0.96  [647 652] | 653    0.92  [650 655] |  -3.00  0.71  [-5 -1]       |   -4.28 | <.0001
frequency*primetype  |                        |                        |   8.46  1.05  [5.94 10.98]  |    8.04 | <.0001

Prediction intervals

In [12]:

exp1_summary.results <- structure(list(
  factor = c("high", "low", "non-word", "frequency:primetype"),
  gd.mean_unrelated = c(573, 605, 623, NA),
  gd.mean_related = c(555, 577, 625, NA),
  sd_related = c(85, 88, 103, NA),
  sd_unrelated = c(83, 88, 103, NA),
  mean.error_related = c(2, 3, 4, NA),
  mean.error_unrelated = c(3, 6, 4, NA),
  cor = c(0.86, 0.85, 0.91, 0.029),
  MOP = c(18, 28, -2, 10),
  CI = c("[16 20]", "[26 30]", "[-4 0]", "[7 13]"),
  sd = c(45, 49, 43, 66),
  ES = c(0.41, 0.58, -0.05, 0.15),
  t = c("19.7", "27.8", "-2.33", "7.24"),
  df = c("2340", "2340", "2340", "2340"),
  p = c("2.88e-80", "1.52e-147", "0.0197", "5.86e-13")
), row.names = c(NA, -4L), class = "data.frame")

exp1_high <- pi.m(M = exp1_summary.results$MOP[1], SD = exp1_summary.results$sd[1], n = as.numeric(exp1_summary.results$df[1]) + 1, rep.n = as.numeric(exp2_summary.results$df[1]) + 1)
exp1_low <- pi.m(M = exp1_summary.results$MOP[2], SD = exp1_summary.results$sd[2], n = as.numeric(exp1_summary.results$df[2]) + 1, rep.n = as.numeric(exp2_summary.results$df[2]) + 1)
exp1_nw <- pi.m(M = exp1_summary.results$MOP[3], SD = exp1_summary.results$sd[3], n = as.numeric(exp1_summary.results$df[3]) + 1, rep.n = as.numeric(exp2_summary.results$df[3]) + 1)
exp1_fae <- pi.m(M = exp1_summary.results$MOP[4], SD = exp1_summary.results$sd[4], n = as.numeric(exp1_summary.results$df[4]) + 1, rep.n = as.numeric(exp2_summary.results$df[4]) + 1)
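
# (sketch, not part of the original script) pi.m() presumably implements the textbook
# prediction interval for a future sample mean:
#   M1 +/- qt(0.975, n1 - 1) * SD1 * sqrt(1/n1 + 1/n2)
# worked check for the high-frequency condition, using the experiment 1 values above:
18 + c(-1, 1) * qt(0.975, 2341 - 1) * 45 * sqrt(1/2341 + 1/1924)
# ~ [15.3, 20.7], i.e. the [15 21] interval reported in the table below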

pis <- data.frame(
         factor = c("high", "low", "non-word", "frequency:primetype"),
         M = c(exp1_summary.results$MOP[1], exp1_summary.results$MOP[2], exp1_summary.results$MOP[3], exp1_summary.results$MOP[4]),
         SD = c(exp1_summary.results$sd[1], exp1_summary.results$sd[2], exp1_summary.results$sd[3], exp1_summary.results$sd[4]),
         N = rep(c(as.numeric(exp1_summary.results$df[1]) + 1), 4),
         rep.n = rep(c(as.numeric(exp2_summary.results$df[1]) + 1), 4),
         pi.lb = c(exp1_high$lower_prediction_interval, 
                   exp1_low$lower_prediction_interval, 
                   exp1_nw$lower_prediction_interval,
                   exp1_fae$lower_prediction_interval),
         pi.ub = c(exp1_high$upper_prediction_interval, 
                   exp1_low$upper_prediction_interval, 
                   exp1_nw$upper_prediction_interval,
                   exp1_fae$upper_prediction_interval)
        )

pis %>% 
  mutate(across(c(6:7), round),
         pi.lb = paste0("[", pi.lb),
         pi.ub = paste0(pi.ub, "]")) %>%
  unite("95% PI", pi.lb:pi.ub, sep=" ") %>%
  gt() %>%
  cols_label(
    M = "mean",
    N = "experiment 1",
    rep.n = "experiment 2") %>%
  tab_spanner(
    label = "sample size",
    columns = c(4:5)
  )
Prediction intervals calculated on the means and standard deviations (SD) of the conditions tested in experiment 1.

                     |      |    |         sample size         |
factor               | mean | SD | experiment 1 | experiment 2 | 95% PI
high                 |  18  | 45 |     2341     |     1924     | [15 21]
low                  |  28  | 49 |     2341     |     1924     | [25 31]
non-word             |  -2  | 43 |     2341     |     1924     | [-5 1]
frequency:primetype  |  10  | 66 |     2341     |     1924     | [6 14]