Training fidelity

Training engagement across groups

# Select screening variables
  qn_data_train_time = qn_data %>% select(group, T_Self_Report, train_weeks)

# Rename groups
  qn_data_train_time$group = dplyr::recode(qn_data_train_time$group, 
                         CG = "Controls", TG = "Zirkus Empathico")

# Apply labels
  qn_data_train_time = apply_labels(qn_data_train_time,
                      T_Self_Report = "Training time (min)",
                      train_weeks = "Training duration (weeks)")
# Prepare table
  train_time_table =
    tbl_summary(
      qn_data_train_time,
      by = group, # split table by group
      type = c(T_Self_Report, train_weeks) ~ "continuous",
      statistic = list(all_continuous() ~ "{mean} ({sd})", # descriptives definition
                     all_categorical() ~ "{n} / {N} ({p}%)"),
      digits = all_continuous() ~ 2,
      missing = "no" # don't list missing data separately
    ) %>%
    add_n() %>% # add column with total number of non-missing observations
    modify_header(label = "**Variable**") %>% # update the column header
    bold_labels()
  
# Print table 
  train_time_table
| Variable | N | Controls, N = 38¹ | Zirkus Empathico, N = 36¹ |
|----------|---|-------------------|---------------------------|
| Training time (min) | 71 | 323.76 (127.95) | 351.21 (122.52) |
| Training duration (weeks) | 74 | 7.09 (0.90) | 6.98 (1.27) |

¹ Mean (SD)

# Correlation of training time with screen time
  #cor_qn_data = subset(qn_data, select = c(T_Self_Report, T_Screen_Time))
  #cor_qn_data = na.omit(cor_qn_data)
  #cor.test(cor_qn_data$T_Screen_Time, cor_qn_data$T_Self_Report, method = "pearson")

Difference testing for training duration (weeks)

tadaa_t.test(qn_data_train_time, train_weeks, group, direction = "two.sided", paired = FALSE, 
    var.equal = TRUE, conf.level = 0.95, print = c("markdown"))

Table 1: Two Sample t-test with alternative hypothesis: \(\mu_1 \neq \mu_2\)

| Diff | \(\mu_1\) Controls | \(\mu_2\) Zirkus Empathico | t | SE | df | \(CI_{95\%}\) | p | Cohen's d |
|------|--------------------|----------------------------|---|----|----|---------------|---|-----------|
| 0.11 | 7.09 | 6.98 | 0.43 | 0.26 | 72 | [-0.4, 0.62] | .668 | 0.1 |
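
For reference, the same pooled-variance comparison can be reproduced with base R's t.test; a minimal sketch using the qn_data_train_time object prepared above:

# Base-R equivalent of the tadaa_t.test call above (two-sided, pooled variance)
t.test(train_weeks ~ group, data = qn_data_train_time,
       var.equal = TRUE, conf.level = 0.95)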



Difference testing for total training time (minutes)

tadaa_t.test(qn_data_train_time, T_Self_Report, group, direction = "two.sided", paired = FALSE, 
    var.equal = TRUE, conf.level = 0.95, print = c("markdown"))

Table 2: Two Sample t-test with alternative hypothesis: \(\mu_1 \neq \mu_2\)

| Diff | \(\mu_1\) Controls | \(\mu_2\) Zirkus Empathico | t | SE | df | \(CI_{95\%}\) | p | Cohen's d |
|------|--------------------|----------------------------|---|----|----|---------------|---|-----------|
| -27.45 | 323.76 | 351.21 | -0.92 | 29.79 | 69 | [-86.87, 31.97] | .36 | -0.22 |



Parental involvement

tadaa_t.test(app_stats, Practiced_without_parent, group, direction = "two.sided", 
    paired = FALSE, var.equal = TRUE, conf.level = 0.95, print = c("markdown"))

Table 3: Two Sample t-test with alternative hypothesis: \(\mu_1 \neq \mu_2\)

| Diff | \(\mu_1\) Controls | \(\mu_2\) Zirkus Empathico | t | SE | df | \(CI_{95\%}\) | p | Cohen's d |
|------|--------------------|----------------------------|---|----|----|---------------|---|-----------|
| 0.26 | 3.38 | 3.11 | 0.85 | 0.31 | 70 | [-0.35, 0.88] | .396 | 0.2 |



# aggregate(Practiced_without_parent ~ group, data = app_stats, FUN = sd)
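
The commented line above computes the per-group standard deviation of the parental-involvement rating. A tidyverse equivalent, assuming app_stats contains the group and Practiced_without_parent columns used in the t-test above:

# Per-group SD of Practiced_without_parent (tidyverse equivalent of the aggregate() call)
app_stats %>%
  dplyr::group_by(group) %>%
  dplyr::summarise(sd_practiced = sd(Practiced_without_parent, na.rm = TRUE))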

Long-term effects

# http://www.danieldsjoberg.com/gtsummary/articles/tbl_summary.html


  load.Rdata(filename="./data/qn_data_itt.Rdata", "qn_data_itt")

# Restrict the sample to participants included in the long-term (follow-up) analyses
  qn_data_itt = subset(qn_data_itt, !(ID %in% c("3", "6", "7", "9", "17", "21", "26", "28", "29",
                       "34", "35", "36", "39", "40", "41", "42", "44", "46", "48", "49", "50",
                       "51", "54", "55", "56", "62", "65", "66", "68", "70", "71", "72", "73",
                       "75", "76")))

# Select screening variables
  qn_data_demog = subset(qn_data_itt, select = c("group", "sex", "age"))


# Rename groups
  qn_data_demog$group = dplyr::recode(qn_data_demog$group, 
                         CG = "Controls", TG = "Zirkus Empathico")

# Apply labels
  qn_data_demog = apply_labels(qn_data_demog,
                      sex = "Sex",
                      age = "Age (years)")

# Prepare table
  demog_table =
    tbl_summary(
      qn_data_demog,
      by = group, # split table by group
      type = c(age) ~ "continuous",
      statistic = list(all_continuous() ~ "{mean} ({sd})", # descriptives definition
                     all_categorical() ~ "{n} / {N} ({p}%)"),
      digits = all_continuous() ~ 2,
      missing = "no" # don't list missing data separately
    ) %>%
    add_n() %>% # add column with total number of non-missing observations
    add_p() %>% # test for a difference between groups
    modify_header(label = "**Variable**") %>% # update the column header
    bold_labels()
  
# Print table 
  demog_table
| Variable | N | Controls, N = 24¹ | Zirkus Empathico, N = 17¹ | p-value² |
|----------|---|-------------------|---------------------------|----------|
| Sex | 41 | | | >0.9 |
| Female | | 13 / 24 (54%) | 9 / 17 (53%) | |
| Male | | 11 / 24 (46%) | 8 / 17 (47%) | |
| Age (years) | 41 | 4.87 (0.69) | 5.08 (0.80) | 0.4 |

¹ n / N (%); Mean (SD)
² Pearson's Chi-squared test; Wilcoxon rank sum exact test
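
By default, add_p() selected Pearson's chi-squared test for sex and a Wilcoxon rank-sum test for age (see footnote 2). If specific tests are preferred, they can be requested explicitly; a minimal sketch building on the same input, with the test choices shown here being illustrative:

# Optional: request specific tests instead of gtsummary's defaults
tbl_summary(qn_data_demog, by = group) %>%
  add_p(test = list(all_continuous() ~ "t.test",
                    all_categorical() ~ "fisher.test"))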

Primary outcomes

GEM

# ANCOVA GEM
gem_lt_anc = aov_ez("ID", "GEM", qn_data_T3, between = c("group", "time"), covariate = c("GEM_Total_T1"), 
    observed = c("GEM_Total_T1"), factorize = FALSE, anova_table = list(correction = "none", 
        es = "pes"))

pander(gem_lt_anc$anova_table)
Anova Table (Type 3 tests)

| Effect | num Df | den Df | MSE | F | pes | Pr(>F) |
|--------|--------|--------|-----|---|-----|--------|
| group | 1 | 77 | 140.7 | 10.05 | 0.1154 | 0.002189 |
| time | 1 | 77 | 140.7 | 0.4229 | 0.005463 | 0.5174 |
| GEM_Total_T1 | 1 | 77 | 140.7 | 13.71 | 0.1511 | 0.0003997 |
| group:time | 1 | 77 | 140.7 | 0.4229 | 0.005463 | 0.5174 |
# Calculate generalized eta squared and its 95% CI
eff_size_prep = aov_car(GEM ~ group * time + Error(ID/time), data = qn_data_T3)
ges_GEM = eta_squared(eff_size_prep, generalized = "group")

# Extract the values and convert them to Cohen's d
# (indices 1-3 index the rows of ges_GEM, i.e. the group, time, and group:time terms)
GEM_d_group = 2 * (sqrt(ges_GEM$Eta2_generalized[1]/(1 - ges_GEM$Eta2_generalized[1])))
GEM_low_CI_group = 2 * (sqrt(ges_GEM$CI_low[1]/(1 - ges_GEM$CI_low[1])))
GEM_high_CI_group = 2 * (sqrt(ges_GEM$CI_high[1]/(1 - ges_GEM$CI_high[1])))

GEM_d_emo = 2 * (sqrt(ges_GEM$Eta2_generalized[2]/(1 - ges_GEM$Eta2_generalized[2])))
GEM_low_CI_emo = 2 * (sqrt(ges_GEM$CI_low[2]/(1 - ges_GEM$CI_low[2])))
GEM_high_CI_emo = 2 * (sqrt(ges_GEM$CI_high[2]/(1 - ges_GEM$CI_high[2])))

GEM_d_emo_gr = 2 * (sqrt(ges_GEM$Eta2_generalized[3]/(1 - ges_GEM$Eta2_generalized[3])))
GEM_low_CI_emo_gr = 2 * (sqrt(ges_GEM$CI_low[3]/(1 - ges_GEM$CI_low[3])))
GEM_high_CI_emo_gr = 2 * (sqrt(ges_GEM$CI_high[3]/(1 - ges_GEM$CI_high[3])))

d_T3 = c(GEM_d_emo, GEM_d_group, GEM_d_emo_gr)

low_T3 = c(GEM_low_CI_emo, GEM_low_CI_group, GEM_low_CI_emo_gr)

high_T3 = c(GEM_high_CI_emo, GEM_high_CI_group, GEM_high_CI_emo_gr)

eff_size_T3 = data.frame(d_T3, low_T3, high_T3)

pander(eff_size_T3)
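
The conversion above applies the standard relation between eta squared and Cohen's d for a two-group contrast (assuming groups of approximately equal size), here used on the generalized eta squared point estimates and their CI bounds:

\[ d = 2\sqrt{\frac{\eta^2_G}{1 - \eta^2_G}} \]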

EMK 3-6 EM P

EMK_EM_P_lt_anc = aov_ez("ID", "EMK_EM_P", qn_data_T3, between = c("group", "time"), 
    covariate = c("EMK_EM_P_T1"), observed = c("EMK_EM_P_T1"), factorize = FALSE, 
    anova_table = list(correction = "none", es = "pes"))

pander(EMK_EM_P_lt_anc$anova_table)
Anova Table (Type 3 tests)

| Effect | num Df | den Df | MSE | F | pes | Pr(>F) |
|--------|--------|--------|-----|---|-----|--------|
| group | 1 | 77 | 9.268 | 0.3967 | 0.005125 | 0.5307 |
| time | 1 | 77 | 9.268 | 0.1706 | 0.002211 | 0.6807 |
| EMK_EM_P_T1 | 1 | 77 | 9.268 | 6.272 | 0.07532 | 0.01438 |
| group:time | 1 | 77 | 9.268 | 0.8654 | 0.01111 | 0.3551 |

Secondary outcomes

EMK 3-6 ER P

EMK_ER_P_lt_anc = aov_ez("ID", "EMK_ER_P", qn_data_T3, between = c("group", "time"), 
    covariate = c("EMK_ER_P_T1"), observed = c("EMK_ER_P_T1"), factorize = FALSE, 
    anova_table = list(correction = "none", es = "pes"))

pander(EMK_ER_P_lt_anc$anova_table)
Anova Table (Type 3 tests)

| Effect | num Df | den Df | MSE | F | pes | Pr(>F) |
|--------|--------|--------|-----|---|-----|--------|
| group | 1 | 77 | 1.37 | 1.367 | 0.01745 | 0.2459 |
| time | 1 | 77 | 1.37 | 0.2737 | 0.003542 | 0.6024 |
| EMK_ER_P_T1 | 1 | 77 | 1.37 | 51.98 | 0.403 | 0.000000000332 |
| group:time | 1 | 77 | 1.37 | 0.5586 | 0.007202 | 0.4571 |

SDQ PB

# ANCOVA SDQ_PB

SDQ_PB_lt_anc = aov_ez("ID", "SDQ_PB", qn_data_T3, between = c("group", "time"), 
    covariate = c("SDQ_PB_T1"), observed = c("SDQ_PB_T1"), factorize = FALSE, anova_table = list(correction = "none", 
        es = "pes"))

pander(SDQ_PB_lt_anc$anova_table)
Anova Table (Type 3 tests)

| Effect | num Df | den Df | MSE | F | pes | Pr(>F) |
|--------|--------|--------|-----|---|-----|--------|
| group | 1 | 77 | 2.562 | 0.7285 | 0.009372 | 0.396 |
| time | 1 | 77 | 2.562 | 0.7119 | 0.00916 | 0.4014 |
| SDQ_PB_T1 | 1 | 77 | 2.562 | 26.43 | 0.2555 | 0.000002016 |
| group:time | 1 | 77 | 2.562 | 0.9213 | 0.01182 | 0.3401 |

SDQ BP

# ANCOVA SDQ_Total

SDQ_total_lt_anc = aov_ez("ID", "SDQ_Total", qn_data_T3, between = c("group", "time"), 
    covariate = c("SDQ_Total_T1"), observed = c("SDQ_Total_T1"), factorize = FALSE, 
    anova_table = list(correction = "none", es = "pes"))

pander(SDQ_total_lt_anc$anova_table)
Anova Table (Type 3 tests)

| Effect | num Df | den Df | MSE | F | pes | Pr(>F) |
|--------|--------|--------|-----|---|-----|--------|
| group | 1 | 77 | 11 | 3.114 | 0.03887 | 0.08157 |
| time | 1 | 77 | 11 | 3.271 | 0.04075 | 0.07441 |
| SDQ_Total_T1 | 1 | 77 | 11 | 23.72 | 0.2355 | 0.000005812 |
| group:time | 1 | 77 | 11 | 0.1658 | 0.002149 | 0.685 |

Correlations

Overall correlation

load.Rdata(filename = "./data/erp_data.Rdata", "erp_data")
load.Rdata(filename = "./data/qn_data_itt.Rdata", "qn_data_itt")

# Exclude participants without sufficient EEG data
qn_data_itt = subset(qn_data_itt, !(ID %in% c("30", "54", "68", "69", "71", "72", "73")))

qn_data_itt$P3_amp_hap = erp_data$P3_amp_hap
qn_data_itt$P3_amp_neu = erp_data$P3_amp_neu
qn_data_itt$P3_amp_ang = erp_data$P3_amp_ang
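# Note (assumption): the assignments above rely on erp_data and qn_data_itt sharing the
# same participant order. If erp_data also carries an ID column, merging by ID would be
# a more robust alternative, e.g.:
# qn_data_itt = merge(qn_data_itt,
#                     erp_data[, c("ID", "P3_amp_hap", "P3_amp_neu", "P3_amp_ang")],
#                     by = "ID")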


qn_data_itt$P3_amp_hap_neu = qn_data_itt$P3_amp_hap - qn_data_itt$P3_amp_neu
qn_data_itt$P3_amp_hap_ang = qn_data_itt$P3_amp_hap - qn_data_itt$P3_amp_ang


# Define correlation pairs of interest
vars_P3_amp_hap_neu = data.frame(
    v1 = "P3_amp_hap_neu",
    v2 = c("EMK_EM_P_T2", "EMK_ER_P_T2", "EMK_ER_CH_T2", "SDQ_PB_T2", "SDQ_Total_T2"))

vars_P3_amp_hap_ang = data.frame(
    v1 = "P3_amp_hap_ang",
    v2 = c("EMK_EM_P_T2", "EMK_ER_P_T2", "EMK_ER_CH_T2", "SDQ_PB_T2", "SDQ_Total_T2"))

# Combine all correlation pairs
vars_P3_behav = rbind(vars_P3_amp_hap_neu, vars_P3_amp_hap_ang)

# Correlate pairs of interest

corrs_P3_behav = do.call(rbind, mapply(corrFunc, vars_P3_behav[, 1], vars_P3_behav[, 
    2], MoreArgs = list(data = qn_data_itt), SIMPLIFY = FALSE))

corrs_P3_behav = subset(corrs_P3_behav, select = -c(var1))

# Print table
pander(corrs_P3_behav)
| var1 | var2 | estimate | p.value | statistic |
|------|------|----------|---------|-----------|
| P3_amp_hap_neu | EMK_EM_P_T2 | 0.1987 | 0.1069 | 1.635 |
| P3_amp_hap_neu | EMK_ER_P_T2 | 0.04643 | 0.7091 | 0.3747 |
| P3_amp_hap_neu | EMK_ER_CH_T2 | 0.004374 | 0.972 | 0.03526 |
| P3_amp_hap_neu | SDQ_PB_T2 | 0.1955 | 0.1129 | 1.607 |
| P3_amp_hap_neu | SDQ_Total_T2 | -0.09241 | 0.457 | -0.7482 |
| P3_amp_hap_ang | EMK_EM_P_T2 | 0.08566 | 0.4907 | 0.6931 |
| P3_amp_hap_ang | EMK_ER_P_T2 | 0.01737 | 0.889 | 0.1401 |
| P3_amp_hap_ang | EMK_ER_CH_T2 | 0.1621 | 0.1899 | 1.325 |
| P3_amp_hap_ang | SDQ_PB_T2 | 0.05376 | 0.6657 | 0.434 |
| P3_amp_hap_ang | SDQ_Total_T2 | -0.1132 | 0.3616 | -0.9188 |
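
The helper corrFunc() is defined earlier in the full analysis script and is not shown in this section. A minimal sketch consistent with the columns reported above (estimate, p.value, statistic), assuming it simply wraps cor.test():

# Hypothetical sketch of corrFunc: Pearson correlation between two named columns,
# returned as a one-row data frame (the actual definition lives earlier in the script)
corrFunc = function(var1, var2, data) {
  res = cor.test(data[[var1]], data[[var2]], method = "pearson")
  data.frame(var1 = var1, var2 = var2,
             estimate = unname(res$estimate),
             p.value = res$p.value,
             statistic = unname(res$statistic),
             stringsAsFactors = FALSE)
}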

Correlation by group

Controls

load.Rdata(filename = "./data/erp_data.Rdata", "erp_data")
load.Rdata(filename = "./data/qn_data_itt.Rdata", "qn_data_itt")

# Exclude participants without sufficient EEG data
qn_data_itt = subset(qn_data_itt, !(ID %in% c("30", "54", "68", "69", "71", "72", "73")))

qn_data_itt$P3_amp_hap = erp_data$P3_amp_hap
qn_data_itt$P3_amp_neu = erp_data$P3_amp_neu
qn_data_itt$P3_amp_ang = erp_data$P3_amp_ang

qn_data_itt$P3_amp_hap_neu = qn_data_itt$P3_amp_hap - qn_data_itt$P3_amp_neu
qn_data_itt$P3_ang_hap_ang = qn_data_itt$P3_amp_hap - qn_data_itt$P3_amp_ang

# Select group
qn_data_itt = subset(qn_data_itt, group == "CG")


# Define correlation pairs of interest
vars_P3_amp_hap_neu = data.frame(
    v1 = "P3_amp_hap_neu",
    v2 = c("EMK_EM_P_T2", "EMK_ER_P_T2", "EMK_ER_CH_T2", "SDQ_PB_T2", "SDQ_Total_T2"))

vars_P3_ang_hap_ang = data.frame(
    v1 = "P3_ang_hap_ang",
    v2 = c("EMK_EM_P_T2", "EMK_ER_P_T2", "EMK_ER_CH_T2", "SDQ_PB_T2", "SDQ_Total_T2"))

# Combine all correlation pairs
vars_P3_behav = rbind(vars_P3_amp_hap_neu, vars_P3_ang_hap_ang)

# Correlate pairs of interest

corrs_P3_behav = do.call(rbind, mapply(corrFunc, vars_P3_behav[, 1], vars_P3_behav[, 
    2], MoreArgs = list(data = qn_data_itt), SIMPLIFY = FALSE))

corrs_P3_behav = subset(corrs_P3_behav, select = -c(var1))

# Print table
pander(corrs_P3_behav)
| var1 | var2 | estimate | p.value | statistic |
|------|------|----------|---------|-----------|
| P3_amp_hap_neu | EMK_EM_P_T2 | 0.2138 | 0.2247 | 1.238 |
| P3_amp_hap_neu | EMK_ER_P_T2 | -0.1501 | 0.3968 | -0.8589 |
| P3_amp_hap_neu | EMK_ER_CH_T2 | -0.1608 | 0.3636 | -0.9217 |
| P3_amp_hap_neu | SDQ_PB_T2 | 0.3412 | 0.0483 | 2.053 |
| P3_amp_hap_neu | SDQ_Total_T2 | 0.2707 | 0.1215 | 1.591 |
| P3_ang_hap_ang | EMK_EM_P_T2 | 0.0005148 | 0.9977 | 0.002912 |
| P3_ang_hap_ang | EMK_ER_P_T2 | -0.2183 | 0.2148 | -1.266 |
| P3_ang_hap_ang | EMK_ER_CH_T2 | 0.1018 | 0.5665 | 0.5792 |
| P3_ang_hap_ang | SDQ_PB_T2 | 0.01743 | 0.9221 | 0.0986 |
| P3_ang_hap_ang | SDQ_Total_T2 | 0.2139 | 0.2245 | 1.239 |

Zirkus Empathico

load.Rdata(filename = "./data/erp_data.Rdata", "erp_data")
load.Rdata(filename = "./data/qn_data_itt.Rdata", "qn_data_itt")

# Exclude participants without sufficient EEG data
qn_data_itt = subset(qn_data_itt, !(ID %in% c("30", "54", "68", "69", "71", "72", "73")))

qn_data_itt$P3_amp_hap = erp_data$P3_amp_hap
qn_data_itt$P3_amp_neu = erp_data$P3_amp_neu
qn_data_itt$P3_amp_ang = erp_data$P3_amp_ang

qn_data_itt$P3_amp_hap_neu = qn_data_itt$P3_amp_hap - qn_data_itt$P3_amp_neu
qn_data_itt$P3_ang_hap_ang = qn_data_itt$P3_amp_hap - qn_data_itt$P3_amp_ang

# Select group
qn_data_itt = subset(qn_data_itt, group == "TG")


# Define correlation pairs of interest
vars_P3_amp_hap_neu = data.frame(
    v1 = "P3_amp_hap_neu",
    v2 = c("EMK_EM_P_T2", "EMK_ER_P_T2", "EMK_ER_CH_T2", "SDQ_PB_T2", "SDQ_Total_T2"))

vars_P3_ang_hap_ang = data.frame(
    v1 = "P3_ang_hap_ang",
    v2 = c("EMK_EM_P_T2", "EMK_ER_P_T2", "EMK_ER_CH_T2", "SDQ_PB_T2", "SDQ_Total_T2"))

# Combine all correlation pairs
vars_P3_behav = rbind(vars_P3_amp_hap_neu, vars_P3_ang_hap_ang)

# Correlate pairs of interest

corrs_P3_behav = do.call(rbind, mapply(corrFunc, vars_P3_behav[, 1], vars_P3_behav[, 
    2], MoreArgs = list(data = qn_data_itt), SIMPLIFY = FALSE))

corrs_P3_behav = subset(corrs_P3_behav, select = -c(var1))

# Print table
pander(corrs_P3_behav)
| var1 | var2 | estimate | p.value | statistic |
|------|------|----------|---------|-----------|
| P3_amp_hap_neu | EMK_EM_P_T2 | 0.08857 | 0.624 | 0.4951 |
| P3_amp_hap_neu | EMK_ER_P_T2 | 0.08842 | 0.6246 | 0.4942 |
| P3_amp_hap_neu | EMK_ER_CH_T2 | 0.04739 | 0.7934 | 0.2642 |
| P3_amp_hap_neu | SDQ_PB_T2 | -0.03306 | 0.8551 | -0.1842 |
| P3_amp_hap_neu | SDQ_Total_T2 | -0.2598 | 0.1443 | -1.498 |
| P3_ang_hap_ang | EMK_EM_P_T2 | -0.05831 | 0.7472 | -0.3252 |
| P3_ang_hap_ang | EMK_ER_P_T2 | 0.02507 | 0.8899 | 0.1396 |
| P3_ang_hap_ang | EMK_ER_CH_T2 | 0.1003 | 0.5785 | 0.5615 |
| P3_ang_hap_ang | SDQ_PB_T2 | -0.1495 | 0.4064 | -0.8417 |
| P3_ang_hap_ang | SDQ_Total_T2 | -0.2502 | 0.1602 | -1.439 |

Session info

# Get session info
sessionInfo()
R version 4.0.2 (2020-06-22)
Platform: x86_64-w64-mingw32/x64 (64-bit)
Running under: Windows 10 x64 (build 18362)

Matrix products: default

locale:
[1] LC_COLLATE=German_Germany.1252  LC_CTYPE=German_Germany.1252   
[3] LC_MONETARY=German_Germany.1252 LC_NUMERIC=C                   
[5] LC_TIME=German_Germany.1252    

attached base packages:
[1] stats     graphics  grDevices utils     datasets  methods   base     

other attached packages:
 [1] sjmisc_2.8.6        sjstats_0.18.1      sjPlot_2.8.7       
 [4] rstatix_0.7.0       RColorBrewer_1.1-2  psych_2.1.3        
 [7] plyr_1.8.6          ppcor_1.1           MASS_7.3-53.1      
[10] pander_0.6.3        jmv_1.2.23          ggpubr_0.4.0       
[13] GGally_2.1.1        foreign_0.8-81      esci_0.1.1         
[16] emmeans_1.5.5-1     eegUtils_0.5.0      corrplot_0.84      
[19] car_3.0-10          carData_3.0-4       broom_0.7.6        
[22] afex_0.28-1         lme4_1.1-26         Matrix_1.3-2       
[25] tadaatoolbox_0.17.0 miceadds_3.11-6     mice_3.13.0        
[28] gtsummary_1.4.0     expss_0.10.7        eeptools_1.2.4     
[31] XLConnect_1.0.3     forcats_0.5.1       stringr_1.4.0      
[34] dplyr_1.0.5         purrr_0.3.4         readr_1.4.0        
[37] tidyr_1.1.3         tibble_3.1.1        ggplot2_3.3.3      
[40] tidyverse_1.3.1     kableExtra_1.3.4   

loaded via a namespace (and not attached):
 [1] estimability_1.3     R.methodsS3_1.8.1    coda_0.19-4         
 [4] knitr_1.32           multcomp_1.4-16      R.utils_2.10.1      
 [7] data.table_1.14.0    rpart_4.1-15         generics_0.1.0      
[10] cowplot_1.1.1        TH.data_1.0-10       commonmark_1.7      
[13] proxy_0.4-25         future_1.21.0        webshot_0.5.2       
[16] xml2_1.3.2           lubridate_1.7.10     httpuv_1.5.5        
[19] assertthat_0.2.1     viridis_0.6.0        xfun_0.22           
[22] hms_1.0.0            jquerylib_0.1.3      rJava_0.9-13        
[25] evaluate_0.14        promises_1.2.0.1     fansi_0.4.2         
[28] dbplyr_2.1.1         readxl_1.3.1         DBI_1.1.1           
[31] tmvnsim_1.0-2        htmlwidgets_1.5.3    reshape_0.8.8       
[34] ellipsis_0.3.1       backports_1.2.1      signal_0.7-6        
[37] bookdown_0.21        insight_0.13.2       jmvcore_1.2.23      
[40] vctrs_0.3.7          sjlabelled_1.1.7     abind_1.4-5         
[43] withr_2.4.2          checkmate_2.0.0      vcd_1.4-8           
[46] mnormt_2.0.2         svglite_2.0.0        cluster_2.1.2       
[49] lazyeval_0.2.2       crayon_1.4.1         labeling_0.4.2      
[52] pkgconfig_2.0.3      nlme_3.1-152         vipor_0.4.5         
[55] nnet_7.3-15          rlang_0.4.10         globals_0.14.0      
[58] lifecycle_1.0.0      miniUI_0.1.1.1       sandwich_3.0-0      
[61] modelr_0.1.8         cellranger_1.1.0     matrixStats_0.58.0  
[64] lmtest_0.9-38        boot_1.3-27          zoo_1.8-9           
[67] reprex_2.0.0         base64enc_0.1-3      beeswarm_0.4.0      
[70] png_0.1-7            viridisLite_0.4.0    ini_0.3.1           
[73] parameters_0.13.0    rootSolve_1.8.2.1    shinydashboard_0.7.1
 [ reached getOption("max.print") -- omitted 90 entries ]