# Script to analyse the shared data from Learning Words With Unfamiliar Orthography: The Role of Cognitive Abilities
# Script prepared by Dr Marie-Josee Bisson

library(ez); library(ggplot2); library(multcomp); library(nlme); library(pastecs); library(reshape)
library(lme4); library(sjPlot); library(sjmisc); library(MASS); library(interactions); library(effectsize); library(car)

setwd("~/OneDrive - De Montfort University/Lectureship_DMU/Predictors_Project_files/Data/R_analysis/Familiarity_analysis/For_Data_Sharing/Final")

# Load recall data file
filename = "Recall_data.txt"
recall_data = read.table(filename, header = TRUE, sep = "\t")

# Load stimulus info file; this file includes phono estimates from comparative judgment
filename = "Phono_output.txt"
item_info1 = read.table(filename, header = TRUE, sep = "\t")

# Load stimulus info file; this file includes ortho estimates from comparative judgment
filename = "Ortho_output.txt"
item_info2 = read.table(filename, header = TRUE, sep = "\t")

# Merge the two item info files
item_info = merge(item_info1, item_info2, by = c("itemNumber"))

# Merge item and recall data
data2 = merge(recall_data, item_info, by = c("itemNumber"))
head(data2)
nrow(data2)

# Load individual differences file
filename = "Predictors_data.txt"
predictors = read.table(filename, header = TRUE, sep = "\t")
data3 = merge(data2, predictors, by = c("participant"))
summary(data3)
head(data3)

# Check that no rows were lost in the merges
nrow(data3)
nrow(data2)
nrow(predictors)
nrow(recall_data)

data3$DV = data3$Accuracy_recalculated
data3$Z_vocab_Corr = scale(data3$vocab_Corr) # z-score transformed vocabulary score
summary(data3)

# Final model (for model building see below)
d.glmer = glmer(DV ~ memory*Ortho_estimate + phono*Ortho_estimate + Phono_estimate + ZRE_vocab_corr +
                  (1 + Ortho_estimate|participant) + (1 + ZRE_vocab_corr|itemNumber),
                data = data3, family = binomial, control = glmerControl(optimizer = "bobyqa"))
summary(d.glmer)

# Checking collinearity
vif(d.glmer)
mean(vif(d.glmer))
1/vif(d.glmer)

# Computing confidence intervals and extracting model coefficients
Odds <- exp(fixef(d.glmer)) # returns the odds ratios for each predictor
cc <- confint(d.glmer, parm = "beta_", method = "Wald") # confidence intervals of the estimates (log odds)
results <- summary(d.glmer)$coefficients # extract all the coefficients, z-values and p-values from the model
results_CI <- cbind(results, Odds, cc) # put it all in one table
write.csv(results_CI, file = "coefficients from final model.csv") # export the table

# Final model with unresidualised vocabulary test
d.glmer_UnRes2 = glmer(DV ~ memory*Ortho_estimate + phono*Ortho_estimate + Phono_estimate + Z_vocab_Corr +
                         (1 + Ortho_estimate|participant) + (1 + Z_vocab_Corr|itemNumber),
                       data = data3, family = binomial, control = glmerControl(optimizer = "bobyqa"))
summary(d.glmer_UnRes2)
vif(d.glmer_UnRes2)
mean(vif(d.glmer_UnRes2))
1/vif(d.glmer_UnRes2)

# Computing confidence intervals and extracting model coefficients
Odds <- exp(fixef(d.glmer_UnRes2)) # returns the odds ratios for each predictor
cc <- confint(d.glmer_UnRes2, parm = "beta_", method = "Wald") # confidence intervals of the estimates (log odds)
results <- summary(d.glmer_UnRes2)$coefficients # extract all the coefficients, z-values and p-values from the model
results_CI <- cbind(results, Odds, cc) # put it all in one table
write.csv(results_CI, file = "coefficients from final model_unRes2.csv") # export the table
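# Optional visualisation (not part of the original analysis): a sketch of the
# memory x Ortho_estimate interaction from the final model, using the
# interactions package already loaded above. interact_plot() accepts merMod
# objects; for a binomial model it plots predicted probabilities across the
# predictor at +/- 1 SD of the moderator by default.
interact_plot(d.glmer, pred = Ortho_estimate, modx = memory,
              interval = TRUE) # interval = TRUE adds confidence bands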
##### Previous models that did not converge or had singular fit #####

# Full model with interactions*Ortho_estimate and maximal random-effects structure
# Does it converge without an optimiser?
d.glmer2 = glmer(DV ~ memory*Ortho_estimate + phono*Ortho_estimate + Phono_estimate + ZRE_vocab_corr +
                   (1 + Ortho_estimate + Phono_estimate|participant) +
                   (1 + memory + phono + ZRE_vocab_corr|itemNumber),
                 data = data3, family = binomial)
summary(d.glmer2) # model does not converge

# Adding the optimizer
d.glmer3 = glmer(DV ~ memory*Ortho_estimate + phono*Ortho_estimate + Phono_estimate + ZRE_vocab_corr +
                   (1 + Ortho_estimate + Phono_estimate|participant) +
                   (1 + memory + phono + ZRE_vocab_corr|itemNumber),
                 data = data3, family = binomial, control = glmerControl(optimizer = "bobyqa"))
summary(d.glmer3) # fit is singular

# Rerunning without the random slope contributing the least amount of variance: phono by item
d.glmer4 = glmer(DV ~ memory*Ortho_estimate + phono*Ortho_estimate + Phono_estimate + ZRE_vocab_corr +
                   (1 + Ortho_estimate + Phono_estimate|participant) +
                   (1 + memory + ZRE_vocab_corr|itemNumber),
                 data = data3, family = binomial, control = glmerControl(optimizer = "bobyqa"))
summary(d.glmer4) # fit is singular

# Removing the next random slope with the least variance: Phono_estimate by participant
d.glmer5 = glmer(DV ~ memory*Ortho_estimate + phono*Ortho_estimate + Phono_estimate + ZRE_vocab_corr +
                   (1 + Ortho_estimate|participant) +
                   (1 + memory + ZRE_vocab_corr|itemNumber),
                 data = data3, family = binomial, control = glmerControl(optimizer = "bobyqa"))
summary(d.glmer5) # fit is still singular

# Removing the next random slope with the least variance: memory by item
d.glmer6 = glmer(DV ~ memory*Ortho_estimate + phono*Ortho_estimate + Phono_estimate + ZRE_vocab_corr +
                   (1 + Ortho_estimate|participant) +
                   (1 + ZRE_vocab_corr|itemNumber),
                 data = data3, family = binomial, control = glmerControl(optimizer = "bobyqa"))
summary(d.glmer6) # model converges; interactions not significant but all main effects are
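# Optional diagnostics (not part of the original analysis): isSingular()
# confirms a singular fit directly, and VarCorr() prints the estimated SDs of
# the random effects, which is the information that guided the stepwise removal
# of the slope contributing the least variance above. anova() then gives a
# likelihood-ratio comparison of the simplified model against the maximal one.
isSingular(d.glmer3)      # TRUE for the singular fits above
VarCorr(d.glmer3)         # variance components for each random slope
anova(d.glmer6, d.glmer3) # simplified vs maximal random-effects structure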