#Based on simcomprep_radar_goodfit.R
# (which allows looping through parameter values but is less user-friendly)
#------------------------------------------------------------------------------------------
options(digits=3)
library(reshape)
library(psych) #has phi function
require(fmsb)  #radarchart
#------------------------------------------------------------------------------------------
op <- par(mar=c(1, 2, 2, 1), mfrow=c(2, 2)) #create 2 x 2 grid for output, one panel per run

#Data to simulate: these values are specified in row 3 of the radar data frame (myradar)
# The means (SDs) out of 28 for each combination were as follows:
#   Repetition, single propositional = 26.3 (1.90)
#   Repetition, bi-clausal = 24.9 (2.69)
#   Comprehension, single propositional = 22.9 (2.62)
#   Comprehension, bi-clausal = 17.6 (4.80)
# Rank order rho for constructions = .95
# Correlation between subjects for R and C = .08
# Phi coefficient distribution by items: mean phi across subjects = .140
#------------------------------------------------------------------------------------------
# This version of the program just runs 4 times with different combinations of parameters
# and creates the corresponding radar plots, A, B, C and D.
# demomat holds the values for each run in one row
demomat=matrix(c(95,60,100, 60,95,100, 60,60,100, 95,95,60),nrow=4,byrow=TRUE) #values of loPi, loPs and loPc for each run
#------------------------------------------------------------------------------------------
for (figrun in 1:4){
#------------------------------------------------------------------------------------------
  #Difficulty range for the 7 constructions
  loPi=demomat[figrun,1]/100 #lower value of range for Pi
  hiPi=1                     #upper value of range for Pi - this is currently held constant
  incPi=(hiPi-loPi)/6        #increment allows each construction to differ in equal steps
  Pi=seq(loPi,hiPi,by=incPi) #this gives the range of difficulty for constructions
  #For the harder version, amount to subtract from Pi, first for easy then for hard items
  hardsub=c(0,.1)            #subtract .1 for hard items
#------------------------------------------------------------------------------------------
  #Simulate 30 subjects; Ps indicates their individual language mastery
  loPs=demomat[figrun,2]/100 #lower value in range of Ps
  hiPs=1
  incPs=(hiPs-loPs)/29
  Ps=seq(loPs,hiPs,by=incPs) #range of values for Ps on this run
#------------------------------------------------------------------------------------------
  #For 30 subjects, comprehension-specific ability
  loPc=demomat[figrun,3]/100 #lower value in range of Pc (if set to 1, then Pc has no effect)
  hiPc=1
  Pc=runif(30,loPc,hiPc)     #randomly determined rather than a seq, i.e. uncorrelated with Ps
  hardsub2=c(0,.2)           #applies to comp hard items only; subtracted from Pc for hard items
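#------------------------------------------------------------------------------------------
  #Worked example (illustration only, using run A, where demomat[1,]=c(95,60,100), so loPi=.95,
  #loPs=.60 and loPc=1, meaning Pc stays at 1 and puts no extra limit on comprehension):
  #in the item loop below, the probability of a correct repetition response is (Pi-hardsub)*Ps,
  #e.g. Pi=.95, hardsub=.1, Ps=.80 gives .85*.80=.68; for comprehension this is further
  #multiplied by Pc (with hardsub2 subtracted from Pc for hard items whenever loPc<1).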
#------------------------------------------------------------------------------------------
  #myc will hold 0 and 1 values corresponding to individual items for individual children
  myc=array(data=NA,dim=c(2,7,2,4,30),
            dimnames=list(c("Rep","Comp"),
                          c("ClauseT1","ClauseT2","ClauseT3","ClauseT4","ClauseT5","ClauseT6","ClauseT7"),
                          c("Easy","Hard"),
                          c("i1","i2","i3","i4"),
                          c(1:30)))
  nrun=100 #can vary this to get more precise estimates with more runs
  #bigmat will store the summary data from each run
  bigmat=matrix(data=NA, nrow=nrun, ncol=16, byrow=FALSE)
  mynames=c("thisrun","loPi","hiPi","hardsub","hardsub2","loPs","hiPs","loPc","hiPc",
            "meanRE","meanRH","meanCE","meanCH","rankcor","subcor","meanphi")
  colnames(bigmat)=mynames
#-------------------------------------------------------------------------
  #START RUNS HERE
  for (thisrun in 1:nrun){
    for (i in 1:2){            #repetition vs comprehension
      for (j in 1:7){          #7 structures
        for (k in 1:2){        #easy vs hard
          for (ii in 1:4){     #4 items per condition
            for (l in 1:30){   #30 subjects
              myp=(Pi[j]-hardsub[k])*Ps[l] #use Pi and Ps to compute probability of correct
              thispc=Pc[l] #if the value is 1 it has no effect
              if ((i==2)&&(loPc<1)){thispc=thispc-hardsub2[k]} #if there is variance in Pc, subtract hardsub2 for harder items
              if (i==2){myp=myp*thispc} #Pc included for comp only
              myrand=runif(1,0,1) #random number from 0 to 1 to interpret probabilities
              thisc=0
              if (myrand<myp){thisc=1} #NB for comp, guessing alone would give correct on .25 of trials
              myc[i,j,k,ii,l]=thisc
            }
          }
        }
      }
    }
#-----------------------------------------------------------------------------------------------
    #Now reorganise data and compute statistics on the simulated data
    #Correlation between all R and all C
    RbyS=apply(myc,c(1,5),sum)      #total sum for R and C by subjects
    RbyEbyS=apply(myc,c(1,3,5),sum) #total sum for R and C by difficulty and subjects
    subcor=cor(RbyS[1,],RbyS[2,])   #correlation of R and C, across subjects
    R4byS=cbind(RbyEbyS[1,1,],RbyEbyS[1,2,],RbyEbyS[2,1,],RbyEbyS[2,2,]) #row=subject and 4 cols for totals in 4 conditions
    R2byCE=apply(myc,c(1,2,3),sum)  #Rep/Comp in rows; clause totals in columns, with easy/hard in 3rd dimension
    R2byC=cbind(R2byCE[,,1],R2byCE[,,2]) #Rep/Comp in rows; clause totals in 14 columns with easy first, then hard
    R2byCrank=rbind(rank(R2byC[1,]),rank(R2byC[2,])) #14 constructions ranked for Rep (row 1) and Comp (row 2)
    rankcor=cor(R2byCrank[1,],R2byCrank[2,])
    meanI=apply(RbyEbyS,c(1,2),mean) #means for easy/hard
#-----------------------------------------------------------------------------------------------
    #Compute phi coefficient for each subject
    myphi=rep(0,30) #initialise to zero
    for (mysub in 1:30){
      mytot=rep(0,4) #4 cells to hold the rep/comp combined score possibilities for one subject
      for (j in 1:7){      #7 structures
        for (k in 1:2){    #easy vs hard
          for (ii in 1:4){ #items
            dualscore=10*myc[1,j,k,ii,mysub]+myc[2,j,k,ii,mysub] #converts to 11, 10, 01, 00
            mycell=1
            if (dualscore==10){mycell=2}
            if (dualscore==1){mycell=3}
            if (dualscore==0){mycell=4}
            mytot[mycell]=mytot[mycell]+1 #for the 56 items, counts how many are 11, 10, 01 and 00 for this subject
          }
        }
      }
      myphi[mysub]=phi(mytot,digits=3)
    }
    meanphi=mean(myphi)
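    #For comparison with the target data listed at the top: meanI holds the four condition
    #means out of 28, rankcor corresponds to the rank-order rho for constructions (.95),
    #subcor to the correlation between subjects for R and C (.08), and meanphi to the mean
    #phi across subjects (.140)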
#------------------------------------------------------------------------------------------------
    # Save parameters and summary results for each run in one row of bigmat
    bigmat[thisrun,1]=thisrun
    bigmat[thisrun,2]=loPi
    bigmat[thisrun,3]=hiPi
    bigmat[thisrun,4]=hardsub[2]
    bigmat[thisrun,5]=hardsub2[2]
    bigmat[thisrun,6]=loPs
    bigmat[thisrun,7]=hiPs
    bigmat[thisrun,8]=loPc
    bigmat[thisrun,9]=hiPc
    bigmat[thisrun,10]=meanI[1,1]
    bigmat[thisrun,11]=meanI[1,2]
    bigmat[thisrun,12]=meanI[2,1]
    bigmat[thisrun,13]=meanI[2,2]
    bigmat[thisrun,14]=rankcor
    bigmat[thisrun,15]=subcor
    bigmat[thisrun,16]=meanphi
  } #end of thisrun loop

  # Write data to the default (working) folder as tab-separated text
  write.table(bigmat, file = "mybigmat.txt", append = TRUE,
              sep = "\t", eol = "\n", na = "NA",
              row.names = FALSE, col.names = TRUE)
#------------------------------------------------------------------------------------------------
  #Now do the radar plot: see http://www.inside-r.org/packages/cran/fmsb/docs/radarchart
  #Start by specifying the max, min and obtained value for each dimension, then the simulated value
  myradar <- data.frame(
    Structure_correl=c(1, -.1, .95, mean(bigmat[,14])),
    RepEasy=c(28, 14, 26.3, mean(bigmat[,10])),
    RepHard=c(28, 14, 24.9, mean(bigmat[,11])),
    CompEasy=c(28, 14, 22.9, mean(bigmat[,12])),
    CompHard=c(28, 14, 17.6, mean(bigmat[,13])),
    Item_phi=c(1, -.1, .14, mean(bigmat[,16])),
    Child_correl=c(1, -.1, .08, mean(bigmat[,15]))) #will look OK if exported as pdf 12 x 9 portrait
  colnames(myradar)=c("Correlation by structure","Rep. Easy","Rep. Hard","Comp. Easy","Comp. Hard","Items phi","Correlation by child")
  mycolor=c("black","red")
  mytitle=chartr("1234","ABCD",figrun) #neat way to specify correspondences and select one
  radarchart(myradar[1:4,], axistype=2, pcol=mycolor, pty=32, plty=c(1,1), plwd=c(2,1), seg=4,
             pdensity=c(0, 50), pangle=c(10, 45), pfcol=mycolor,
             axislabcol=1, cglcol="black", palcex=0.75, title=mytitle)
} #end of figrun loop
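
#Optionally restore the graphics parameters that were saved in op at the start
par(op)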