## code to reproduce Table 4
## THIS FILE: SIMEX simulations for n=1000
library(splines)
set.seed(6053)
options(width = 150)
sessionInfo()

objfile <- "~/simex-vfunc/code-files-for-psychometrika-paper-aug2016/code-for-table4-n1000.Robj"

################################################
## function for deconvolution via NPMLE
################################################

npmle_deconv <- function(W, csem, xmin = -5, xmax = +5, ngrid = 2000, lambda = 0.0005,
                         lltol = 1e-7, psmall = 0.00005, discrete = FALSE, quietly = FALSE){
  ## Nonparametric MLE of the distribution of the error-free variable "X" given the
  ## error-prone variable "W", where W = X + U and U|X ~ N(0, csem^2(X)).
  ##
  ## Uses the algorithm described in Rabe-Hesketh et al (2003). "Correcting for
  ## covariate measurement error in logistic regression using nonparametric
  ## maximum likelihood estimation." Statistical Modelling 3: 215-232.
  ##
  ## INPUTS:
  ##   W:     vector of observed scores (no NAs allowed)
  ##   csem:  function of a single variable x returning the conditional standard
  ##          deviation of W|X=x; must accept vector input and return vector output
  ##   xmin, xmax, ngrid: define the grid over which the X distribution is calculated.
  ##          NOTE: the default (xmin, xmax) are appropriate only for a standardized
  ##          score; they will need to be changed for typical test-score scales.
  ##   lambda: step in the RH algorithm, on the probability scale (default chosen
  ##          from simulation experience)
  ##   lltol: stop when no candidate step improves the log likelihood by more than lltol
  ##   psmall: if a support point evolves to have mass less than psmall, drop it
  ##   discrete: reserved for a future version; currently raises an error
  ##   quietly: if FALSE, print iteration-by-iteration output
  ##
  ## VALUE:
  ##   list(.history = list of successive fitted distributions (one data frame per
  ##                   iteration with support points, masses, log likelihood, mean
  ##                   and variance of X),
  ##        px       = data frame summarizing the final distribution)
  stopifnot( all(!is.na(W)) && is.function(csem) && (xmin < xmax) && (lambda > 0) && (lltol > 0) )
  if(discrete){
    stop("discrete option to be implemented in future version")
  }
  if(min(W) < xmin){
    stop("xmin exceeds smallest data value, consider decreasing xmin")
  }
  if(max(W) > xmax){
    stop("largest data value exceeds xmax; consider increasing xmax")
  }

  ## NOTE: we collapse over repeated W values when possible, using "counts" so each
  ## unique value contributes to the likelihood with the appropriate multiplicity.
  .W <- sort(unique(W))
  nW <- length(.W)
  .counts <- vapply(.W, function(x){ sum(W == x) }, numeric(1))
  stopifnot(sum(.counts) == length(W))
  W <- .W

  ## Map a probability vector to a reduced-dimension unconstrained scale (multinomial
  ## logit with the last category as reference), and its inverse.
  theta_to_p <- function(theta){
    if(length(theta) == 0){
      return(1)
    } else {
      p <- exp(theta) / (1 + sum(exp(theta)))
      return(c(p, 1-sum(p)))
    }
  }

  p_to_theta <- function(p){
    stopifnot( all(p > 0) && (abs((1 - sum(p))) < 1e-9) )
    if(length(p) == 1){
      return(numeric(0))
    } else {
      last <- length(p)
      return(log(p[1:(last-1)] * (1 + ((1-p[last])/p[last])) ))
    }
  }

  ## CHECKS (round-trip sanity checks, left for reference):
  ## p <- c(0.1, 0.2, 0.4, 0.3); print(ma(p - theta_to_p(p_to_theta(p))))
  ## theta <- c(-3.2, 1.0, -1.0); print(ma(theta - p_to_theta(theta_to_p(theta))))
  ## print(ma(1 - theta_to_p(p_to_theta(1))))
  ## p_to_theta(theta_to_p(numeric(0)))

  ## Build the (nW x ngrid) matrix fwx of conditional densities (discrete == FALSE)
  ## or conditional probabilities (discrete == TRUE): fwx[i,j] = f(W[i] | X = grid[j]).
  grid <- seq(from = xmin, to = xmax, length.out = ngrid)
  grid.csem <- csem(grid)
  if(!discrete){
    fwx <- dnorm( matrix(W, ncol = ngrid, nrow = nW),
                  mean = matrix(grid, ncol = ngrid, nrow = nW, byrow = TRUE),
                  sd = matrix(grid.csem, ncol = ngrid, nrow = nW, byrow = TRUE))
  } else {
    ## NOTE: unreachable for now (discrete triggers stop() above); relies on a
    ## helper pxgu() not defined in this file -- TODO confirm before enabling.
    tmp <- lapply(grid, function(u){ pxgu(u, csem(u), .W)})
    stopifnot(all(sapply(tmp, function(x){ all(x[,1] == .W) })))
    fwx <- do.call("cbind", lapply(tmp, function(x){ x[,2] }))
  }

  ## Negative log likelihood for a set of probabilities given ".inds", the currently
  ## active indices of "grid" (continually updated in the enclosing environment).
  ## NOTE: for extreme theta this can emit "NaNs produced" warnings from log();
  ## optim(BFGS) recovers, so the numeric behavior is left unchanged here.
  negll <- function(theta){
    -sum(.counts * log(as.vector(fwx[,.inds] %*% theta_to_p(theta))))
  }

  ## Find the initial best single grid point. NOTE this will often want the X with
  ## the biggest CSEM, because a single point is inconsistent with most W unless
  ## the CSEM is large; even if it picks an extreme starting point, that point is
  ## dropped later if it ends up with negligible mass.
  ll <- apply(fwx, 2, function(x){ sum(.counts * log(x)) })
  .inds <- which.max(ll)
  .probs <- 1
  .eligible <- setdiff(seq_len(ngrid), .inds)
  ll.current <- ll[.inds]

  .history <- list()
  .history[[1]] <- data.frame(x = grid[.inds], csem = grid.csem[.inds], p = 1, ll = ll.current, ex = grid[.inds], varx = 0)

  ## Add grid points per the algorithm until no candidate improves the likelihood.
  done <- FALSE
  while(!done){
    ## evaluate each eligible point, mixed in with weight "lambda"
    part.static <- as.vector(fwx[,.inds,drop=FALSE] %*% ((1 - lambda)*.probs))
    part.total <- matrix(part.static, ncol=length(.eligible), nrow=nW) + (fwx[,.eligible] * lambda)
    ll.candidate <- apply(part.total, 2, function(x){ sum(.counts * log(x)) })
    if(all(ll.candidate - ll.current < lltol)){
      done <- TRUE
    } else {
      w <- which.max(ll.candidate - ll.current)
      .inds <- c(.inds, .eligible[w])
      .eligible <- setdiff(.eligible, .eligible[w])

      ## starting value: mass 0.05 at the new point, renormalized mass on the rest
      o <- optim(p_to_theta(c(0.95*(.probs/sum(.probs)), 0.05)), negll, method = "BFGS",
                 control = list(maxit = 1000, trace = 5*(1 - as.numeric(quietly)), REPORT = 1))
      .probs <- theta_to_p(o$par)
      ll.current <- -o$value

      ## an earlier grid point may now carry virtually no probability: drop it
      w <- which(.probs < psmall)
      if(length(w) > 0){
        .probs <- .probs[-w]
        .probs <- .probs/sum(.probs)
        .inds <- .inds[-w]
      }

      ## keep the support sorted by grid position
      o <- order(.inds)
      .inds <- .inds[o]
      .probs <- .probs[o]

      ## summarize the current state of the fitted distribution
      .history[[length(.history)+1]] <- data.frame(x = grid[.inds],
                                                   csem = grid.csem[.inds],
                                                   p = .probs,
                                                   ll = ll.current,
                                                   ex = sum(grid[.inds] * .probs),
                                                   varx = sum(grid[.inds]^2 * .probs) - (sum(grid[.inds] * .probs))^2)
      if(!quietly){
        print(.history[[length(.history)]])
        cat("\n\n\n")
      }
    }
  }
  return(list(.history = .history, px = .history[[length(.history)]]))
}
## Inverse logit (logistic) function.
expit <- function(x){
  v <- exp(x)
  v / (1 + v)
}

################################################
## initial work on CSEM function from real data
################################################

## define CSEM function corresponding to Figure 1:
case <- data.frame(score = c(232,236,239,242,245,248,251,253,256,258,260,263,265,267,269,271,273,275,277,279,281,283,285,286,288,290,292,294,296,298,300,302,304,306,309,311,313,316,318,321,324,327,330,333,337,341,350,351,357,365,376,393,423),
                   csem = c(10,9,9,9,8,8,8,8,8,8,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,8,8,8,8,8,9,9,9,10,10,12,12,13,15,18,25,45))
case$cvar <- case$csem^2

## interpolate the variance function; flat extrapolation outside the data range
varfunc <- approxfun(x = case$score, y = case$cvar, rule = 2)
## check:
## x <- seq(from=0.8*min(case$score), to = 1.2*max(case$score), length=1000)
## plot(x, varfunc(x), type="l")
## points(case$score, case$cvar, col="blue")

## using raw achievement data (which cannot be posted) and deconvolution methods
## we estimated X in the population had mean 334.3 and standard deviation 32.8:
mux <- 334.3
sdx <- 32.8

################################################
## use Monte Carlo methods to approximate A(W)
################################################
## Simulate many (X, W) pairs on a grid of X, regress the true variance target on a
## spline basis in W, and interpolate the fitted values to obtain A1(w).
xgrid <- seq(from = min(case$score), to = max(case$score), length.out = 200)
nreps <- 30000
mc <- data.frame(x = rep(xgrid, each = nreps))
mc$target <- varfunc(mc$x)

mc$w <- mc$x + rnorm(nrow(mc), mean = 0, sd = sqrt(mc$target))
basis <- bs(mc$w, df = 10, intercept = TRUE)
colnames(basis) <- paste0("phi", colnames(basis))

avg <- aggregate(basis, by = list(x = mc$x), FUN = mean)
avg <- merge(avg, unique(mc[, c("x", "target")]), all.x = TRUE)

vmod <- lm(as.formula(paste("target ~", paste(colnames(basis), collapse = " + "), "-1")), data = avg)
mc$fit <- basis %*% coef(vmod)

A1 <- approxfun(x = mc$w, y = mc$fit, rule = 2)
################################################
## SIMEX simulation
################################################
n <- 1000                                          # cases per simulated data set
nsim <- 500                                        # simulation replications
B <- 250                                           # SIMEX Monte Carlo draws per lambda
lambda <- seq(from = 0, to = 2, length.out = 20)   # SIMEX lambda grid
nlambda <- length(lambda)
nparam <- 1                                        # regression parameters tracked

bigres <- vector("list", nsim)

for(simi in seq_len(nsim)){

  ## generate true scores
  d <- data.frame(x1 = rnorm(n, mean = mux, sd = sdx))
  d$x1std <- (d$x1 - mux) / sdx

  ## generate binary response with true slope -1.5 on the standardized scale
  d$ps <- expit(0.0 - 1.5*d$x1std)
  d$R <- as.numeric(runif(n) < d$ps)

  ## generate W according to varfunc
  g1 <- varfunc
  d$w1 <- d$x1 + rnorm(n, sd = sqrt(g1(d$x1)))

  ## deconvolution estimate of the distribution of X; rounded W speeds this up
  d$w1round <- round(d$w1, digits = 1)
  pxhat <- npmle_deconv(d$w1round, csem = function(x){ sqrt(g1(x)) },
                        xmin = min(d$w1round), xmax = max(d$w1round), quietly = TRUE)$px

  ## estimated average measurement error variance: var(W) minus estimated var(X)
  d$egx <- var(d$w1) - unique(pxhat$varx)

  ## posterior mean of fun(X) given W=w under the deconvolution estimate
  postmean <- function(w, fun){
    pxw <- dnorm(w, mean = pxhat$x, sd = sqrt(g1(pxhat$x))) * pxhat$p
    pxw <- pxw / sum(pxw)
    sum(fun(pxhat$x) * pxw)
  }
  d$exgivenw  <- vapply(d$w1, postmean, numeric(1), fun = identity)  # E[X | W]
  d$egxgivenw <- vapply(d$w1, postmean, numeric(1), fun = g1)        # E[g(X) | W]

  ## logistic regression slope of R on the standardized covariate values w
  esty_vec <- function(w){
    dd <- data.frame(R = d$R, reg.x1 = (w - mux) / sdx)
    coef(glm(R ~ reg.x1, data = dd, family = "binomial"))[2]
  }
  ## same, selecting the covariate by column name of d
  esty <- function(whatcov){
    stopifnot(whatcov %in% names(d))
    esty_vec(d[, whatcov])
  }

  ## One SIMEX simulation stage: vvec is the per-case measurement-error variance
  ## (recycled if scalar). Returns a one-column data frame of the mean slope
  ## estimate at each lambda. The rnorm call order matches the original
  ## copy-pasted loops exactly, so the RNG stream (and results) are unchanged.
  run_simex <- function(vvec){
    out <- numeric(nlambda)
    for(.l in seq_len(nlambda)){
      tmp <- numeric(B)
      for(.b in seq_len(B)){
        tmp[.b] <- esty_vec(d$w1 + rnorm(n, sd = sqrt(lambda[.l] * vvec)))
      }
      out[.l] <- mean(tmp)
    }
    data.frame(b1 = out)
  }

  rL.gx    <- run_simex(g1(d$x1))        # infeasible: unobserved true variance per case
  rL.avggx <- run_simex(d$egx)           # estimated average ME, ignoring heteroskedasticity
  rL.avggw <- run_simex(mean(g1(d$w1)))  # naive estimate of average ME variance
  rL.gw    <- run_simex(g1(d$w1))        # g1(W) plug-in
  rL.gexw  <- run_simex(g1(d$exgivenw))  # g1(E[X|W])
  rL.aw    <- run_simex(A1(d$w1))        # A(W)
  rL.rcal  <- run_simex(d$egxgivenw)     # E[g(X)|W]

  ############################################################################
  ## collect estimates
  ############################################################################
  res.ideal <- data.frame(method = "ideal", b1 = esty("x1"), stringsAsFactors=FALSE)
  names(res.ideal) <- c("method","b1")

  res.naive <- data.frame(method = "naive", b1 = esty("w1"), stringsAsFactors=FALSE)
  names(res.naive) <- c("method","b1")

  res.regcal <- data.frame(method = "regcalib", b1 = esty("exgivenw"), stringsAsFactors=FALSE)
  names(res.regcal) <- c("method","b1")

  ## NOTE: list names ("rL.gx", ...) become rownames downstream and are parsed
  ## into method labels, so they must keep the "rL.<tag>" form.
  proj <- list(rL.gx = rL.gx,
               rL.avggx = rL.avggx,
               rL.avggw = rL.avggw,
               rL.gw = rL.gw,
               rL.gexw = rL.gexw,
               rL.aw = rL.aw,
               rL.rcal = rL.rcal)

  ## SIMEX extrapolation: fit the polynomial fml in lambda and evaluate at
  ## lambda = -1 (the first point of the synthetic grid).
  extrapolate <- function(x, fml){
    e <- rep(0, nparam)
    for(i in seq_len(nparam)){
      ex <- data.frame(lam = lambda, e = x[,i])
      synth <- data.frame(lam = seq(from = -1, to = 2, length.out = 1000))
      m <- lm(fml, data = ex)
      synth$ehat <- predict(m, newdata = synth)
      e[i] <- synth$ehat[1]   # prediction at lam = -1
    }
    e
  }

  ## turn the stacked extrapolations into a (method, b1) data frame
  label_methods <- function(res, prefix){
    res <- as.data.frame(res)
    names(res) <- c("b1")
    res$method <- paste(prefix, sapply(strsplit(rownames(res),"\\."), function(x){ x[2] }), sep="_")
    res[, c("method","b1")]
  }

  ## quadratic projections
  res.rest <- label_methods(do.call("rbind", lapply(proj, extrapolate,
                                                    fml = e ~ lam + I(lam^2))), "simex2")

  ## quartic projections
  res.rest4 <- label_methods(do.call("rbind", lapply(proj, extrapolate,
                                                     fml = e ~ lam + I(lam^2) + I(lam^3) + I(lam^4))), "simex4")

  ## put the pieces together
  res <- rbind(res.ideal, res.naive, res.regcal, res.rest, res.rest4)
  res$sim <- simi
  bigres[[simi]] <- list(res = res, projections = proj)
  cat(paste("simi:",simi,"\n"))
}

save(bigres, file=objfile, compress=TRUE)

## now restrict to the per-simulation coefficient summary
bigres <- do.call("rbind", lapply(bigres, function(x){ x$res }))

## per-method mean and sd of each estimate column
for(v in setdiff(names(bigres), c("method","sim"))){
  bigres[,paste(v,"mean",sep="_")] <- ave(bigres[,v], bigres$method, FUN=mean)
  bigres[,paste(v,"sd",sep="_")] <- ave(bigres[,v], bigres$method, FUN=sd)
}

print(sums <- unique(bigres[,c("method",names(bigres)[grep("_",names(bigres))])]), digits=3)
## true value is -1.5

proc.time()