Below, I discuss some R code illustrating how to implement the estimator of the partial area under the specificity-ROC curve (pAUC) proposed by Inácio de Carvalho et al. (2016), using a simulated data example. The code depends on the package fda.usc (ver. 1.2.3), so the first step in replicating the numerical experiments below is to install it by typing install.packages("fda.usc") in the R console. The code was written by Vanda Inácio de Carvalho (vanda.kinets@gmail.com).

Defining true functional covariate-adjusted pAUC and simulating data

Let’s start by computing the true values of the functional covariate-adjusted pAUC. For each covariate curve on a grid along the direction of the first eigenfunction, the induced specificity-ROC curve is integrated numerically over (u, 1) and normalised by 1 - u.

## R CODE FOR IMPLEMENTING OUR ESTIMATOR: A SIMULATED DATA EXAMPLE
require(fda.usc)
require(MASS)   ## for mvrnorm(), used below to simulate the covariate curves
## 1) TRUE VALUES
t <- seq(0, 1, by = 0.02)   ## grid on which the curves are evaluated
nt <- length(t)

## Mean function and eigenfunctions of the functional covariate
phi1 <- phi2 <- mu <- numeric(nt)

for(i in 1:nt) {
   phi1[i] <- sqrt(2) * sin(0.5 * pi * t[i])
   phi2[i] <- sqrt(2) * sin(1.5 * pi * t[i])
   mu[i] <- t[i] + sin(t[i])
}

phi1fd <- fdata(phi1, argvals = t)
phi2fd <- fdata(phi2, argvals = t)
beta <- phi1 + phi2   ## functional regression coefficient
betafd <- fdata(beta, argvals = t)

lambda1 <- 4   ## variances of the principal component scores
lambda2 <- 1
z <- seq(-2 * sqrt(lambda1), 2 * sqrt(lambda1), by = 0.5)   ## grid of scores along phi1
nz <- length(z)

## Covariate curves mu + z * phi1 at which the true pAUC is evaluated
grid <- matrix(0, nrow = nz, ncol = nt)
for(j in 1:nz) {
  for(i in 1:nt) {
     grid[j, i] <- mu[i] + z[j] * phi1[i]
  }
}
gridfd <- fdata(grid, argvals = t)

## True specificity-ROC curve at the covariate curve indexed by j, induced by
## the binormal model used to generate the responses below
rocspt <- function(p, j) {
    bx <- as.numeric(inprod.fdata(betafd, gridfd[j, ]))   ## scalar <beta, x_j>
    pnorm((3 + 1.5 * bx - 1.5 - bx) + 2 * qnorm(1 - p))
}
u <- 0   ## lower limit of the specificity range (u, 1)
pauct <- numeric(nz)
for(j in 1:nz)
   pauct[j] <- integrate(rocspt, j = j, lower = u, upper = 1)$value
avspt <- pauct / (1 - u)   ## normalised pAUC
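
To get a feel for how the true normalised pAUC varies along the direction of the first eigenfunction, one can plot avspt against the scores z. The snippet below is a quick visual check I added here; it is not part of the original code.

## Quick check (my addition): true normalised pAUC as a function of the score z
plot(z, avspt, type = "b", xlab = "z", ylab = "true pAUC / (1 - u)")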

Next, let’s simulate the functional covariate curves and the corresponding responses.

## 2) SIMULATE DATA
## Settings for simulating data
nsim <- 1000   ## number of simulated data sets
nd <- 100      ## diseased sample size
nnd <- 100     ## nondiseased sample size

## Simulate covariate curves
Xd <- array(0, c(nd, nt, nsim))
Xnd <- array(0, c(nnd, nt, nsim))
Xdfd <- Xndfd <- list()

set.seed(123)
for(j in 1:nsim) {
  Sigma <- exp(-0.2 * abs(outer(t, t, "-")))   ## exponential covariance kernel
  XGd <- mvrnorm(nd, mu = rep(0, nt), Sigma)   ## Gaussian-process deviations
    ## curve = mean + Gaussian-process deviation + PC scores times eigenfunctions
    for(i in 1:nd) {
        Xd[i,, j] <- mu + XGd[i, ] + rnorm(1, 0, sqrt(lambda1)) *
            phi1 + rnorm(1, 0, sqrt(lambda2)) * phi2
    }
  XGnd <- mvrnorm(nnd, mu = rep(0, nt), Sigma)
    for(l in 1:nnd) {
        Xnd[l,, j] <- mu + XGnd[l, ] + rnorm(1, 0, sqrt(lambda1)) *
            phi1 + rnorm(1, 0, sqrt(lambda2)) * phi2 
     }
  Xdfd[[j]] <- fdata(Xd[,, j], argvals = t)
  Xndfd[[j]] <- fdata(Xnd[,, j], argvals = t)
}

## Simulate responses
yd <- epsd <- matrix(0, nrow = nd, ncol = nsim)
ynd <- epsnd <- matrix(0, nrow = nnd, ncol = nsim)

for(j in 1:nsim) {
    epsd[, j] <- rnorm(nd, 0, 1)
    epsnd[, j] <- rnorm(nnd, 0, 1)
    ## Diseased responses: 3 + 1.5 <beta, X> + 2 * eps
    for(i in 1:nd) {
        yd[i, j] <- 3 + 1.5 * inprod.fdata(betafd, Xdfd[[j]][i, ]) +
            2 * epsd[i, j]
    }
    ## Nondiseased responses: 1.5 + <beta, X> + eps
    for(l in 1:nnd) {
        ynd[l, j] <- 1.5 + inprod.fdata(betafd, Xndfd[[j]][l, ]) + epsnd[l, j]
    }
}
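
As an informal check, one can glance at the simulated responses in the first replicate; the summaries and boxplot below are my own addition and are not part of the original code.

## Informal check (my addition): simulated responses in the first replicate
summary(yd[, 1])    ## diseased group
summary(ynd[, 1])   ## nondiseased group
boxplot(list(diseased = yd[, 1], nondiseased = ynd[, 1]), ylab = "test outcome")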

Here is what the covariate trajectories look like for the first simulated data set (i.e. Xdfd[[1]] and Xndfd[[1]]).
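
A minimal sketch of how such a figure can be produced with the plot method for fdata objects is given below; the panel titles, labels, and layout are my own choices.

## Plot the covariate trajectories of the first simulated data set (sketch)
par(mfrow = c(1, 2))
plot(Xdfd[[1]], main = "Diseased group", xlab = "t", ylab = "X(t)")
plot(Xndfd[[1]], main = "Nondiseased group", xlab = "t", ylab = "X(t)")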