This is an example of running the simulations in the manuscript. Here we generate one simulated dataset from Weibull regression under the balanced design and scenario 1; the other simulation designs and scenarios can be run in the same way.
Please set your working directory to the location where the “Simulations” folder is saved, replacing “Your directory of ‘Simulations’ folder” in the code below with your own path:
setwd("Your directory of 'Simulations' folder")
your_directory <- getwd()
Please install the following packages if you haven’t:
#install.packages("survival")
#install.packages("BART")
#install.packages("randomForestSRC", repos = "https://cran.us.r-project.org")
#install.packages("tidyverse")
#install.packages("gbm")
# Alternatively, you can install the development version from GitHub:
#if (!requireNamespace("remotes")) {
# install.packages("remotes")
#}
#remotes::install_github("gbm-developers/gbm")
#install.packages("randomForest")
#install.packages("pec")
#install.packages("devtools")
#library(devtools)
#install_github("nchenderson/AFTrees")
We have generated one training dataset (train1.RData) and one test dataset (test.RData). Here we load them from the “Simulations” folder. Alternatively, you can generate your own training and test data using the commented code below:
# Load the training and test data that we have generated
load("prepared_simulation_data_example.RData") # loaded training data name: train1; loaded test data name: testdat
# example code for generating one training data
#source("simulation_design_survival_Weibull.R")
#train <- data_gen_censor(n=1000, p=10, PH=TRUE, censor = "30%", setting = 1, ex = "random",time.interest=15)
# example code for generating test data
#mydata <- data_gen_censor(n=10000, p=10, PH=TRUE, censor = "30%", setting = 1, ex = "random",time.interest=15)
If you load our prepared training and test data, please go to section 2 to run R-T, R-X, B-T and B-X.
If you generate training and test data using data_gen_censor() above, please prepare them with the following commented code so that D-T and D-X can be run in Python.
# training data
#train1 <- train$data
#save(train1,file = paste0(your_directory,"/train1.RData"))
# test data
#testdat <- cbind(mydata$data, mydata$true.diff,mydata$true1, mydata$true0)
#colnames(testdat) <- c("V1","V2","V3","V4","V5","V6","V7","V8","V9","V10","Treatment","Time","Event","true.diff","true1","true0")
#save(testdat,file = paste0(your_directory,"/test.RData"))
R-T, R-X, B-T, B-X and the Weibull true model are run in RStudio. Here we train each of them on one training dataset and predict the CATE on the test data as an example.
source("rsf_TXlearners.R")
source("baft_TXlearner_fun_util.R")
source("baft_TXlearners.R")
source("weibull_true.R")
library(survival)
library(BART)
library(randomForestSRC)
library(tidyverse)
library(gbm)
library(randomForest)
library(pec)
library(AFTrees)
# time.interest
tt=15
testdat1 <- testdat[,which(names(testdat)%in%c("V1","V2","V3","V4","V5","V6","V7","V8","V9","V10","Treatment","Time","Event"))]
# track the time cost of running each method once; the time costs of D-T and D-X were measured in Python
# R-T
ptm <- proc.time()
rsf_T<-rsf_HTE_T(data=train1,testdat=testdat1,time.interest=tt)
proc.time() - ptm
## user system elapsed
## 26.785 0.175 27.067
# R-X
ptm <- proc.time()
rsf_X<-rsf_HTE_X(data=train1,testdat=testdat1,ntrees=1000,time.interest=tt)
proc.time() - ptm
## user system elapsed
## 5.903 0.081 5.997
# B-T
ptm <- proc.time()
baft_T<-baft_HTE_T(data=train1,testdat=testdat1,time.interest=tt)
## number of trees: 500
## Prior:
## k: 2.000000
## power and base for tree prior: 2.000000 0.950000
## use quantiles for rule cut points: 1
## data:
## number of training observations: 491
## number of test observations: 10000
## number of explanatory variables: 10
##
##
## Cutoff rules c in x<=c vs x>c
## Number of cutoffs: (var: number of possible c):
## (1: 100) (2: 100) (3: 100) (4: 100) (5: 100)
## (6: 100) (7: 100) (8: 1) (9: 1) (10: 1)
##
##
##
## Running mcmc loop:
## iteration: 100 (of 1100)
## iteration: 200 (of 1100)
## iteration: 300 (of 1100)
## iteration: 400 (of 1100)
## iteration: 500 (of 1100)
## iteration: 600 (of 1100)
## iteration: 700 (of 1100)
## iteration: 800 (of 1100)
## iteration: 900 (of 1100)
## iteration: 1000 (of 1100)
## iteration: 1100 (of 1100)
## time for loop: 120
##
## Tree sizes, last iteration:
## 2 3 4 3 4 2 3 2 2 2 4 1 2 2 3 2 2 3 2 2
## 2 2 2 3 3 2 3 2 4 1 2 3 3 2 3 2 2 3 5 3
## 2 3 2 5 2 3 3 2 2 4 2 2 3 2 3 4 3 2 4 2
## 2 2 2 3 2 3 2 2 3 3 4 2 1 2 2 2 2 2 2 5
## 3 2 2 2 2 3 3 2 2 2 2 2 2 2 2 3 2 2 3 2
## 2 2 2 2 2 3 3 3 3 3 1 2 2 2 1 2 2 4 2 2
## 2 3 6 4 2 4 3 3 2 2 3 2 2 4 3 2 2 2 3 3
## 2 2 2 2 2 1 2 2 2 2 2 2 2 4 2 3 3 2 3 2
## 4 2 3 4 4 2 4 2 2 2 3 3 1 3 2 2 1 2 3 4
## 2 3 2 3 3 2 3 3 4 2 2 1 4 2 2 3 2 3 2 2
## 2 2 2 3 2 1 2 3 2 3 2 1 4 2 2 4 2 3 3 3
## 3 3 1 3 2 2 2 2 3 4 4 2 2 2 3 2 5 4 4 3
## 3 2 2 2 3 3 3 3 2 4 3 3 2 3 3 3 2 3 1 2
## 2 2 2 2 4 3 5 2 3 2 2 4 3 2 2 2 3 2 1 2
## 3 2 2 2 2 4 2 2 4 4 2 2 1 3 3 2 2 2 3 3
## 2 3 2 2 4 2 2 4 3 2 3 2 2 3 4 3 3 2 3 2
## 3 2 1 3 2 4 3 2 2 2 2 2 2 2 6 2 2 3 2 2
## 2 2 2 2 2 3 3 4 3 4 3 6 3 3 2 2 1 2 2 2
## 2 4 2 2 3 3 2 3 2 2 3 2 2 2 3 4 2 2 3 3
## 3 1 1 2 4 2 2 2 2 2 3 3 2 1 4 1 3 2 2 3
## 4 3 2 1 2 3 2 2 3 3 3 3 2 3 1 2 3 2 2 2
## 2 1 2 3 2 1 3 4 2 2 4 2 2 2 3 1 4 2 3 2
## 2 2 2 3 2 3 4 2 4 3 3 3 5 3 1 2 2 3 2 3
## 3 1 3 2 3 2 2 3 2 3 3 2 2 3 2 2 3 4 2 3
## 3 3 3 2 4 2 1 2 3 3 2 2 2 2 3 3 4 2 2 2
## Variable Usage, last iteration (var:count):
## (1: 27) (2: 49) (3: 53) (4: 48) (5: 30)
## (6: 37) (7: 38) (8: 156) (9: 170) (10: 147)
##
##
## number of trees: 500
## Prior:
## k: 2.000000
## power and base for tree prior: 2.000000 0.950000
## use quantiles for rule cut points: 1
## data:
## number of training observations: 509
## number of test observations: 10000
## number of explanatory variables: 10
##
##
## Cutoff rules c in x<=c vs x>c
## Number of cutoffs: (var: number of possible c):
## (1: 100) (2: 100) (3: 100) (4: 100) (5: 100)
## (6: 100) (7: 100) (8: 1) (9: 1) (10: 1)
##
##
##
## Running mcmc loop:
## iteration: 100 (of 1100)
## iteration: 200 (of 1100)
## iteration: 300 (of 1100)
## iteration: 400 (of 1100)
## iteration: 500 (of 1100)
## iteration: 600 (of 1100)
## iteration: 700 (of 1100)
## iteration: 800 (of 1100)
## iteration: 900 (of 1100)
## iteration: 1000 (of 1100)
## iteration: 1100 (of 1100)
## time for loop: 119
##
## Tree sizes, last iteration:
## 2 2 4 2 2 2 2 2 4 2 3 3 2 2 3 6 1 4 3 3
## 2 4 3 4 2 2 2 2 2 2 3 2 2 2 3 2 4 2 3 3
## 3 2 3 2 2 2 2 3 3 2 2 4 2 2 3 3 2 3 2 1
## 3 2 5 2 3 2 3 2 2 3 3 5 1 2 3 5 2 2 2 2
## 2 3 2 3 2 2 3 3 6 3 2 2 2 2 2 2 4 2 2 4
## 2 4 2 2 2 3 3 1 2 2 2 3 2 2 3 2 3 2 3 4
## 4 3 2 4 1 3 4 2 2 3 2 4 4 3 2 3 3 2 3 2
## 3 3 3 3 1 1 3 2 2 2 2 4 3 3 2 2 2 1 2 2
## 3 2 2 2 3 3 2 2 3 1 2 2 2 4 4 3 4 3 4 2
## 2 3 2 2 2 2 2 3 4 3 2 3 2 1 2 2 2 2 2 3
## 3 5 3 2 3 2 2 2 1 5 4 3 2 3 2 2 2 2 2 2
## 2 2 2 2 3 3 2 4 2 2 2 2 3 2 2 3 3 2 2 2
## 3 3 4 2 2 2 2 2 2 3 2 2 2 1 4 2 4 4 2 2
## 3 3 3 2 2 2 3 3 2 2 2 4 3 1 2 3 2 2 2 2
## 2 2 2 3 2 2 2 1 2 3 3 2 1 2 2 2 1 2 3 2
## 1 2 2 3 1 2 2 2 2 3 4 3 2 3 1 3 2 3 3 2
## 2 3 2 2 2 2 2 3 2 3 3 3 2 3 4 2 2 3 2 2
## 3 2 4 2 2 2 3 3 2 2 2 2 3 2 2 3 2 3 2 3
## 2 7 3 3 2 3 3 3 3 2 2 2 2 2 2 2 3 2 2 2
## 3 3 3 2 4 2 2 3 2 2 2 2 2 2 1 3 2 2 3 3
## 2 3 2 2 4 2 2 1 2 2 1 2 2 2 2 2 2 3 3 3
## 4 2 3 6 3 2 2 5 2 2 2 2 4 2 2 2 2 5 3 2
## 4 2 3 2 2 2 1 2 2 3 4 2 2 2 2 2 3 3 2 3
## 3 2 2 4 2 2 2 2 2 2 3 2 2 3 1 3 2 5 1 2
## 1 2 1 2 2 2 1 1 2 3 2 2 3 5 2 3 2 5 2 3
## Variable Usage, last iteration (var:count):
## (1: 34) (2: 41) (3: 27) (4: 47) (5: 36)
## (6: 34) (7: 36) (8: 172) (9: 153) (10: 154)
proc.time() - ptm
## user system elapsed
## 424.510 13.899 439.539
# B-X
ptm <- proc.time()
baft_X<-baft_HTE_X(data=train1,testdat=testdat1,ntrees=1000,time.interest=tt)
## number of trees: 500
## Prior:
## k: 2.000000
## power and base for tree prior: 2.000000 0.950000
## use quantiles for rule cut points: 1
## data:
## number of training observations: 491
## number of test observations: 1000
## number of explanatory variables: 10
##
##
## Cutoff rules c in x<=c vs x>c
## Number of cutoffs: (var: number of possible c):
## (1: 100) (2: 100) (3: 100) (4: 100) (5: 100)
## (6: 100) (7: 100) (8: 1) (9: 1) (10: 1)
##
##
##
## Running mcmc loop:
## iteration: 100 (of 1100)
## iteration: 200 (of 1100)
## iteration: 300 (of 1100)
## iteration: 400 (of 1100)
## iteration: 500 (of 1100)
## iteration: 600 (of 1100)
## iteration: 700 (of 1100)
## iteration: 800 (of 1100)
## iteration: 900 (of 1100)
## iteration: 1000 (of 1100)
## iteration: 1100 (of 1100)
## time for loop: 29
##
## Tree sizes, last iteration:
## 2 2 3 2 3 3 2 3 2 2 2 1 3 3 5 3 2 5 2 2
## 2 2 2 4 4 4 3 3 2 5 2 2 3 4 4 5 2 2 3 3
## 3 2 1 2 3 2 1 2 2 2 2 2 2 4 2 2 2 3 2 2
## 2 2 3 3 2 2 2 3 2 2 2 2 3 1 3 2 2 2 3 2
## 2 2 3 3 4 2 4 2 2 2 2 3 1 3 2 3 4 2 2 3
## 2 4 2 2 1 3 2 2 2 3 3 4 3 3 2 2 3 2 4 3
## 2 3 3 3 2 2 3 4 2 2 1 2 2 2 2 2 3 2 2 2
## 2 2 3 2 3 2 2 3 1 4 2 2 3 6 2 2 2 2 2 2
## 4 2 2 2 2 4 2 1 3 3 2 2 2 3 2 4 2 2 3 2
## 3 2 2 3 2 4 2 3 4 3 2 3 2 2 2 3 2 2 2 3
## 2 2 3 2 2 2 2 2 3 2 2 3 2 2 3 2 2 2 4 2
## 2 2 2 3 3 2 6 5 2 2 3 2 2 2 3 2 2 2 4 4
## 3 4 3 2 3 3 3 2 2 1 4 2 2 2 2 4 4 2 4 3
## 5 3 3 2 3 3 2 2 2 2 4 2 3 2 2 2 3 2 2 2
## 3 2 3 2 2 2 2 2 2 4 3 2 2 3 2 3 2 2 2 1
## 1 2 2 1 3 2 4 2 1 4 1 3 3 3 2 3 2 2 2 3
## 2 3 2 3 4 3 3 3 1 3 2 2 2 3 2 2 2 3 3 3
## 3 2 2 2 2 2 2 2 2 4 4 2 3 2 3 2 3 3 3 2
## 2 2 3 3 2 1 2 2 3 2 2 4 2 2 3 3 3 3 2 2
## 2 2 3 2 5 2 2 2 3 2 2 3 2 3 2 2 2 2 2 2
## 1 3 2 2 3 2 3 2 2 3 1 3 2 3 2 2 2 3 3 2
## 2 5 2 2 4 3 2 4 2 2 2 2 1 2 2 2 3 2 3 2
## 2 2 2 2 2 3 3 2 3 2 2 4 2 4 2 2 3 3 2 3
## 3 2 4 1 3 2 4 2 2 3 3 2 2 2 2 3 4 3 2 3
## 4 2 2 3 3 2 2 2 3 1 3 2 2 3 2 2 3 3 2 1
## Variable Usage, last iteration (var:count):
## (1: 42) (2: 33) (3: 30) (4: 31) (5: 35)
## (6: 41) (7: 26) (8: 175) (9: 162) (10: 163)
##
##
## number of trees: 500
## Prior:
## k: 2.000000
## power and base for tree prior: 2.000000 0.950000
## use quantiles for rule cut points: 1
## data:
## number of training observations: 509
## number of test observations: 1000
## number of explanatory variables: 10
##
##
## Cutoff rules c in x<=c vs x>c
## Number of cutoffs: (var: number of possible c):
## (1: 100) (2: 100) (3: 100) (4: 100) (5: 100)
## (6: 100) (7: 100) (8: 1) (9: 1) (10: 1)
##
##
##
## Running mcmc loop:
## iteration: 100 (of 1100)
## iteration: 200 (of 1100)
## iteration: 300 (of 1100)
## iteration: 400 (of 1100)
## iteration: 500 (of 1100)
## iteration: 600 (of 1100)
## iteration: 700 (of 1100)
## iteration: 800 (of 1100)
## iteration: 900 (of 1100)
## iteration: 1000 (of 1100)
## iteration: 1100 (of 1100)
## time for loop: 32
##
## Tree sizes, last iteration:
## 2 2 2 2 2 2 3 3 2 2 4 2 1 2 3 4 3 3 2 2
## 2 3 2 2 2 3 2 2 2 3 2 2 2 3 4 3 2 2 2 2
## 2 4 2 2 2 3 2 2 2 2 2 3 2 2 3 2 3 2 3 2
## 4 2 2 3 3 2 2 2 4 2 3 3 3 4 3 3 3 3 3 2
## 4 2 2 2 1 2 2 2 3 4 2 2 2 2 3 3 3 3 3 3
## 5 2 4 3 2 2 2 2 3 3 2 2 2 4 3 2 2 2 2 2
## 2 2 2 3 3 2 2 3 4 2 1 3 2 3 2 2 3 2 4 2
## 3 2 3 2 3 2 2 2 2 3 2 3 2 3 2 2 2 2 5 1
## 3 2 3 2 2 4 3 2 4 2 2 2 2 2 4 2 2 3 2 3
## 3 3 1 3 2 5 2 4 2 3 2 2 2 4 1 2 2 2 4 2
## 3 3 4 2 2 1 3 2 3 2 2 2 4 4 3 2 2 2 2 3
## 2 2 3 2 2 2 2 3 2 4 2 2 2 2 2 2 3 3 2 2
## 4 2 3 2 2 2 2 2 2 2 3 2 3 3 3 2 2 3 1 2
## 1 3 4 2 2 3 2 2 2 2 4 3 3 2 2 3 3 2 2 2
## 4 3 1 2 2 3 3 3 2 2 2 2 2 2 4 1 2 2 3 2
## 2 1 1 5 2 4 3 2 2 2 2 3 2 5 2 4 2 3 2 2
## 2 2 2 2 2 1 4 2 3 2 2 2 2 3 3 2 2 2 3 3
## 2 4 3 3 2 3 2 2 2 3 2 1 2 2 2 3 2 2 2 2
## 2 3 4 3 2 2 2 2 3 4 2 2 3 2 3 3 3 2 2 3
## 2 2 4 3 3 3 2 2 3 3 4 5 2 1 2 2 2 2 3 2
## 2 2 2 2 3 2 3 4 4 1 2 3 2 2 2 2 2 3 2 2
## 3 2 2 2 2 2 2 4 2 2 2 2 2 2 2 2 3 2 2 2
## 3 2 4 2 3 2 2 6 2 2 2 2 3 2 1 2 3 3 2 2
## 4 2 2 2 3 3 3 3 2 3 2 2 2 3 2 3 4 5 3 2
## 2 2 2 2 2 2 2 2 2 2 4 2 3 3 2 2 6 2 2 1
## Variable Usage, last iteration (var:count):
## (1: 40) (2: 42) (3: 28) (4: 35) (5: 36)
## (6: 39) (7: 24) (8: 158) (9: 157) (10: 169)
proc.time() - ptm
## user system elapsed
## 81.653 1.532 83.436
# Weibull true model
weib.true<-weibull_true(data=train1,testdat=testdat1,time.interest=tt,setting=1)
# combine results
diff <- data.frame(rsf_T_diff = rsf_T$diff, rsf_X_diff = rsf_X$diff,
                   baft_T_diff = baft_T$diff, baft_X_diff = baft_X$diff,
                   weib_diff = weib.true$diff, true_diff = testdat$true.diff)
D-T and D-X are run using Python. We have run D-T and D-X on our prepared training and test data; the results, “Tlearner_result” and “Xlearner_result”, are saved in “/Simulations/prepared_simulation_data_example.RData”.
If you generated training and test data yourself using data_gen_censor() in section 1, please run D-T and D-X in Python as follows.
Install the following Python modules if you haven’t: pyreadr, tensorflow, pandas, matplotlib, scikit-learn and numpy (multiprocessing is part of the Python standard library and needs no installation).
If you use Anaconda, these modules can be installed from the command line as follows. First activate the virtual environment of your Python installation with: conda activate venv, where venv is the name of your virtual environment. If you run Python on a server, you can activate it with: source ~/venv/bin/activate (adjust to the path of your virtual environment). After entering the environment, install each module with: pip install module_name (or pip3 install module_name if you use Python 3). Every time you run the .py files, first activate the virtual environment where these modules are installed.
Put DNNSurv_Tlearner_1run_simulation.py, DNNSurv_Xlearner_1run_simulation.py, DNNSurve_TXlearner_fun_util.py, and the test.RData and train1.RData files that you generated with data_gen_censor() into the same folder.
Set the folder path on line 2 of DNNSurv_Tlearner_1run_simulation.py and DNNSurv_Xlearner_1run_simulation.py.
Run each script from the command line, e.g.: python DNNSurv_Tlearner_1run_simulation.py and python DNNSurv_Xlearner_1run_simulation.py. On a server, you can append & to run a script in the background (e.g., under nohup).
For 100 runs, see “reproduce_simulation_tables_plots.Rmd” for submitting “DNNSurv_Tlearner_100runs_simulation.py” and “DNNSurv_Xlearner_100runs_simulation.py”.
In each output result file, the first 10000 rows are the predicted CATE and the next 10000 rows are the true CATE.
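If you ran D-T and D-X yourself, the results can be read back into R before combining them with the R-based methods. A minimal sketch, assuming the Python scripts wrote CSV files named “Tlearner_result.csv” and “Xlearner_result.csv” into your_directory (adjust names and paths to your own output):
# Read the D-T and D-X results produced by the Python scripts.
# File names are assumptions; adjust to your own output.
Tlearner_result <- read.csv(paste0(your_directory, "/Tlearner_result.csv"))
Xlearner_result <- read.csv(paste0(your_directory, "/Xlearner_result.csv"))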
Here we have the result “diff” from running R-T, R-X, B-T and B-X in section 2 on our prepared training and test data. We have also run D-T and D-X in Python on the same data; their results, “Tlearner_result” and “Xlearner_result”, are stored in “prepared_simulation_data_example.RData”, which was loaded in section 1.
Extract the D-T and D-X results:
D_T <- Tlearner_result[1:10000,]
D_X <- Xlearner_result[1:10000,]
random1.diff <- cbind(diff,D_T,D_X)
colnames(random1.diff) <- c("rsf_T_diff", "rsf_X_diff", "baft_T_diff", "baft_X_diff",
                            "weib_diff", "true_diff", "deepsurv_T_diff", "deepsurv_X_diff")
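Before binning, the overall (unbinned) bias and RMSE of each method can serve as a quick sanity check. This is a convenience sketch, not part of the original workflow:
# Convenience sketch: overall (unbinned) bias and RMSE per method.
methods <- c("rsf_T_diff", "rsf_X_diff", "baft_T_diff", "baft_X_diff",
             "weib_diff", "deepsurv_T_diff", "deepsurv_X_diff")
overall <- sapply(methods, function(m) {
  err <- random1.diff[[m]] - random1.diff$true_diff
  c(bias = mean(err), RMSE = sqrt(mean(err^2)))
})
round(overall, 4)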
Binned bias and RMSE (the true CATE is split into quantile bins, and the per-bin metrics are averaged):
RMSE_rsf_T<-RMSE_rsf_X<-RMSE_baft_T<-RMSE_baft_X<-RMSE_weib<-RMSE_dnnsurv_T<-RMSE_dnnsurv_X<-0
bias_rsf_T<-bias_rsf_X<-bias_baft_T<-bias_baft_X<-bias_weib<-bias_dnnsurv_T<-bias_dnnsurv_X<-0
dat<-random1.diff
dat<-dat[order(dat$true_diff),]
quant<-quantile(dat$true_diff,probs=seq(0,1,length.out=50))
for (j in 1:49){
  datq <- dat[which(dat$true_diff > quant[j] & dat$true_diff <= quant[j+1]), ]
  bias_rsf_T <- bias_rsf_T + mean(datq$rsf_T_diff - datq$true_diff)
  bias_rsf_X <- bias_rsf_X + mean(datq$rsf_X_diff - datq$true_diff)
  bias_baft_T <- bias_baft_T + mean(datq$baft_T_diff - datq$true_diff)
  bias_baft_X <- bias_baft_X + mean(datq$baft_X_diff - datq$true_diff)
  bias_weib <- bias_weib + mean(datq$weib_diff - datq$true_diff)
  bias_dnnsurv_T <- bias_dnnsurv_T + mean(datq$deepsurv_T_diff - datq$true_diff)
  bias_dnnsurv_X <- bias_dnnsurv_X + mean(datq$deepsurv_X_diff - datq$true_diff)
  RMSE_rsf_T <- RMSE_rsf_T + sqrt(mean((datq$rsf_T_diff - datq$true_diff)^2))
  RMSE_rsf_X <- RMSE_rsf_X + sqrt(mean((datq$rsf_X_diff - datq$true_diff)^2))
  RMSE_baft_T <- RMSE_baft_T + sqrt(mean((datq$baft_T_diff - datq$true_diff)^2))
  RMSE_baft_X <- RMSE_baft_X + sqrt(mean((datq$baft_X_diff - datq$true_diff)^2))
  RMSE_weib <- RMSE_weib + sqrt(mean((datq$weib_diff - datq$true_diff)^2))
  RMSE_dnnsurv_T <- RMSE_dnnsurv_T + sqrt(mean((datq$deepsurv_T_diff - datq$true_diff)^2))
  RMSE_dnnsurv_X <- RMSE_dnnsurv_X + sqrt(mean((datq$deepsurv_X_diff - datq$true_diff)^2))
}
bias_rsf_T=bias_rsf_T/50
bias_rsf_X=bias_rsf_X/50
bias_baft_T=bias_baft_T/50
bias_baft_X=bias_baft_X/50
bias_weib=bias_weib/50
bias_dnnsurv_T=bias_dnnsurv_T/50
bias_dnnsurv_X=bias_dnnsurv_X/50
RMSE_rsf_T=RMSE_rsf_T/50
RMSE_rsf_X=RMSE_rsf_X/50
RMSE_baft_T=RMSE_baft_T/50
RMSE_baft_X=RMSE_baft_X/50
RMSE_weib=RMSE_weib/50
RMSE_dnnsurv_T=RMSE_dnnsurv_T/50
RMSE_dnnsurv_X=RMSE_dnnsurv_X/50
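The repetitive block above can also be written compactly. The following equivalent sketch reproduces the binned bias and RMSE for all methods at once, keeping the divisor of 50 used above:
# Compact equivalent of the binned bias/RMSE computation above.
methods <- c("rsf_T_diff", "rsf_X_diff", "baft_T_diff", "baft_X_diff",
             "weib_diff", "deepsurv_T_diff", "deepsurv_X_diff")
binned <- sapply(methods, function(m) {
  per_bin <- sapply(1:49, function(j) {
    datq <- dat[dat$true_diff > quant[j] & dat$true_diff <= quant[j+1], ]
    err <- datq[[m]] - datq$true_diff
    c(bias = mean(err), RMSE = sqrt(mean(err^2)))
  })
  rowSums(per_bin) / 50  # same divisor as the code above
})
binned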
Binned bias of R-T, R-X, B-T, B-X, D-T, D-X and Weibull true model:
bias_rsf_T
## [1] 0.02998801
bias_rsf_X
## [1] 0.02654071
bias_baft_T
## [1] -0.02011069
bias_baft_X
## [1] -0.01908705
bias_dnnsurv_T
## [1] -0.002021899
bias_dnnsurv_X
## [1] 0.0142581
bias_weib
## [1] -0.01135484
Binned RMSE of R-T, R-X, B-T, B-X, D-T, D-X and Weibull true model:
RMSE_rsf_T
## [1] 0.1329761
RMSE_rsf_X
## [1] 0.1265091
RMSE_baft_T
## [1] 0.194009
RMSE_baft_X
## [1] 0.1729791
RMSE_dnnsurv_T
## [1] 0.1194756
RMSE_dnnsurv_X
## [1] 0.1048905
RMSE_weib
## [1] 0.03750969