diff --git a/R/challengeR.R b/R/challengeR.R
index 69117af..8ef41fe 100644
--- a/R/challengeR.R
+++ b/R/challengeR.R
@@ -1,188 +1,189 @@
 #' Constructs a challenge object
 #'
 #' Constructs an S3 object to represent the configuration of an assessment data set originating from a benchmarking competition (so-called "challenge").
 #'
 #' @section Assessment data set:
 #' The toolkit provides visualization approaches both for challenges designed around a single task (single-task challenges) and for challenges comprising multiple tasks (multi-task challenges).
 #' For a single-task challenge, the assessment data set (argument \code{object}) requires the following columns:
 #' \itemize{
 #'   \item test case identifier (string or numeric)
 #'   \item algorithm identifier (string or numeric)
 #'   \item performance value (numeric)
 #' }
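 #' For illustration, a minimal single-task assessment data set could look as follows (the column names are only illustrative; they are passed via the \code{case}, \code{algorithm} and \code{value} arguments):
 #' \preformatted{
 #'   testCase  algo  metric
 #'   case_1    A1    0.92
 #'   case_1    A2    0.85
 #'   case_2    A1    0.88
 #'   case_2    A2    0.80
 #' }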
 #'
 #' For a multi-task challenge, the assessment data set (argument \code{object}) requires the following columns:
 #' \itemize{
 #'   \item task identifier (string or numeric)
 #'   \item test case identifier (string or numeric)
 #'   \item algorithm identifier (string or numeric)
 #'   \item performance value (numeric)
 #' }
 #'
 #' @section Sanity check:
 #' It is highly recommended to keep the sanity check enabled when the data set is provided initially.
 #' It checks that:
 #' \itemize{
 #'   \item performance values are numeric (if not, raises error)
 #'   \item algorithm performances are observed for all cases (if not, adds them as NA and emits a message)
 #'   \item cases appear only once for the same algorithm (if not, raises error)
 #' }
 #' If the argument \code{na.treat} for the treatment of NAs is specified, NAs will be handled accordingly.
 #'
 #' It might be reasonable to disable the sanity check for further computations, e.g., for performance reasons
 #' during bootstrapping (\code{\link{bootstrap.ranked.list}}), where cases are in fact allowed to appear more than once for the same algorithm.
 #'
 #' @param object A data frame containing the assessment data.
 #' @param case A string specifying the name of the column that contains the case identifiers.
 #' @param algorithm A string specifying the name of the column that contains the algorithm identifiers.
 #' @param value A string specifying the name of the column that contains the performance values.
 #' @param by A string specifying the name of the column that contains the task identifiers. Required for a multi-task data set.
 #' @param taskName A string specifying the task name for a single-task data set that does not contain a task column.
 #'   This argument is optional for a single-task data set and is ignored for a multi-task data set.
 #' @param annotator Not supported
 #' @param smallBetter A boolean specifying whether small performance values indicate better algorithm performance.
 #' @param na.treat Indicates how missing performance values are treated if the sanity check is enabled. It can be 'na.rm', a numeric value or a function.
 #'   For a numeric value or function, NAs will be replaced by the specified value(s). For 'na.rm', rows that contain missing values will be removed.
 #' @param check A boolean indicating whether a sanity check of the specified data set and arguments should be performed.
 #'
 #' @return An S3 object to represent the configuration of an assessment data set.
 #'
 #' @examples
 #' # single-task data set
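 #' # A minimal sketch, assuming a data frame 'data' with the illustrative
 #' # columns 'testCase', 'algo' and 'metric':
 #' \dontrun{
 #' challenge <- as.challenge(data,
 #'                           case="testCase",
 #'                           algorithm="algo",
 #'                           value="metric",
 #'                           smallBetter=FALSE)
 #' }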
 #'
 #' # multi-task data set
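 #' # A minimal sketch, assuming an additional illustrative task column 'task',
 #' # with NAs replaced by 0 (assuming 0 is the worst possible value of the
 #' # illustrative metric):
 #' \dontrun{
 #' challenge <- as.challenge(dataMulti,
 #'                           by="task",
 #'                           case="testCase",
 #'                           algorithm="algo",
 #'                           value="metric",
 #'                           smallBetter=FALSE,
 #'                           na.treat=0)
 #' }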
 #'
 #' @export
 as.challenge=function(object,
                       case,
                       algorithm,
                       value,
                       by=NULL,
                       taskName=NULL,
                       annotator=NULL,
                       smallBetter=FALSE,
                       na.treat=NULL, # optional
                       check=TRUE) {
 
   object=as.data.frame(object[,c(value, algorithm, case, by, annotator)])
 
   # sanity checks
   if (check) {
-
+ 
     if (!is.null(by) && !is.null(taskName)) {
       warning("Argument 'taskName' is ignored for multi-task data set.")
     }
 
     # Add task column for data set without task column by using the specified task name.
     if (is.null(by) && !is.null(taskName)) {
       taskName <- trimws(taskName)
 
       if (taskName == "") {
         stop("Argument 'taskName' is empty.")
       }
 
       object <- cbind(task=taskName, object)
       by = "task"
     }
 
     # Add task column for data set without task column by using a dummy task name.
     if (is.null(by) && is.null(taskName)) {
       object <- cbind(task="dummyTask", object)
       by = "task"
     }
 
     object=splitby(object,by=by)
     object=lapply(object,droplevels)
     missingData = n.missing = list()
     for (task in names(object)) {
       if (!all(is.numeric(object[[task]][[value]]))) stop("Performance values must be numeric.")
 
       n.missing[[task]] <- sum(is.na(object[[task]][[value]])) # already missing before na.treat; for report
       if (n.missing[[task]]>0) message("Note: ", n.missing[[task]], " missing cases have been found in the data set.")
       # check for missing cases
       missingData[[task]] = object[[task]] %>%
         expand(!!as.symbol(algorithm),
                !!as.symbol(case)) %>%
         anti_join(object[[task]],
                   by=c(algorithm, case))
       if (nrow(missingData[[task]]) > 0) {
         if (length(object) == 1) { # single task
           message("Performance of not all algorithms has been observed for all cases. Therefore, missings have been inserted in the following cases:")
         } else { # multi task
           message("Performance of not all algorithms has been observed for all cases in task '",
                   task,
                   "'. Therefore, missings have been inserted in the following cases:")
         }
         print(as.data.frame(missingData[[task]]))
         object[[task]] = as.data.frame(object[[task]] %>%
                                          complete(task,
                                                   !!as.symbol(algorithm),
                                                   !!as.symbol(case)))
       }
       # check duplicate cases
       all1 = apply(table(object[[task]][[algorithm]],
                          object[[task]][[case]]),
                    2,
                    function(x) all(x==1))
       if (!all(all1)) {
         n.duplicated <- sum(all1!=1)
 
         if (length(object) == 1) { # single task
           if (n.duplicated/length(all1) >= 1/5) { # at least a fifth of the cases is duplicated
             stop("The following case(s) appear(s) more than once for the same algorithm. Please revise. ",
                  "Or are you considering a multi-task challenge and forgot to specify argument 'by'?\n",
                  "Case(s): ",
                  paste(names(which(all1!=1)), collapse=", "))
           } else {
             stop("The following case(s) appear(s) more than once for the same algorithm. Please revise.\n",
                  "Case(s): ",
                  paste(names(which(all1!=1)), collapse=", "))
           }
         } else { # multi task
           stop("The following case(s) appear(s) more than once for the same algorithm in task '",
                task, "'. Please revise.\n",
                "Case(s): ",
                paste(names(which(all1!=1)), collapse=", "))
         }
       }
 
       if (!is.null(na.treat)) {
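         # Treat remaining NAs: replace them by a constant value, transform them
         # with a function, or drop the affected rows ('na.rm').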
         if (is.numeric(na.treat)) {
           object[[task]][,value][is.na(object[[task]][,value])] = na.treat
         } else if (is.function(na.treat)) {
           object[[task]][,value][is.na(object[[task]][,value])] =
             na.treat(object[[task]][,value][is.na(object[[task]][,value])])
         } else if (is.character(na.treat) && na.treat=="na.rm") {
           object[[task]] = object[[task]][!is.na(object[[task]][,value]),]
         }
       }
     }
   }
   if (check==TRUE && (any(sapply(missingData, function(x) nrow(x)) > 0) || any(n.missing > 0)))  {
     if (is.null(na.treat)) message("For aggregate-then-rank, na.treat will have to be specified. ",
-                                   "For rank-then-aggregate, missings will implicitly lead to the algorithm ranked last for the missing test case."
+                                   "For rank-then-aggregate, missings will implicitly lead to the algorithm ranked last for the missing test case. ",
+                                   "na.treat is obligatory if a report is intended to be compiled."
                                )
     else if (is.numeric(na.treat)) message("All missings have been replaced by the value ", na.treat,".\n")
     else if (is.character(na.treat) && na.treat=="na.rm") message("All missings have been removed.")
     else if (is.function(na.treat)) {
       message("Missings have been replaced using function ")
       print(na.treat)
     }
   }
 
   if (check==TRUE){
     attr(object,"n.missing")=n.missing
     attr(object,"missingData")=missingData
   }
   attr(object,"na.treat")=na.treat
 
   attr(object,"algorithm")=algorithm
   attr(object,"value")=value
   attr(object,"case")=case
   attr(object,"annotator")=annotator
   attr(object,"by")=by
   attr(object,"smallBetter")=smallBetter
   attr(object,"check")=check
   class(object)=c("challenge", class(object))
   object
 }
diff --git a/R/report.R b/R/report.R
index 8f8467a..82f5308 100644
--- a/R/report.R
+++ b/R/report.R
@@ -1,167 +1,174 @@
 #' @export
 report <- function(object,...) UseMethod("report")
 
 #' @export
 report.default <- function(object, ...) stop("not implemented for this class")
 
 #' Generates a benchmarking report with bootstrapping results
 #'
 #' Generates a benchmarking report in PDF, HTML or Word format with bootstrapping results.
 #' It contains the rankings, plots of the raw assessment data and plots of the ranking stability.
 #' For multi-task challenges, it also contains plots of cross-task insights. If you are interested in
 #' the individual plots as separate files, set argument \code{clean} to \code{FALSE} and specify \code{fig.format}.
 #'
 #' @param object The ranked (bootstrapped) assessment data set.
 #' @param consensus The rank aggregation across tasks (consensus ranking). Only needed for a multi-task data set.
 #' @param file A string specifying the file name of the report. It allows for specifying the output file path as well,
 #'   otherwise the working directory is used. If \code{file} does not have a file extension, an extension will be automatically
 #'   added according to the output format given in \code{format}. If the argument is omitted, the report is created in a
 #'   temporary folder with file name "report".
 #' @param title A string specifying the title of the report.
 #' @param colors The color scheme that is applied to the plots.
 #' @param format A string specifying the format of the report. The options are "PDF", "HTML" or "Word".
 #' @param latex_engine A string specifying the LaTeX engine for producing PDF output. The options are "pdflatex", "lualatex", and "xelatex".
 #' @param clean A boolean indicating whether intermediate files should be removed after rendering. Using \code{TRUE} will clean
 #'   intermediate files (e.g. individual plots) that are created during rendering.
 #' @param fig.format A vector of strings containing the file format of the figures that are not removed if \code{clean} is set to \code{FALSE}.
 #'   The options are "jpeg", "png" and "pdf", e.g. \code{fig.format = c("jpeg", "png", "pdf")}.
 #' @param dpi A positive integer specifying the resolution of the generated plot (\code{fig.format} "jpeg" or "png") in dots per inch (DPI).
 #' @param open A boolean specifying whether the report should be opened with the default system viewer after generation.
 #' @param ... Further arguments passed to or from other functions.
 #'
 #' @return No return value. The function is called for its side effect of generating the report file.
 #'
 #' @examples
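 #' # A minimal sketch, assuming 'rankingBootstrapped' was produced by ranking
 #' # and bootstrapping an assessment data set and 'meanRanks' is the consensus
 #' # ranking of a multi-task challenge:
 #' \dontrun{
 #' report(rankingBootstrapped,
 #'        consensus=meanRanks,
 #'        file="myReport",
 #'        title="My Challenge",
 #'        format="PDF",
 #'        latex_engine="pdflatex",
 #'        clean=TRUE)
 #' }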
 #' @export
 report.bootstrap.list=function(object,
                                consensus,
                                file,
                                title="<Challenge name>",
                                colors=default_colors,
                                format="PDF",
                                latex_engine="pdflatex",
                                clean=TRUE,
                                fig.format = NULL, # file format of figures if clean==FALSE, can be vector, e.g. fig.format=c('jpeg','png', 'pdf')
                                dpi = 150, # DPI, relevant for bitmaps if clean==FALSE and fig.format specified
                                open=TRUE,...){
 
+  # If any performance value is missing and na.treat was not specified in as.challenge(), stop.
+  if (is.null(attr(object$data, "na.treat")) &&
+      any(sapply(object$data,
+                 function(task) any(is.na(task[[attr(object$data, "value")]]))))) {
+    stop("Please specify na.treat in as.challenge().")
+  }
+  
   # Copy the report file to a temporary directory before processing it, in
   # case we don't have write permissions to the current working dir (which
   # can happen when deployed).
   if (missing(file)) tempReport <- file.path(tempdir(), "report.Rmd")
   else {
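     # Derive the .Rmd file path from 'file': split off the directory part and
     # replace the file extension with ".Rmd".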
     a=strsplit(file,"/")[[1]]
     path=paste0(a[-length(a)],collapse="/")
     if (path=="") tempReport=file.path(paste0(strsplit(a[length(a)],
                                                        ".",
                                                        fixed=T)[[1]][1],".Rmd"))
     else tempReport=file.path(path,paste0(strsplit(a[length(a)],
                                                    ".",
                                                    fixed=T)[[1]][1],".Rmd"))
   }
   file.copy(file.path(system.file("appdir", package = "challengeR"),
                       "report.Rmd"),
             tempReport,
             overwrite = TRUE)
 
   if (length(object$matlist) > 1) {
     consensus = consensus
     isMultiTask = TRUE
   }
   else {
     consensus = NULL
     isMultiTask = FALSE
   }
 
   bootstrappingEnabled = TRUE
 
   if (is(object, "ranked.list")) {
     bootstrappingEnabled = FALSE
   }
 
   # Set up parameters to pass to Rmd document
   if (!is.null(fig.format) && format=="PDF") fig.format=c("pdf",fig.format)
   if (!is.null(fig.format) && fig.format[1]=="pdf" && format=="Word") fig.format <- c(fig.format[-1], fig.format[1]) # in Word, move "pdf" to the end so that PDF figures are not embedded in the document
   params <- list(
     object=object,
     consensus=consensus,
     name=title,
     colors=colors,
     isMultiTask=isMultiTask,
     bootstrappingEnabled=bootstrappingEnabled,
     fig.format = fig.format,
     dpi = dpi
   )
 
   # Knit the document, passing in the `params` list, and eval it in a
   # child of the global environment (this isolates the code in the document
   # from the code in this app).
   out <- render(tempReport,
                 switch(
                   format,
                   PDF = pdf_document(number_sections=T,
                                      latex_engine=latex_engine),
                   HTML = html_document(number_sections=T),
                   Word = word_document(df_print="kable")
                   ),
                 params = params,
                 envir = new.env(parent = globalenv()),
                 clean = clean,
                 ...
   )
 
   if (!missing(file)){
     if (is.na(strsplit(file,".",fixed=T)[[1]][2])) file=paste0(file,
                                                                ".",
                                                                strsplit(out,".",fixed=T)[[1]][2])
     file.rename(out, file)
   } else file=out
 
   file.remove(tempReport)
 
   if (open) system(paste0('open "', file, '"'))
 }
 
 #' Generates a benchmarking report without bootstrapping results
 #'
 #' Generates a benchmarking report in PDF, HTML or Word format without bootstrapping results.
 #' It contains the rankings, plots of the raw assessment data and plots of the ranking stability.
 #' For multi-task challenges, it also contains plots of cross-task insights. If you are interested in
 #' the individual plots as separate files, set argument \code{clean} to \code{FALSE} and specify \code{fig.format}.
 #'
 #' @param object The ranked assessment data set.
 #' @param consensus The rank aggregation across tasks (consensus ranking). Only needed for a multi-task data set.
 #' @param file A string specifying the file name of the report. It allows for specifying the output file path as well,
 #'   otherwise the working directory is used. If \code{file} does not have a file extension, an extension will be automatically
 #'   added according to the output format given in \code{format}. If the argument is omitted, the report is created in a
 #'   temporary folder with file name "report".
 #' @param title A string specifying the title of the report.
 #' @param colors The color scheme that is applied to the plots.
 #' @param format A string specifying the format of the report. The options are "PDF", "HTML" or "Word".
 #' @param latex_engine A string specifying the LaTeX engine for producing PDF output. The options are "pdflatex", "lualatex", and "xelatex".
 #' @param clean A boolean indicating whether intermediate files should be removed after rendering. Using \code{TRUE} will clean
 #'   intermediate files (e.g. individual plots) that are created during rendering.
 #' @param fig.format A vector of strings containing the file format of the figures that are not removed if \code{clean} is set to \code{FALSE}.
 #'   The options are "jpeg", "png" and "pdf",  e.g. \code{fig.format = c("jpeg", "png", "pdf")}.
 #' @param dpi A positive integer specifying the resolution of the generated plot (\code{fig.format} "jpeg" or "png") in dots per inch (DPI).
 #' @param open A boolean specifying whether the report should be opened with the default system viewer after generation.
 #' @param ... Further arguments passed to or from other functions.
 #'
 #' @return No return value. The function is called for its side effect of generating the report file.
 #'
 #' @examples
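 #' # A minimal sketch, assuming 'ranking' was produced by ranking an assessment
 #' # data set (without bootstrapping) and 'meanRanks' is the consensus ranking
 #' # of a multi-task challenge:
 #' \dontrun{
 #' report(ranking,
 #'        consensus=meanRanks,
 #'        file="myReport",
 #'        title="My Challenge",
 #'        format="PDF")
 #' }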
 #' @export
 report.ranked.list=function(object,
                             consensus,
                             file,
                             title="<Challenge name>",
                             colors=default_colors,
                             format="PDF",
                             latex_engine="pdflatex",
                             clean=TRUE,
                             fig.format = NULL, # file format of figures if clean=FALSE, can be vector, e.g. fig.format=c('jpeg','png', 'pdf')
                             dpi = 150, # DPI, relevant for bitmaps if clean==FALSE and fig.format specified
                             open=TRUE,
                             ...){
   report.bootstrap.list(object, consensus, file, title, colors, format, latex_engine, clean, fig.format, dpi, open, ...)
 }