diff --git a/R/challengeR.R b/R/challengeR.R
index 6d9e9c2..45d53d0 100644
--- a/R/challengeR.R
+++ b/R/challengeR.R
@@ -1,207 +1,241 @@
 # Copyright (c) German Cancer Research Center (DKFZ)
 # All rights reserved.
 #
 # This file is part of challengeR.
 #
 # challengeR is free software: you can redistribute it and/or modify
 # it under the terms of the GNU General Public License as published by
 # the Free Software Foundation, either version 2 of the License, or
 # (at your option) any later version.
 #
 # challengeR is distributed in the hope that it will be useful,
 # but WITHOUT ANY WARRANTY; without even the implied warranty of
 # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 # GNU General Public License for more details.
 #
 # You should have received a copy of the GNU General Public License
 # along with challengeR. If not, see <https://www.gnu.org/licenses/>.
 
 #' Constructs a challenge object
 #'
 #' Constructs an S3 object to represent the configuration of an assessment data set originating from a benchmarking competition (so-called "challenge").
 #'
 #' @section Assessment data set:
 #' The toolkit provides visualization approaches both for challenges designed around a single task (single-task challenges) and for challenges comprising multiple tasks (multi-task challenges).
 #' For a single-task challenge, the assessment data set (argument \code{object}) requires the following columns:
 #' \itemize{
 #'   \item test case identifier (string or numeric)
 #'   \item algorithm identifier (string or numeric)
 #'   \item performance value (numeric)
 #' }
 #'
 #' For a multi-task challenge, the assessment data set (argument \code{object}) requires the following columns:
 #' \itemize{
 #'   \item task identifier (string or numeric)
 #'   \item test case identifier (string or numeric)
 #'   \item algorithm identifier (string or numeric)
 #'   \item performance value (numeric)
 #' }
 #'
 #' @section Sanity check:
 #' It is highly recommended not to disable the sanity check when the data set is provided initially.
 #' It checks that:
 #' \itemize{
 #'   \item performance values are numeric (if not, raises error)
 #'   \item algorithm performances are observed for all cases (if not, adds them as NA and emits a message)
 #'   \item cases appear only once for the same algorithm (if not, raises error)
 #' }
 #' If the argument \code{na.treat} for the treatment of NAs is specified, NAs will be handled accordingly.
 #'
 #' It might be reasonable to disable the sanity check for subsequent computations, e.g., for performance reasons
 #' during bootstrapping (\code{\link{bootstrap.ranked.list}}), where cases are actually allowed to appear more than once for the same algorithm.
 #'
 #' @param object A data frame containing the assessment data.
 #' @param case A string specifying the name of the column that contains the case identifiers.
 #' @param algorithm A string specifying the name of the column that contains the algorithm identifiers.
 #' @param value A string specifying the name of the column that contains the performance values.
 #' @param by A string specifying the name of the column that contains the task identifiers. Required for a multi-task data set.
 #' @param taskName A string specifying the task name for a single-task data set that does not contain a task column.
 #'   This argument is optional for a single-task data set and is ignored for a multi-task data set.
 #' @param annotator If multiple annotators annotated the test cases, a string specifying the name of the column that contains the annotator identifiers. Only applies to rank-then-aggregate. Use with caution: Currently not tested.
 #' @param smallBetter A boolean specifying whether small performance values indicate better algorithm performance.
 #' @param na.treat Indicates how missing performance values are treated if the sanity check is enabled. It can be 'na.rm', a numeric value, or a function.
 #'   For a numeric value or a function, NAs will be replaced by the specified value or the function result, respectively. For 'na.rm', rows that contain missing values will be removed.
 #' @param check A boolean indicating whether a sanity check of the specified data set and arguments should be performed.
 #'
 #' @return An S3 object to represent the configuration of an assessment data set.
 #'
 #' @examples
 #' # single-task data set
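+#' # A minimal sketch based on the package tests; the column names ("algo",
+#' # "value", "case") and the values are illustrative:
+#' data <- rbind(
+#'   data.frame(algo="A1", value=0.8, case="C1"),
+#'   data.frame(algo="A2", value=0.6, case="C1"))
+#'
+#' challenge <- as.challenge(data, taskName="T1", algorithm="algo",
+#'                           case="case", value="value", smallBetter=FALSE)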
 #'
 #' # multi-task data set
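+#' # A minimal sketch with two tasks; the "task" column supplies the task
+#' # identifiers passed via 'by':
+#' dataTask1 <- cbind(task="T1",
+#'                    rbind(
+#'                      data.frame(algo="A1", value=0.8, case="C1"),
+#'                      data.frame(algo="A2", value=0.6, case="C1")))
+#' dataTask2 <- cbind(task="T2",
+#'                    rbind(
+#'                      data.frame(algo="A1", value=0.2, case="C1"),
+#'                      data.frame(algo="A2", value=0.3, case="C1")))
+#' data <- rbind(dataTask1, dataTask2)
+#'
+#' challenge <- as.challenge(data, by="task", algorithm="algo",
+#'                           case="case", value="value", smallBetter=TRUE)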
 #'
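+#' # Handling missing performance values (a sketch mirroring the package tests):
+#' # NAs are replaced by 0 via 'na.treat'
+#' dataNA <- rbind(
+#'   data.frame(algo="A1", value=0.8, case="C1"),
+#'   data.frame(algo="A1", value=NA,  case="C2"),
+#'   data.frame(algo="A2", value=0.6, case="C1"),
+#'   data.frame(algo="A2", value=NA,  case="C2"))
+#' challengeNA <- as.challenge(dataNA, taskName="T1", algorithm="algo",
+#'                             case="case", value="value", smallBetter=FALSE,
+#'                             na.treat=0)
+#'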
 #' @export
 as.challenge=function(object,
                       case,
                       algorithm,
                       value,
                       by=NULL,
                       taskName=NULL,
                       annotator=NULL,
                       smallBetter=FALSE,
                       na.treat=NULL, # optional
                       check=TRUE) {
 
   object=as.data.frame(object[,c(value, algorithm, case, by, annotator)])
-
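+  # make sure the algorithm column is a factor (the accompanying tests expect class 'factor')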
+  object[[algorithm]] <- as.factor(object[[algorithm]])
   # sanity checks
   if (check) {
  
     if (!is.null(by) && !is.null(taskName)) {
       warning("Argument 'taskName' is ignored for multi-task data set.")
     }
 
     # Add task column for data set without task column by using the specified task name.
     if (is.null(by) && !is.null(taskName)) {
       taskName <- trimws(taskName)
 
       if (taskName == "") {
         stop("Argument 'taskName' is empty.")
       }
 
       object <- cbind(task=taskName, object)
       by = "task"
     }
 
     # Add task column for data set without task column by using a dummy task name.
     if (is.null(by) && is.null(taskName)) {
       object <- cbind(task="dummyTask", object)
       by = "task"
     }
 
     object=splitby(object,by=by)
     object=lapply(object,droplevels)
     missingData = n.missing = list()
     for (task in names(object)) {
       if (!all(is.numeric(object[[task]][[value]]))) stop("Performance values must be numeric.")
 
       n.missing[[task]] <- sum(is.na(object[[task]][[value]])) # already missing before na.treat; for report
      if (n.missing[[task]]>0) message("Note: ", n.missing[[task]], " missing cases have been found in the data set.")
       # check for missing cases
         missingData[[task]]=object[[task]] %>%
           expand(!!as.symbol(algorithm),
                  !!as.symbol(case))%>%
           anti_join(object[[task]],
                     by=c( algorithm,case))
         if (nrow(missingData[[task]])>0) {
              if (length(object) == 1 ) { # single task
             message("Performance of not all algorithms has been observed for all cases. Therefore, missings have been inserted in the following cases:")
           } else { # multi task
             message("Performance of not all algorithms has been observed for all cases in task '",
                     task,
                     "'. Therefore, missings have been inserted in the following cases:")
 
           }
           print(as.data.frame(missingData[[task]]))
           object[[task]]=as.data.frame(object[[task]] %>%
                                          complete(!!as.symbol(by),
                                                   !!as.symbol(algorithm),
                                                   !!as.symbol(case)))
         }
       # check duplicate cases
          all1=apply(table(object[[task]][[algorithm]],
                            object[[task]][[case]]),
                      2,
                      function(x) all(x==1))
           if (!all(all1)) {
             n.duplicated <- sum(all1!=1)
 
             if (length(object) == 1 ) { # single task
               if (n.duplicated/length(all1) >= 1/5) { # at least a fifth of the cases are duplicated
                 stop ("The following case(s) appear(s) more than once for the same algorithm. Please revise. ",
                       "Or are you considering a multi-task challenge and forgot to specify argument 'by'?\n",
                       "Case(s): ",
                       paste(names(which(all1!=1)), collapse=", ")
                       )
               } else {
                 stop ("The following case(s) appear(s) more than once for the same algorithm. Please revise.\n",
                       "Case(s): ",
                       paste(names(which(all1!=1)), collapse=", ")
                       )
               }
             } else { # multi task
               stop ("The following case(s) appear(s) more than once for the same algorithm in task '",
                     task, "'. Please revise.\n",
                      "Case(s): ",
                     paste(names(which(all1!=1)), collapse=", ")
                     )
 
             }
           }
 
       if (!is.null(na.treat)) {
         if (is.numeric(na.treat)) object[[task]][,value][is.na(object[[task]][,value])]=na.treat
         else if (is.function(na.treat)) object[[task]][,value][is.na(object[[task]][,value])]=na.treat(object[[task]][,value][is.na(object[[task]][,value])])
         else if (is.character(na.treat) && na.treat=="na.rm") object[[task]]=object[[task]][!is.na(object[[task]][,value]),]
       }
     }
   }
   if (check==TRUE && (any(sapply(missingData, function(x) nrow(x))>0) |any(n.missing>0)))  {
     if (is.null(na.treat)) message("For aggregate-then-rank, na.treat will have to be specified. ",
                                    "For rank-then-aggregate, missings will implicitly lead to the algorithm ranked last for the missing test case.",
                                    "na.treat obligatory if report is intended to be compiled."
                                )
     else if (is.numeric(na.treat)) message("All missings have been replaced by the value ", na.treat,".\n")
     else if (is.character(na.treat) && na.treat=="na.rm") message("All missings have been removed.")
     else if (is.function(na.treat)) {
       message("Missings have been replaced using function ")
       print(na.treat)
     }
   }
 
   if (check==TRUE){
     attr(object,"n.missing")=n.missing
     attr(object,"missingData")=missingData
   }
   attr(object,"na.treat")=na.treat
 
   attr(object,"algorithm")=algorithm
   attr(object,"value")=value
   attr(object,"case")=case
   attr(object,"annotator")=annotator
   attr(object,"by")=by
   attr(object,"smallBetter")=smallBetter
   attr(object,"check")=check
   class(object)=c("challenge", class(object))
   object
 }
diff --git a/tests/testthat/test-challenge.R b/tests/testthat/test-challenge.R
index b3c7ffe..819d1c5 100644
--- a/tests/testthat/test-challenge.R
+++ b/tests/testthat/test-challenge.R
@@ -1,596 +1,633 @@
 # Copyright (c) German Cancer Research Center (DKFZ)
 # All rights reserved.
 #
 # This file is part of challengeR.
 #
 # challengeR is free software: you can redistribute it and/or modify
 # it under the terms of the GNU General Public License as published by
 # the Free Software Foundation, either version 2 of the License, or
 # (at your option) any later version.
 #
 # challengeR is distributed in the hope that it will be useful,
 # but WITHOUT ANY WARRANTY; without even the implied warranty of
 # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 # GNU General Public License for more details.
 #
 # You should have received a copy of the GNU General Public License
 # along with challengeR. If not, see <https://www.gnu.org/licenses/>.
 
 test_that("empty attribute 'taskName' raises error for single-task challenge", {
   data <- rbind(
       data.frame(algo="A1", value=0.8, case="C1"),
       data.frame(algo="A2", value=0.6, case="C1"))
 
   expect_error(as.challenge(data, taskName="", algorithm="algo", case="case", value="value", smallBetter=FALSE),
                "Argument 'taskName' is empty.", fixed=TRUE)
 })
 
 test_that("only whitespaces in attribute 'taskName' raises error for single-task challenge", {
   data <- rbind(
     data.frame(algo="A1", value=0.8, case="C1"),
     data.frame(algo="A2", value=0.6, case="C1"))
 
   expect_error(as.challenge(data, taskName="  ", algorithm="algo", case="case", value="value", smallBetter=FALSE),
                "Argument 'taskName' is empty.", fixed=TRUE)
 })
 
 test_that("attributes are set for single-task challenge with specified task name", {
   data <- rbind(
     data.frame(algo="A1", value=0.8, case="C1"),
     data.frame(algo="A2", value=0.6, case="C1"))
 
   actualChallenge <- as.challenge(data, taskName="T1", algorithm="algo", case="case", value="value", smallBetter=FALSE)
 
   expect_equal(attr(actualChallenge, "annotator"), NULL)
   expect_equal(attr(actualChallenge, "by"), "task")
   expect_equal(attr(actualChallenge, "smallBetter"), FALSE)
   expect_equal(attr(actualChallenge, "check"), TRUE)
 
   expect_equal(as.vector(actualChallenge$T1$algo), c("A1", "A2"))
   expect_equal(as.vector(actualChallenge$T1$value), c(0.8, 0.6))
   expect_equal(as.vector(actualChallenge$T1$case), c("C1", "C1"))
   expect_equal(as.vector(actualChallenge$T1$task), c("T1", "T1"))
 
   # expect that there's no attribute "task"
   expect_equal(attr(actualChallenge, "task"), NULL)
   expect_equal(attr(actualChallenge$T1, "task"), NULL)
   expect_equal(attr(actualChallenge$T2, "task"), NULL)
 })
 
 test_that("attributes are set for single-task challenge with dummy task name", {
   data <- rbind(
     data.frame(algo="A1", value=0.8, case="C1"),
     data.frame(algo="A2", value=0.6, case="C1"))
 
   actualChallenge <- as.challenge(data, algorithm="algo", case="case", value="value", smallBetter=FALSE)
 
   expect_equal(attr(actualChallenge, "annotator"), NULL)
   expect_equal(attr(actualChallenge, "by"), "task")
   expect_equal(attr(actualChallenge, "smallBetter"), FALSE)
   expect_equal(attr(actualChallenge, "check"), TRUE)
 
   expect_equal(as.vector(actualChallenge$dummyTask$algo), c("A1", "A2"))
   expect_equal(as.vector(actualChallenge$dummyTask$value), c(0.8, 0.6))
   expect_equal(as.vector(actualChallenge$dummyTask$case), c("C1", "C1"))
   expect_equal(as.vector(actualChallenge$dummyTask$task), c("dummyTask", "dummyTask"))
 
   # expect that there's no attribute "task"
   expect_equal(attr(actualChallenge, "task"), NULL)
   expect_equal(attr(actualChallenge$dummyTask, "task"), NULL)
   expect_equal(attr(actualChallenge$dummyTask, "task"), NULL)
 })
 
 test_that("leading and trailing whitespaces are trimmed for attribute 'taskName'", {
   data <- rbind(
     data.frame(algo="A1", value=0.8, case="C1"),
     data.frame(algo="A2", value=0.6, case="C1"))
 
   actualChallenge <- as.challenge(data, taskName=" T1  ", algorithm="algo", case="case", value="value", smallBetter=FALSE)
 
   expect_equal(as.vector(actualChallenge$T1$algo), c("A1", "A2"))
 })
 
 test_that("attributes are set for multi-task challenge", {
   dataTask1 <- cbind(task="T1",
                      rbind(
                        data.frame(algo="A1", value=0.8, case="C1"),
                        data.frame(algo="A2", value=0.6, case="C1")
                      ))
   dataTask2 <- cbind(task="T2",
                      rbind(
                        data.frame(algo="A1", value=0.2, case="C1"),
                        data.frame(algo="A2", value=0.3, case="C1")
                      ))
 
   data <- rbind(dataTask1, dataTask2)
 
   actualChallenge <- as.challenge(data, by="task", algorithm="algo", case="case", value="value", smallBetter=TRUE)
 
   expect_equal(attr(actualChallenge, "annotator"), NULL)
   expect_equal(attr(actualChallenge, "by"), "task")
   expect_equal(attr(actualChallenge, "smallBetter"), TRUE)
   expect_equal(attr(actualChallenge, "check"), TRUE)
 
   expect_equal(as.vector(actualChallenge$T1$algo), c("A1", "A2"))
   expect_equal(as.vector(actualChallenge$T1$value), c(0.8, 0.6))
   expect_equal(as.vector(actualChallenge$T1$case), c("C1", "C1"))
   expect_equal(as.vector(actualChallenge$T1$task), c("T1", "T1"))
 
   expect_equal(as.vector(actualChallenge$T2$algo), c("A1", "A2"))
   expect_equal(as.vector(actualChallenge$T2$value), c(0.2, 0.3))
   expect_equal(as.vector(actualChallenge$T2$case), c("C1", "C1"))
   expect_equal(as.vector(actualChallenge$T2$task), c("T2", "T2"))
 
   # expect that there's no attribute "task"
   expect_equal(attr(actualChallenge, "task"), NULL)
   expect_equal(attr(actualChallenge$T1, "task"), NULL)
   expect_equal(attr(actualChallenge$T2, "task"), NULL)
 })
 
 test_that("attributes are set for multi-task challenge with sanity check disabled", {
   dataTask1 <- cbind(task="T1",
                      rbind(
                        data.frame(algo="A1", value=0.8, case="C1"),
                        data.frame(algo="A2", value=0.6, case="C1")
                      ))
   dataTask2 <- cbind(task="T2",
                      rbind(
                        data.frame(algo="A1", value=0.2, case="C1"),
                        data.frame(algo="A2", value=0.3, case="C1")
                      ))
 
   data <- rbind(dataTask1, dataTask2)
 
   actualChallenge <- as.challenge(data, by="task", algorithm="algo", case="case", value="value", smallBetter=TRUE, check=FALSE)
 
   expect_equal(attr(actualChallenge, "annotator"), NULL)
   expect_equal(attr(actualChallenge, "by"), "task")
   expect_equal(attr(actualChallenge, "smallBetter"), TRUE)
   expect_equal(attr(actualChallenge, "check"), FALSE)
   expect_equal(as.vector(actualChallenge$algo), c("A1", "A2", "A1", "A2"))
   expect_equal(as.vector(actualChallenge$value), c(0.8, 0.6, 0.2, 0.3))
   expect_equal(as.vector(actualChallenge$case), c("C1", "C1", "C1", "C1"))
   expect_equal(as.vector(actualChallenge$task), c("T1", "T1", "T2", "T2"))
 })
 
 test_that("attribute 'taskName' is ignored for multi-task challenge", {
   dataTask1 <- cbind(task="T1",
                      rbind(
                        data.frame(algo="A1", value=0.8, case="C1"),
                        data.frame(algo="A2", value=0.6, case="C1")
                      ))
   dataTask2 <- cbind(task="T2",
                      rbind(
                        data.frame(algo="A1", value=0.2, case="C1"),
                        data.frame(algo="A2", value=0.3, case="C1")
                      ))
 
   data <- rbind(dataTask1, dataTask2)
 
   expect_warning(as.challenge(data, taskName="T1", by="task", algorithm="algo", case="case", value="value", smallBetter=TRUE),
                  "Argument 'taskName' is ignored for multi-task data set.", fixed=TRUE)
 })
 
 test_that("missing algorithm performances are added as NAs for single-task challenge", {
   data <- rbind(
     data.frame(algo="A1", value=0.8, case="C1"),
     data.frame(algo="A2", value=0.6, case="C2"))
 
   expect_message(actualChallenge <- as.challenge(data, taskName="T1", algorithm="algo", case="case", value="value", smallBetter=FALSE),
                  "Performance of not all algorithms has been observed for all cases. Therefore, missings have been inserted in the following cases:", fixed=TRUE)
 
   expect_equal(as.vector(actualChallenge$T1$algo), c("A1", "A1", "A2", "A2"))
   expect_equal(as.vector(actualChallenge$T1$value), c(0.8, NA, NA, 0.6))
   expect_equal(as.vector(actualChallenge$T1$case), c("C1", "C2", "C1", "C2"))
   expect_equal(as.vector(actualChallenge$T1$task), c("T1", "T1", "T1", "T1"))
 })
 
 test_that("multi-task data set containing one task is interpreted as single-task data set, missing algorithm performances are added", {
   data <- cbind(task="T1",
                 rbind(
                   data.frame(algo="A1", value=0.8, case="C1"),
                   data.frame(algo="A2", value=0.6, case="C2")
                 ))
 
   # do not specify parameter "by" to interpret multi-task data set as single-task data set
   expect_message(actualChallenge <- as.challenge(data, taskName="T1", algorithm="algo", case="case", value="value", smallBetter=FALSE),
                  "Performance of not all algorithms has been observed for all cases. Therefore, missings have been inserted in the following cases:", fixed=TRUE)
 
   expect_equal(as.vector(actualChallenge$T1$algo), c("A1", "A1", "A2", "A2"))
   expect_equal(as.vector(actualChallenge$T1$value), c(0.8, NA, NA, 0.6))
   expect_equal(as.vector(actualChallenge$T1$case), c("C1", "C2", "C1", "C2"))
 })
 
 test_that("missing algorithm performances are added as NAs for multi-task challenge (2 tasks in data set)", {
   dataTask1 <- cbind(task="T1",
                      rbind(
                        data.frame(algo="A1", value=0.8, case="C1"),
                        data.frame(algo="A2", value=0.6, case="C2")
                      ))
   dataTask2 <- cbind(task="T2",
                      rbind(
                        data.frame(algo="A1", value=0.2, case="C1"),
                        data.frame(algo="A1", value=0.3, case="C2"),
                        data.frame(algo="A2", value=0.4, case="C1")
                      ))
 
   data <- rbind(dataTask1, dataTask2)
 
   expect_message(actualChallenge <- as.challenge(data, by="task", algorithm="algo", case="case", value="value", smallBetter=FALSE),
                  "Performance of not all algorithms has been observed for all cases in task 'T1'. Therefore, missings have been inserted in the following cases:", fixed=TRUE)
 
   expect_equal(as.vector(actualChallenge$T1$algo), c("A1", "A1", "A2", "A2"))
   expect_equal(as.vector(actualChallenge$T1$value), c(0.8, NA, NA, 0.6))
   expect_equal(as.vector(actualChallenge$T1$case), c("C1", "C2", "C1", "C2"))
   expect_equal(as.vector(actualChallenge$T1$task), c("T1", "T1", "T1", "T1"))
 
   expect_equal(as.vector(actualChallenge$T2$algo), c("A1", "A1", "A2", "A2"))
   expect_equal(as.vector(actualChallenge$T2$value), c(0.2, 0.3, 0.4, NA))
   expect_equal(as.vector(actualChallenge$T2$case), c("C1", "C2", "C1", "C2"))
   expect_equal(as.vector(actualChallenge$T2$task), c("T2", "T2", "T2", "T2"))
 })
 
 test_that("missing algorithm performances are not added as NA with sanity check disabled for single-task challenge", {
   data <- rbind(
     data.frame(algo="A1", value=0.8, case="C1"),
     data.frame(algo="A2", value=0.6, case="C2"))
 
   actualChallenge <- as.challenge(data, taskName="T1", algorithm="algo", case="case", value="value", smallBetter=FALSE, check=FALSE)
 
   expect_equal(as.vector(actualChallenge$algo), c("A1", "A2"))
   expect_equal(as.vector(actualChallenge$value), c(0.8, 0.6))
   expect_equal(as.vector(actualChallenge$case), c("C1", "C2"))
 })
 
 test_that("missing algorithm performances are not added as NA with sanity check disabled for multi-task challenge (2 tasks in data set)", {
   dataTask1 <- cbind(task="T1",
                      rbind(
                        data.frame(algo="A1", value=0.8, case="C1"),
                        data.frame(algo="A2", value=0.6, case="C2")
                      ))
   dataTask2 <- cbind(task="T2",
                      rbind(
                        data.frame(algo="A1", value=0.2, case="C1"),
                        data.frame(algo="A1", value=0.3, case="C2"),
                        data.frame(algo="A2", value=0.4, case="C1")
                      ))
 
   data <- rbind(dataTask1, dataTask2)
 
   actualChallenge <- as.challenge(data, by="task", algorithm="algo", case="case", value="value", smallBetter=FALSE, check=FALSE)
 
   expect_equal(as.vector(actualChallenge$algo), c("A1", "A2", "A1", "A1", "A2"))
   expect_equal(as.vector(actualChallenge$value), c(0.8, 0.6, 0.2, 0.3, 0.4))
   expect_equal(as.vector(actualChallenge$case), c("C1", "C2", "C1", "C2", "C1"))
 })
 
 test_that("case cannot appear more than once per algorithm for single-task challenge", {
   data <- rbind(
     data.frame(algo="A1", value=0.8, case="C1"),
     data.frame(algo="A1", value=0.8, case="C1"))
 
   expect_error(as.challenge(data, taskName="T1", algorithm="algo", case="case", value="value", smallBetter=FALSE),
                "The following case(s) appear(s) more than once for the same algorithm. Please revise. Or are you considering a multi-task challenge and forgot to specify argument 'by'?\nCase(s): C1", fixed=TRUE)
 })
 
 test_that("multi-task data set containing one task is interpreted as single-task data set, case cannot appear more than once per algorithm", {
   data <- cbind(task="T1",
                 rbind(
                   data.frame(algo="A1", value=0.8, case="C1"),
                   data.frame(algo="A1", value=0.8, case="C1")
                 ))
 
   # do not specify parameter "by" to interpret multi-task data set as single-task data set
   expect_error(as.challenge(data, taskName="T1", algorithm="algo", case="case", value="value", smallBetter=FALSE),
                "The following case(s) appear(s) more than once for the same algorithm. Please revise. Or are you considering a multi-task challenge and forgot to specify argument 'by'?\nCase(s): C1", fixed=TRUE)
 })
 
 test_that("case cannot appear more than once per algorithm for multi-task challenge (1 task in data set)", {
   data <- cbind(task="T1",
                 rbind(
                   data.frame(algo="A1", value=0.8, case="C1"),
                   data.frame(algo="A1", value=0.8, case="C1")
                 ))
 
   expect_error(as.challenge(data, by="task", algorithm="algo", case="case", value="value", smallBetter=FALSE),
                "The following case(s) appear(s) more than once for the same algorithm. Please revise. Or are you considering a multi-task challenge and forgot to specify argument 'by'?\nCase(s): C1", fixed=TRUE)
 })
 
 test_that("cases cannot appear more than once per algorithm for single-task challenge", {
   data <- rbind(
     data.frame(algo="A1", value=0.8, case="C1"),
     data.frame(algo="A1", value=0.8, case="C1"),
     data.frame(algo="A2", value=0.7, case="C1"),
     data.frame(algo="A1", value=0.5, case="C2"),
     data.frame(algo="A2", value=0.6, case="C2"),
     data.frame(algo="A2", value=0.6, case="C2"))
 
   expect_error(as.challenge(data, taskName="T1", algorithm="algo", case="case", value="value", smallBetter=FALSE),
                "The following case(s) appear(s) more than once for the same algorithm. Please revise. Or are you considering a multi-task challenge and forgot to specify argument 'by'?\nCase(s): C1, C2", fixed=TRUE)
 })
 
 test_that("cases cannot appear more than once per algorithm for multi-task challenge (1 task in data set)", {
   data <- cbind(task="T1",
                 rbind(
                   data.frame(algo="A1", value=0.8, case="C1"),
                   data.frame(algo="A1", value=0.8, case="C1"),
                   data.frame(algo="A2", value=0.7, case="C1"),
                   data.frame(algo="A1", value=0.5, case="C2"),
                   data.frame(algo="A2", value=0.6, case="C2"),
                   data.frame(algo="A2", value=0.6, case="C2")
                 ))
 
   expect_error(as.challenge(data, by="task", algorithm="algo", case="case", value="value", smallBetter=FALSE),
                "The following case(s) appear(s) more than once for the same algorithm. Please revise. Or are you considering a multi-task challenge and forgot to specify argument 'by'?\nCase(s): C1, C2", fixed=TRUE)
 })
 
 test_that("cases cannot appear more than once per algorithm for multi-task challenge (2 tasks in data set)", {
   dataTask1 <- cbind(task="T1",
                      rbind(
                        data.frame(algo="A1", value=0.8, case="C1") # let T1 pass
                      ))
 
   dataTask2 <- cbind(task="T2",
                      rbind(
                        data.frame(algo="A1", value=0.8, case="C1"),
                        data.frame(algo="A1", value=0.8, case="C1"),
                        data.frame(algo="A2", value=0.7, case="C1"),
                        data.frame(algo="A1", value=0.5, case="C2"),
                        data.frame(algo="A2", value=0.6, case="C2"),
                        data.frame(algo="A2", value=0.6, case="C2")
                      ))
 
   data <- rbind(dataTask1, dataTask2)
 
   expect_error(as.challenge(data, by="task", algorithm="algo", case="case", value="value", smallBetter=FALSE),
                "The following case(s) appear(s) more than once for the same algorithm in task 'T2'. Please revise.\nCase(s): C1, C2", fixed=TRUE)
 })
 
 test_that("cases cannot appear more than once per algorithm when missing data was added for single-task challenge", {
   data <- rbind(
     data.frame(algo="A1", value=0.8, case="C1"),
     data.frame(algo="A1", value=0.8, case="C1"),
     data.frame(algo="A2", value=0.6, case="C2"),
     data.frame(algo="A2", value=0.6, case="C2"))
 
   expect_error(as.challenge(data, taskName="T1", algorithm="algo", case="case", value="value", smallBetter=FALSE),
                "The following case(s) appear(s) more than once for the same algorithm. Please revise. Or are you considering a multi-task challenge and forgot to specify argument 'by'?\nCase(s): C1, C2", fixed=TRUE)
 })
 
 test_that("user is notified of duplicate cases when multi-task data set is interpreted as single-task data set (2 tasks in data set)", {
   dataTask1 <- cbind(task="T1",
                      rbind(
                        data.frame(algo="A1", value=0.8, case="C1")
                      ))
 
   dataTask2 <- cbind(task="T2",
                      rbind(
                        data.frame(algo="A1", value=0.8, case="C1")
                      ))
 
   data <- rbind(dataTask1, dataTask2)
 
   # do not specify parameter "by" to interpret multi-task data set as single-task data set
   expect_error(as.challenge(data, taskName="New task", algorithm="algo", case="case", value="value", smallBetter=FALSE),
                "The following case(s) appear(s) more than once for the same algorithm. Please revise. Or are you considering a multi-task challenge and forgot to specify argument 'by'?\nCase(s): C1", fixed=TRUE)
 })
 
 test_that("user is notified of missing algorithm performance when multi-task data set is interpreted as single-task data set (2 tasks in data set)", {
   dataTask1 <- cbind(task="T1",
                      rbind(
                        data.frame(algo="A1", value=0.8, case="C1")
                      ))
 
   dataTask2 <- cbind(task="T2",
                      rbind(
                        data.frame(algo="A2", value=0.6, case="C2")
                      ))
 
   data <- rbind(dataTask1, dataTask2)
 
   # do not specify parameter "by" to interpret multi-task data set as single-task data set
   expect_message(as.challenge(data, taskName="New task", algorithm="algo", case="case", value="value", smallBetter=FALSE),
                  "Performance of not all algorithms has been observed for all cases. Therefore, missings have been inserted in the following cases:", fixed=TRUE)
 })
 
 test_that("NAs are replaced by numeric value for single-task challenge", {
   data <- rbind(
     data.frame(algo="A1", value=0.8, case="C1"),
     data.frame(algo="A1", value=NA, case="C2"),
     data.frame(algo="A2", value=0.6, case="C1"),
     data.frame(algo="A2", value=NA, case="C2"))
 
   actualChallenge <- as.challenge(data, taskName="T1", algorithm="algo", case="case", value="value", smallBetter=FALSE, na.treat=0)
 
   expect_equal(as.vector(actualChallenge$T1$algo), c("A1", "A1", "A2", "A2"))
   expect_equal(as.vector(actualChallenge$T1$value), c(0.8, 0.0, 0.6, 0.0))
   expect_equal(as.vector(actualChallenge$T1$case), c("C1", "C2", "C1", "C2"))
 })
 
 test_that("NAs are replaced by numeric value for multi-task challenge", {
   dataTask1 <- cbind(task="T1",
                      rbind(
                        data.frame(algo="A1", value=0.8, case="C1"),
                        data.frame(algo="A1", value=NA, case="C2")
                      ))
   dataTask2 <- cbind(task="T2",
                      rbind(
                        data.frame(algo="A2", value=NA, case="C1"),
                        data.frame(algo="A2", value=0.5, case="C2")
                      ))
 
   data <- rbind(dataTask1, dataTask2)
 
   actualChallenge <- as.challenge(data, by="task", algorithm="algo", case="case", value="value", smallBetter=FALSE, na.treat=0)
 
   expect_equal(as.vector(actualChallenge$T1$algo), c("A1", "A1"))
   expect_equal(as.vector(actualChallenge$T1$value), c(0.8, 0.0))
   expect_equal(as.vector(actualChallenge$T1$case), c("C1", "C2"))
 
   expect_equal(as.vector(actualChallenge$T2$algo), c("A2", "A2"))
   expect_equal(as.vector(actualChallenge$T2$value), c(0.0, 0.5))
   expect_equal(as.vector(actualChallenge$T2$case), c("C1", "C2"))
 })
 
 test_that("NAs are replaced by function value for single-task challenge", {
   data <- rbind(
     data.frame(algo="A1", value=0.8, case="C1"),
     data.frame(algo="A1", value=NA, case="C2"),
     data.frame(algo="A2", value=0.6, case="C1"),
     data.frame(algo="A2", value=NA, case="C2"))
 
   replacementFunction <- function(x) { 2 }
 
   actualChallenge <- as.challenge(data, taskName="T1", algorithm="algo", case="case", value="value", smallBetter=FALSE, na.treat=replacementFunction)
 
   expect_equal(as.vector(actualChallenge$T1$algo), c("A1", "A1", "A2", "A2"))
   expect_equal(as.vector(actualChallenge$T1$value), c(0.8, 2.0, 0.6, 2.0))
   expect_equal(as.vector(actualChallenge$T1$case), c("C1", "C2", "C1", "C2"))
 })
 
 test_that("NAs are replaced by function value for multi-task challenge", {
   dataTask1 <- cbind(task="T1",
                      rbind(
                        data.frame(algo="A1", value=0.8, case="C1"),
                        data.frame(algo="A1", value=NA, case="C2")
                      ))
   dataTask2 <- cbind(task="T2",
                      rbind(
                        data.frame(algo="A2", value=NA, case="C1"),
                        data.frame(algo="A2", value=0.5, case="C2")
                      ))
 
   data <- rbind(dataTask1, dataTask2)
 
   replacementFunction <- function(x) { 2 }
 
   actualChallenge <- as.challenge(data, by="task", algorithm="algo", case="case", value="value", smallBetter=FALSE, na.treat=replacementFunction)
 
   expect_equal(as.vector(actualChallenge$T1$algo), c("A1", "A1"))
   expect_equal(as.vector(actualChallenge$T1$value), c(0.8, 2.0))
   expect_equal(as.vector(actualChallenge$T1$case), c("C1", "C2"))
 
   expect_equal(as.vector(actualChallenge$T2$algo), c("A2", "A2"))
   expect_equal(as.vector(actualChallenge$T2$value), c(2.0, 0.5))
   expect_equal(as.vector(actualChallenge$T2$case), c("C1", "C2"))
 })
 
 test_that("NAs are removed for single-task challenge", {
   data <- rbind(
     data.frame(algo="A1", value=0.8, case="C1"),
     data.frame(algo="A1", value=NA, case="C2"),
     data.frame(algo="A2", value=0.6, case="C1"),
     data.frame(algo="A2", value=NA, case="C2"))
 
   actualChallenge <- as.challenge(data, taskName="T1", algorithm="algo", case="case", value="value", smallBetter=FALSE, na.treat="na.rm")
 
   expect_equal(as.vector(actualChallenge$T1$algo), c("A1", "A2"))
   expect_equal(as.vector(actualChallenge$T1$value), c(0.8, 0.6))
   expect_equal(as.vector(actualChallenge$T1$case), c("C1", "C1"))
 })
 
 test_that("NAs are removed for multi-task challenge", {
   dataTask1 <- cbind(task="T1",
                      rbind(
                        data.frame(algo="A1", value=0.8, case="C1"),
                        data.frame(algo="A1", value=NA, case="C2")
                      ))
   dataTask2 <- cbind(task="T2",
                      rbind(
                        data.frame(algo="A2", value=NA, case="C1"),
                        data.frame(algo="A2", value=0.5, case="C2")
                      ))
 
   data <- rbind(dataTask1, dataTask2)
 
   actualChallenge <- as.challenge(data, by="task", algorithm="algo", case="case", value="value", smallBetter=FALSE, na.treat="na.rm")
 
   expect_equal(as.vector(actualChallenge$T1$algo), c("A1"))
   expect_equal(as.vector(actualChallenge$T1$value), c(0.8))
   expect_equal(as.vector(actualChallenge$T1$case), c("C1"))
 
   expect_equal(as.vector(actualChallenge$T2$algo), c("A2"))
   expect_equal(as.vector(actualChallenge$T2$value), c(0.5))
   expect_equal(as.vector(actualChallenge$T2$case), c("C2"))
 })
 
 test_that("automatically added NAs are replaced by numeric value for single-task challenge", {
   data <- rbind(
     data.frame(algo="A1", value=0.8, case="C1"),
     data.frame(algo="A2", value=0.6, case="C2"))
 
   expect_message(actualChallenge <- as.challenge(data, taskName="T1", algorithm="algo", case="case", value="value", smallBetter=FALSE, na.treat=0),
                  "Performance of not all algorithms has been observed for all cases. Therefore, missings have been inserted in the following cases:", fixed=TRUE)
 
   expect_equal(as.vector(actualChallenge$T1$algo), c("A1", "A1", "A2", "A2"))
   expect_equal(as.vector(actualChallenge$T1$value), c(0.8, 0.0, 0.0, 0.6))
   expect_equal(as.vector(actualChallenge$T1$case), c("C1", "C2", "C1", "C2"))
 })
 
 test_that("automatically added NAs are replaced by numeric value for multi-task challenge", {
   dataTask1 <- cbind(task="T1",
                      rbind(
                        data.frame(algo="A1", value=0.8, case="C1"),
                        data.frame(algo="A2", value=0.6, case="C2")
                      ))
   dataTask2 <- cbind(task="T2",
                      rbind(
                        data.frame(algo="A1", value=0.2, case="C1"),
                        data.frame(algo="A1", value=0.3, case="C2"),
                        data.frame(algo="A2", value=0.4, case="C1")
                      ))
 
   data <- rbind(dataTask1, dataTask2)
 
   expect_message(actualChallenge <- as.challenge(data, by="task", algorithm="algo", case="case", value="value", smallBetter=FALSE, na.treat=0),
                  "Performance of not all algorithms has been observed for all cases in task 'T1'. Therefore, missings have been inserted in the following cases:", fixed=TRUE)
 
   expect_equal(as.vector(actualChallenge$T1$algo), c("A1", "A1", "A2", "A2"))
   expect_equal(as.vector(actualChallenge$T1$value), c(0.8, 0.0, 0.0, 0.6))
   expect_equal(as.vector(actualChallenge$T1$case), c("C1", "C2", "C1", "C2"))
 
   expect_equal(as.vector(actualChallenge$T2$algo), c("A1", "A1", "A2", "A2"))
   expect_equal(as.vector(actualChallenge$T2$value), c(0.2, 0.3, 0.4, 0.0))
   expect_equal(as.vector(actualChallenge$T2$case), c("C1", "C2", "C1", "C2"))
 })
 
 test_that("automatically added NAs are removed for single-task challenge", {
   data <- rbind(
     data.frame(algo="A1", value=0.8, case="C1"),
     data.frame(algo="A2", value=0.6, case="C2"))
 
   expect_message(actualChallenge <- as.challenge(data, taskName="T1", algorithm="algo", case="case", value="value", smallBetter=FALSE, na.treat="na.rm"),
                  "Performance of not all algorithms has been observed for all cases. Therefore, missings have been inserted in the following cases:", fixed=TRUE)
 
   expect_equal(as.vector(actualChallenge$T1$algo), c("A1", "A2"))
   expect_equal(as.vector(actualChallenge$T1$value), c(0.8, 0.6))
   expect_equal(as.vector(actualChallenge$T1$case), c("C1", "C2"))
 })
 
 test_that("automatically added NAs are removed for multi-task challenge", {
   dataTask1 <- cbind(task="T1",
                      rbind(
                        data.frame(algo="A1", value=0.8, case="C1"),
                        data.frame(algo="A2", value=0.6, case="C2")
                      ))
   dataTask2 <- cbind(task="T2",
                      rbind(
                        data.frame(algo="A1", value=0.2, case="C1"),
                        data.frame(algo="A1", value=0.3, case="C2"),
                        data.frame(algo="A2", value=0.4, case="C1")
                      ))
 
   data <- rbind(dataTask1, dataTask2)
 
   expect_message(actualChallenge <- as.challenge(data, by="task", algorithm="algo", case="case", value="value", smallBetter=FALSE, na.treat="na.rm"),
                  "Performance of not all algorithms has been observed for all cases in task 'T1'. Therefore, missings have been inserted in the following cases:", fixed=TRUE)
 
   expect_equal(as.vector(actualChallenge$T1$algo), c("A1", "A2"))
   expect_equal(as.vector(actualChallenge$T1$value), c(0.8, 0.6))
   expect_equal(as.vector(actualChallenge$T1$case), c("C1", "C2"))
 
   expect_equal(as.vector(actualChallenge$T2$algo), c("A1", "A1", "A2"))
   expect_equal(as.vector(actualChallenge$T2$value), c(0.2, 0.3, 0.4))
   expect_equal(as.vector(actualChallenge$T2$case), c("C1", "C2", "C1"))
 })
 
+test_that("class of 'algorithm' column must be 'factor' for single-task challenge", {
+  data <- rbind(
+    data.frame(algo="A1", value=0.8, case="C1"),
+    data.frame(algo="A1", value=0.7, case="C2"),
+    data.frame(algo="A2", value=0.6, case="C1"),
+    data.frame(algo="A2", value=0.5, case="C2"))
+
+  actualChallenge <- as.challenge(data, taskName="T1", algorithm="algo", case="case", value="value", smallBetter=FALSE, na.treat=0)
+
+  expect_equal(class(actualChallenge$T1$algo), "factor")
+})
+
+test_that("class of 'algorithm' column must be 'factor' for multi-task challenge", {
+  dataTask1 <- cbind(task="T1",
+                     rbind(
+                       data.frame(algo="A1", value=0.81, case="C1"),
+                       data.frame(algo="A2", value=0.72, case="C1"),
+                       data.frame(algo="A1", value=0.65, case="C2"),
+                       data.frame(algo="A2", value=0.95, case="C2")
+                     ))
+  dataTask2 <- cbind(task="T2",
+                     rbind(
+                       data.frame(algo="A1", value=0.75, case="C1"),
+                       data.frame(algo="A2", value=0.82, case="C1"),
+                       data.frame(algo="A1", value=0.66, case="C2"),
+                       data.frame(algo="A2", value=0.84, case="C2")
+                     ))
+
+  data <- rbind(dataTask1, dataTask2)
+
+  actualChallenge <- as.challenge(data, by="task", algorithm="algo", case="case", value="value", smallBetter=TRUE)
+  
+  expect_equal(class(actualChallenge$T1$algo), "factor")
+  expect_equal(class(actualChallenge$T2$algo), "factor")
+})
+
+