diff --git a/LICENSE b/LICENSE new file mode 100755 index 0000000..8dada3e --- /dev/null +++ b/LICENSE @@ -0,0 +1,201 @@ + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). 
+ + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. 
Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative 
Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. 
Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. 
+ + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "{}" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright {yyyy} {name of copyright owner} + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
diff --git a/__init__.py b/__init__.py new file mode 100644 index 0000000..8b13789 --- /dev/null +++ b/__init__.py @@ -0,0 +1 @@ + diff --git a/docker_example/Dockerfile b/docker_example/Dockerfile new file mode 100644 index 0000000..d9dbe22 --- /dev/null +++ b/docker_example/Dockerfile @@ -0,0 +1,8 @@ +FROM pytorch/pytorch:1.4-cuda10.1-cudnn7-devel + +# copy files +ADD scripts /workspace/ +RUN chmod +x /workspace/*.sh +RUN mkdir /mnt/data +RUN mkdir /mnt/pred +RUN pip install nibabel diff --git a/docker_example/run_example.py b/docker_example/run_example.py new file mode 100644 index 0000000..1804644 --- /dev/null +++ b/docker_example/run_example.py @@ -0,0 +1,102 @@ +import argparse +import os +import pathlib +import subprocess +import sys +import tempfile + +sys.path.append(os.path.join(os.path.dirname(__file__), "..")) +sys.path.append(os.path.join(os.path.dirname(__file__), "../..")) + + +if __name__ == "__main__": + + import scripts.evalresults as evalresults + + print("Starting MOOD example...") + + parser = argparse.ArgumentParser() + parser.add_argument( + "-i", + "--input_dir", + required=True, + type=str, + help="Input dir requires a subfolder 'toy' and 'toy_label' i.e. 
input_dir/toy, input_dir/toy_label", + ) + parser.add_argument( + "--no_gpu", + required=False, + default=False, + type=bool, + help="If you have not installed the nvidia docker toolkit, set this arg to False", + ) + + args = parser.parse_args() + + data_dir = args.input_dir + no_gpu = args.no_gpu + + tmp_dir = tempfile.TemporaryDirectory() + output_dir = tmp_dir.name + + toy_input_dir = os.path.join(data_dir, "toy") + toy_label_sample_dir = os.path.join(data_dir, "toy_label", "sample") + toy_label_pixel_dir = os.path.join(data_dir, "toy_label", "pixel") + + output_sample_dir = os.path.join(data_dir, "pixel") + output_pixel_dir = os.path.join(data_dir, "sample") + + example_dir = pathlib.Path(__file__).parent.absolute() + + print("Building docker...") + + try: + # "docker build ${DOCKER_FILE_PATH} -t mood_name" + ret = subprocess.run(["docker", "build", example_dir, "-t", "mood_example"], check=True) + except Exception: + print("Building Docker failed:") + print(ret) + exit(1) + + print("Docker build.") + print("\nPredicting pixel-level anomalies.") + + gpu_str = "" + if not no_gpu: + gpu_str = "--gpus all " + + try: + docker_str = ( + f"sudo docker run {gpu_str}-v {toy_input_dir}:/mnt/data " + f"-v {output_dir}:/mnt/pred mood_example sh /workspace/run_pixel_brain.sh /mnt/data /mnt/pred" + ) + ret = subprocess.run(docker_str.split(" "), check=True,) + except Exception: + print("Running Docker pixel-script failed:") + print(ret) + exit(1) + + print("\nPredicting sample-level anomalies.") + + try: + docker_str = ( + f"sudo docker run {gpu_str}-v {toy_input_dir}:/mnt/data " + f"-v {output_dir}:/mnt/pred mood_example sh /workspace/run_sample_brain.sh /mnt/data /mnt/pred" + ) + ret = subprocess.run(docker_str.split(" "), check=True,) + except Exception: + print("Running Docker sample-script failed:") + print(ret) + exit(1) + + print("\nEvaluating predictions...") + + res_pixel = evalresults.eval_dir(output_dir, toy_label_pixel_dir, mode="pixel",) + print("Pixel-level 
score:", res_pixel) + + res_sample = evalresults.eval_dir(output_dir, toy_label_sample_dir, mode="sample",) + print("Sample-level scores:", res_sample) + + tmp_dir.cleanup() + + print("Done.") diff --git a/docker_example/scripts/pred_simple.py b/docker_example/scripts/pred_simple.py new file mode 100644 index 0000000..a549f36 --- /dev/null +++ b/docker_example/scripts/pred_simple.py @@ -0,0 +1,54 @@ +import os + +import nibabel as nib +import numpy as np + + +def predict_folder_pixel_abs(input_folder, target_folder): + for f in os.listdir(input_folder): + + source_file = os.path.join(input_folder, f) + target_file = os.path.join(target_folder, f) + + nimg = nib.load(source_file) + nimg_array = nimg.get_fdata() + + nimg_array[nimg_array < 0.01] = 0.5 + + abnomal_score_array = np.abs(nimg_array - 0.5) + + final_nimg = nib.Nifti1Image(abnomal_score_array, affine=nimg.affine) + nib.save(final_nimg, target_file) + + +def predict_folder_sample_abs(input_folder, target_folder): + for f in os.listdir(input_folder): + abnomal_score = np.random.rand() + + with open(os.path.join(target_folder, f + ".txt"), "w") as write_file: + write_file.write(str(abnomal_score)) + + +if __name__ == "__main__": + + import argparse + + parser = argparse.ArgumentParser() + parser.add_argument("-i", "--input", required=True, type=str) + parser.add_argument("-o", "--output", required=True, type=str) + parser.add_argument("-mode", type=str, default="pixel", help="can be either 'pixel' or 'sample'.", required=False) + + args = parser.parse_args() + + input_dir = args.input + output_dir = args.output + mode = args.mode + + if mode == "pixel": + predict_folder_pixel_abs(input_dir, output_dir) + elif mode == "sample": + predict_folder_sample_abs(input_dir, output_dir) + else: + print("Mode not correctly defined. 
Either choose 'pixel' oder 'sample'") + + # predict_folder_sample_abs("/home/david/data/datasets_slow/mood_brain/toy", "/home/david/data/datasets_slow/mood_brain/target_sample") diff --git a/docker_example/scripts/run_pixel_abdom.sh b/docker_example/scripts/run_pixel_abdom.sh new file mode 100644 index 0000000..3ef044d --- /dev/null +++ b/docker_example/scripts/run_pixel_abdom.sh @@ -0,0 +1 @@ +python /workspace/pred_simple.py -i $1 -o $2 -m 'pixel' \ No newline at end of file diff --git a/docker_example/scripts/run_pixel_brain.sh b/docker_example/scripts/run_pixel_brain.sh new file mode 100644 index 0000000..3ef044d --- /dev/null +++ b/docker_example/scripts/run_pixel_brain.sh @@ -0,0 +1 @@ +python /workspace/pred_simple.py -i $1 -o $2 -m 'pixel' \ No newline at end of file diff --git a/docker_example/scripts/run_sample_abdom.sh b/docker_example/scripts/run_sample_abdom.sh new file mode 100644 index 0000000..6e74b0c --- /dev/null +++ b/docker_example/scripts/run_sample_abdom.sh @@ -0,0 +1 @@ +python /workspace/pred_simple.py -i $1 -o $2 -m 'sample' \ No newline at end of file diff --git a/docker_example/scripts/run_sample_brain.sh b/docker_example/scripts/run_sample_brain.sh new file mode 100644 index 0000000..6e74b0c --- /dev/null +++ b/docker_example/scripts/run_sample_brain.sh @@ -0,0 +1 @@ +python /workspace/pred_simple.py -i $1 -o $2 -m 'sample' \ No newline at end of file diff --git a/readme.md b/readme.md new file mode 100644 index 0000000..f6c49d2 --- /dev/null +++ b/readme.md @@ -0,0 +1,64 @@ +_Copyright © German Cancer Research Center (DKFZ), Division of Medical Image Computing (MIC). Please make sure that your usage of this code is in compliance with the code license:_ +[![License](https://img.shields.io/badge/License-Apache%202.0-blue.svg)](https://github.com/MIC-DKFZ/basic_unet_example/blob/master/LICENSE) + +--- + +# MOOD 2020 - Repository + +This repo has the supplementary code for the _Medical Out-of-Distribution Analysis Challenge_ at MICCAI 2020. 
+ +Also check out our [Website](http://medicalood.dkfz.de/web/) and [Submission Platform](https://www.synapse.org/mood). + +### Requirements + +Please install and use docker for submission: + +For GPU support you may need to install the NVIDIA Container Toolkit: + +Install python requirements: + +``` +pip install -r requirements.txt +``` + +We suggest the following folder structure (to work with our examples): + +``` +data/ +--- brain/ +------ brain_train/ +------ toy/ +------ toy_label/ +--- colon/ +------ colon_train/ +------ toy/ +------ toy_label/ +``` + +### Run Simple Example + +Have a look at the simple_example to see how to build a simple docker, load and write files, and run a simple evaluation. +After installing the requirements you can also try the simple_example: + +``` +python docker_example/run_example.py -i /data/brain/ --no_gpu False +``` + +With `-i` you can pass an input folder (which has to contain a _toy_ and _toy_label_ directory) and with `--no_gpu` you can turn on/off GPU support for the docker (you may need to install the NVIDIA Container Toolkit for docker GPU support). + +### Test Your Docker + +After you built your docker you can test your docker locally using the toy cases. After submitting your docker, we will also report the toy-test scores on the toy examples back to you, so you can check if your submission was successful and the scores match: + +``` +python scripts/test_docker.py -d mood_docker -i /data/ -t sample +``` + +With `-d` you can pass the name of your docker image, with `-i` pass the path to your base_data dir (see _Requirements_), with `-t` you can define the Challenge Task (either _sample_ or _pixel_), and with `--no_gpu` you can turn on/off GPU support for the docker (you may need to install the NVIDIA Container Toolkit for docker GPU support). + +### Scripts + +In the scripts folder you can find: + +- `test_docker.py` : The script to test your docker. + - `evalresults.py` : The script with our evaluation code. 
diff --git a/requirements.txt b/requirements.txt new file mode 100644 index 0000000..f65f47a --- /dev/null +++ b/requirements.txt @@ -0,0 +1,9 @@ +joblib==0.14.1 +nibabel==3.1.0 +numpy==1.18.3 +packaging==20.3 +pkg-resources==0.0.0 +pyparsing==2.4.7 +scikit-learn==0.22.2.post1 +scipy==1.4.1 +six==1.14.0 \ No newline at end of file diff --git a/scripts/__init__.py b/scripts/__init__.py new file mode 100644 index 0000000..8b13789 --- /dev/null +++ b/scripts/__init__.py @@ -0,0 +1 @@ + diff --git a/scripts/__pycache__/__init__.cpython-36.pyc b/scripts/__pycache__/__init__.cpython-36.pyc new file mode 100644 index 0000000..056fdbe Binary files /dev/null and b/scripts/__pycache__/__init__.cpython-36.pyc differ diff --git a/scripts/__pycache__/evalresults.cpython-36.pyc b/scripts/__pycache__/evalresults.cpython-36.pyc new file mode 100644 index 0000000..53cd0d2 Binary files /dev/null and b/scripts/__pycache__/evalresults.cpython-36.pyc differ diff --git a/scripts/evalresults.py b/scripts/evalresults.py new file mode 100644 index 0000000..f1580a4 --- /dev/null +++ b/scripts/evalresults.py @@ -0,0 +1,210 @@ +import json +import os +import random +import traceback + +import nibabel as nib +import numpy as np +from sklearn import metrics + + +class InvalidPredictionException(Exception): + pass + + +class CouldNotProcessException(Exception): + pass + + +def process_file_pixelwise(pred_path, label_path): + + pred_list, label_list = [], [] + label_appended, pred_appended = False, False + try: + label_nimg = nib.load(label_path) + label_array = np.rint(label_nimg.get_fdata()).astype(np.int) + + # should already be in that interval but just be sure + label_array = np.clip(label_array, a_max=1, a_min=0) + label_array = label_array == 1 + + label_list = label_array.flatten() + label_appended = True + + if os.path.exists(pred_path): + + pred_nimg = nib.load(pred_path) + pred_array = pred_nimg.get_fdata(dtype=np.float16) + + if pred_array.shape != label_array.shape: + raise 
InvalidPredictionException("Array shapes do not match", pred_path) + + # well predicitions should also be in [0,1] + pred_array = np.clip(pred_array, a_max=1.0, a_min=0.0) + + pred_list = pred_array.flatten() + pred_appended = True + + else: + raise InvalidPredictionException("Prediction file not found", pred_path) + + except InvalidPredictionException: + pred_array = np.zeros_like(label_array) + pred_list = pred_array.flatten() + except Exception: + if label_appended and not pred_appended: + pred_array = np.zeros_like(label_array) + pred_list = pred_array.flatten() + else: + raise CouldNotProcessException("CouldNotProcessException") + + return pred_list, label_list + + +def process_file_samplewise(pred_path, label_path): + + label_appended, pred_appended = False, False + try: + + with open(label_path, "r") as val_fl: + val_str = val_fl.readline() + label = int(val_str) + + label_appended = True + + if os.path.exists(pred_path): + + with open(pred_path, "r") as pred_fl: + pred_str = pred_fl.readline() + pred = float(pred_str) + + # predicitions should also be in [0,1] + pred = np.clip(pred, a_max=1.0, a_min=0.0) + + pred_appended = True + + else: + raise InvalidPredictionException("Prediction file not found", pred_path) + + except InvalidPredictionException: + pred = 0.0 + except Exception: + if label_appended and not pred_appended: + pred = 0.0 + else: + traceback.print_exc() + raise CouldNotProcessException("CouldNotProcessException") + + return [pred], [label] + + +def eval_list(pred_file_list, label_file_list, mode="pixel"): + + label_vals = [] + pred_vals = [] + + for pred_path, label_path in zip(pred_file_list, label_file_list): + try: + if mode == "pixel": + pred_list, label_list = process_file_pixelwise(pred_path, label_path) + elif mode == "sample": + pred_list, label_list = process_file_samplewise(pred_path, label_path) + else: + pred_list, label_list = [] + pred_vals.append(pred_list) + label_vals.append(label_list) + except Exception: + print(f"Smth 
went fundamentally wrong with {pred_path}") + + label_vals = np.concatenate(label_vals, axis=0) + pred_vals = np.concatenate(pred_vals, axis=0) + + return metrics.average_precision_score(label_vals, pred_vals) + + +def eval_dir(pred_dir, label_dir, mode="pixel", save_file=None): + + pred_file_list = [] + label_file_list = [] + + for f_name in sorted(os.listdir(label_dir)): + + pred_file_path = os.path.join(pred_dir, f_name) + label_file_path = os.path.join(label_dir, f_name) + + pred_file_list.append(pred_file_path) + label_file_list.append(label_file_path) + + score = eval_list(pred_file_list, label_file_list, mode=mode) + + if save_file is not None: + with open(save_file, "w") as outfile: + json.dump(score, outfile) + + return score + + +def bootstrap_dir( + pred_dir, label_dir, splits_file=None, n_runs=10, n_files=2, save_dir=None, seed=123, mode="pixel", +): + + random.seed(seed) + + all_preds_file_list = [] + all_labels_file_list = [] + for f_name in sorted(os.listdir(label_dir)): + + pred_file_path = os.path.join(pred_dir, f_name) + label_file_path = os.path.join(label_dir, f_name) + + all_preds_file_list.append(pred_file_path) + all_labels_file_list.append(label_file_path) + + all_preds_file_list = np.array(all_preds_file_list) + all_labels_file_list = np.array(all_labels_file_list) + + scores = [] + if splits_file is not None: + with open(splits_file, "r") as json_file: + split_list = json.load(json_file) + + else: + split_list = [] + idx_list = list(range(len(all_labels_file_list))) + split_list = [random.sample(idx_list, k=n_files) for r in range(n_runs)] + + for idx_sub_list in split_list: + scores.append(eval_list(all_preds_file_list[idx_sub_list], all_labels_file_list[idx_sub_list], mode=mode,)) + + if save_dir is not None: + with open(os.path.join(save_dir, "splits.json"), "w") as outfile: + json.dump(split_list, outfile) + with open(os.path.join(save_dir, "scores.json"), "w") as outfile: + json.dump(scores, outfile) + + return np.mean(scores) + + +# 
if __name__ == "__main__": +# x1 = eval_dir( +# "/home/david/data/datasets_slow/mood_brain/target_pixel", +# "/home/david/data/datasets_slow/mood_brain/toy_label/pixel", +# mode="pixel", +# ) +# print(x1) + +# x2 = eval_dir( +# "/home/david/data/datasets_slow/mood_brain/target_sample", +# "/home/david/data/datasets_slow/mood_brain/toy_label/sample", +# mode="sample", +# ) +# print(x2) + +# x4 = bootstrap_dir( +# "/home/david/data/datasets_slow/mood_brain/target_pixel", +# "/home/david/data/datasets_slow/mood_brain/toy_label/pixel", +# save_dir="/home/david/data/datasets_slow/mood_brain/tmp", +# n_runs=5, +# seed=124, +# ) +# print(x4) diff --git a/scripts/test_docker.py b/scripts/test_docker.py new file mode 100644 index 0000000..c8802e6 --- /dev/null +++ b/scripts/test_docker.py @@ -0,0 +1,133 @@ +import argparse +import os +import subprocess +import sys +import tempfile + +sys.path.append(os.path.join(os.path.dirname(__file__), "..")) +sys.path.append(os.path.join(os.path.dirname(__file__), "../..")) + + +if __name__ == "__main__": + + import evalresults + + print("Testing MOOD docker image...") + + parser = argparse.ArgumentParser() + parser.add_argument( + "-d", "--docker_name", required=True, type=str, help="Name of the docker image you want to test" + ) + parser.add_argument( + "-i", + "--input_dir", + required=True, + type=str, + help=( + "Data dir, it will require to contain a folder 'brain' and 'abdom' which will both " + "each require a subfolder 'toy' and 'toy_label' i.e. 
data_dir/brain/toy," + " data_dir/brain/toy_label, data_dir/abdom/toy, data_dir/abdom/toy_label" + ), + ) + parser.add_argument( + "-o", "--output_dir", required=False, type=str, help="Folder where the output/ predictions will be written too" + ) + parser.add_argument( + "-t", "--task", required=True, choices=["sample", "pixel"], type=str, help="Task, either 'pixel' or 'sample' " + ) + parser.add_argument( + "--no_gpu", + required=False, + default=False, + type=bool, + help="If you have not installed the nvidia docker toolkit, set this arg to False", + ) + + args = parser.parse_args() + + docker_name = args.docker_name + input_dir = args.input_dir + output_dir = args.output_dir + task = args.task + no_gpu = args.no_gpu + + tmp_dir = None + if output_dir is None: + tmp_dir = tempfile.TemporaryDirectory() + output_dir = tmp_dir.name + + brain_data_dir = os.path.join(input_dir, "brain") + abdom_data_dir = os.path.join(input_dir, "abdom") + + if not os.path.exists(brain_data_dir): + print(f"Make sure there is a 'brain' folder in your input_dir, i.e. {brain_data_dir}") + exit(1) + if not os.path.exists(abdom_data_dir): + print(f"Make sure there is a 'abdom' folder in your input_dir, i.e. 
{abdom_data_dir}") + exit(1) + + brain_toy_data_dir = os.path.join(brain_data_dir, "toy") + abdom_toy_data_dir = os.path.join(abdom_data_dir, "toy") + + brain_toy_label_dir = os.path.join(brain_data_dir, "toy_label", task) + abdom_toy_label_dir = os.path.join(abdom_data_dir, "toy_label", task) + + if not os.path.exists(brain_toy_data_dir) or not os.path.exists(brain_toy_label_dir): + print(f"Make sure there is a 'toy' and 'toy_label' folder in your brain_dir ({brain_data_dir})") + exit(1) + if not os.path.exists(abdom_toy_data_dir) or not os.path.exists(abdom_toy_label_dir): + print(f"Make sure there is a 'toy' and 'toy_label' folder in your abdom_dir ({abdom_data_dir})") + exit(1) + + output_brain_dir = os.path.join(output_dir, "brain") + output_abdom_dir = os.path.join(output_dir, "abdom") + + os.makedirs(output_brain_dir, exist_ok=True) + os.makedirs(output_abdom_dir, exist_ok=True) + + gpu_str = "" + if not no_gpu: + gpu_str = "--gpus all " + + print("\nPredicting brain data...") + + try: + docker_str = ( + f"sudo docker run {gpu_str}-v {brain_toy_data_dir}:/mnt/data " + f"-v {output_brain_dir}:/mnt/pred {docker_name} sh /workspace/run_{task}_brain.sh /mnt/data /mnt/pred" + ) + ret = subprocess.run(docker_str.split(" "), check=True,) + except Exception: + print(f"Running Docker brain-{task}-script failed:") + print(ret) + exit(1) + + print("Predicting abdominal data...") + + try: + docker_str = ( + f"sudo docker run {gpu_str}-v {abdom_toy_data_dir}:/mnt/data " + f"-v {output_abdom_dir}:/mnt/pred {docker_name} sh /workspace/run_{task}_abdom.sh /mnt/data /mnt/pred" + ) + ret = subprocess.run(docker_str.split(" "), check=True,) + except Exception: + print(f"Running Docker abdom-{task}-script failed:") + print(ret) + exit(1) + + print("\nEvaluating predictions...") + + brain_score = evalresults.eval_dir( + output_brain_dir, brain_toy_label_dir, mode=task, save_file=os.path.join(output_dir, "brain_score.txt") + ) + print("Brain-dataset score:", brain_score) + + 
abdom_score = evalresults.eval_dir( + output_abdom_dir, abdom_toy_label_dir, mode=task, save_file=os.path.join(output_dir, "abdom_score.txt") + ) + print("Abdominal-dataset score:", abdom_score) + + if tmp_dir is not None: + tmp_dir.cleanup() + + print("Done.")