diff --git a/Modules/Biophotonics/python/iMC/scripts/ipcai_to_tensorflow/input_ipcai_data.py b/Modules/Biophotonics/python/iMC/regression/tensorflow_dataset.py
similarity index 63%
copy from Modules/Biophotonics/python/iMC/scripts/ipcai_to_tensorflow/input_ipcai_data.py
copy to Modules/Biophotonics/python/iMC/regression/tensorflow_dataset.py
index a4b53d64e0..4bc2065aac 100644
--- a/Modules/Biophotonics/python/iMC/scripts/ipcai_to_tensorflow/input_ipcai_data.py
+++ b/Modules/Biophotonics/python/iMC/regression/tensorflow_dataset.py
@@ -1,103 +1,83 @@
 """Functions for downloading and reading ipcai data."""
 from __future__ import print_function
 
 import os
 
 import numpy
 import pandas as pd
 
 from regression.preprocessing import preprocess
 
 
 class DataSet(object):
 
     def __init__(self, images, labels, fake_data=False):
         if fake_data:
             self._num_examples = 10000
         else:
             assert images.shape[0] == labels.shape[0], (
                 "images.shape: %s labels.shape: %s"
                 % (images.shape, labels.shape))
             self._num_examples = images.shape[0]
             images = images.astype(numpy.float32)
         self._images = images
         self._labels = labels
         if self._labels.ndim == 1:
             self._labels = self._labels[:, numpy.newaxis]
         self._epochs_completed = 0
         self._index_in_epoch = 0
 
     @property
     def images(self):
         return self._images
 
     @property
     def labels(self):
         return self._labels
 
     @property
     def num_examples(self):
         return self._num_examples
 
     @property
     def epochs_completed(self):
         return self._epochs_completed
 
     def next_batch(self, batch_size, fake_data=False):
         """Return the next `batch_size` examples from this data set."""
         if fake_data:
             fake_image = [1.0 for _ in xrange(784)]
             fake_label = 0
             return [fake_image for _ in xrange(batch_size)], [
                 fake_label for _ in xrange(batch_size)]
         start = self._index_in_epoch
         self._index_in_epoch += batch_size
         if self._index_in_epoch > self._num_examples:
             # Finished epoch
             self._epochs_completed += 1
             # Shuffle the data
             perm = numpy.arange(self._num_examples)
             numpy.random.shuffle(perm)
             self._images = self._images[perm]
             self._labels = self._labels[perm]
             # Start next epoch
             start = 0
             self._index_in_epoch = batch_size
             assert batch_size <= self._num_examples
         end = self._index_in_epoch
         return self._images[start:end], self._labels[start:end]
 
 
-def read_data_sets(dir, fake_data=False):
+def read_data_set(dataframe_filename, fake_data=False):
 
-    class DataSets(object):
-        pass
-    data_sets = DataSets()
-
     if fake_data:
-        data_sets.train = DataSet([], [], fake_data=True)
-        data_sets.validation = DataSet([], [], fake_data=True)
-        data_sets.test = DataSet([], [], fake_data=True)
-        return data_sets
+        data_set = DataSet([], [], fake_data=True)
+        return data_set
 
-    TRAIN_IMAGES = "ipcai_revision_colon_mean_scattering_train_all_virtual_camera.txt"
-    TEST_IMAGES = "ipcai_revision_colon_mean_scattering_test_all_virtual_camera.txt"
+    # dataframe_filename is the full path to the batch dataframe; joining it
+    # onto the builtin `dir` (the old parameter no longer exists) would fail.
+    df_data_set = pd.read_csv(dataframe_filename, header=[0, 1])
 
-    df_train = pd.read_csv(os.path.join(dir, TRAIN_IMAGES), header=[0, 1])
-    df_test = pd.read_csv(os.path.join(dir, TEST_IMAGES), header=[0, 1])
-
-    train_images, train_labels = preprocess(df_train, snr=10.)
-    test_images, test_labels = preprocess(df_test, snr=10.)
-
-    train_labels = train_labels.values
-    test_labels = test_labels.values
-
-    VALIDATION_SIZE = 1
-
-    validation_images = train_images[:VALIDATION_SIZE]
-    validation_labels = train_labels[:VALIDATION_SIZE]
-    train_images = train_images[VALIDATION_SIZE:]
-    train_labels = train_labels[VALIDATION_SIZE:]
-    data_sets.train = DataSet(train_images, train_labels)
-    data_sets.validation = DataSet(validation_images, validation_labels)
-    data_sets.test = DataSet(test_images, test_labels)
-    return data_sets
\ No newline at end of file
+    data_set_images, data_set_labels = preprocess(df_data_set, snr=10.)
+    data_set_labels = data_set_labels.values
+    data_set = DataSet(data_set_images, data_set_labels)
+    return data_set
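For context, a minimal sketch of how the new read_data_set/DataSet pair is meant to be consumed. The file name below is a placeholder, not a real batch; any simulation dataframe with the expected two-row column header should work:

    from regression.tensorflow_dataset import read_data_set

    # "some_simulation_batch.txt" is a hypothetical path; substitute a real
    # batch dataframe from the intermediates folder.
    data_set = read_data_set("some_simulation_batch.txt")
    print(data_set.num_examples)

    # mini-batches wrap around and reshuffle once an epoch is exhausted
    images, labels = data_set.next_batch(100)
    print(images.shape, labels.shape)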
diff --git a/Modules/Biophotonics/python/iMC/regression/tensorflow_estimator.py b/Modules/Biophotonics/python/iMC/regression/tensorflow_estimator.py
new file mode 100644
index 0000000000..e8c7f66a0d
--- /dev/null
+++ b/Modules/Biophotonics/python/iMC/regression/tensorflow_estimator.py
@@ -0,0 +1,86 @@
+import tensorflow as tf
+
+
+def weight_variable(shape):
+    initial = tf.truncated_normal(shape, stddev=0.1)
+    return tf.Variable(initial)
+
+
+def bias_variable(shape):
+    initial = tf.constant(0.1, shape=shape)
+    return tf.Variable(initial)
+
+
+def conv2d(x, W, padding='SAME'):
+    return tf.nn.conv2d(x, W, strides=[1, 1, 1, 1], padding=padding)
+
+
+def max_pool_2x1(x):
+    return tf.nn.max_pool(x, ksize=[1, 2, 1, 1],
+                          strides=[1, 2, 1, 1], padding='SAME')
+
+
+def add_cnn_layer(input, n_inputs, n_outputs, kernel_size, padding='SAME'):
+    W = weight_variable([kernel_size, 1, n_inputs, n_outputs])
+    b = bias_variable([n_outputs])
+    # Hidden layer with RELU activation
+    h_conv = tf.nn.relu(conv2d(input, W, padding=padding) + b)
+    h_pool = max_pool_2x1(h_conv)
+    return h_pool, W
+
+
+def add_fully_connected_layer(_X, n_inputs, n_outputs, keep_prob):
+    W = weight_variable([n_inputs, n_outputs])
+    b = bias_variable([n_outputs])
+    # Hidden layer with RELU activation
+    new_layer = tf.nn.relu(tf.add(tf.matmul(_X, W), b))
+    # Add dropout regularization
+    new_layer_with_dropout = tf.nn.dropout(new_layer, keep_prob)
+
+    return new_layer_with_dropout, W
+
+
+# this is my exemplary convolutional network
+def cnn(_X, n_classes, keep_prob):
+    # two convolutional layers; add_cnn_layer returns (activation, weights)
+    layer_1, _ = add_cnn_layer(_X, 1, 32, 6, padding='VALID')
+    layer_2, _ = add_cnn_layer(layer_1, 32, 64, 4)
+    # flatten the last one to be able to feed it to a fully connected layer
+    final_number_of_dimensions = 4*64
+    layer_2_flat = tf.reshape(layer_2, [-1, final_number_of_dimensions])
+
+    # fully connected layer to bring information together
+    h_fc1_drop, _ = add_fully_connected_layer(layer_2_flat,
+                                              final_number_of_dimensions,
+                                              100, keep_prob)
+
+    # return linear output layer
+    W_fc2 = weight_variable([100, n_classes])
+    b_fc2 = bias_variable([n_classes])
+    return tf.matmul(h_fc1_drop, W_fc2) + b_fc2
+
+
+# and this is the simpler multilayer perceptron
+def multilayer_perceptron(x, n_bands, n_hidden, n_classes, keep_prob):
+    flattened_input = tf.reshape(x, [-1, n_bands])
+    layer_1, W_1 = add_fully_connected_layer(flattened_input, n_bands,
+                                             n_hidden, keep_prob)
+    layer_2, W_2 = add_fully_connected_layer(layer_1, n_hidden, n_hidden,
+                                             keep_prob)
+    last_hidden_layer, W_3 = add_fully_connected_layer(layer_2, n_hidden,
+                                                       n_hidden, keep_prob)
+
+    W_out = weight_variable([n_hidden, n_classes])
+    b_out = bias_variable([n_classes])
+
+    regularizers = (tf.nn.l2_loss(W_1) + tf.nn.l2_loss(W_2) +
+                    tf.nn.l2_loss(W_3) + tf.nn.l2_loss(W_out))
+
+    return tf.matmul(last_hidden_layer, W_out) + b_out, regularizers
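A sketch of how multilayer_perceptron is wired into a loss. The training task further down only uses the first return value; the second (regularizers) is meant to be scaled and added to the cost, e.g. as below. The band count and the beta weight are assumptions, not part of this change:

    import tensorflow as tf
    from regression.tensorflow_estimator import multilayer_perceptron

    nr_filters = 8  # assumed band count; must match the input data
    x = tf.placeholder("float", [None, nr_filters, 1, 1])
    y = tf.placeholder("float", [None, 1])
    keep_prob = tf.placeholder("float")

    pred, regularizers = multilayer_perceptron(x, nr_filters, 100, 1, keep_prob)
    # mean squared error plus an l2 penalty on all weight matrices
    beta = 1e-4  # regularization strength; tune on validation data
    cost = tf.reduce_mean(tf.square(pred - y)) + beta * regularizers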
diff --git a/Modules/Biophotonics/python/iMC/scripts/ipcai_to_tensorflow/input_ipcai_data.py b/Modules/Biophotonics/python/iMC/scripts/ipcai_to_tensorflow/input_ipcai_data.py
index a4b53d64e0..65429d72ad 100644
--- a/Modules/Biophotonics/python/iMC/scripts/ipcai_to_tensorflow/input_ipcai_data.py
+++ b/Modules/Biophotonics/python/iMC/scripts/ipcai_to_tensorflow/input_ipcai_data.py
@@ -1,103 +1,103 @@
 """Functions for downloading and reading ipcai data."""
 from __future__ import print_function
 
 import os
 
 import numpy
 import pandas as pd
 
 from regression.preprocessing import preprocess
 
 
 class DataSet(object):
 
     def __init__(self, images, labels, fake_data=False):
         if fake_data:
             self._num_examples = 10000
         else:
             assert images.shape[0] == labels.shape[0], (
                 "images.shape: %s labels.shape: %s"
                 % (images.shape, labels.shape))
             self._num_examples = images.shape[0]
             images = images.astype(numpy.float32)
         self._images = images
         self._labels = labels
         if self._labels.ndim == 1:
             self._labels = self._labels[:, numpy.newaxis]
         self._epochs_completed = 0
         self._index_in_epoch = 0
 
     @property
     def images(self):
         return self._images
 
     @property
     def labels(self):
         return self._labels
 
     @property
     def num_examples(self):
         return self._num_examples
 
     @property
     def epochs_completed(self):
         return self._epochs_completed
 
     def next_batch(self, batch_size, fake_data=False):
         """Return the next `batch_size` examples from this data set."""
         if fake_data:
             fake_image = [1.0 for _ in xrange(784)]
             fake_label = 0
             return [fake_image for _ in xrange(batch_size)], [
                 fake_label for _ in xrange(batch_size)]
         start = self._index_in_epoch
         self._index_in_epoch += batch_size
         if self._index_in_epoch > self._num_examples:
             # Finished epoch
             self._epochs_completed += 1
             # Shuffle the data
             perm = numpy.arange(self._num_examples)
             numpy.random.shuffle(perm)
             self._images = self._images[perm]
             self._labels = self._labels[perm]
             # Start next epoch
             start = 0
             self._index_in_epoch = batch_size
             assert batch_size <= self._num_examples
         end = self._index_in_epoch
         return self._images[start:end], self._labels[start:end]
 
 
 def read_data_sets(dir, fake_data=False):
 
     class DataSets(object):
         pass
     data_sets = DataSets()
 
     if fake_data:
         data_sets.train = DataSet([], [], fake_data=True)
         data_sets.validation = DataSet([], [], fake_data=True)
         data_sets.test = DataSet([], [], fake_data=True)
         return data_sets
 
-    TRAIN_IMAGES = "ipcai_revision_colon_mean_scattering_train_all_virtual_camera.txt"
-    TEST_IMAGES = "ipcai_revision_colon_mean_scattering_test_all_virtual_camera.txt"
+    TRAIN_IMAGES = "ipcai_revision_colon_mean_scattering_train_all_spectrocam.txt"
+    TEST_IMAGES = "ipcai_revision_colon_mean_scattering_test_all_spectrocam.txt"
 
     df_train = pd.read_csv(os.path.join(dir, TRAIN_IMAGES), header=[0, 1])
     df_test = pd.read_csv(os.path.join(dir, TEST_IMAGES), header=[0, 1])
 
     train_images, train_labels = preprocess(df_train, snr=10.)
     test_images, test_labels = preprocess(df_test, snr=10.)
 
     train_labels = train_labels.values
     test_labels = test_labels.values
 
     VALIDATION_SIZE = 1
 
     validation_images = train_images[:VALIDATION_SIZE]
     validation_labels = train_labels[:VALIDATION_SIZE]
     train_images = train_images[VALIDATION_SIZE:]
     train_labels = train_labels[VALIDATION_SIZE:]
     data_sets.train = DataSet(train_images, train_labels)
     data_sets.validation = DataSet(validation_images, validation_labels)
     data_sets.test = DataSet(test_images, test_labels)
     return data_sets
\ No newline at end of file
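Both readers rely on pd.read_csv(..., header=[0, 1]), i.e. the batch files carry a two-level column header. A toy frame with that shape round-trips as follows; the column names here are purely illustrative, not the real batch schema:

    import pandas as pd

    # two-level columns: a group name on top, a concrete field below
    columns = pd.MultiIndex.from_tuples([("reflectances", "band_0"),
                                         ("reflectances", "band_1"),
                                         ("layer0", "sao2")])
    df = pd.DataFrame([[0.1, 0.2, 0.7]], columns=columns)
    df.to_csv("toy_batch.txt")
    df_roundtrip = pd.read_csv("toy_batch.txt", header=[0, 1], index_col=0)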
diff --git a/Modules/Biophotonics/python/iMC/scripts/ipcai_to_tensorflow/script_train_tensorflow_model.py b/Modules/Biophotonics/python/iMC/scripts/ipcai_to_tensorflow/script_train_tensorflow_model.py
index bced686c2b..037d76af19 100644
--- a/Modules/Biophotonics/python/iMC/scripts/ipcai_to_tensorflow/script_train_tensorflow_model.py
+++ b/Modules/Biophotonics/python/iMC/scripts/ipcai_to_tensorflow/script_train_tensorflow_model.py
@@ -1,164 +1,259 @@
-import os
+import Image
+import ImageEnhance
+import logging
+import datetime
 
-import numpy as np
+import SimpleITK as sitk
+import tensorflow as tf
+from regression.tensorflow_estimator import multilayer_perceptron
+from regression.tensorflow_dataset import read_data_set
+from ipcai2016.tasks_common import *
 import commons
+from msi.io.nrrdreader import NrrdReader
+import msi.normalize as norm
+from regression.estimation import estimate_image_tensorflow
 
 sc = commons.ScriptCommons()
 sc.set_root("/media/wirkert/data/Data/2016_02_02_IPCAI/")
 sc.create_folders()
 
-'''
-A Multilayer Perceptron implementation example using TensorFlow library.
-This example is using the MNIST database of handwritten digits (http://yann.lecun.com/exdb/mnist/)
-Author: Aymeric Damien
-Project: https://github.com/aymericdamien/TensorFlow-Examples/
-'''
-
-# Import MINST data
-import input_data
-#mnist = input_data.read_data_sets("/tmp/data/", one_hot=True)
-
 ipcai_dir = os.path.join(sc.get_full_dir("INTERMEDIATES_FOLDER"))
-import input_ipcai_data
-ipcai = input_ipcai_data.read_data_sets(ipcai_dir)
-
-import tensorflow as tf
-
-# Parameters
-learning_rate = 0.0001
-training_epochs = 300
-batch_size = 100
-display_step = 1
-
-# Network Parameters
-n_hidden = 100 # hidden layers number of elements
-n_bands = 21 # number of features (wavelengths)
-n_classes = 1 # number of outputs (one for oxygenation)
+sc.add_dir("SMALL_BOWEL_DATA",
+           os.path.join(sc.get_dir("DATA_FOLDER"), "small_bowel_images"))
 
-# tf Graph input
+sc.add_dir("SMALL_BOWEL_RESULT", os.path.join(sc.get_dir("RESULTS_FOLDER"),
+                                              "small_bowel_tensorflow"))
 
-x = tf.placeholder("float", [None, 21, 1, 1])
-y = tf.placeholder("float", [None, n_classes])
+sc.add_dir("FILTER_TRANSMISSIONS",
+           os.path.join(sc.get_dir("DATA_FOLDER"),
+                        "filter_transmissions"))
 
-keep_prob = tf.placeholder("float")
-
-def weight_variable(shape):
-    initial = tf.truncated_normal(shape, stddev=0.1)
-    return tf.Variable(initial)
-
-
-def bias_variable(shape):
-    initial = tf.constant(0.1, shape=shape)
-    return tf.Variable(initial)
-
-
-def conv2d(x, W, padding='SAME'):
-    return tf.nn.conv2d(x, W, strides=[1, 1, 1, 1], padding=padding)
-
-
-def max_pool_2x1(x):
-    return tf.nn.max_pool(x, ksize=[1, 2, 1, 1],
-                          strides=[1, 2, 1, 1], padding='SAME')
-
-
-def add_cnn_layer(input, n_inputs, n_outputs, kernel_size, padding='SAME'):
-    #w = weight_variable([n_inputs, n_outputs])
-    #b = bias_variable([n_outputs])
-    W = weight_variable([kernel_size, 1, n_inputs, n_outputs])
-    b = bias_variable([n_outputs])
-    # Hidden layer with RELU activation
-    #new_layer = tf.nn.relu(tf.add(tf.matmul(input, w), b))
-    h_conv = tf.nn.relu(conv2d(input, W, padding=padding) + b)
-    # Add dropout regularization
-    #new_layer_with_dropout = tf.nn.dropout(new_layer, keep_prob)
-    h_pool = max_pool_2x1(h_conv)
-    return h_pool
-
-
-def add_fully_connected_layer(_X, n_inputs, n_outputs):
-    W = weight_variable([n_inputs, n_outputs])
-    b = bias_variable([n_outputs])
-    # Hidden layer with RELU activation
-    new_layer = tf.nn.relu(tf.add(tf.matmul(_X, W), b))
-    # Add dropout regularization
-    new_layer_with_dropout = tf.nn.dropout(new_layer, keep_prob)
-    return new_layer_with_dropout
-
-
-# this is my exemplary convolutional network
-def cnn(_X):
-    # two convolutional layers
-    layer_1 = add_cnn_layer(_X, 1, 32, 6, padding='VALID')
-    layer_2 = add_cnn_layer(layer_1, 32, 64, 4)
-    # flatten last one to be able to apply it to fully connected layer
-    final_number_of_dimensions = 4*64
-    layer_2_flat = tf.reshape(layer_2, [-1, final_number_of_dimensions])
-
-    # fully connected layer to bring information together
-    h_fc1_drop = add_fully_connected_layer(layer_2_flat,
-                                           final_number_of_dimensions,
-                                           100)
-
-    # return linear output layer
-    W_fc2 = weight_variable([100, n_classes])
-    b_fc2 = bias_variable([n_classes])
-    return tf.matmul(h_fc1_drop, W_fc2) + b_fc2
-
-
-# and this is the simpler multilayer perceptron
-def multilayer_perceptron(_X):
-    flattend_input = tf.reshape(x, [-1, n_bands])
-    layer_1 = add_fully_connected_layer(flattend_input, n_bands, n_hidden)
-    layer_2 = add_fully_connected_layer(layer_1, n_hidden, n_hidden)
-    last_hidden_layer = add_fully_connected_layer(layer_2, n_hidden, n_hidden)
-
-    W_out = weight_variable([n_hidden, n_classes])
-    b_out = bias_variable([n_classes])
-    return tf.matmul(last_hidden_layer, W_out) + b_out
-
-
-# Construct the desired model
-pred = multilayer_perceptron(x)
-
-# Define loss and optimizer
-cost = tf.reduce_mean(tf.square(pred - y))
-optimizer = tf.train.AdamOptimizer(learning_rate=learning_rate).minimize(cost) # Adam Optimizer
-
-# Initializing the variables
-init = tf.initialize_all_variables()
-
-# Launch the graph
-with tf.Session() as sess:
-    sess.run(init)
-
-    # Training cycle
-    for epoch in range(training_epochs):
-        avg_cost = 0.
-        total_batch = int(ipcai.train.num_examples/batch_size)
-        # Loop over all batches
-        for i in range(total_batch):
-            batch_xs, batch_ys = ipcai.train.next_batch(batch_size)
-            # Fit training using batch data
-            x_image = np.reshape(batch_xs, [-1, n_bands, 1, 1])
-            sess.run(optimizer, feed_dict={x: x_image, y: batch_ys,
-                                           keep_prob: 0.75})
-            # Compute average loss
-            avg_cost += sess.run(cost, feed_dict={x: x_image, y: batch_ys,
-                                                  keep_prob: 1.0})/total_batch
-        # Display logs per epoch step
-        if epoch % display_step == 0:
-            print "Epoch:", '%04d' % (epoch+1), "cost=", "{:.9f}".format(avg_cost)
-
-    print "Optimization Finished!"
-
-    # Test model
-    accuracy = tf.reduce_mean(tf.cast(tf.abs(pred-y), "float"))
-    x_test_image = np.reshape(ipcai.test.images, [-1, n_bands, 1, 1])
-    print "Median testing error:", accuracy.eval({x: x_test_image,
-                                                  y: ipcai.test.labels,
-                                                  keep_prob:1.0})
+def plot_image(image, axis):
+    axis.imshow(image, interpolation='nearest', alpha=1.0)
+    axis.xaxis.set_visible(False)
+
+
+class TensorFlowCreateOxyImageTask(luigi.Task):
+    image_name = luigi.Parameter()
+    df_prefix = luigi.Parameter()
+
+    def requires(self):
+        return TensorflowTrainRegressor(df_prefix=self.df_prefix), \
+               Flatfield(flatfield_folder=sc.get_full_dir("FLAT_FOLDER")), \
+               SingleMultispectralImage(image=self.image_name), \
+               Dark(dark_folder=sc.get_full_dir("DARK_FOLDER"))
+
+    def output(self):
+        return luigi.LocalTarget(os.path.join(sc.get_full_dir("SMALL_BOWEL_RESULT"),
+                                              os.path.split(self.image_name)[1] +
+                                              "_" + self.df_prefix +
+                                              "_summary" + ".png"))
+
+    def run(self):
+        nrrd_reader = NrrdReader()
+        tiff_ring_reader = TiffRingReader()
+        # read the flatfield
+        flat = nrrd_reader.read(self.input()[1].path)
+        dark = nrrd_reader.read(self.input()[3].path)
+        # read the msi
+        nr_filters = len(sc.other["RECORDED_WAVELENGTHS"])
+        msi, segmentation = tiff_ring_reader.read(self.input()[2].path,
+                                                  nr_filters, resize_factor=0.5)
+        # only take into account not saturated pixels.
+        segmentation = np.logical_and(segmentation,
+                                      (np.max(msi.get_image(),
+                                              axis=-1) < 1000.))
+
+        # correct image setup
+        filter_nr = int(self.image_name[-6:-5])
+        original_order = np.arange(nr_filters)
+        new_image_order = np.concatenate((
+            original_order[nr_filters - filter_nr:],
+            original_order[:nr_filters - filter_nr]))
+        # resort msi to restore original order
+        msimani.get_bands(msi, new_image_order)
+        # correct by flatfield
+        msimani.image_correction(msi, flat, dark)
+
+        # create artificial rgb
+        rgb_image = msi.get_image()[:, :, [2, 3, 1]]
+        rgb_image /= np.max(rgb_image)
+        rgb_image *= 255.
+
+        # preprocess the image
+        # zero values would lead to infinity logarithm, thus clip.
+        msi.set_image(np.clip(msi.get_image(), 0.00001, 2. ** 64))
+        # normalize to get rid of lighting intensity
+        norm.standard_normalizer.normalize(msi)
+        # transform to absorption
+        msi.set_image(-np.log(msi.get_image()))
+        # normalize by l2 for stability
+        norm.standard_normalizer.normalize(msi, "l2")
+
+        # estimate
+        path = "/media/wirkert/data/Data/2016_02_02_IPCAI/results/intermediate/TensorFlowModels"
+        sitk_image, time = estimate_image_tensorflow(msi, path)
+        image = sitk.GetArrayFromImage(sitk_image)
+
+        plt.figure()
+        rgb_image = rgb_image.astype(np.uint8)
+        im = Image.fromarray(rgb_image, 'RGB')
+        enh_brightness = ImageEnhance.Brightness(im)
+        im = enh_brightness.enhance(10.)
+        plotted_image = np.array(im)
+        top_left_axis = plt.gca()
+        top_left_axis.imshow(plotted_image, interpolation='nearest')
+        top_left_axis.xaxis.set_visible(False)
+        top_left_axis.yaxis.set_visible(False)
+
+        plt.set_cmap("jet")
+        # plot parametric maps
+        segmentation[0, 0] = 1
+        segmentation[0, 1] = 1
+        oxy_image = np.ma.masked_array(image[:, :], ~segmentation)
+        oxy_image[np.isnan(oxy_image)] = 0.
+        oxy_image[np.isinf(oxy_image)] = 0.
+        oxy_mean = np.mean(oxy_image)
+        # pin two pixels to 0 and 1 so the colormap spans the full range
+        oxy_image[0, 0] = 0.0
+        oxy_image[0, 1] = 1.
+
+        plot_image(oxy_image[:, :], plt.gca())
+
+        df_image_results = pd.DataFrame(data=np.expand_dims([self.image_name,
+                                                             oxy_mean * 100.,
+                                                             time], 0),
+                                        columns=["image name",
+                                                 "oxygenation mean [%]",
+                                                 "time to estimate"])
+
+        results_file = os.path.join(sc.get_full_dir("SMALL_BOWEL_RESULT"),
+                                    "results.csv")
+        if os.path.isfile(results_file):
+            df_results = pd.read_csv(results_file, index_col=0)
+            df_results = pd.concat((df_results, df_image_results)).reset_index(
+                drop=True
+            )
+        else:
+            df_results = df_image_results
+
+        df_results.to_csv(results_file)
+
+        plt.savefig(self.output().path,
+                    dpi=250, bbox_inches='tight')
+        plt.close("all")
+
+
+class TensorflowTrainRegressor(luigi.Task):
+    df_prefix = luigi.Parameter()
+
+    def output(self):
+        return luigi.LocalTarget(os.path.join(sc.get_full_dir("INTERMEDIATES_FOLDER"),
+                                              "TensorFlowModels",
+                                              "model.ckpt"))
+
+    def requires(self):
+        return tasks_mc.SpectroCamBatch(self.df_prefix)
+
+    def run(self):
+        # extract data from the batch
+        tensorflow_dataset = read_data_set(self.input().path)
+
+        # Network Parameters
+        nr_filters = len(sc.other["RECORDED_WAVELENGTHS"])
+        x = tf.placeholder("float", [None, nr_filters, 1, 1])
+        # Construct the desired model
+        keep_prob = tf.placeholder("float")
+        pred, regularizers = multilayer_perceptron(x, nr_filters, 100, 1,
+                                                   keep_prob)
+
+        # define parameters
+        learning_rate = 0.0001
+        training_epochs = 300
+        batch_size = 100
+        display_step = 1
+
+        # Define loss and optimizer
+        y = tf.placeholder("float", [None, 1])
+        cost = tf.reduce_mean(tf.square(pred - y))
+        optimizer = tf.train.AdamOptimizer(learning_rate=learning_rate).minimize(cost)
+
+        # Initializing the variables
+        init = tf.initialize_all_variables()
+
+        saver = tf.train.Saver()  # defaults to saving all variables
+
+        # Launch the graph
+        with tf.Session() as sess:
+            sess.run(init)
+
+            # Training cycle
+            for epoch in range(training_epochs):
+                avg_cost = 0.
+                # batch count comes from the data set we actually train on
+                total_batch = int(tensorflow_dataset.num_examples/batch_size)
+                # Loop over all batches
+                for i in range(total_batch):
+                    batch_xs, batch_ys = tensorflow_dataset.next_batch(batch_size)
+                    # Fit training using batch data
+                    x_image = np.reshape(batch_xs, [-1, nr_filters, 1, 1])
+                    sess.run(optimizer, feed_dict={x: x_image, y: batch_ys,
+                                                   keep_prob: 0.75})
+                    # Compute average loss
+                    avg_cost += sess.run(cost, feed_dict={x: x_image, y: batch_ys,
+                                                          keep_prob: 1.0})/total_batch
+                # Display logs per epoch step
+                if epoch % display_step == 0:
+                    print "Epoch:", '%04d' % (epoch+1), "cost=", "{:.9f}".format(avg_cost)
+
+            print "Optimization Finished!"
+
+            saver.save(sess, self.output().path)
+
+
+if __name__ == '__main__':
+
+    # create a folder for the results if necessary
+    sc.set_root("/media/wirkert/data/Data/2016_02_02_IPCAI/")
+    sc.create_folders()
+
+    # root folder where the data lies
+    logging.basicConfig(filename=os.path.join(sc.get_full_dir("LOG_FOLDER"),
+                                              "small_bowel_images" +
+                                              str(datetime.datetime.now()) +
+                                              '.log'),
+                        level=logging.INFO)
+    luigi.interface.setup_interface_logging()
+    ch = logging.StreamHandler()
+    ch.setLevel(logging.INFO)
+    logger = logging.getLogger()
+    logger.addHandler(ch)
+
+    sch = luigi.scheduler.CentralPlannerScheduler()
+    w = luigi.worker.Worker(scheduler=sch)
+
+    # determine files to process
+    files = get_image_files_from_folder(sc.get_full_dir("SMALL_BOWEL_DATA"),
+                                        suffix="F0.tiff", fullpath=True)
+
+    for f in files:
+        main_task = TensorFlowCreateOxyImageTask(image_name=f,
+                                                 df_prefix="ipcai_revision_colon_mean_scattering_train")
+        w.add(main_task)
+    w.run()
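For completeness, a sketch of how the checkpoint written by TensorflowTrainRegressor can be restored for estimation. The graph has to be rebuilt identically before tf.train.Saver can match variables by name; the path and band count below are assumptions:

    import tensorflow as tf
    from regression.tensorflow_estimator import multilayer_perceptron

    # must equal len(sc.other["RECORDED_WAVELENGTHS"]) used during training
    nr_filters = 8
    x = tf.placeholder("float", [None, nr_filters, 1, 1])
    keep_prob = tf.placeholder("float")
    pred, _ = multilayer_perceptron(x, nr_filters, 100, 1, keep_prob)

    saver = tf.train.Saver()
    with tf.Session() as sess:
        # path is a placeholder for the model.ckpt target written above
        saver.restore(sess, "intermediate/TensorFlowModels/model.ckpt")
        # feed a [n, nr_filters, 1, 1] block with dropout disabled:
        # predictions = sess.run(pred, feed_dict={x: batch, keep_prob: 1.0})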