
Commit

changed csv_file -> csv_path_file in various places and merged carole… changes
Thomas authored and csudre committed Jun 5, 2019
1 parent 7e696de commit 89b0aaf
Showing 9 changed files with 227 additions and 228 deletions.
413 changes: 207 additions & 206 deletions demos/module_examples/FullCSVReaderDemo.ipynb

Large diffs are not rendered by default.

8 changes: 4 additions & 4 deletions doc/source/config_spec.md
@@ -119,7 +119,7 @@ within each section.

Name | Type | Example | Default
---- | ---- | ------- | -------
- [csv_file](#csv-file) | `string` | `csv_file=file_list.csv` | `''`
+ [csv_path_file](#csv-path-file) | `string` | `csv_path_file=file_list.csv` | `''`
[path_to_search](#path-to-search) | `string` | `path_to_search=my_data/fold_1` | NiftyNet home folder
[filename_contains](#filename-contains) | `string` or `string array` | `filename_contains=foo, bar` | `''`
[filename_not_contains](#filename-not-contains) | `string` or `string array` | `filename_not_contains=foo` | `''`
@@ -130,7 +130,7 @@ within each section.
[spatial_window_size](#spatial-window-size) | `integer array` | `spatial_window_size=64, 64, 64` | `''`
[loader](#loader) | `string` | `loader=simpleitk` | `None`

- ###### `csv_file`
+ ###### `csv_path_file`
A file path to a list of input images. If the file exists, the input image name
list will be loaded from the file and the filename-based input image search will
be disabled; [path_to_search](#path-to-search),
@@ -221,8 +221,8 @@ with an interpolation order of `3`.

A CSV file with the matched filenames and extracted subject names will be
generated to `T1Image.csv` in [`model_dir`](#model-dir) (by default; the CSV
- file location can be specified by setting [csv_file](#csv-file)). To exclude
- particular images, the [csv_file](#csv-file) can be edited manually.
+ file location can be specified by setting [csv_path_file](#csv-path-file)). To exclude
+ particular images, the [csv_path_file](#csv-path-file) can be edited manually.

This input source can be used alone, as a `T1` MRI input to an application.
It can also be used along with other modalities, a multi-modality example
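
For readers following the rename, a minimal sketch of the keyword in use from Python, mirroring the reader tests touched by this commit; the section name `T1`, the CSV path and the filename keyword are placeholders, and the `ImageReader` import path is assumed from `niftynet/io/image_reader.py`.

```python
# Minimal sketch only: section name, CSV path and filename keyword are
# illustrative; ImageReader usage mirrors tests/reader_modular_test.py below.
import os
from niftynet.io.image_reader import ImageReader  # assumed import path

data_param = {
    'T1': {
        # csv_path_file replaces the old csv_file keyword: if this CSV exists,
        # its filename list is used and the folder search below is skipped.
        'csv_path_file': os.path.join('testing_data', 'T1reader.csv'),
        'path_to_search': 'testing_data',
        'filename_contains': ('_o_T1_time',),
    }
}

reader = ImageReader().initialise(data_param)
idx, data, interp = reader()  # draw one subject, as in the modular tests

```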
1 change: 0 additions & 1 deletion niftynet/contrib/csv_reader/classification_application.py
@@ -269,7 +269,6 @@ def switch_sampler(for_training):
data_dict = switch_sampler(for_training=True)

image = tf.cast(data_dict['image'], tf.float32)
- print(self.sampler[0][0]()['label'])
net_args = {'is_training': self.is_training,
'keep_prob': self.net_param.keep_prob}
net_out = self.net(image, **net_args)
2 changes: 1 addition & 1 deletion niftynet/io/image_reader.py
@@ -410,7 +410,7 @@ def _filename_to_image_list(file_list, mod_dict, data_param):
if not volume_list:
tf.logging.fatal(
"Empty filename lists, please check the csv "
"files. (removing csv_file keyword if it is in the config file "
"files. (removing csv_path_file keyword if it is in the config file "
"to automatically search folders and generate new csv "
"files again)\n\n"
"Please note in the matched file names, each subject id are "
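
As a hedged illustration of the fallback this log message recommends (the `mr` section, folder, and keyword below are made up, and the import path is assumed): omitting `csv_path_file` makes the reader search `path_to_search` by keyword and regenerate the CSV instead of reading an existing one.

```python
# Sketch of the no-CSV fallback described in the message above.
from niftynet.io.image_reader import ImageReader  # assumed import path

data_param = {
    'mr': {
        'path_to_search': 'my_data/fold_1',   # folders are scanned...
        'filename_contains': ('_T1_',),       # ...and filtered by keyword
        # no 'csv_path_file': the matched-filename CSV is generated afresh
    }
}
reader = ImageReader().initialise(data_param)

```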
2 changes: 1 addition & 1 deletion niftynet/layer/bn.py
@@ -135,5 +135,5 @@ def layer_op(self, inputs):
variables_collections=None,
outputs_collections=None,
trainable=True,
- data_format='NHWC',
+ data_format='NWC',
scope=None)
2 changes: 1 addition & 1 deletion niftynet/layer/loss_classification.py
@@ -79,6 +79,6 @@ def cross_entropy(prediction,
:return: the loss
"""
ground_truth = tf.to_int64(ground_truth)
- loss = tf.nn.sparse_softmax_cross_entropy_with_logits(logits=prediction, labels=ground_truth)
+ loss = tf.nn.sparse_softmax_cross_entropy_with_logits(logits=prediction, labels=ground_truth)
return loss
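
A tiny usage sketch of the call above (TF1-style API; the logit and label values are made up): `prediction` holds logits of shape `[batch, num_classes]` and `ground_truth` holds integer class indices of shape `[batch]`.

```python
# Illustrative values only; mirrors the call inside cross_entropy() above.
import tensorflow as tf

prediction = tf.constant([[2.0, 0.5, -1.0],
                          [0.1, 3.0, 0.2]])      # logits, shape [2, 3]
ground_truth = tf.to_int64(tf.constant([0, 1]))  # class indices, shape [2]
loss = tf.nn.sparse_softmax_cross_entropy_with_logits(
    logits=prediction, labels=ground_truth)      # per-example loss, shape [2]
```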

1 change: 0 additions & 1 deletion niftynet/layer/spatial_transformer.py
@@ -78,7 +78,6 @@ def layer_op(self,field):
for d in [0, 1, 2]]
resampled=tf.stack(resampled_list,5)
permuted_shape=[batch_size]+[f-3 for f in self._coeff_shape]+self._knot_spacing+[spatial_rank]
- print(permuted_shape)
permuted=tf.transpose(tf.reshape(resampled,permuted_shape),[0,1,4,2,5,3,6,7])
valid_size=[(f-3)*k for f,k in zip(self._coeff_shape,self._knot_spacing)]
reshaped=tf.reshape(permuted,[batch_size]+valid_size+[spatial_rank])
18 changes: 9 additions & 9 deletions tests/image_reader_test.py
@@ -18,7 +18,7 @@
# test multiple modalities
MULTI_MOD_DATA = {
'T1': ParserNamespace(
- csv_file=os.path.join('testing_data', 'T1reader.csv'),
+ csv_path_file=os.path.join('testing_data', 'T1reader.csv'),
path_to_search='testing_data',
filename_contains=('_o_T1_time',),
filename_not_contains=('Parcellation',),
@@ -28,7 +28,7 @@
loader=None
),
'FLAIR': ParserNamespace(
- csv_file=os.path.join('testing_data', 'FLAIRreader.csv'),
+ csv_path_file=os.path.join('testing_data', 'FLAIRreader.csv'),
path_to_search='testing_data',
filename_contains=('FLAIR_',),
filename_not_contains=('Parcellation',),
@@ -43,7 +43,7 @@
# test single modalities
SINGLE_MOD_DATA = {
'lesion': ParserNamespace(
- csv_file=os.path.join('testing_data', 'lesion.csv'),
+ csv_path_file=os.path.join('testing_data', 'lesion.csv'),
path_to_search='testing_data',
filename_contains=('Lesion',),
filename_not_contains=('Parcellation',),
@@ -57,7 +57,7 @@

EXISTING_DATA = {
'lesion': ParserNamespace(
- csv_file=os.path.join('testing_data', 'lesion.csv'),
+ csv_path_file=os.path.join('testing_data', 'lesion.csv'),
interp_order=3,
pixdim=None,
axcodes=None,
@@ -68,7 +68,7 @@
# test labels
LABEL_DATA = {
'parcellation': ParserNamespace(
- csv_file=os.path.join('testing_data', 'labels.csv'),
+ csv_path_file=os.path.join('testing_data', 'labels.csv'),
path_to_search='testing_data',
filename_contains=('Parcellation',),
filename_not_contains=('Lesion',),
@@ -82,7 +82,7 @@

BAD_DATA = {
'lesion': ParserNamespace(
- csv_file=os.path.join('testing_data', 'lesion.csv'),
+ csv_path_file=os.path.join('testing_data', 'lesion.csv'),
path_to_search='testing_data',
filename_contains=('Lesion',),
filename_not_contains=('Parcellation',),
@@ -96,7 +96,7 @@

IMAGE_2D_DATA = {
'color_images': ParserNamespace(
- csv_file=os.path.join('testing_data', 'images_2d_u.csv'),
+ csv_path_file=os.path.join('testing_data', 'images_2d_u.csv'),
path_to_search=os.path.join('testing_data', 'images2d'),
filename_contains=('_u.png',),
interp_order=1,
@@ -105,7 +105,7 @@
loader=None
),
'gray_images': ParserNamespace(
- csv_file=os.path.join('testing_data', 'images_2d_g.csv'),
+ csv_path_file=os.path.join('testing_data', 'images_2d_g.csv'),
path_to_search=os.path.join('testing_data', 'images2d'),
filename_contains=('_g.png',),
interp_order=1,
@@ -114,7 +114,7 @@
loader=None
),
'seg_masks': ParserNamespace(
- csv_file=os.path.join('testing_data', 'images_2d_m.csv'),
+ csv_path_file=os.path.join('testing_data', 'images_2d_m.csv'),
path_to_search=os.path.join('testing_data', 'images2d'),
filename_contains=('_m.png',),
interp_order=0,
8 changes: 4 additions & 4 deletions tests/reader_modular_test.py
@@ -138,7 +138,7 @@ def test_reader_field(self):

def test_input_properties(self):
data_param = {'mr': {'path_to_search': IMAGE_PATH_2D,
- 'csv_file': '2d_test.csv'}}
+ 'csv_path_file': '2d_test.csv'}}
reader = ImageReader().initialise(data_param)
self.default_property_asserts(reader)
idx, data, interp = reader()
@@ -151,7 +151,7 @@ def test_input_properties(self):

def test_no_2d_resampling_properties(self):
data_param = {'mr': {'path_to_search': IMAGE_PATH_2D,
- 'csv_file': '2d_test.csv',
+ 'csv_path_file': '2d_test.csv',
'pixdim': (2, 2, 2),
'axcodes': 'RAS'}}
reader = ImageReader().initialise(data_param)
@@ -199,7 +199,7 @@ class Read2D_1DTest(tf.test.TestCase):
# loading 2d images of rank 3: [x, y, 1]
def test_no_2d_resampling_properties(self):
data_param = {'mr': {'path_to_search': IMAGE_PATH_2D_1,
- 'csv_file': '2d_test.csv',
+ 'csv_path_file': '2d_test.csv',
'filename_contains': '_img',
'pixdim': (2, 2, 2),
'axcodes': 'RAS'}}
@@ -282,7 +282,7 @@ class Read2D_colorTest(tf.test.TestCase):
# loading 2d images of rank 3: [x, y, 3] or [x, y, 4]
def test_no_2d_resampling_properties(self):
data_param = {'mr': {'path_to_search': IMAGE_PATH_2D,
- 'csv_file': '2d_test.csv',
+ 'csv_path_file': '2d_test.csv',
'filename_contains': '_u',
'pixdim': (2, 2, 2),
'axcodes': 'RAS'}}
