 # See the License for the specific language governing permissions and
 # limitations under the License.

+import glob
+
 # IMPORTS
 import time
-import glob
-from os.path import join, dirname
 from collections import defaultdict
-from typing import Tuple, Dict
+from os.path import dirname, join
+from typing import Dict, Tuple

-import numpy as np
-import nibabel as nib
 import h5py
-from numpy import typing as npt, ndarray
+import nibabel as nib
+import numpy as np
+from numpy import ndarray
+from numpy import typing as npt

 from FastSurferCNN.data_loader.data_utils import (
-    transform_axial,
-    transform_sagittal,
-    map_aparc_aseg2label,
     create_weight_mask,
-    get_thick_slices,
     filter_blank_slices_thick,
-    read_classes_from_lut,
     get_labels_from_lut,
+    get_thick_slices,
+    map_aparc_aseg2label,
+    read_classes_from_lut,
+    transform_axial,
+    transform_sagittal,
     unify_lateralized_labels,
 )
 from FastSurferCNN.utils import logging


 class H5pyDataset:
-    """Class representing H5py Dataset.
+    """
+    Class representing H5py Dataset.

-    Methods
-    -------
-    __init__
-        Consturctor
-    _load_volumes
-        load image and segmentation volume
-    transform
-        Transform image along axis
-    _pad_image
-        Pad image with zeroes
-    create_hdf5_dataset
-        Create a hdf5 file
-
     Attributes
     ----------
     dataset_name : str
@@ -99,35 +89,54 @@ class H5pyDataset:
     Number of subjects
     processing : str
         Use aseg, aparc or no specific mapping processing (Default: "aparc")
+
+    Methods
+    -------
+    __init__
+        Constructor
+    _load_volumes
+        Load image and segmentation volume
+    transform
+        Transform image along axis
+    _pad_image
+        Pad image with zeroes
+    create_hdf5_dataset
+        Create an hdf5 file
     """

     def __init__(self, params: Dict, processing: str = "aparc"):
-        """Construct H5pyDataset object.
+        """
+        Construct H5pyDataset object.

         Parameters
         ----------
         params : Dict
-            dataset_name (str): path and name of hdf5-data_loader
-            data_path (str): Directory with images to load
-            thickness (int): Number of pre- and succeeding slices
-            image_name (str): Default name of original images
-            gt_name (str): Default name for ground truth segmentations.
-            gt_nocc (str): Segmentation without corpus callosum (used to mask this segmentation in ground truth).
-                If the used segmentation was already processed, do not set this argument."
-            sizes (int): Sizes of images in the dataset.
-            max_weight (int): Overall max weight for any voxel in weight mask.
-            edge_weight (int): Weight for edges in weight mask.
-            hires_weight (int): Weight for hires elements (sulci, WM strands, cortex border) in weight mask.
-            gradient (bool): Turn on to only use median weight frequency (no gradient)
-            gm_mask (bool): Turn on to add cortex mask for hires-processing.
-            lut (str): FreeSurfer-style Color Lookup Table with labels to use in final prediction.
+            A dictionary containing the following keys:
+            - dataset_name (str): Path and name of hdf5-data_loader
+            - data_path (str): Directory with images to load
+            - thickness (int): Number of pre- and succeeding slices
+            - image_name (str): Default name of original images
+            - gt_name (str): Default name for ground truth segmentations.
+            - gt_nocc (str): Segmentation without corpus callosum (used to mask this segmentation in ground truth).
+              If the used segmentation was already processed, do not set this argument.
+            - sizes (int): Sizes of images in the dataset.
+            - max_weight (int): Overall max weight for any voxel in the weight mask.
+            - edge_weight (int): Weight for edges in the weight mask.
+            - hires_weight (int): Weight for hires elements (sulci, WM strands, cortex border) in the weight mask.
+            - gradient (bool): Turn on to only use median weight frequency (no gradient)
+            - gm_mask (bool): Turn on to add cortex mask for hires-processing.
+            - lut (str): FreeSurfer-style Color Lookup Table with labels to use in the final prediction.
                 Has to have columns: ID LabelName R G B A
-            sag-mask (tuple[str, str, ... ]): Suffixes of labels names to mask for final sagittal labels.
-            combi (str): Suffixes of labels names to combine.
-            patter (str): Pattern to match files in directory.
-        processing : str
-            Use aseg (Default value = "aparc")
+            - sag_mask (tuple[str, str]): Suffixes of label names to mask for final sagittal labels.
+            - combi (str): Suffixes of label names to combine.
+            - pattern (str): Pattern to match files in the directory.
+        processing : str, optional
+            Use aseg (Default value = "aparc").

+        Returns
+        -------
+        None
+            This is a constructor; it returns nothing.
         """
         self.dataset_name = params["dataset_name"]
         self.data_path = params["data_path"]
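For orientation, here is a minimal usage sketch of this constructor and the params dictionary documented above. All values are illustrative placeholders, not defaults taken from this diff, and the import path of H5pyDataset depends on where the module lives in the repository.

```python
# from <module> import H5pyDataset   # import path depends on repo layout

# Hypothetical usage sketch: keys mirror the docstring above, values are
# placeholders chosen for illustration only.
params = {
    "dataset_name": "training_axial.hdf5",       # output hdf5 file
    "data_path": "/data/subjects",               # directory with images to load
    "thickness": 3,                              # pre- and succeeding slices
    "image_name": "mri/orig.mgz",
    "gt_name": "mri/aparc.DKTatlas+aseg.mgz",
    "gt_nocc": None,
    "sizes": [256],
    "max_weight": 5,
    "edge_weight": 5,
    "hires_weight": None,
    "gradient": True,
    "gm_mask": False,
    "lut": "FastSurferCNN/config/FastSurfer_ColorLUT.tsv",
    "sag_mask": ("Left-", "ctx-rh"),
    "combi": ["Left-", "Right-"],
    "pattern": "*",
}

dataset = H5pyDataset(params=params, processing="aparc")
dataset.create_hdf5_dataset(blt=10)   # blt: blank slice threshold
```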
@@ -159,28 +168,29 @@ def __init__(self, params: Dict, processing: str = "aparc"):

     self.data_set_size = len(self.subject_dirs)

-    def _load_volumes(self, subject_path: str
-                      ) -> Tuple[np.ndarray, np.ndarray, np.ndarray, Tuple]:
-        """Load the given image and segmentation and gets the zoom values.
+    def _load_volumes(
+        self, subject_path: str
+    ) -> Tuple[np.ndarray, np.ndarray, np.ndarray, Tuple]:
+        """
+        Load the given image and segmentation and get the zoom values.

         Checks if an aseg-nocc file is set and loads it instead

         Parameters
         ----------
         subject_path : str
-            path to subjectfile
+            Path to subject file.

         Returns
         -------
         ndarray
-            original image
+            Original image.
         ndarray
-            segmentation ground truth
+            Segmentation ground truth.
         ndarray
-            segmentation ground truth without corpus callosum
+            Segmentation ground truth without corpus callosum.
         tuple
-            zoom values
-
+            Zoom values.
         """
         # Load the orig and extract voxel spacing information (x, y, and z dim)
         LOGGER.info(
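The load described by this docstring amounts to a pair of nibabel calls plus a zoom lookup from the image header. A minimal sketch, with the subject path and file names assumed for illustration:

```python
import numpy as np
import nibabel as nib
from os.path import join

subject_path = "/data/subjects/subject_a"            # placeholder subject
orig_img = nib.load(join(subject_path, "mri/orig.mgz"))
aseg_img = nib.load(join(subject_path, "mri/aparc.DKTatlas+aseg.mgz"))

zoom = orig_img.header.get_zooms()                   # voxel spacing (x, y, z)
orig = np.asarray(orig_img.get_fdata(), dtype=np.uint8)
aseg = np.asarray(aseg_img.get_fdata(), dtype=np.int16)
```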
@@ -205,26 +215,27 @@ def _load_volumes(self, subject_path: str

         return orig, aseg, aseg_nocc, zoom

-    def transform(self, plane: str, imgs: npt.NDArray, zoom: npt.NDArray
-                  ) -> Tuple[npt.NDArray, npt.NDArray]:
-        """Transform the image and zoom along the given axis.
+    def transform(
+        self, plane: str, imgs: npt.NDArray, zoom: npt.NDArray
+    ) -> Tuple[npt.NDArray, npt.NDArray]:
+        """
+        Transform the image and zoom along the given axis.

         Parameters
         ----------
         plane : str
-            plane (sagittal, axial, )
+            Plane (sagittal, axial, ...).
         imgs : npt.NDArray
-            input image
+            Input image.
         zoom : npt.NDArray
-            zoom factors
+            Zoom factors.

         Returns
         -------
         npt.NDArray
-            transformed image,
+            Transformed image.
         npt.NDArray
-            transformed zoom facors
-
+            Transformed zoom factors.
         """
         for i in range(len(imgs)):
             if self.plane == "sagittal":
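transform_axial and transform_sagittal come from FastSurferCNN.data_loader.data_utils (imported above); their bodies are not part of this diff. Conceptually they are axis permutations of the volume, along these lines (an illustration only, not the repository's exact code):

```python
import numpy as np

# Illustration of plane reorientation as an axis permutation; the real helpers
# in data_utils may order or flip axes differently.
def to_axial(vol: np.ndarray) -> np.ndarray:
    return np.moveaxis(vol, [0, 1, 2], [1, 2, 0])

def to_sagittal(vol: np.ndarray) -> np.ndarray:
    return np.moveaxis(vol, [0, 1, 2], [2, 1, 0])
```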
@@ -238,20 +249,20 @@ def transform(self, plane: str, imgs: npt.NDArray, zoom: npt.NDArray
         return imgs, zooms

     def _pad_image(self, img: npt.NDArray, max_out: int) -> np.ndarray:
-        """Pad the margins of the input image with zeros.
+        """
+        Pad the margins of the input image with zeros.

         Parameters
         ----------
         img : npt.NDArray
-            image array
+            Image array.
         max_out : int
-            size of output image
+            Size of output image.

         Returns
         -------
         np.ndarray
-            0-padded image to the given size
-
+            0-padded image to the given size.
         """
         # Get correct size = max along shape
         h, w, d = img.shape
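Given the h, w, d = img.shape context above, the padding plausibly fills a zero volume of edge length max_out and copies the image into its corner. A sketch under that assumption (corner placement rather than centering is an assumption):

```python
import numpy as np

# Sketch of the zero-padding described above.
def pad_image(img: np.ndarray, max_out: int) -> np.ndarray:
    h, w, d = img.shape
    padded_img = np.zeros((max_out, max_out, max_out), dtype=img.dtype)
    padded_img[:h, :w, :d] = img   # original voxels keep their indices
    return padded_img
```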
@@ -261,19 +272,18 @@ def _pad_image(self, img: npt.NDArray, max_out: int) -> np.ndarray:
         return padded_img

     def create_hdf5_dataset(self, blt: int):
-        """Create a hdf5 dataset.
+        """
+        Create an hdf5 dataset.

         Parameters
         ----------
         blt : int
-            Blank sliec threshold
-
+            Blank slice threshold.
         """
         data_per_size = defaultdict(lambda: defaultdict(list))
         start_d = time.time()

         for idx, current_subject in enumerate(self.subject_dirs):
-
             try:
                 start = time.time()
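For context, create_hdf5_dataset ultimately writes the per-size collections gathered in the loop above with h5py. A hedged sketch of that write pattern; the dataset names, shapes, and stand-in arrays here are assumptions for illustration, not taken from this diff:

```python
import h5py
import numpy as np

# Stand-in arrays so the sketch runs; in the real method these come from the
# per-subject loop (thick slices, labels, weight masks, subject ids).
images = np.zeros((2, 256, 256, 7), dtype=np.uint8)
labels = np.zeros((2, 256, 256), dtype=np.int16)
weights = np.ones((2, 256, 256), dtype=np.float32)
subjects = np.asarray(["subject_a", "subject_b"], dtype="S")

with h5py.File("training_sketch.hdf5", "w") as hf:
    hf.create_dataset("orig_dataset", data=images, compression="gzip")
    hf.create_dataset("aseg_dataset", data=labels, compression="gzip")
    hf.create_dataset("weight_dataset", data=weights, compression="gzip")
    hf.create_dataset("subject", data=subjects)
```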