Skip to content

Commit f279b3d

Browse files
NumPy docstrings formatted
1 parent be6afb2 commit f279b3d

28 files changed

+2757
-1791
lines changed

FastSurferCNN/__init__.py

+1
Original file line numberDiff line numberDiff line change
@@ -21,6 +21,7 @@
2121
"quick_qc",
2222
"reduce_to_aseg",
2323
"run_prediction",
24+
"run_model",
2425
"segstats",
2526
"train",
2627
]

FastSurferCNN/generate_hdf5.py

+82-72
Original file line numberDiff line numberDiff line change
@@ -12,27 +12,29 @@
1212
# See the License for the specific language governing permissions and
1313
# limitations under the License.
1414

15+
import glob
16+
1517
# IMPORTS
1618
import time
17-
import glob
18-
from os.path import join, dirname
1919
from collections import defaultdict
20-
from typing import Tuple, Dict
20+
from os.path import dirname, join
21+
from typing import Dict, Tuple
2122

22-
import numpy as np
23-
import nibabel as nib
2423
import h5py
25-
from numpy import typing as npt, ndarray
24+
import nibabel as nib
25+
import numpy as np
26+
from numpy import ndarray
27+
from numpy import typing as npt
2628

2729
from FastSurferCNN.data_loader.data_utils import (
28-
transform_axial,
29-
transform_sagittal,
30-
map_aparc_aseg2label,
3130
create_weight_mask,
32-
get_thick_slices,
3331
filter_blank_slices_thick,
34-
read_classes_from_lut,
3532
get_labels_from_lut,
33+
get_thick_slices,
34+
map_aparc_aseg2label,
35+
read_classes_from_lut,
36+
transform_axial,
37+
transform_sagittal,
3638
unify_lateralized_labels,
3739
)
3840
from FastSurferCNN.utils import logging
@@ -41,21 +43,9 @@
4143

4244

4345
class H5pyDataset:
44-
"""Class representing H5py Dataset.
46+
"""
47+
Class representing H5py Dataset.
4548
46-
Methods
47-
-------
48-
__init__
49-
Consturctor
50-
_load_volumes
51-
load image and segmentation volume
52-
transform
53-
Transform image along axis
54-
_pad_image
55-
Pad image with zeroes
56-
create_hdf5_dataset
57-
Create a hdf5 file
58-
5949
Attributes
6050
----------
6151
dataset_name : str
@@ -99,35 +89,54 @@ class H5pyDataset:
9989
Number of subjects
10090
processing : str
10191
Use aseg, aparc or no specific mapping processing (Default: "aparc")
92+
93+
Methods
94+
-------
95+
__init__
96+
Constructor
97+
_load_volumes
98+
load image and segmentation volume
99+
transform
100+
Transform image along axis
101+
_pad_image
102+
Pad image with zeroes
103+
create_hdf5_dataset
104+
Create a hdf5 file
102105
"""
103106

104107
def __init__(self, params: Dict, processing: str = "aparc"):
105-
"""Construct H5pyDataset object.
108+
"""
109+
Construct H5pyDataset object.
106110
107111
Parameters
108112
----------
109113
params : Dict
110-
dataset_name (str): path and name of hdf5-data_loader
111-
data_path (str): Directory with images to load
112-
thickness (int): Number of pre- and succeeding slices
113-
image_name (str): Default name of original images
114-
gt_name (str): Default name for ground truth segmentations.
115-
gt_nocc (str): Segmentation without corpus callosum (used to mask this segmentation in ground truth).
116-
If the used segmentation was already processed, do not set this argument."
117-
sizes (int): Sizes of images in the dataset.
118-
max_weight (int): Overall max weight for any voxel in weight mask.
119-
edge_weight (int): Weight for edges in weight mask.
120-
hires_weight (int): Weight for hires elements (sulci, WM strands, cortex border) in weight mask.
121-
gradient (bool): Turn on to only use median weight frequency (no gradient)
122-
gm_mask (bool): Turn on to add cortex mask for hires-processing.
123-
lut (str): FreeSurfer-style Color Lookup Table with labels to use in final prediction.
114+
A dictionary containing the following keys:
115+
- dataset_name (str): Path and name of hdf5-data_loader
116+
- data_path (str): Directory with images to load
117+
- thickness (int): Number of pre- and succeeding slices
118+
- image_name (str): Default name of original images
119+
- gt_name (str): Default name for ground truth segmentations.
120+
- gt_nocc (str): Segmentation without corpus callosum (used to mask this segmentation in ground truth).
121+
If the used segmentation was already processed, do not set this argument.
122+
- sizes (int): Sizes of images in the dataset.
123+
- max_weight (int): Overall max weight for any voxel in the weight mask.
124+
- edge_weight (int): Weight for edges in the weight mask.
125+
- hires_weight (int): Weight for hires elements (sulci, WM strands, cortex border) in the weight mask.
126+
- gradient (bool): Turn on to only use median weight frequency (no gradient)
127+
- gm_mask (bool): Turn on to add cortex mask for hires-processing.
128+
- lut (str): FreeSurfer-style Color Lookup Table with labels to use in the final prediction.
124129
Has to have columns: ID LabelName R G B A
125-
sag-mask (tuple[str, str, ...]): Suffixes of labels names to mask for final sagittal labels.
126-
combi (str): Suffixes of labels names to combine.
127-
patter (str): Pattern to match files in directory.
128-
processing : str
129-
Use aseg (Default value = "aparc")
130+
- sag_mask (tuple[str, str]): Suffixes of labels names to mask for final sagittal labels.
131+
- combi (str): Suffixes of labels names to combine.
132+
- pattern (str): Pattern to match files in the directory.
133+
processing : str, optional
134+
Use aseg (Default value = "aparc").
130135
136+
Returns
137+
-------
138+
None
139+
This is a constructor function, it returns nothing.
131140
"""
132141
self.dataset_name = params["dataset_name"]
133142
self.data_path = params["data_path"]
@@ -159,28 +168,29 @@ def __init__(self, params: Dict, processing: str = "aparc"):
159168

160169
self.data_set_size = len(self.subject_dirs)
161170

162-
def _load_volumes(self, subject_path: str
163-
) -> Tuple[np.ndarray, np.ndarray, np.ndarray, Tuple]:
164-
"""Load the given image and segmentation and gets the zoom values.
171+
def _load_volumes(
172+
self, subject_path: str
173+
) -> Tuple[np.ndarray, np.ndarray, np.ndarray, Tuple]:
174+
"""
175+
Load the given image and segmentation and gets the zoom values.
165176
166177
Checks if an aseg-nocc file is set and loads it instead
167178
168179
Parameters
169180
----------
170181
subject_path : str
171-
path to subjectfile
182+
Path to subject file.
172183
173184
Returns
174185
-------
175186
ndarray
176-
original image
187+
Original image.
177188
ndarray
178-
segmentation ground truth
189+
Segmentation ground truth.
179190
ndarray
180-
segmentation ground truth without corpus callosum
191+
Segmentation ground truth without corpus callosum.
181192
tuple
182-
zoom values
183-
193+
Zoom values.
184194
"""
185195
# Load the orig and extract voxel spacing information (x, y, and z dim)
186196
LOGGER.info(
@@ -205,26 +215,27 @@ def _load_volumes(self, subject_path: str
205215

206216
return orig, aseg, aseg_nocc, zoom
207217

208-
def transform(self, plane: str, imgs: npt.NDArray, zoom: npt.NDArray
209-
) -> Tuple[npt.NDArray, npt.NDArray]:
210-
"""Transform the image and zoom along the given axis.
218+
def transform(
219+
self, plane: str, imgs: npt.NDArray, zoom: npt.NDArray
220+
) -> Tuple[npt.NDArray, npt.NDArray]:
221+
"""
222+
Transform the image and zoom along the given axis.
211223
212224
Parameters
213225
----------
214226
plane : str
215-
plane (sagittal, axial, )
227+
Plane (sagittal, axial, ).
216228
imgs : npt.NDArray
217-
input image
229+
Input image.
218230
zoom : npt.NDArray
219-
zoom factors
231+
Zoom factors.
220232
221233
Returns
222234
-------
223235
npt.NDArray
224-
transformed image,
236+
Transformed image.
225237
npt.NDArray
226-
transformed zoom facors
227-
238+
Transformed zoom factors.
228239
"""
229240
for i in range(len(imgs)):
230241
if self.plane == "sagittal":
@@ -238,20 +249,20 @@ def transform(self, plane: str, imgs: npt.NDArray, zoom: npt.NDArray
238249
return imgs, zooms
239250

240251
def _pad_image(self, img: npt.NDArray, max_out: int) -> np.ndarray:
241-
"""Pad the margins of the input image with zeros.
252+
"""
253+
Pad the margins of the input image with zeros.
242254
243255
Parameters
244256
----------
245257
img : npt.NDArray
246-
image array
258+
Image array.
247259
max_out : int
248-
size of output image
260+
Size of output image.
249261
250262
Returns
251263
-------
252264
np.ndarray
253-
0-padded image to the given size
254-
265+
0-padded image to the given size.
255266
"""
256267
# Get correct size = max along shape
257268
h, w, d = img.shape
@@ -261,19 +272,18 @@ def _pad_image(self, img: npt.NDArray, max_out: int) -> np.ndarray:
261272
return padded_img
262273

263274
def create_hdf5_dataset(self, blt: int):
264-
"""Create a hdf5 dataset.
275+
"""
276+
Create a hdf5 dataset.
265277
266278
Parameters
267279
----------
268280
blt : int
269-
Blank sliec threshold
270-
281+
Blank slice threshold.
271282
"""
272283
data_per_size = defaultdict(lambda: defaultdict(list))
273284
start_d = time.time()
274285

275286
for idx, current_subject in enumerate(self.subject_dirs):
276-
277287
try:
278288
start = time.time()
279289

0 commit comments

Comments
 (0)