# patches.py
import numpy as np
import pdb
from helper import print_red
import h5py
import itertools
# from tqdm import tqdm
from tqdm.notebook import tqdm


def _patching_autofit(img_shape, patch_shape):
    '''
    Auto-fitting patching strategy:
    symmetrically cover the image with the least number of patches.
    img_shape: numpy.ndarray; shape = (3,)
    patch_shape: numpy.ndarray; shape = (3,)
    '''
    n_dim = len(img_shape)
    n_patches = np.ceil(img_shape / patch_shape)
    start = np.zeros(n_dim)
    step = np.zeros(n_dim)
    for dim in range(n_dim):
        if n_patches[dim] == 1:
            start[dim] = -(patch_shape[dim] - img_shape[dim]) // 2
            step[dim] = patch_shape[dim]
        else:
            overlap = np.floor(n_patches[dim] * patch_shape[dim] - img_shape[dim]) / (n_patches[dim] - 1)
            overflow = n_patches[dim] * patch_shape[dim] - (n_patches[dim] - 1) * overlap - img_shape[dim]
            start[dim] = -overflow // 2
            step[dim] = patch_shape[dim] - overlap
    stop = start + n_patches * step
    patches = get_set_of_patch_indices(start, stop, step)
    # add the central patch:
    patches = np.vstack((patches, (img_shape - patch_shape) // 2))
    return patches
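

# Illustrative sketch (not part of the original module): the shapes below are
# hypothetical, roughly a cropped brain volume and a 128^3 patch. For a
# 144x192x160 volume the auto-fit strategy places 2 patches per axis (8 corners)
# plus the central patch appended at the end.
def _example_autofit():
    img_shape = np.asarray((144, 192, 160))
    patch_shape = np.asarray((128, 128, 128))
    corners = _patching_autofit(img_shape, patch_shape)
    print(corners.shape)   # (9, 3)
    print(corners)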


def patching(img_shape, patch_shape, overlap=None, both_ps=False):
    '''
    Patching strategy with a given overlap.
    img_shape: numpy.ndarray or tuple; shape = (3,)
    patch_shape: numpy.ndarray or tuple; shape = (3,)
    overlap: int or tuple or numpy.ndarray; shape = (3,).
        If None, only return the auto-fitting patches;
        otherwise symmetrically cover the image with the least number of overlapped patches.
        This is intended for augmentation, to increase the diversity of input samples;
        it is not compulsory.
    both_ps: if True, use both patching strategies.
    Return an array of bottom-left corner coordinates of the patches.
    '''
    # pdb.set_trace()
    img_shape = np.asarray(img_shape)
    patch_shape = np.asarray(patch_shape)
    auto_patches = _patching_autofit(img_shape, patch_shape)
    if overlap is None:
        return auto_patches
    if isinstance(overlap, int):
        overlap = np.asarray([overlap] * len(img_shape))
    else:
        overlap = np.asarray(overlap)
    n_patches = np.ceil(img_shape / (patch_shape - overlap))
    overflow = patch_shape * n_patches - (n_patches - 1) * overlap - img_shape
    start = -overflow // 2
    step = patch_shape - overlap
    stop = start + n_patches * step
    ol_patches = np.vstack(((img_shape - patch_shape) // 2,
                            get_set_of_patch_indices(start, stop, step)))
    return np.vstack((auto_patches, ol_patches)) if both_ps else ol_patches
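

# Illustrative sketch (assumed shapes, not from the original code): comparing the
# auto-fitting corners with the overlap-driven corners for the same hypothetical volume.
def _example_patching():
    img_shape = (144, 192, 160)
    patch_shape = (128, 128, 128)
    auto_corners = patching(img_shape, patch_shape)                     # auto-fit only
    overlap_corners = patching(img_shape, patch_shape, overlap=32)      # overlap of 32 voxels
    both = patching(img_shape, patch_shape, overlap=32, both_ps=True)   # union of both strategies
    print(len(auto_corners), len(overlap_corners), len(both))           # 9 9 18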


def get_set_of_patch_indices(start, stop, step):
    return np.asarray(np.mgrid[start[0]:stop[0]:step[0],
                               start[1]:stop[1]:step[1],
                               start[2]:stop[2]:step[2]].reshape(3, -1).T, dtype=int)


def create_id_index_patch_list(id_index_list, data_file, patch_shape,
                               patch_overlap=None, both_ps=False, trivial=True):
    '''
    id_index_list: list of indices into the keys of the .h5 file.
    data_file: .h5 file path.
    patch_shape: numpy.ndarray or tuple; shape = (3,)
    patch_overlap: overlap among patches.
    both_ps: if True, use both patching strategies.
    trivial: if True, show a tqdm progress bar.
    Return: list of (index of .h5 key, bottom-left corner coordinates of one patch).
    '''
    id_index_patch_list = []
    with h5py.File(data_file, 'r') as h5_file:
        id_list = list(h5_file.keys())
        iterator = tqdm(id_index_list, desc='Creating (id_index, patch_corner) list') if trivial else id_index_list
        for index in iterator:
            brain_width = h5_file[id_list[index]]['brain_width']
            brain_wide_img_shape = brain_width[1] - brain_width[0] + 1
            patches = patching(brain_wide_img_shape, patch_shape, overlap=patch_overlap, both_ps=both_ps)
            id_index_patch_list.extend(itertools.product([index], patches))
    return id_index_patch_list


def get_patch_from_3d_data(data, patch_shape, patch_corner):
    """
    Returns a 4D patch from a 4D image.
    data: 4D image, shape = (4, _, _, _)
    patch_shape: numpy.ndarray or tuple; shape = (3,)
    patch_corner: bottom-left corner coordinates of one patch.
    Return: shape = (4, _, _, _)
    """
    patch_corner = np.asarray(patch_corner, dtype=np.int16)
    patch_shape = np.asarray(patch_shape)
    img_shape = data.shape[-3:]
    if np.any(patch_corner < 0) or np.any((patch_corner + patch_shape) > img_shape):
        data, patch_corner = fix_out_of_bound_patch_attempt(data, patch_shape, patch_corner)
    return data[:,
                patch_corner[0]:patch_corner[0] + patch_shape[0],
                patch_corner[1]:patch_corner[1] + patch_shape[1],
                patch_corner[2]:patch_corner[2] + patch_shape[2]]
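

# Illustrative sketch (random data, assumed sizes): extracting a 32^3 patch from a
# 4-channel volume; the second call uses a partially out-of-bound corner, which is
# handled by fix_out_of_bound_patch_attempt defined below.
def _example_get_patch():
    data = np.random.rand(4, 64, 64, 64)
    inside = get_patch_from_3d_data(data, (32, 32, 32), (8, 8, 8))
    padded = get_patch_from_3d_data(data, (32, 32, 32), (-4, 48, 48))
    print(inside.shape, padded.shape)   # both (4, 32, 32, 32)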


def get_data_from_file(data_file, id_index_patch, patch_shape):
    '''
    Load an image patch from the .h5 file and stack the 4 modalities into one 4D ndarray.
    data_file: .h5 file of the datasets.
    id_index_patch: tuple of (id_index, patch), where id_index is the index of the .h5 key
        and patch is the patch corner coordinate.
    patch_shape: numpy.ndarray or tuple; shape = (3,)
    Return: x.shape = (4, _, _, _);
            y.shape = (1, _, _, _) for the training dataset (with seg.nii.gz);
            y = None for the validation and test datasets.
    '''
    # pdb.set_trace()
    id_index, corner = id_index_patch
    with h5py.File(data_file, 'r') as h5_file:
        sub_id = list(h5_file.keys())[id_index]
        brain_width = h5_file[sub_id]['brain_width']
        data = []
        truth = []
        for name, img in h5_file[sub_id].items():
            if name == 'brain_width':
                continue
            brain_wise_img = img[brain_width[0, 0]:brain_width[1, 0] + 1,
                                 brain_width[0, 1]:brain_width[1, 1] + 1,
                                 brain_width[0, 2]:brain_width[1, 2] + 1]
            if name.split('_')[-1].split('.')[0] == 'seg':
                truth.append(brain_wise_img)
            else:
                data.append(brain_wise_img)
    x = get_patch_from_3d_data(np.asarray(data), patch_shape, corner)
    y = get_patch_from_3d_data(np.asarray(truth), patch_shape, corner) if truth else None
    return x, y
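

# Illustrative sketch only: the HDF5 layout below (one group per subject holding a
# 2x3 'brain_width' dataset plus one dataset per modality and an optional 'seg')
# is an assumption inferred from how this module reads the file; the file name,
# subject id and array sizes are hypothetical.
def _example_h5_round_trip():
    import os
    import tempfile
    tmp_path = os.path.join(tempfile.mkdtemp(), 'toy_dataset.h5')
    with h5py.File(tmp_path, 'w') as f:
        grp = f.create_group('sub001')
        grp.create_dataset('brain_width', data=np.array([[4, 4, 4], [43, 43, 43]]))
        for modality in ('t1', 't1ce', 't2', 'flair'):
            grp.create_dataset(modality, data=np.random.rand(48, 48, 48))
        grp.create_dataset('seg', data=np.random.randint(0, 4, (48, 48, 48)))
    patch_shape = (32, 32, 32)
    id_index_patch_list = create_id_index_patch_list([0], tmp_path, patch_shape, trivial=False)
    x, y = get_data_from_file(tmp_path, id_index_patch_list[0], patch_shape)
    print(x.shape, y.shape)   # (4, 32, 32, 32) (1, 32, 32, 32)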


def fix_out_of_bound_patch_attempt(data, patch_shape, patch_corner, ndim=3):
    """
    Pads the data and shifts the patch corner so that the requested patch fits.
    data: 4D image, shape = (4, _, _, _)
    patch_shape: shape = (3,)
    patch_corner: bottom-left corner coordinates of one patch.
    """
    img_shape = data.shape[-ndim:]
    pad_before = np.abs((patch_corner < 0) * patch_corner)
    pad_after = np.abs(((patch_corner + patch_shape) > img_shape) *
                       ((patch_corner + patch_shape) - img_shape))
    pad_args = np.stack([pad_before, pad_after], axis=1)
    if pad_args.shape[0] < len(data.shape):
        pad_args = [[0, 0]] * (len(data.shape) - pad_args.shape[0]) + pad_args.tolist()
    # data = np.pad(data, pad_args, mode="edge")
    data = np.pad(data, pad_args, 'constant', constant_values=0)
    patch_corner += pad_before
    return data, patch_corner
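

# Illustrative sketch (assumed sizes): a corner of -8 along the first axis gets the
# volume zero-padded and the corner shifted back into bounds.
def _example_fix_out_of_bound():
    data = np.random.rand(4, 64, 64, 64)
    corner = np.array([-8, 0, 40], dtype=np.int16)
    padded, new_corner = fix_out_of_bound_patch_attempt(data, np.asarray((32, 32, 32)), corner)
    print(padded.shape, new_corner)   # (4, 72, 64, 72) [ 0  0 40]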


def stitch(patch_list, patch_corners, data_shape):
    '''
    Put patches back together.
    Overlapping regions take the mean value.
    patch_list: list of ndarray patches, patch shape = (3, _, _, _)
    patch_corners: bottom-left corner coordinates of the patches.
    data_shape: brain-wise shape after stitching, shape = (3, _, _, _)
    Return the brain-wise predicted ndarray with shape = data_shape.
    '''
    # pdb.set_trace()
    data = np.zeros(data_shape)
    img_shape = data_shape[-3:]
    count = np.zeros(data_shape)
    patch_shape = patch_list[0].shape[-3:]
    for patch, corner in zip(patch_list, patch_corners):
        if np.any(corner < 0):
            start_edge = np.asarray((corner < 0) * np.abs(corner), dtype=int)
            patch = patch[:, start_edge[0]:, start_edge[1]:, start_edge[2]:]
            corner[corner < 0] = 0
        if np.any((corner + patch_shape) >= img_shape):
            end_edge = np.asarray(patch_shape - (((corner + patch_shape) >= img_shape)
                                                 * ((corner + patch_shape) - img_shape)),
                                  dtype=int)
            patch = patch[:, :end_edge[0], :end_edge[1], :end_edge[2]]
        patch_index = np.zeros(data_shape, dtype=bool)
        patch_index[:,
                    corner[0]:corner[0] + patch.shape[-3],
                    corner[1]:corner[1] + patch.shape[-2],
                    corner[2]:corner[2] + patch.shape[-1]] = True
        data[patch_index] += patch.flatten()
        count[patch_index] += 1
    if np.any(count == 0):
        print_red('Some empty places during stitching!')
        count[count == 0] = 1
    return data / count
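

# Illustrative sketch (assumed shapes): cutting a single-channel volume into auto-fit
# patches and stitching them back should reproduce the original values, since every
# voxel is covered and overlapping regions average identical values.
def _example_stitch_round_trip():
    volume = np.random.rand(1, 40, 40, 40)
    patch_shape = (32, 32, 32)
    corners = patching(volume.shape[-3:], patch_shape)
    patch_list = [get_patch_from_3d_data(volume, patch_shape, c) for c in corners]
    rebuilt = stitch(patch_list, corners, volume.shape)
    print(np.allclose(rebuilt, volume))   # True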