-
Notifications
You must be signed in to change notification settings - Fork 3
/
utilss.py
345 lines (288 loc) · 13.3 KB
/
utilss.py
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
232
233
234
235
236
237
238
239
240
241
242
243
244
245
246
247
248
249
250
251
252
253
254
255
256
257
258
259
260
261
262
263
264
265
266
267
268
269
270
271
272
273
274
275
276
277
278
279
280
281
282
283
284
285
286
287
288
289
290
291
292
293
294
295
296
297
298
299
300
301
302
303
304
305
306
307
308
309
310
311
312
313
314
315
316
317
318
319
320
321
322
323
324
325
326
327
328
329
330
331
332
333
334
335
336
337
338
339
340
341
342
343
344
345
"""
@Author: Sulaiman Vesal
Date: Tuesday, 04, 2021
"""
import numpy as np
from skimage import measure
import cv2
import os
from medpy import metric
def overlay_image(img, seg, pred=None):
    """
    Overlay segmentation (and optionally prediction) contours on an image.

    :param img: single-channel (grayscale) image, e.g. a prostate MRI slice
    :param seg: ground-truth segmentation mask, binarizable to uint8
    :param pred: optional predicted segmentation mask; drawn when given
    :return: BGR image with seg contours in yellow and pred contours in red

    Note: the original version built a throwaway local mask and a masked
    mean per contour (leftovers of abandoned per-label colouring); that
    dead work is removed here -- the drawn output is unchanged.
    """
    # Convert the grayscale input to BGR so contours can be drawn in colour.
    img = cv2.cvtColor(img, cv2.COLOR_GRAY2BGR)
    seg = np.array(seg).astype(np.uint8)
    if pred is not None:
        pred = np.array(pred).astype(np.uint8)

    # Ground-truth contours in yellow (BGR), line thickness 3.
    gt_colour = (0, 255, 255)
    contours, _ = cv2.findContours(seg, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_NONE)
    for c in contours:
        cv2.drawContours(img, [c], -1, gt_colour, thickness=3)

    if pred is not None:
        # Prediction contours in red (BGR), line thickness 3.
        pred_colour = (0, 0, 255)
        contours, _ = cv2.findContours(pred, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_NONE)
        for c in contours:
            cv2.drawContours(img, [c], -1, pred_colour, thickness=3)
    return img
def to_categorical(mask, num_classes, channel='channel_first'):
    """
    One-hot encode an integer segmentation mask.

    :param mask: integer label mask with 4 dims (batch plus a singleton channel axis)
    :param num_classes: number of classes encoded in the mask
    :param channel: 'channel_first' or 'channel_last' layout of the output
    :return: one-hot encoded uint8 array
    """
    assert mask.ndim == 4, "mask should have 4 dims"
    if channel not in ('channel_first', 'channel_last'):
        assert False, r"channel should be either 'channel_first' or 'channel_last'"
    assert num_classes > 1, "num_classes should be greater than 1"
    labels = np.unique(mask)
    assert len(labels) <= num_classes, "number of unique values should be smaller or equal to the num_classes"
    assert np.max(labels) < num_classes, "maximum value in the mask should be smaller than the num_classes"
    # Drop a singleton channel axis, whether it sits first or last.
    if mask.shape[1] == 1:
        mask = np.squeeze(mask, axis=1)
    if mask.shape[-1] == 1:
        mask = np.squeeze(mask, axis=-1)
    # Index rows of the identity matrix to build the one-hot encoding
    # (class channel lands on the last axis).
    output = np.eye(num_classes, dtype='uint8')[mask]
    if channel == 'channel_first':
        output = np.moveaxis(output, -1, 1)
    return output
def soft_to_hard_pred(pred, channel_axis=1):
    """
    Binarize a soft probability prediction along the class axis.

    :param pred: model prediction with size BxCxWxH
    :param channel_axis: axis holding the class probabilities
    :return: int array with 1 wherever a position attains its axis maximum
        (ties mark every tied class), 0 elsewhere
    """
    channel_max = pred.max(axis=channel_axis, keepdims=True)
    return (pred == channel_max).astype(int)
def keep_largest_connected_components(mask):
    """
    Keep only the largest connected component of each foreground label.

    :param mask: model prediction after binarization with size BxCxWxH
    :return: mask with every blob except the largest of each class removed
    """
    cleaned = np.zeros(mask.shape, dtype=np.uint8)
    for label_id in [1]:
        # Label connected regions of this class (4-connectivity).
        components = measure.label(mask == label_id, connectivity=1)
        regions = measure.regionprops(components)
        if not regions:
            continue
        # On area ties, max() keeps the first region, matching argmax.
        biggest = max(regions, key=lambda r: r.area)
        cleaned[components == biggest.label] = label_id
    return cleaned
def resize_volume(vol, w=128, h=160, view='axial', is_img=True, cohort='UCL'):
    """
    Resize an ultrasound volume slice by slice.

    :param vol: the ultrasound volume
    :param w: the image width after resizing
    :param h: the image height after resizing
    :param view: from which view the image should be resized; only 'axial'
        is handled for the UCL layout
    :param is_img: whether the volume holds images (3-slice windows are
        built per position) or a segmentation (single slices)
    :param cohort: 'UCL' volumes are stored (z, x, y); other cohorts
        (UCLA/Stanford) are stored (x, y, z)
    :return: resized volume as a numpy array of slices
    """
    resized_slices = []
    if cohort == 'UCL':
        # UCL data stores the axial axis first: (z, x, y).
        depth = vol.shape[0]
        for idx in range(depth):
            if view == 'axial':
                if is_img:
                    # 3-slice window around idx, clamped at both volume ends.
                    if idx == 0:
                        window = vol[0:3, :, :]
                    elif idx == depth - 1:
                        window = vol[idx - 2:idx + 1, :, :]
                    else:
                        window = vol[idx - 1:idx + 2, :, :]
                    # Move the slice axis to the back: (3, x, y) -> (x, y, 3).
                    slice_img = window.transpose(1, 2, 0)
                else:
                    slice_img = vol[idx, :, :]
                # Rotation compensates for wrongly processed needle sequences.
                # TODO: Anirudh will update the processed data so we don't need rotation.
                resized_slices.append(cv2.resize(np.rot90(slice_img, 3), dsize=(h, w),
                                                 interpolation=cv2.INTER_NEAREST))
        return np.array(resized_slices)
    else:
        # UCLA and Stanford store the axial axis last: (x, y, z).
        depth = vol.shape[2]
        for idx in range(depth):
            if is_img:
                # 3-slice window around idx, clamped at both volume ends.
                if idx == 0:
                    slice_img = vol[:, :, 0:3]
                elif idx == depth - 1:
                    slice_img = vol[:, :, idx - 2:idx + 1]
                else:
                    slice_img = vol[:, :, idx - 1:idx + 2]
            else:
                slice_img = vol[:, :, idx]
            resized_slices.append(cv2.resize(slice_img, dsize=(h, w),
                                             interpolation=cv2.INTER_NEAREST))
        return np.array(resized_slices)
def resize_volume_back(vol, h=224, w=288, rotate_slice=True, flip_slice=False):
    """
    Resize the model prediction back to its original slice size.

    :param vol: predicted volume, iterated over its first (slice) axis
    :param h: target slice height
    :param w: target slice width
    :param rotate_slice: undo the 90-degree rotation applied during preprocessing
    :param flip_slice: flip slices horizontally; used when predictions appear
        mirrored (segmenting out of the field of view) due to the rotation step
    :return: resized volume as a numpy array

    Bug fix: the original passed ``cv2.INTER_AREA`` as the third positional
    argument of ``cv2.resize``, which is ``dst`` -- the flag was silently
    ignored and the default INTER_LINEAR was used. It is now passed via the
    ``interpolation`` keyword (as the sibling ``resize_volume`` already does).
    """
    resized_vol = []
    for i in range(vol.shape[0]):
        view_slice = vol[i, :, :]
        if flip_slice:
            view_slice = np.fliplr(view_slice)
        elif rotate_slice:
            # Needle sequences were stored rotated; undo it here.
            # TODO: Anirudh will update the processed data so we don't need rotation.
            view_slice = np.rot90(view_slice, 1)
        resized_vol.append(cv2.resize(view_slice, (w, h), interpolation=cv2.INTER_AREA))
    return np.array(resized_vol)
def lr_poly(base_lr, iter, max_iter, power):
    """Polynomial ("poly") learning-rate decay.

    :param base_lr: initial learning rate
    :param iter: current iteration
    :param max_iter: total number of iterations
    :param power: exponent of the decay curve
    :return: decayed learning rate
    """
    progress = float(iter) / max_iter
    return base_lr * (1 - progress) ** power
def _adjust_learning_rate(optimizer, i_iter, learning_rate):
    """Apply the poly schedule (max_iter=250000, power=0.9) to the optimizer.

    The second parameter group, when present, receives 10x the base rate --
    a common convention for newly initialized layers.
    """
    lr = lr_poly(learning_rate, i_iter, 250000, 0.9)
    groups = optimizer.param_groups
    groups[0]['lr'] = lr
    if len(groups) > 1:
        groups[1]['lr'] = 10 * lr
def adjust_learning_rate(optimizer, i_iter, cfg, learning_rate=2.5e-4):
    """Adjust the learning rate of the main segmentation network.

    `cfg` is accepted for interface compatibility but unused here.
    """
    _adjust_learning_rate(optimizer, i_iter, learning_rate=learning_rate)
def adjust_learning_rate_discriminator(optimizer, i_iter, cfg):
    """Adjust the discriminator learning rate (fixed 1e-4 base, poly decay).

    `cfg` is accepted for interface compatibility but unused here.
    """
    _adjust_learning_rate(optimizer, i_iter, learning_rate=1e-4)
def metrics(img_gt, img_pred, voxel_size, label_index = 1):
    """
    Adapted: https://www.creatis.insa-lyon.fr/Challenge/acdc/code/metrics_acdc.py
    Function to compute the metrics between two segmentation maps given as input.
    Parameters
    ----------
    img_gt: np.array
        Array of the ground truth segmentation map.
    img_pred: np.array
        Array of the predicted segmentation map.
    voxel_size: list, tuple or np.array
        The size of a voxel of the images used to compute the volumes.
    label_index: int
        The single label value to evaluate; only this class is scored.
    Return
    ------
    A list of metrics in this order: [Dice, Jaccard, HD95 (mm), ASSD (mm),
    sensitivity, specificity, volume correlation, p-value,
    GT volume (ml), predicted volume (ml), volume error (ml)].
    (The original docstring described ACDC LV/RV/MYO outputs that this
    function does not produce.)
    """
    if img_gt.ndim != img_pred.ndim:
        raise ValueError("The arrays 'img_gt' and 'img_pred' should have the "
                         "same dimension, {} against {}".format(img_gt.ndim,
                                                                img_pred.ndim))
    res = []
    # Loop on each class of the input images (only label_index here)
    for c in [label_index]:
        # Copy the gt image so the input is not altered
        gt_c_i = np.copy(img_gt)
        gt_c_i[gt_c_i != c] = 0
        # Copy the pred image so the input is not altered
        pred_c_i = np.copy(img_pred)
        pred_c_i[pred_c_i != c] = 0
        # Compute the Dice coefficient
        dice = metric.binary.dc(gt_c_i, pred_c_i)
        # Compute the Jaccard index
        jac = metric.binary.jc(gt_c_i, pred_c_i)
        # Compute the 95th-percentile Hausdorff distance; medpy raises
        # RuntimeError when one of the masks is empty, so fall back to 0
        try:
            hdd = metric.binary.hd95(gt_c_i, pred_c_i, voxelspacing=voxel_size)
        except RuntimeError:
            print("RuntimeError('The first supplied array does not contain any binary object.')")
            hdd = 0
        # Compute the average symmetric surface distance, same fallback
        try:
            assd = metric.binary.assd(gt_c_i, pred_c_i, voxelspacing=voxel_size)
        except RuntimeError:
            print("RuntimeError('The first supplied array does not contain any binary object.')")
            assd = 0
        # Compute the sensitivity (recall)
        sen = metric.binary.sensitivity(gt_c_i, pred_c_i)
        # Compute the specificity
        spe = metric.binary.specificity(gt_c_i, pred_c_i)
        # Clip the masks to {0, 1} to compute the volumes
        gt_c_i = np.clip(gt_c_i, 0, 1)
        pred_c_i = np.clip(pred_c_i, 0, 1)
        # Computes the linear correlation in binary object volume between the contents
        # of the successive binary images supplied. Measured through the
        # Pearson product-moment correlation coefficient.
        vol, p_value = metric.binary.volume_correlation(gt_c_i, pred_c_i)
        # Compute volumes in ml (voxel_size presumably in mm -- TODO confirm units)
        volgt = gt_c_i.sum() * np.prod(voxel_size) / 1000.
        volpred = pred_c_i.sum() * np.prod(voxel_size) / 1000.
        res += [dice,jac, hdd, assd, sen, spe, vol, p_value, volgt, volpred, volpred-volgt]
    return res
def compute_metrics_on_files(path_gt, gt, pred, voxel_size, header=True, label_index = 1):
    """
    Compute and print the metrics table for one ground-truth/prediction pair.

    Parameters
    ----------
    path_gt: string
        Path of the ground truth image; only its basename is used as the row name.
    gt: np.array
        Ground truth segmentation map.
    pred: np.array
        Predicted segmentation map.
    voxel_size: list, tuple or np.array
        The size of a voxel, forwarded to `metrics` for volume computation.
    header: bool
        Whether to print the column header row first.
    label_index: int
        Label value to evaluate, forwarded to `metrics`.

    Bug fix: the format string previously had only 9 fields for 12 columns,
    so str.format silently dropped the last three values (gt-Vol, pred-vol,
    vol-err) from the printed report.
    """
    HEADER = ["Name ", "Dice [%] ", "Jac [%] ", "HD [mm]", "ASSD [mm]",
              "Sensitivity [%]", "Specificity [%]", "Vol-Corr ", "P-Value", "gt-Vol", "pred-vol", "vol-err"]
    name = os.path.basename(path_gt)
    name = name.split('.')[0]
    res = metrics(gt, pred, voxel_size, label_index=label_index)
    res = ["{:.3f}".format(r) for r in res]
    # 12 columns: the name plus the 11 values returned by `metrics`.
    formatting = ("{:>8}, {:>8}, {:>10}, {:>10}, {:>10}, {:>10}, "
                  "{:>10}, {:>10}, {:>10}, {:>10}, {:>10}, {:>10}")
    if header:
        print(formatting.format(*HEADER))
    print(formatting.format(name, *res))
if __name__ == '__main__':
    # Smoke test: binarize a random soft prediction along axis 0.
    pred = np.random.rand(2, 3, 3)
    print(pred)
    print(soft_to_hard_pred(pred, 0))
    input()  # pause so the printed output can be inspected
    # Smoke test: one-hot encode two small label maps (shifted to 0-based labels).
    eye = np.eye(3, dtype='uint8')  # unused here; to_categorical builds its own
    mask = np.array([[1, 1, 1, 1], [1, 2, 2, 1], [1, 2, 3, 1], [1, 1, 1, 1]]) - 1
    print(mask)
    mask1 = np.array([[2, 2, 2, 2], [1, 1, 2, 2], [1, 1, 1, 1], [3, 3, 3, 3]]) - 1
    print(mask1)
    mask = np.array([mask, mask1])
    # NOTE(review): this stacked mask is 3-D (2, 4, 4), but to_categorical
    # asserts mask.ndim == 4 -- this call will trip that assert; confirm
    # whether a singleton channel axis should be added first.
    mask = to_categorical(mask=mask, num_classes=3, channel='channel_first')
    input()