-
Notifications
You must be signed in to change notification settings - Fork 1
/
Copy pathresult_fusion_gta.py
executable file
·346 lines (291 loc) · 17.2 KB
/
result_fusion_gta.py
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
232
233
234
235
236
237
238
239
240
241
242
243
244
245
246
247
248
249
250
251
252
253
254
255
256
257
258
259
260
261
262
263
264
265
266
267
268
269
270
271
272
273
274
275
276
277
278
279
280
281
282
283
284
285
286
287
288
289
290
291
292
293
294
295
296
297
298
299
300
301
302
303
304
305
306
307
308
309
310
311
312
313
314
315
316
317
318
319
320
321
322
323
324
325
326
327
328
329
330
331
332
333
334
335
336
337
338
339
340
341
342
343
344
345
346
# import the needed packages
# from result_fusion import Fusion
from fusion.fusion_gta import Fusion_GTA
import os
import cv2
from matplotlib import pyplot as plt
import numpy as np
from tools.iou_perimg import SegmentationMetrics
import tqdm
from config import config_gta as config
if __name__ == '__main__':
    # Driver script: fuse SAM masks with a UDA model's semantic-segmentation
    # predictions (GTA -> Cityscapes setting), evaluate each fusion variant
    # per image against the ground truth, and optionally write the
    # intermediate and fused results to disk.
    ### folder pathes
    # the path to the sam mask
    mask_folder = config.mask_folder
    mask_folder_suffix = config.mask_folder_suffix
    mask_suffix = config.mask_suffix
    # the path to the model prediction id, entropy and confidence
    segmentation_folder = config.segmentation_folder
    segmentation_suffix = config.segmentation_suffix
    segmentation_suffix_noimg = config.segmentation_suffix_noimg
    confidence_folder = config.confidence_folder
    confidence_suffix = config.confidence_suffix
    entropy_folder = config.entropy_folder
    entropy_suffix = config.entropy_suffix
    # the path to the original image
    image_folder = config.image_folder
    image_suffix = config.image_suffix
    # the path to the ground truth
    gt_folder = config.gt_folder
    gt_suffix = config.gt_suffix
    # the path to the output folder
    output_folder = config.output_folder
    # num of classes
    num_classes = config.num_classes

    ### fusion parameters
    # sgml process
    get_sam_mode = config.get_sam_mode
    use_sgml = config.use_sgml
    # C_l and C_s classes
    large_classes = config.large_classes    # [0, 1, 2, 8, 10, 13]
    small_classes = config.small_classes    # [3, 4, 5, 6, 7, 9, 11, 12, 14, 15, 16, 17, 18]
    # sam alpha
    sam_alpha = config.sam_alpha
    adaptive_ratio = config.adaptive_ratio
    # road assumption
    road_assumption = config.road_assumption
    road_center_rect = config.road_center_rect
    # the fusion mode
    fusion_mode = config.fusion_mode
    sam_classes = config.sam_classes    # 11 classes, 5, 6, 7,
    shrink_num = config.shrink_num
    # Thresholds are tuned per UDA backbone; the segmentation folder name
    # encodes which method produced the predictions (BiSeNet/TUFL, MIC,
    # otherwise DAFormer is assumed as the default).
    if 'BiSeNet-uda' in config.segmentation_folder:
        confidence_threshold = config.confidence_threshold_tufl
        entropy_ratio = config.entropy_ratio_tufl
    elif 'MIC' in config.segmentation_folder:
        confidence_threshold = config.confidence_threshold_mic
        entropy_ratio = config.entropy_ratio_mic
    else:
        confidence_threshold = config.confidence_threshold_daformer
        entropy_ratio = config.entropy_ratio_daformer

    ### display parameters
    display_size = config.display_size
    mix_ratio = config.mix_ratio
    resize_ratio = config.resize_ratio
    ### save params
    save_mix_result = config.save_mix_result
    save_sam_result = config.save_sam_result
    save_all_fusion = config.save_all_fusion    # [True, True, True, False, False]
    save_majority_process = config.save_majority_process
    save_sgml_process = config.save_sgml_process
    save_f1_process = config.save_f1_process
    save_f2_process = config.save_f2_process
    save_f3_process = config.save_f3_process
    ### time setting
    time_process = config.time_process
    time_filename = config.time_filename
    ### sample index setting
    debug_num = config.debug_num        # 2975
    begin_index = config.begin_index    # 0

    # Build the fusion engine with every configured path/parameter.
    # NOTE(review): argument order must match Fusion_GTA.__init__ exactly
    # (defined in fusion/fusion_gta.py, outside this file).
    fusion = Fusion_GTA(mask_folder,
                        mask_folder_suffix,
                        mask_suffix,
                        segmentation_folder,
                        segmentation_suffix,
                        segmentation_suffix_noimg,
                        confidence_folder,
                        confidence_suffix,
                        entropy_folder,
                        entropy_suffix,
                        image_folder,
                        image_suffix,
                        gt_folder,
                        gt_suffix,
                        output_folder,
                        num_classes,
                        fusion_mode,
                        road_assumption,
                        road_center_rect,
                        get_sam_mode,
                        use_sgml,
                        sam_alpha,
                        adaptive_ratio,
                        large_classes,
                        small_classes,
                        sam_classes,
                        shrink_num,
                        display_size,
                        mix_ratio,
                        resize_ratio,
                        time_process,
                        time_filename,
                        save_sgml_process,
                        save_majority_process,
                        save_f1_process,
                        save_f2_process,
                        save_f3_process
                        )
    # Indices of the samples to process, and the per-image IoU calculator.
    index_range = list(range(begin_index, begin_index + debug_num))
    iou_cal = SegmentationMetrics(num_classes=num_classes)

    # Prepare output folders for the three active fusion modes (trainid +
    # color per mode); modes 4/5 are currently disabled.
    if save_all_fusion:
        f1_output_folder = os.path.join(output_folder, 'fusion1_trainid')
        f1_color_output_folder = os.path.join(output_folder, 'fusion1_color')
        f2_output_folder = os.path.join(output_folder, 'fusion2_trainid')
        f2_color_output_folder = os.path.join(output_folder, 'fusion2_color')
        f3_output_folder = os.path.join(output_folder, 'fusion3_trainid')
        f3_color_output_folder = os.path.join(output_folder, 'fusion3_color')
        # f4_output_folder = os.path.join(output_folder, 'fusion4_trainid')
        # f5_output_folder = os.path.join(output_folder, 'fusion5_trainid')
        fusion.check_and_make(f1_output_folder)
        fusion.check_and_make(f1_color_output_folder)
        fusion.check_and_make(f2_output_folder)
        fusion.check_and_make(f2_color_output_folder)
        fusion.check_and_make(f3_output_folder)
        fusion.check_and_make(f3_color_output_folder)
        # fusion.check_and_make(f4_output_folder)
        # fusion.check_and_make(f5_output_folder)
    # Prepare output folders for the raw SAM pseudo-labels (majority-vote
    # and SGML variants) and the majority-vote fusion-1 result.
    if save_sam_result:
        sam_majority_output_folder = os.path.join(output_folder, 'sam_majority_trainid')
        sam_majority_color_output_folder = os.path.join(output_folder, 'sam_majority_color')
        sam_sgml_output_folder = os.path.join(output_folder, 'sam_sgml_trainid')
        sam_sgml_color_output_folder = os.path.join(output_folder, 'sam_sgml_color')
        f1_majority_output_folder = os.path.join(output_folder, 'fusion1_majority_trainid')
        f1_majority_color_output_folder = os.path.join(output_folder, 'fusion1_majority_color')
        fusion.check_and_make(sam_majority_output_folder)
        fusion.check_and_make(sam_majority_color_output_folder)
        fusion.check_and_make(sam_sgml_output_folder)
        fusion.check_and_make(sam_sgml_color_output_folder)
        fusion.check_and_make(f1_majority_output_folder)
        fusion.check_and_make(f1_majority_color_output_folder)

    bar = tqdm.tqdm(total=debug_num)
    for i in index_range:
        # get the image name
        image_name = fusion.image_names[i]    # aachen_000000_000019_leftImg8bit
        image_name = image_name.replace(mask_folder_suffix, '')    # aachen_000000_000019
        # get the uda prediction
        # aachen_000000_000019_leftImg8bittrainID.png
        prediction_path = os.path.join(fusion.segmentation_folder, image_name + fusion.segmentation_suffix)
        # Some prediction sets are named without the '_leftImg8bit' part.
        if fusion.segmentation_suffix_noimg:
            prediction_path = prediction_path.replace('_leftImg8bit', '')
        uda_pred = cv2.imread(prediction_path, 0)    # [h, w], 1 rgb channel
        uda_pred_color = fusion.color_segmentation(uda_pred)
        # get the confidence map
        confidence_path = os.path.join(fusion.confidence_folder, image_name + fusion.confidence_suffix)
        pred_confidence = np.load(confidence_path, allow_pickle=True)    # [h, w]
        # get the entropy map
        entropy_path = os.path.join(fusion.entropy_folder, image_name + fusion.entropy_suffix)
        pred_entropy = np.load(entropy_path, allow_pickle=True)    # [h, w]
        # get the ground truth
        gt_path = os.path.join(fusion.gt_folder, image_name + fusion.gt_suffix)
        gt = cv2.imread(gt_path, 0)    # [h, w] (flag 0 loads single-channel)
        gt_color = fusion.color_segmentation(gt)
        # get the original image
        original_image = cv2.imread(os.path.join(fusion.image_folder, image_name + fusion.image_suffix))
        # Visualize the confidence/entropy maps and build binary masks:
        # confidence keeps pixels ABOVE its threshold, entropy keeps pixels
        # BELOW its per-image percentile threshold.
        pred_confidence = pred_confidence.astype(np.float32)
        confidence_map = fusion.visualize_numpy(pred_confidence)
        confidence_mask, confidence_img = fusion.vis_np_higher_thres(pred_confidence, original_image, confidence_threshold)
        pred_entropy = pred_entropy.astype(np.float32)
        entropy_map = fusion.visualize_numpy(pred_entropy)
        # entropy_ratio is a percentile (per-image adaptive threshold)
        entropy_threshold = np.percentile(pred_entropy, entropy_ratio)
        entropy_mask, entropy_img = fusion.vis_np_lower_thres(pred_entropy, original_image, entropy_threshold)
        # get the sam segmentation result using the mask
        sam_pred_sgml, sam_pred_majority = fusion.get_sam_pred(image_name, uda_pred, confidence_mask, entropy_mask)    # [h,w]
        sam_color_sgml = fusion.color_segmentation(sam_pred_sgml)    # [h,w,3]
        sam_majority_color = fusion.color_segmentation(sam_pred_majority)    # [h,w,3]
        # Choose which SAM labeling strategy feeds the fusion stages.
        if use_sgml:
            sam_pred = sam_pred_sgml
        else:
            sam_pred = sam_pred_majority
        image_filename = image_name + fusion.mask_suffix
        # Initialize the black image and ignore label image only once
        black_img = np.zeros((original_image.shape[0], original_image.shape[1], 3), dtype=np.uint8)
        # 255 is the ignore label for unassigned pixels
        ignore_lb = np.full((original_image.shape[0], original_image.shape[1]), 255, dtype=np.uint8)
        # Use list comprehensions or tuples to create multiple copies
        fusion_color_bg_list = [black_img.copy() for _ in range(5)]
        error_list = [black_img.copy() for _ in range(5)]
        fusion_trainid_bg_list = [ignore_lb.copy() for _ in range(5)]
        # Unpack lists if needed to assign to individual variables
        # (modes 4/5 keep these placeholders since their fusion calls are
        # commented out below)
        fusion_color_bg_1, fusion_color_bg_2, fusion_color_bg_3, fusion_color_bg_4, fusion_color_bg_5 = fusion_color_bg_list
        error_1, error_2, error_3, error_4, error_5 = error_list
        fusion_trainid_bg_1, fusion_trainid_bg_2, fusion_trainid_bg_3, fusion_trainid_bg_4, fusion_trainid_bg_5 = fusion_trainid_bg_list
        # get fusion result from 1, 2, 3, 4, 5
        fusion_trainid_bg_1, fusion_color_bg_1 = fusion.fusion_mode_1(uda_pred=uda_pred, sam_pred=sam_pred, image_name=image_name)
        if save_sam_result:
            # extra fusion-1 pass with the majority-vote SAM labels, for comparison
            fusion_trainid_bg_1_majority, fusion_color_bg_1_majority = fusion.fusion_mode_1(uda_pred=uda_pred, sam_pred=sam_pred_majority, image_name=image_name)
        fusion_trainid_bg_2, fusion_color_bg_2 = fusion.fusion_mode_2(uda_pred=uda_pred, sam_pred=sam_pred, image_name=image_name)
        # mode 3 refines the mode-1 result using the confidence/entropy masks
        fusion_trainid_bg_3, fusion_color_bg_3 = fusion.fusion_mode_3(uda_pred=uda_pred, sam_pred=sam_pred, fusion_trainid=fusion_trainid_bg_1,
                                                                      confidence_mask=confidence_mask, entropy_mask=entropy_mask, image_name=image_name)
        # fusion_trainid_bg_4, fusion_color_bg_4 = fusion.fusion_mode_4(uda_pred=uda_pred, sam_pred=sam_pred, \
        #     fusion_trainid=fusion_trainid_bg_3, confidence_mask=confidence_mask)
        # fusion_trainid_bg_5, fusion_color_bg_5 = fusion.fusion_mode_5(uda_pred=uda_pred, sam_pred=sam_pred, \
        #     fusion_trainid=fusion_trainid_bg_3, entropy_mask=entropy_mask)
        # save the sam result
        if save_sam_result:
            # save sam psuedo label using majority voting and sgml
            cv2.imwrite(os.path.join(sam_majority_output_folder, image_filename), sam_pred_majority)
            cv2.imwrite(os.path.join(sam_majority_color_output_folder, image_filename), sam_majority_color)
            cv2.imwrite(os.path.join(sam_sgml_output_folder, image_filename), sam_pred_sgml)
            cv2.imwrite(os.path.join(sam_sgml_color_output_folder, image_filename), sam_color_sgml)
            # save sam majority f1 fusion result
            cv2.imwrite(os.path.join(f1_majority_output_folder, image_filename), fusion_trainid_bg_1_majority)
            cv2.imwrite(os.path.join(f1_majority_color_output_folder, image_filename), fusion_color_bg_1_majority)
        # Per-image metrics: index 0 is the raw UDA prediction, 1-3 are the
        # fusion modes; 4/5 stay at the -1 / [] sentinels (modes disabled).
        miou_0, miou_1, miou_2, miou_3, miou_4, miou_5 = -1, -1, -1, -1, -1, -1
        ious_0, ious_1, ious_2, ious_3, ious_4, ious_5 = [], [], [], [], [], []
        miou_0, ious_0 = iou_cal.calculate_miou(uda_pred, gt)
        miou_1, ious_1 = iou_cal.calculate_miou(fusion_trainid_bg_1, gt)
        miou_2, ious_2 = iou_cal.calculate_miou(fusion_trainid_bg_2, gt)
        miou_3, ious_3 = iou_cal.calculate_miou(fusion_trainid_bg_3, gt)
        # miou_4, ious_4 = iou_cal.calculate_miou(fusion_trainid_bg_4, gt)
        # miou_5, ious_5 = iou_cal.calculate_miou(fusion_trainid_bg_5, gt)
        # Error visualizations (prediction vs. ground truth) per variant.
        error_0 = fusion.get_error_image(uda_pred, gt, uda_pred_color)
        error_1 = fusion.get_error_image(fusion_trainid_bg_1, gt, fusion_color_bg_1)
        error_2 = fusion.get_error_image(fusion_trainid_bg_2, gt, fusion_color_bg_2)
        error_3 = fusion.get_error_image(fusion_trainid_bg_3, gt, fusion_color_bg_3)
        # error_4 = fusion.get_error_image(fusion_trainid_bg_4, gt, fusion_color_bg_4)
        # error_5 = fusion.get_error_image(fusion_trainid_bg_5, gt, fusion_color_bg_5)
        # display the results
        fusion.dis_imgs_horizontal(
            [original_image, gt_color, sam_color_sgml, uda_pred_color, error_0, \
             fusion_color_bg_1, fusion_color_bg_2, fusion_color_bg_3, fusion_color_bg_4, fusion_color_bg_5, \
             error_1, error_2, error_3, error_4, error_5, \
             confidence_map, entropy_map, confidence_img, entropy_img], \
            image_name, \
            [(miou_0, ious_0), (miou_1, ious_1), (miou_2, ious_2), \
             (miou_3, ious_3), (miou_4, ious_4), (miou_5, ious_5)], \
            [confidence_threshold, entropy_threshold])
        # save the mious and ious
        miou_values = [miou_0, miou_1, miou_2, miou_3, miou_4, miou_5]
        ious_values = [ious_0, ious_1, ious_2, ious_3, ious_4, ious_5]
        fusion.save_ious(miou_values, ious_values, image_name)
        # save all fusion results
        if save_all_fusion:
            cv2.imwrite(os.path.join(f1_output_folder, image_filename), fusion_trainid_bg_1)
            cv2.imwrite(os.path.join(f1_color_output_folder, image_filename), fusion_color_bg_1)
            cv2.imwrite(os.path.join(f2_output_folder, image_filename), fusion_trainid_bg_2)
            cv2.imwrite(os.path.join(f2_color_output_folder, image_filename), fusion_color_bg_2)
            cv2.imwrite(os.path.join(f3_output_folder, image_filename), fusion_trainid_bg_3)
            cv2.imwrite(os.path.join(f3_color_output_folder, image_filename), fusion_color_bg_3)
            # cv2.imwrite(os.path.join(f4_output_folder, image_filename), fusion_trainid_bg_4)
            # cv2.imwrite(os.path.join(f5_output_folder, image_filename), fusion_trainid_bg_5)
        # save mix results
        if save_mix_result:
            # get the sam mixed color image using the fusion.mix_ratio
            sam_mixed_color = cv2.addWeighted(original_image, fusion.mix_ratio, sam_color_sgml, 1 - fusion.mix_ratio, 0)
            if fusion.resize_ratio != 1:
                new_h = int(sam_mixed_color.shape[0] * fusion.resize_ratio)
                new_w = int(sam_mixed_color.shape[1] * fusion.resize_ratio)
                sam_mixed_color = cv2.resize(sam_mixed_color, (new_w, new_h), interpolation=cv2.INTER_NEAREST)
            # save the sam mask in trainid and color to the output folder
            # NOTE(review): the 'trainID' / 'mixed' subfolders are not created
            # here — presumably Fusion_GTA prepares them; verify, or these
            # imwrite calls fail silently (cv2.imwrite returns False).
            cv2.imwrite(os.path.join(fusion.output_folder, 'trainID', image_filename), sam_pred)
            # cv2.imwrite(os.path.join(fusion.output_folder, 'color', image_filename), fusion_color)
            cv2.imwrite(os.path.join(fusion.output_folder, 'mixed', image_filename), sam_mixed_color)
        # make the fusion results to list for easy use
        fusion_trainid_bgs = [fusion_trainid_bg_1, fusion_trainid_bg_2, fusion_trainid_bg_3, fusion_trainid_bg_4, fusion_trainid_bg_5]
        fusion_color_bgs = [fusion_color_bg_1, fusion_color_bg_2, fusion_color_bg_3, fusion_color_bg_4, fusion_color_bg_5]
        # fusion_mode is 1-based in config; convert to a 0-based list index
        mode = fusion.fusion_mode - 1
        if mode in range(len(fusion_trainid_bgs)):
            fusion_trainid_bg, fusion_color_bg = fusion_trainid_bgs[mode], fusion_color_bgs[mode]
        else:
            raise NotImplementedError("This fusion mode has not been implemented yet.")
        #save the fusion mask in trainid and color to the output folder
        mixed_color_bg = cv2.addWeighted(original_image, fusion.mix_ratio, fusion_color_bg, 1 - fusion.mix_ratio, 0)
        if fusion.resize_ratio != 1:
            mixed_color_bg = cv2.resize(mixed_color_bg, (int(mixed_color_bg.shape[1] * fusion.resize_ratio), int(mixed_color_bg.shape[0] * fusion.resize_ratio)), interpolation=cv2.INTER_NEAREST)
        cv2.imwrite(os.path.join(fusion.output_folder, 'trainID_bg', image_filename), fusion_trainid_bg)
        # cv2.imwrite(os.path.join(fusion.output_folder, 'color_bg', image_filename), fusion_color_bg)
        cv2.imwrite(os.path.join(fusion.output_folder, 'mixed_bg', image_filename), mixed_color_bg)
        bar.update(1)
    bar.close()
    # Optionally report accumulated per-stage timing statistics.
    if time_process:
        fusion.show_time_process()