-
Notifications
You must be signed in to change notification settings - Fork 2
Expand file tree
/
Copy patheval.py
More file actions
107 lines (85 loc) · 3.8 KB
/
eval.py
File metadata and controls
107 lines (85 loc) · 3.8 KB
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
import csv
import os
import time
import torch.distributed
import torch.utils.data
import tqdm
import utils
def evaluate(model: torch.nn.Module,
valloader: torch.utils.data.DataLoader,
criterion: torch.nn.Module,
num_classes: int,
amp_enabled: bool,
ddp_enabled: bool,
device: torch.device):
model.eval()
if ddp_enabled:
local_rank = torch.distributed.get_rank()
world_size = torch.distributed.get_world_size()
else:
local_rank = 0
world_size = 0
evaluator = utils.metrics.Evaluator(num_classes, device)
inference_time = torch.zeros(1, device=device)
val_loss = torch.zeros(1, device=device)
for images, targets in tqdm.tqdm(valloader, desc='Eval', leave=False, disable=False if local_rank == 0 else True):
images, targets = images.to(device), targets.to(device)
with torch.cuda.amp.autocast(enabled=amp_enabled):
torch.cuda.synchronize()
start_time = time.time()
with torch.no_grad():
outputs = model(images)
torch.cuda.synchronize()
inference_time += time.time() - start_time
val_loss += criterion(outputs, targets)
# Make segmentation map
outputs = torch.argmax(outputs, dim=1)
# Update confusion matrix
evaluator.update_matrix(targets, outputs)
if ddp_enabled:
val_loss_list = [val_loss]
confusion_matrix_list = [evaluator.confusion_matrix]
inference_time_list = [inference_time]
torch.distributed.all_reduce_multigpu(val_loss_list, op=torch.distributed.ReduceOp.SUM)
torch.distributed.all_reduce_multigpu(confusion_matrix_list, op=torch.distributed.ReduceOp.SUM)
torch.distributed.all_reduce_multigpu(inference_time_list, op=torch.distributed.ReduceOp.SUM)
val_loss = val_loss_list[0] / (len(valloader) * world_size)
evaluator.confusion_matrix = confusion_matrix_list[0]
iou, miou = evaluator.mean_intersection_over_union(percent=True)
inference_time = inference_time_list[0] / (len(valloader) * world_size)
fps = 1 / inference_time
else:
val_loss /= len(valloader)
iou, miou = evaluator.mean_intersection_over_union(percent=True)
inference_time /= len(valloader)
fps = 1 / inference_time
return val_loss.item(), iou, miou.item(), fps.item()
if __name__ == '__main__':
    # Load cfg and create components builder
    cfg = utils.builder.load_cfg()
    builder = utils.builder.Builder(cfg)

    # Device: prefer CUDA, fall back to CPU
    device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')

    # 1. Dataset
    valset, valloader = builder.build_dataset('val')

    # 2. Model
    model = builder.build_model(valset.num_classes, pretrained=True).to(device)
    model_name = cfg['model']['name']
    amp_enabled = cfg['model']['amp_enabled']
    print(f'Activated model: {model_name}')

    # 3. Loss function
    criterion = builder.build_criterion(valset.ignore_index)

    # Evaluate model (ddp_enabled=False: this entry point is single-process)
    val_loss, iou, miou, fps = evaluate(model, valloader, criterion, valset.num_classes, amp_enabled, False, device)

    # Save evaluation result as csv file.
    # newline='' is required by the csv module docs: without it, text-mode
    # newline translation on Windows turns '\n' into '\r\n' and corrupts rows.
    os.makedirs('result', exist_ok=True)
    class_names = valset.class_names
    with open(os.path.join('result', f'{model_name}.csv'), mode='w', newline='', encoding='utf-8') as f:
        writer = csv.writer(f, delimiter=',', lineterminator='\n')
        writer.writerow(['Class Number', 'Class Name', 'IoU'])
        for class_num, iou_value in enumerate(iou):
            writer.writerow([class_num, class_names[class_num], iou_value.item()])
        writer.writerow(['mIoU', miou, ' '])
        writer.writerow(['Validation loss', val_loss, ' '])
        writer.writerow(['FPS', fps, ' '])
    print('Saved evaluation result.')