-
Notifications
You must be signed in to change notification settings - Fork 0
/
Copy pathmain.py
148 lines (126 loc) · 4.46 KB
/
main.py
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
import os
import os.path as osp
import numpy as np
import torch
import torch.nn as nn
from data.utils.BatchSampler import GroupSampler
from data.utils.DataLoader import MyCollater, MyDataLoader
from data.utils.ParentGraphDataset import ParentGraphsDataset
from model_training import test_model, train_model
from models import SREXmodel
def main(parameters):
    """Train and evaluate the SREX GNN model on VRP parent-graph data.

    Builds train/test datasets from pre-processed pickle batches, trains
    ``SREXmodel`` with BCE loss for ``parameters["epochs"]`` epochs, prints
    per-epoch metrics, and checkpoints the model state after epoch 80.

    Args:
        parameters: dict with at least the keys "learning_rate", "epochs",
            "run", "hidden_dim", "num_heads"; the whole dict is also passed
            through to ``train_model`` / ``test_model``.

    Returns:
        The ``result`` object from the last completed training epoch, or
        ``None`` when ``parameters["epochs"]`` is 0.
    """
    device = torch.device("cuda") if torch.cuda.is_available() else torch.device("cpu")
    torch.manual_seed(42)  # fixed seed for reproducible runs

    instances_VRP = [
        "X-n439-k37",
        "X-n393-k38",
        "X-n449-k29",
        "ORTEC-n405-k18",
        "ORTEC-n510-k23",
        "X-n573-k30",
    ]
    instances_TW = ["ORTEC-VRPTW-ASYM-0bdff870-d1-n458-k35", "R2_8_9", "R1_4_10"]
    instances = instances_VRP + instances_TW
    use_instances = ["X-n439-k37", "X-n393-k38"]

    # Train/test split over 772 cvrp files (386 tw files exist but are
    # currently disabled). Files 88 and 331 are corrupted and skipped.
    training = list(range(0, 88)) + list(range(89, 331)) + list(range(332, 618))
    test = list(range(618, 680))
    train_file_names = [f"batch_cvrp_{i}_rawdata.pkl" for i in training]
    # train_file_names.extend([f"batch_tw_{i}_rawdata.pkl" for i in range(308)])
    test_file_names = [f"batch_cvrp_{i}_rawdata.pkl" for i in test]
    # test_file_names.extend([f"batch_tw_{i}_rawdata.pkl" for i in range(308, 386)])

    def _make_dataset(raw_files):
        # Train and test datasets differ only in their raw file lists.
        return ParentGraphsDataset(
            root=osp.join(os.getcwd(), "data/model_data"),
            raw_files=raw_files,
            instances=instances,
            is_processed=True,
            use_instances=use_instances,
            use_time=False,
        )

    def _make_loader(dataset):
        # Group-aware sampling: groups of 12 parent graphs, 2 groups per batch.
        sampler = GroupSampler(data_length=len(dataset), group_size=12, batch_size=2)
        return MyDataLoader(
            dataset=dataset,
            batch_sampler=sampler,
            num_workers=0,
            collate_fn=MyCollater(None, None),
        )

    trainset = _make_dataset(train_file_names)
    testset = _make_dataset(test_file_names)
    train_loader = _make_loader(trainset)
    test_loader = _make_loader(testset)

    model = SREXmodel(
        num_node_features=trainset.num_node_features,
        hidden_dim=parameters["hidden_dim"],
        num_heads=parameters["num_heads"],
        dropout=0.1,
    )
    model.to(device)
    print(model)

    optimizer = torch.optim.Adam(model.parameters(), lr=parameters["learning_rate"])
    # Summed (not averaged) BCE; train/test code derives the averages itself.
    loss_func = nn.BCELoss(reduction="sum")
    nr_epochs = parameters["epochs"]

    print("Train data metrics: ", trainset.get_accuracy_scores())
    print("Test data metrics: ", testset.get_accuracy_scores())

    # Only start saving checkpoints once training has largely converged.
    checkpoint_after_epoch = 80

    result = None  # guard against returning an unbound name when nr_epochs == 0
    for epoch in range(nr_epochs):
        tot_train_loss, avg_train_loss, train_metric, result = train_model(
            model,
            device,
            train_loader,
            optimizer,
            loss_func,
            trainset.processed_dir,
            parameters,
            epoch,
        )
        tot_test_loss, avg_test_loss, test_metric = test_model(
            model,
            device,
            test_loader,
            loss_func,
            testset.processed_dir,
            parameters,
            epoch,
        )
        print(
            f"Epoch {epoch + 1} / {nr_epochs} [======] - train_loss(Tot, Avg): {tot_train_loss:.2f},"
            f" {avg_train_loss:.2f} - test_loss : {avg_test_loss:.2f}, \n"
            f"{train_metric} \n"
            f"{test_metric}"
        )
        if epoch > checkpoint_after_epoch:
            obj_dict = {
                "model_state": model.state_dict(),
                "Metrics_train": train_metric,
                "Metrics_test": test_metric,
                "Results": result,
            }
            torch.save(
                obj_dict,
                f'{trainset.root}/model_states/SrexGNN_{parameters["run"]}_{epoch}_{train_metric.f1:.2f}_{train_metric.select_acc:.2f}_{train_metric.select_high:.2f}',
            )
    return result
if __name__ == "__main__":
    # Hyper-parameter / run configuration for a single training run.
    parameters = dict(
        learning_rate=0.001,
        pos_weight=6,
        epochs=110,
        binary_label=True,
        run=1,
        hidden_dim=8,
        num_heads=8,
        weight="confuse",
    )
    result = main(parameters)