
Commit

further update model files
tiantiaf0627 committed Dec 8, 2021
1 parent d8e43ee commit 5b10c1c
Showing 2 changed files with 58 additions and 97 deletions.
model/attack_model.py: 99 changes (2 additions, 97 deletions)
@@ -5,24 +5,20 @@
"""
import torch
import torch.nn as nn
from torch.autograd import Variable
from torch.nn import functional as F
import pdb
from pytorch_lightning.core.lightning import LightningModule
import numpy as np
from sklearn.metrics import accuracy_score, recall_score
from sklearn.metrics import confusion_matrix
from torch.optim.lr_scheduler import ReduceLROnPlateau


class attack_model(LightningModule):
class attack_model(nn.Module):
def __init__(self, leak_layer, feature_type):

super(attack_model, self).__init__()
self.dropout_p = 0.2
self.num_emo_classes = 4
self.num_gender_class = 2
self.num_affect_classes = 3
self.leak_layer = leak_layer
self.lr = 0.0001

@@ -115,8 +111,6 @@ def __init__(self, leak_layer, feature_type):
self.test_results = None
self.init_weight()

self.test_result = {}

    def init_weight(self):
        # iterate over the modules themselves, not the name strings that
        # iterating the self._modules dict would yield
        for m in self._modules.values():
            if isinstance(m, nn.Linear):
@@ -148,93 +142,4 @@ def forward(self, weights, bias):

preds = self.pred_layer(z)
preds = torch.log_softmax(preds, dim=1)
return preds

def cross_entropy_loss(self, logits, labels):
return F.nll_loss(logits, labels)

def training_step(self, train_batch, batch_idx):
weights, bias, gender = train_batch
logits = self.forward(weights.unsqueeze(dim=1), bias)
loss = self.cross_entropy_loss(logits, gender)

predictions = np.argmax(logits.detach().cpu().numpy(), axis=1)
pred_list, truth_list = [], []
for pred_idx in range(len(predictions)):
pred_list.append(predictions[pred_idx])
truth_list.append(gender.detach().cpu().numpy()[pred_idx])

return {'loss': loss, 'pred': pred_list, 'truth': truth_list}

def training_epoch_end(self, train_step_outputs):
result_dict = self.result_summary(train_step_outputs, self.current_epoch, mode='train')
self.log('train_loss', result_dict['loss'], on_epoch=True)
self.log('train_acc_epoch', result_dict['acc'], on_epoch=True)
self.log('train_uar_epoch', result_dict['uar'], on_epoch=True)

def validation_step(self, val_batch, batch_idx):
weights, bias, gender = val_batch
logits = self.forward(weights.unsqueeze(dim=1), bias)
loss = self.cross_entropy_loss(logits, gender)
predictions = np.argmax(logits.detach().cpu().numpy(), axis=1)
pred_list, truth_list = [], []
for pred_idx in range(len(predictions)):
pred_list.append(predictions[pred_idx])
truth_list.append(gender.detach().cpu().numpy()[pred_idx])

return {'loss': loss, 'pred': pred_list, 'truth': truth_list}

def validation_epoch_end(self, val_step_outputs):
result_dict = self.result_summary(val_step_outputs, self.current_epoch, mode='validation')
self.log('val_loss', result_dict['loss'], on_epoch=True)
self.log('val_acc_epoch', result_dict['acc'], on_epoch=True)
self.log('val_uar_epoch', result_dict['uar'], on_epoch=True)

def test_step(self, test_batch, batch_nb):
weights, bias, gender = test_batch
logits = self.forward(weights.unsqueeze(dim=1), bias)
predictions = np.argmax(logits.detach().cpu().numpy(), axis=1)
pred_list, truth_list = [], []
for pred_idx in range(len(predictions)):
pred_list.append(predictions[pred_idx])
truth_list.append(gender.detach().cpu().numpy()[pred_idx])
return {'pred': pred_list, 'truth': truth_list}

def test_epoch_end(self, test_step_outputs):
result_dict = self.result_summary(test_step_outputs, 0, mode='test')
self.log('test_acc_epoch', result_dict['acc'], on_epoch=True)
self.log('test_uar_epoch', result_dict['uar'], on_epoch=True)

def configure_optimizers(self):
optimizer = torch.optim.Adam(self.parameters(), lr=self.lr)
scheduler = ReduceLROnPlateau(optimizer, mode='min', patience=5, factor=0.5, verbose=True)
return optimizer

def result_summary(self, step_outputs, epoch, mode='train'):

loss_list, y_true, y_pred = [], [], []
for step in range(len(step_outputs)):
for idx in range(len(step_outputs[step]['pred'])):
y_true.append(step_outputs[step]['truth'][idx])
y_pred.append(step_outputs[step]['pred'][idx])
if mode != 'test': loss_list.append(step_outputs[step]['loss'].item())

result_dict = {}
acc_score = accuracy_score(y_true, y_pred)
rec_score = recall_score(y_true, y_pred, average='macro')
confusion_matrix_arr = np.round(confusion_matrix(y_true, y_pred, normalize='true')*100, decimals=2)

result_dict['acc'] = acc_score
result_dict['uar'] = rec_score
result_dict['conf'] = confusion_matrix_arr

print()
if mode != 'test':
result_dict['loss'] = np.mean(loss_list)
print('%s accuracy %.3f / recall %.3f / loss %.3f after %d' % (mode, acc_score, rec_score, np.mean(loss_list), epoch))
print()
else:
print('%s accuracy %.3f / recall %.3f' % (mode, acc_score, rec_score))
print()
print(confusion_matrix_arr)
return result_dict
return preds
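
With the LightningModule plumbing gone (the training/validation/test steps, the Adam optimizer setup, and the accuracy/UAR summary were all deleted above), the attack model now needs an external training loop. Below is a minimal sketch of such a loop, reconstructed from the removed code (Adam at lr 0.0001, NLL loss on the log-softmax outputs, UAR as macro recall); the constructor arguments and the train_loader yielding (weights, bias, gender) batches are assumptions for illustration, not part of the repository.

import numpy as np
import torch
import torch.nn.functional as F
from sklearn.metrics import accuracy_score, recall_score

model = attack_model(leak_layer='full', feature_type='emobase')  # placeholder arguments
optimizer = torch.optim.Adam(model.parameters(), lr=0.0001)      # lr taken from the deleted self.lr

for epoch in range(30):
    y_true, y_pred, losses = [], [], []
    for weights, bias, gender in train_loader:  # assumed DataLoader
        optimizer.zero_grad()
        # same call signature as the removed training_step
        logits = model(weights.unsqueeze(dim=1), bias)
        loss = F.nll_loss(logits, gender)  # forward() already applies log_softmax
        loss.backward()
        optimizer.step()
        losses.append(loss.item())
        y_pred.extend(logits.argmax(dim=1).cpu().tolist())
        y_true.extend(gender.cpu().tolist())
    acc = accuracy_score(y_true, y_pred)
    uar = recall_score(y_true, y_pred, average='macro')  # UAR, as in the deleted result_summary
    print('train accuracy %.3f / uar %.3f / loss %.3f after %d' % (acc, uar, np.mean(losses), epoch))

Validation would follow the same pattern under torch.no_grad().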
model/dnn_models.py: 56 changes (56 additions, 0 deletions)
@@ -0,0 +1,56 @@
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
@author: Tiantian
"""
import torch
import torch.nn as nn

class_dict = {'emotion': 4, 'affect': 3, 'gender': 2}

class dnn_classifier(nn.Module):
    def __init__(self, pred, input_spec, dropout):

        super(dnn_classifier, self).__init__()
        self.dropout_p = dropout
        self.num_classes = class_dict[pred]

        self.dropout = nn.Dropout(p=self.dropout_p)
        self.dense_relu1 = nn.ReLU()
        self.dense_relu2 = nn.ReLU()

        self.dense1 = nn.Linear(input_spec, 256)
        self.dense2 = nn.Linear(256, 128)

        self.pred_layer = nn.Linear(128, self.num_classes)
        self.init_weight()

    def init_weight(self):
        # self._modules maps names to modules; iterating the dict itself
        # yields name strings, so walk the values instead or the checks
        # below never match
        for m in self._modules.values():
            if isinstance(m, nn.Linear):
                torch.nn.init.xavier_uniform_(m.weight)
                m.bias.data.fill_(0.01)
            if isinstance(m, nn.Conv2d):
                torch.nn.init.xavier_uniform_(m.weight)
                m.bias.data.fill_(0.01)

    def forward(self, input_var):

        x = input_var.float()
        x = self.dense1(x)
        x = self.dense_relu1(x)
        x = self.dropout(x)

        x = self.dense2(x)
        x = self.dense_relu2(x)
        # reuse the registered dropout module; constructing nn.Dropout
        # inline here would ignore model.eval() and always stay active
        x = self.dropout(x)

        preds = self.pred_layer(x)
        preds = torch.log_softmax(preds, dim=1)

        return preds
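
Since dnn_classifier returns log-probabilities, downstream code would pair it with a negative log-likelihood loss. A quick usage sketch follows; the feature dimension (988) and the batch here are made-up values for illustration.

import torch
from torch.nn import functional as F

clf = dnn_classifier(pred='emotion', input_spec=988, dropout=0.2)  # 988 is a placeholder feature size
features = torch.randn(16, 988)       # hypothetical batch of utterance-level feature vectors
log_probs = clf(features)             # shape (16, 4): log-probabilities over the 4 emotion classes
labels = torch.randint(0, 4, (16,))
loss = F.nll_loss(log_probs, labels)  # matches the log_softmax output
predictions = log_probs.argmax(dim=1)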
