Skip to content

Commit

Permalink
update config and run, clean later
Browse files Browse the repository at this point in the history
  • Loading branch information
tiantiaf0627 committed Mar 25, 2022
1 parent 08da12c commit dabc56f
Show file tree
Hide file tree
Showing 2 changed files with 164 additions and 34 deletions.
14 changes: 9 additions & 5 deletions config.ini
Original file line number Diff line number Diff line change
Expand Up @@ -8,25 +8,29 @@ save_dir = /media/data/projects/speech-privacy
process_feature = False
process_training = False
ser_training = False
attack_training = True
attack_training = False
attack_result = True

[feature]
feature = emobase
feature = decoar2

[dataset]
private_dataset = crema-d
adv_dataset = iemocap_msp-improv
private_dataset = iemocap
adv_dataset = msp-improv_crema-d

[model]
dropout = 0.2
fed_model = fed_avg
udp = True
num_sample = 10
privacy_budget = 25

[fed_avg]
lr = 0.0005
local_epochs = 1
global_epochs = 200

[fed_sgd]
lr = 0.05
lr = 0.1
local_epochs = 1
global_epochs = 200
184 changes: 155 additions & 29 deletions run.py
Original file line number Diff line number Diff line change
Expand Up @@ -24,6 +24,7 @@

print('Extract features')
print(cmd_str)
# Removed a leftover pdb.set_trace() here: it halts the pipeline at this
# step and was clearly a debugging artifact, not intended behavior.
os.system(cmd_str)

# 2. process training data
Expand All @@ -39,37 +40,162 @@
print(cmd_str)
os.system(cmd_str)

# 3.1 Train the federated SER (speech emotion recognition) model.
# The active dataset/feature combination is hard-coded for the current
# experiment run. Combinations tried in earlier runs (kept for reference):
#   datasets: 'iemocap', 'crema-d', 'msp-improv', 'iemocap_crema-d',
#             'iemocap_msp-improv', 'msp-improv_crema-d'
#   features: 'emobase', 'apc', 'vq_apc', 'tera', 'decoar2', 'npc'
if config['mode'].getboolean('ser_training'):
    for dataset in ['iemocap']:
        for feature in ['emobase']:
            # Hoist the federated-model name: it selects both the
            # --model_type flag and the config section for its hyperparams.
            fed_model = config['model']['fed_model']
            if config['model'].getboolean('udp'):
                # UDP variant takes an explicit privacy budget.
                cmd_str = 'taskset 300 python3 train/federated_ser_classifier_udp.py --dataset ' + dataset
                cmd_str += ' --privacy_budget ' + config['model']['privacy_budget']
            else:
                cmd_str = 'taskset 300 python3 train/federated_ser_classifier.py --dataset ' + dataset
            cmd_str += ' --feature_type ' + feature
            cmd_str += ' --dropout ' + config['model']['dropout']
            cmd_str += ' --norm znorm --optimizer adam'
            cmd_str += ' --model_type ' + fed_model
            cmd_str += ' --learning_rate ' + config[fed_model]['lr']
            cmd_str += ' --local_epochs ' + config[fed_model]['local_epochs']
            cmd_str += ' --num_epochs ' + config[fed_model]['global_epochs']
            cmd_str += ' --save_dir ' + config['dir']['save_dir']

            # Fixed typo in the status message: was 'Traing SER model'.
            print('Training SER model')
            print(cmd_str)
            os.system(cmd_str)

# 4. Train the attribute attack model against the federated updates.
# Each pair is [private_dataset, adversary_dataset]; all three
# leave-one-dataset-out pairings are run for every feature type.
if config['mode'].getboolean('attack_training'):
    for dataset_list in [['iemocap', 'msp-improv_crema-d'],
                         ['crema-d', 'iemocap_msp-improv'],
                         ['msp-improv', 'iemocap_crema-d']]:
        for feature in ['tera', 'decoar2', 'emobase', 'apc', 'vq_apc']:
            fed_model = config['model']['fed_model']
            # Bug fix: this read config['model']['attack_sample'], but the
            # [model] section in config.ini (updated in this same change)
            # only defines 'num_sample', so getint('attack_sample') would
            # raise NoOptionError. NOTE(review): confirm 'num_sample' is
            # the intended option here.
            if config['model'].getint('num_sample') == 1:
                cmd_str = 'taskset 500 python3 train/federated_attribute_attack.py'
            else:
                cmd_str = 'taskset 500 python3 train/federated_attribute_attack_multiple.py --num_sample ' + config['model']['num_sample']
            cmd_str += ' --norm znorm --optimizer adam'
            cmd_str += ' --dataset ' + dataset_list[0]
            cmd_str += ' --adv_dataset ' + dataset_list[1]
            cmd_str += ' --feature_type ' + feature
            cmd_str += ' --dropout ' + config['model']['dropout']
            cmd_str += ' --model_type ' + fed_model
            cmd_str += ' --learning_rate ' + config[fed_model]['lr']
            cmd_str += ' --local_epochs ' + config[fed_model]['local_epochs']
            cmd_str += ' --num_epochs ' + config[fed_model]['global_epochs']
            cmd_str += ' --leak_layer first --model_learning_rate 0.0001'
            cmd_str += ' --device 0'
            cmd_str += ' --save_dir ' + config['dir']['save_dir']

            # Fixed typo in the status message: was 'Traing Attack model'.
            print('Training Attack model')
            print(cmd_str)
            os.system(cmd_str)

# 5.1 Load the trained attack model and report per-speaker results,
# sweeping datasets, feature types, and privacy budgets.
if config['mode'].getboolean('attack_result'):
    for dataset_list in [['iemocap', 'msp-improv_crema-d'],
                         ['crema-d', 'iemocap_msp-improv'],
                         ['msp-improv', 'iemocap_crema-d']]:
        for feature in ['tera', 'decoar2', 'emobase', 'apc', 'vq_apc']:
            # Budget 0 means "no UDP noise": the flag is simply omitted.
            for privacy_budget in [0, 5, 10, 25, 50]:
                fed_model = config['model']['fed_model']
                cmd_str = 'taskset 500 python3 train/federated_attribute_attack_result_per_speaker.py'
                cmd_str += ' --norm znorm'
                cmd_str += ' --dataset ' + dataset_list[0]
                cmd_str += ' --adv_dataset ' + dataset_list[1]
                cmd_str += ' --feature_type ' + feature
                cmd_str += ' --dropout ' + config['model']['dropout']
                cmd_str += ' --model_type ' + fed_model
                cmd_str += ' --learning_rate ' + config[fed_model]['lr']
                cmd_str += ' --local_epochs ' + config[fed_model]['local_epochs']
                cmd_str += ' --num_epochs ' + config[fed_model]['global_epochs']
                cmd_str += ' --leak_layer first --device 0'
                cmd_str += ' --save_dir ' + config['dir']['save_dir']
                cmd_str += ' --num_sample ' + config['model']['num_sample']

                # Bug fix: was 'privacy_budget is not 0' — identity, not
                # equality, comparison against an int literal (and a
                # SyntaxWarning on Python >= 3.8). Use '!=' instead.
                if privacy_budget != 0:
                    cmd_str += ' --privacy_budget ' + str(privacy_budget)

                print('Attack model result')
                print(cmd_str)
                os.system(cmd_str)

# 5.2 Load the attack model and evaluate with finetuning.
# NOTE(review): 'attack_result_finetune' does not appear in the [mode]
# section of config.ini shown in this change; confirm the option exists,
# otherwise getboolean() raises NoOptionError.
if config['mode'].getboolean('attack_result_finetune'):
    # Only the iemocap / msp-improv_crema-d pairing is active for this run;
    # the other pairings were used in earlier experiments.
    for dataset_list in [['iemocap', 'msp-improv_crema-d']]:
        for feature in ['tera', 'decoar2', 'emobase', 'apc', 'vq_apc']:
            for privacy_budget in [10, 25, 50, 5]:
                fed_model = config['model']['fed_model']
                cmd_str = 'taskset 500 python3 train/federated_attribute_attack_result_finetune.py'
                cmd_str += ' --norm znorm'
                cmd_str += ' --dataset ' + dataset_list[0]
                cmd_str += ' --adv_dataset ' + dataset_list[1]
                cmd_str += ' --feature_type ' + feature
                cmd_str += ' --dropout ' + config['model']['dropout']
                cmd_str += ' --model_type ' + fed_model
                cmd_str += ' --learning_rate ' + config[fed_model]['lr']
                cmd_str += ' --local_epochs ' + config[fed_model]['local_epochs']
                cmd_str += ' --num_epochs ' + config[fed_model]['global_epochs']
                cmd_str += ' --num_sample ' + config['model']['num_sample']

                cmd_str += ' --leak_layer first --device 0'
                cmd_str += ' --save_dir ' + config['dir']['save_dir']

                # Bug fix: was 'privacy_budget is not 0' — identity, not
                # equality, comparison against an int literal (and a
                # SyntaxWarning on Python >= 3.8). Use '!=' instead.
                if privacy_budget != 0:
                    cmd_str += ' --privacy_budget ' + str(privacy_budget)

                print('Attack model result')
                print(cmd_str)
                os.system(cmd_str)

0 comments on commit dabc56f

Please sign in to comment.