@@ -77,6 +77,14 @@ pub fn restore_tower(tower_path: &Path, node_pubkey: &Pubkey) -> Option<Tower> {
     Tower::restore(&file_tower_storage, node_pubkey).ok()
 }
 
+pub fn remove_tower_if_exists(tower_path: &Path, node_pubkey: &Pubkey) {
+    let file_tower_storage = FileTowerStorage::new(tower_path.to_path_buf());
+    let filename = file_tower_storage.filename(node_pubkey);
+    if filename.exists() {
+        fs::remove_file(file_tower_storage.filename(node_pubkey)).unwrap();
+    }
+}
+
 pub fn remove_tower(tower_path: &Path, node_pubkey: &Pubkey) {
     let file_tower_storage = FileTowerStorage::new(tower_path.to_path_buf());
     fs::remove_file(file_tower_storage.filename(node_pubkey)).unwrap();
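
For orientation (not part of the diff itself): `remove_tower_if_exists` is the non-panicking sibling of `remove_tower`, useful when a test is not sure the validator ever persisted a tower file. A minimal usage sketch, assuming a test-local `ledger_path` and a freshly generated identity keypair (both bindings are hypothetical):

// Sketch only: `ledger_path` is assumed to be the validator's ledger directory.
let validator_identity = Arc::new(Keypair::new());

// Does nothing if no tower file was ever written for this pubkey...
remove_tower_if_exists(&ledger_path, &validator_identity.pubkey());

// ...whereas `remove_tower` would panic on a missing file via `unwrap()`.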
@@ -120,17 +128,18 @@ pub fn purge_slots_with_count(blockstore: &Blockstore, start_slot: Slot, slot_co
 pub fn wait_for_last_vote_in_tower_to_land_in_ledger(
     ledger_path: &Path,
     node_pubkey: &Pubkey,
-) -> Slot {
-    let (last_vote, _) = last_vote_in_tower(ledger_path, node_pubkey).unwrap();
-    loop {
-        // We reopen in a loop to make sure we get updates
-        let blockstore = open_blockstore(ledger_path);
-        if blockstore.is_full(last_vote) {
-            break;
+) -> Option<Slot> {
+    last_vote_in_tower(ledger_path, node_pubkey).map(|(last_vote, _)| {
+        loop {
+            // We reopen in a loop to make sure we get updates
+            let blockstore = open_blockstore(ledger_path);
+            if blockstore.is_full(last_vote) {
+                break;
+            }
+            sleep(Duration::from_millis(100));
         }
-        sleep(Duration::from_millis(100));
-    }
-    last_vote
+        last_vote
+    })
 }
 
 pub fn copy_blocks(end_slot: Slot, source: &Blockstore, dest: &Blockstore) {
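
A note on the changed signature (illustrative, not from the diff): the helper now returns `Option<Slot>` and only waits when a tower actually exists, so a caller that relied on the old panic-on-missing-tower behavior would presumably restore it with `expect`. A minimal sketch, assuming placeholder `ledger_path` and `pubkey` bindings:

// Sketch: keep the old "must have voted" semantics explicitly.
let landed_vote = wait_for_last_vote_in_tower_to_land_in_ledger(&ledger_path, &pubkey)
    .expect("validator was expected to have voted at least once");
// At this point the helper has already observed `blockstore.is_full(landed_vote)`.
assert!(open_blockstore(&ledger_path).is_full(landed_vote));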
@@ -390,40 +399,66 @@ pub fn run_cluster_partition<C>(
     on_partition_resolved(&mut cluster, &mut context);
 }
 
+pub struct ValidatorTestConfig {
+    pub validator_keypair: Arc<Keypair>,
+    pub validator_config: ValidatorConfig,
+    pub in_genesis: bool,
+}
+
 pub fn test_faulty_node(
     faulty_node_type: BroadcastStageType,
     node_stakes: Vec<u64>,
+    validator_test_configs: Option<Vec<ValidatorTestConfig>>,
+    custom_leader_schedule: Option<FixedSchedule>,
 ) -> (LocalCluster, Vec<Arc<Keypair>>) {
-    solana_logger::setup_with_default("solana_local_cluster=info");
     let num_nodes = node_stakes.len();
-    let mut validator_keys = Vec::with_capacity(num_nodes);
-    validator_keys.resize_with(num_nodes, || (Arc::new(Keypair::new()), true));
+    let validator_keys = validator_test_configs
+        .as_ref()
+        .map(|configs| {
+            configs
+                .iter()
+                .map(|config| (config.validator_keypair.clone(), config.in_genesis))
+                .collect()
+        })
+        .unwrap_or_else(|| {
+            let mut validator_keys = Vec::with_capacity(num_nodes);
+            validator_keys.resize_with(num_nodes, || (Arc::new(Keypair::new()), true));
+            validator_keys
+        });
+
     assert_eq!(node_stakes.len(), num_nodes);
     assert_eq!(validator_keys.len(), num_nodes);
 
-    // Use a fixed leader schedule so that only the faulty node gets leader slots.
-    let validator_to_slots = vec![(
-        validator_keys[0].0.as_ref().pubkey(),
-        solana_sdk::clock::DEFAULT_DEV_SLOTS_PER_EPOCH as usize,
-    )];
-    let leader_schedule = create_custom_leader_schedule(validator_to_slots.into_iter());
-    let fixed_leader_schedule = Some(FixedSchedule {
-        leader_schedule: Arc::new(leader_schedule),
+    let fixed_leader_schedule = custom_leader_schedule.unwrap_or_else(|| {
+        // Use a fixed leader schedule so that only the faulty node gets leader slots.
+        let validator_to_slots = vec![(
+            validator_keys[0].0.as_ref().pubkey(),
+            solana_sdk::clock::DEFAULT_DEV_SLOTS_PER_EPOCH as usize,
+        )];
+        let leader_schedule = create_custom_leader_schedule(validator_to_slots.into_iter());
+        FixedSchedule {
+            leader_schedule: Arc::new(leader_schedule),
+        }
     });
 
-    let error_validator_config = ValidatorConfig {
-        broadcast_stage_type: faulty_node_type,
-        fixed_leader_schedule: fixed_leader_schedule.clone(),
-        ..ValidatorConfig::default_for_test()
-    };
-    let mut validator_configs = Vec::with_capacity(num_nodes);
+    let mut validator_configs = validator_test_configs
+        .map(|configs| {
+            configs
+                .into_iter()
+                .map(|config| config.validator_config)
+                .collect()
+        })
+        .unwrap_or_else(|| {
+            let mut configs = Vec::with_capacity(num_nodes);
+            configs.resize_with(num_nodes, ValidatorConfig::default_for_test);
+            configs
+        });
 
     // First validator is the bootstrap leader with the malicious broadcast logic.
-    validator_configs.push(error_validator_config);
-    validator_configs.resize_with(num_nodes, || ValidatorConfig {
-        fixed_leader_schedule: fixed_leader_schedule.clone(),
-        ..ValidatorConfig::default_for_test()
-    });
+    validator_configs[0].broadcast_stage_type = faulty_node_type;
+    for config in &mut validator_configs {
+        config.fixed_leader_schedule = Some(fixed_leader_schedule.clone());
+    }
 
     let mut cluster_config = ClusterConfig {
         cluster_lamports: 10_000,
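
To show how the new parameters fit together (a sketch under assumptions, not a call site from this PR): existing callers can keep passing `None` for both new arguments, while a test that wants per-validator control can build its own `ValidatorTestConfig`s. The stake values, node count, and the choice of `BroadcastFakeShreds` below are illustrative only:

// Sketch: drive `test_faulty_node` through the new configuration hooks.
let node_stakes = vec![100, 50];
let validator_test_configs: Vec<ValidatorTestConfig> = node_stakes
    .iter()
    .map(|_| ValidatorTestConfig {
        validator_keypair: Arc::new(Keypair::new()),
        validator_config: ValidatorConfig::default_for_test(),
        in_genesis: true,
    })
    .collect();

// `None` for the schedule falls back to the built-in "only the faulty
// node gets leader slots" schedule constructed inside the function.
let (_cluster, _validator_keys) = test_faulty_node(
    BroadcastStageType::BroadcastFakeShreds,
    node_stakes,
    Some(validator_test_configs),
    None,
);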