Skip to content
This repository was archived by the owner on Jan 22, 2025. It is now read-only.

Commit f56149f

Browse files
committed
PR comments
1 parent 3141b4b commit f56149f

File tree

3 files changed

+34
-24
lines changed

3 files changed

+34
-24
lines changed

local-cluster/tests/common/mod.rs

Lines changed: 11 additions & 10 deletions
Original file line numberDiff line numberDiff line change
@@ -123,17 +123,18 @@ pub fn purge_slots_with_count(blockstore: &Blockstore, start_slot: Slot, slot_co
123123
pub fn wait_for_last_vote_in_tower_to_land_in_ledger(
124124
ledger_path: &Path,
125125
node_pubkey: &Pubkey,
126-
) -> Slot {
127-
let (last_vote, _) = last_vote_in_tower(ledger_path, node_pubkey).unwrap();
128-
loop {
129-
// We reopen in a loop to make sure we get updates
130-
let blockstore = open_blockstore(ledger_path);
131-
if blockstore.is_full(last_vote) {
132-
break;
126+
) -> Option<Slot> {
127+
last_vote_in_tower(ledger_path, node_pubkey).map(|(last_vote, _)| {
128+
loop {
129+
// We reopen in a loop to make sure we get updates
130+
let blockstore = open_blockstore(ledger_path);
131+
if blockstore.is_full(last_vote) {
132+
break;
133+
}
134+
sleep(Duration::from_millis(100));
133135
}
134-
sleep(Duration::from_millis(100));
135-
}
136-
last_vote
136+
last_vote
137+
})
137138
}
138139

139140
pub fn copy_blocks(end_slot: Slot, source: &Blockstore, dest: &Blockstore) {

local-cluster/tests/local_cluster.rs

Lines changed: 18 additions & 10 deletions
Original file line numberDiff line numberDiff line change
@@ -3264,7 +3264,8 @@ fn do_test_optimistic_confirmation_violation_with_or_without_tower(with_tower: b
32643264
{
32653265
// Find latest vote in B, and wait for it to reach blockstore
32663266
let b_last_vote =
3267-
wait_for_last_vote_in_tower_to_land_in_ledger(&val_b_ledger_path, &validator_b_pubkey);
3267+
wait_for_last_vote_in_tower_to_land_in_ledger(&val_b_ledger_path, &validator_b_pubkey)
3268+
.unwrap();
32683269

32693270
// Now we copy these blocks to A
32703271
let b_blockstore = open_blockstore(&val_b_ledger_path);
@@ -3474,11 +3475,13 @@ fn test_fork_choice_refresh_old_votes() {
34743475
let lighter_fork_latest_vote = wait_for_last_vote_in_tower_to_land_in_ledger(
34753476
&lighter_fork_ledger_path,
34763477
&context.lighter_fork_validator_key,
3477-
);
3478+
)
3479+
.unwrap();
34783480
let heaviest_fork_latest_vote = wait_for_last_vote_in_tower_to_land_in_ledger(
34793481
&heaviest_ledger_path,
34803482
&context.heaviest_validator_key,
3481-
);
3483+
)
3484+
.unwrap();
34823485

34833486
// Open ledgers
34843487
let smallest_blockstore = open_blockstore(&smallest_ledger_path);
@@ -4434,7 +4437,8 @@ fn test_slot_hash_expiry() {
44344437
let mut last_vote_on_a;
44354438
// Keep A running for a while longer so the majority fork has some decent size
44364439
loop {
4437-
last_vote_on_a = wait_for_last_vote_in_tower_to_land_in_ledger(&a_ledger_path, &a_pubkey);
4440+
last_vote_on_a =
4441+
wait_for_last_vote_in_tower_to_land_in_ledger(&a_ledger_path, &a_pubkey).unwrap();
44384442
if last_vote_on_a
44394443
>= common_ancestor_slot + 2 * (solana_sdk::slot_hashes::get_entries() as u64)
44404444
{
@@ -4460,7 +4464,8 @@ fn test_slot_hash_expiry() {
44604464
info!("Allowing B to fork");
44614465
loop {
44624466
let blockstore = open_blockstore(&b_ledger_path);
4463-
let last_vote = wait_for_last_vote_in_tower_to_land_in_ledger(&b_ledger_path, &b_pubkey);
4467+
let last_vote =
4468+
wait_for_last_vote_in_tower_to_land_in_ledger(&b_ledger_path, &b_pubkey).unwrap();
44644469
let mut ancestors = AncestorIterator::new(last_vote, &blockstore);
44654470
if let Some(index) = ancestors.position(|x| x == common_ancestor_slot) {
44664471
if index > 7 {
@@ -4674,7 +4679,8 @@ fn test_duplicate_with_pruned_ancestor() {
46744679
last_minority_vote
46754680
);
46764681
let last_minority_vote =
4677-
wait_for_last_vote_in_tower_to_land_in_ledger(&minority_ledger_path, &minority_pubkey);
4682+
wait_for_last_vote_in_tower_to_land_in_ledger(&minority_ledger_path, &minority_pubkey)
4683+
.unwrap();
46784684
let minority_validator_info = cluster.exit_node(&minority_pubkey);
46794685

46804686
info!("Truncating majority validator ledger to {fork_slot}");
@@ -4720,7 +4726,8 @@ fn test_duplicate_with_pruned_ancestor() {
47204726
}
47214727

47224728
let last_majority_vote =
4723-
wait_for_last_vote_in_tower_to_land_in_ledger(&majority_ledger_path, &majority_pubkey);
4729+
wait_for_last_vote_in_tower_to_land_in_ledger(&majority_ledger_path, &majority_pubkey)
4730+
.unwrap();
47244731
info!(
47254732
"Creating duplicate block built off of pruned branch for our node.
47264733
Last majority vote {last_majority_vote}, Last minority vote {last_minority_vote}"
@@ -5222,7 +5229,8 @@ fn test_duplicate_shreds_switch_failure() {
52225229
// The `target_switch_fork_validator_pubkey` fork is necessary in 2. to force the validator stall trying to switch
52235230
// vote on that other fork and prevent the validator from making a freebie vote from `A` and allowing consensus to continue.
52245231

5225-
// It's important we give the `duplicate_fork_validator1_pubkey` very few leader slots so that:
5232+
// It's important we don't give the `duplicate_fork_validator1_pubkey` leader slots until a certain number
5233+
// of slots have elapsed to ensure:
52265234
// 1. We have ample time to ensure they don't have a chance to make a block until after 2, when they see the block is duplicate.
52275235
// Otherwise, they'll build the block on top of the duplicate block, which will possibly include a vote for the duplicate block.
52285236
// We want to avoid this because this will make fork choice pick the duplicate block.
@@ -5417,11 +5425,11 @@ fn test_duplicate_shreds_switch_failure() {
54175425

54185426
info!("Waiting for switch fork to make block past duplicate fork");
54195427
loop {
5420-
let last_vote = last_vote_in_tower(
5428+
let last_vote = wait_for_last_vote_in_tower_to_land_in_ledger(
54215429
&target_switch_fork_validator_ledger_path,
54225430
&target_switch_fork_validator_pubkey,
54235431
);
5424-
if let Some((latest_vote_slot, _hash)) = last_vote {
5432+
if let Some(latest_vote_slot) = last_vote {
54255433
if latest_vote_slot > dup_slot {
54265434
let blockstore = open_blockstore(&target_switch_fork_validator_ledger_path);
54275435
let ancestor_slots: HashSet<Slot> =

turbine/src/broadcast_stage/broadcast_duplicates_run.rs

Lines changed: 5 additions & 4 deletions
Original file line numberDiff line numberDiff line change
@@ -24,7 +24,8 @@ pub enum ClusterPartition {
2424

2525
#[derive(Clone, Debug)]
2626
pub struct BroadcastDuplicatesConfig {
27-
/// Amount of stake (excluding the leader) to send different version of slots to.
27+
/// Amount of stake (excluding the leader) or a set of validator pubkeys
28+
/// to send a duplicate version of some slots to.
2829
/// Note this is sampled from a list of stakes sorted least to greatest.
2930
pub partition: ClusterPartition,
3031
/// If passed `Some(receiver)`, will signal all the duplicate slots via the given
@@ -295,7 +296,7 @@ impl BroadcastRun for BroadcastDuplicatesRun {
295296
let cluster_partition: HashSet<Pubkey> = {
296297
match &self.config.partition {
297298
ClusterPartition::Stake(partition_total_stake) => {
298-
let mut cumilative_stake = 0;
299+
let mut cumulative_stake = 0;
299300
let epoch = root_bank.get_leader_schedule_epoch(slot);
300301
root_bank
301302
.epoch_staked_nodes(epoch)
@@ -304,8 +305,8 @@ impl BroadcastRun for BroadcastDuplicatesRun {
304305
.filter(|(pubkey, _)| **pubkey != self_pubkey)
305306
.sorted_by_key(|(pubkey, stake)| (**stake, **pubkey))
306307
.take_while(|(_, stake)| {
307-
cumilative_stake += *stake;
308-
cumilative_stake <= *partition_total_stake
308+
cumulative_stake += *stake;
309+
cumulative_stake <= *partition_total_stake
309310
})
310311
.map(|(pubkey, _)| *pubkey)
311312
.collect()

0 commit comments

Comments (0)